diff --git a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/BehaviorTestKit.scala b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/BehaviorTestKit.scala index 0cf95d95648..385b6549e2f 100644 --- a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/BehaviorTestKit.scala +++ b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/BehaviorTestKit.scala @@ -134,7 +134,7 @@ abstract class BehaviorTestKit[T] { /** * Returns the current behavior as it was returned from processing the previous message. - * For example if [[Behaviors.unhandled]] is returned it will be kept here, but not in + * For example if [[pekko.actor.typed.javadsl.Behaviors.unhandled]] is returned it will be kept here, but not in * [[currentBehavior]]. */ def returnedBehavior: Behavior[T] diff --git a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala index 83ca1037e47..1d7bf621221 100644 --- a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala +++ b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/javadsl/TestProbe.scala @@ -100,7 +100,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Obtain time remaining for execution of the innermost enclosing `within` - * block or throw an [[AssertionError]] if no `within` block surrounds this + * block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this * call. */ def getRemaining: Duration @@ -143,7 +143,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Receive one message from the test actor and assert that it equals the * given object. 
Wait time is bounded by the given duration, with an - * [[AssertionError]] being thrown in case of timeout. + * [[java.lang.AssertionError]] being thrown in case of timeout. * * @return the received object */ @@ -152,7 +152,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Receive one message from the test actor and assert that it equals the * given object. Wait time is bounded by the given duration, with an - * [[AssertionError]] being thrown in case of timeout. + * [[java.lang.AssertionError]] being thrown in case of timeout. * * @return the received object */ @@ -190,7 +190,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Receive one message of type `M`. Wait time is bounded by the `max` duration, - * with an [[AssertionError]] raised in case of timeout. + * with an [[java.lang.AssertionError]] raised in case of timeout. */ def receiveMessage(max: Duration): M @@ -235,7 +235,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef /** * Expect the given actor to be stopped or stop within the given timeout or - * throw an [[AssertionError]]. + * throw an [[java.lang.AssertionError]]. * * Note that the timeout is scaled using the configuration entry "pekko.actor.testkit.typed.timefactor". */ diff --git a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/BehaviorTestKit.scala b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/BehaviorTestKit.scala index 54b9db0393f..a69b05778b6 100644 --- a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/BehaviorTestKit.scala +++ b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/BehaviorTestKit.scala @@ -126,7 +126,7 @@ trait BehaviorTestKit[T] { /** * Returns the current behavior as it was returned from processing the previous message. 
- * For example if [[Behaviors.unhandled]] is returned it will be kept here, but not in + * For example if [[pekko.actor.typed.scaladsl.Behaviors.unhandled]] is returned it will be kept here, but not in * [[currentBehavior]]. */ def returnedBehavior: Behavior[T] diff --git a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala index c505bab9c24..322f9a71723 100644 --- a/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala +++ b/actor-testkit-typed/src/main/scala/org/apache/pekko/actor/testkit/typed/scaladsl/TestProbe.scala @@ -86,7 +86,7 @@ object TestProbe { /** * Obtain time remaining for execution of the innermost enclosing `within` - * block or throw an [[AssertionError]] if no `within` block surrounds this + * block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this * call. */ def remaining: FiniteDuration @@ -128,7 +128,7 @@ object TestProbe { /** * Receive one message from the test actor and assert that it equals the * given object. Wait time is bounded by the given duration, with an - * [[AssertionError]] being thrown in case of timeout. + * [[java.lang.AssertionError]] being thrown in case of timeout. * * @return the received object */ @@ -137,7 +137,7 @@ object TestProbe { /** * Receive one message from the test actor and assert that it equals the * given object. Wait time is bounded by the given duration, with an - * [[AssertionError]] being thrown in case of timeout. + * [[java.lang.AssertionError]] being thrown in case of timeout. * * @return the received object */ @@ -172,7 +172,7 @@ object TestProbe { /** * Receive one message of type `M`. Wait time is bounded by the `max` duration, - * with an [[AssertionError]] raised in case of timeout. + * with an [[java.lang.AssertionError]] raised in case of timeout. 
*/ def receiveMessage(max: FiniteDuration): M @@ -225,7 +225,7 @@ object TestProbe { /** * Expect the given actor to be stopped or stop within the given timeout or - * throw an [[AssertionError]]. + * throw an [[java.lang.AssertionError]]. */ def expectTerminated[U](actorRef: ActorRef[U], max: FiniteDuration): Unit diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorRef.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorRef.scala index 364e9365fe5..b25f0560186 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorRef.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/ActorRef.scala @@ -49,8 +49,8 @@ trait ActorRef[-T] extends RecipientRef[T] with java.lang.Comparable[ActorRef[_] /** * Unsafe utility method for widening the type accepted by this ActorRef; * provided to avoid having to use `asInstanceOf` on the full reference type, - * which would unfortunately also work on non-ActorRefs. Use it with caution,it may cause a [[ClassCastException]] when you send a message - * to the widened [[ActorRef[U]]]. + * which would unfortunately also work on non-ActorRefs. Use it with caution,it may cause a [[java.lang.ClassCastException]] when you send a message + * to the widened [[ActorRef ActorRef[U]]]. */ def unsafeUpcast[U >: T @uncheckedVariance]: ActorRef[U] diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Behavior.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Behavior.scala index b69017393e4..f9e4c80bcac 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Behavior.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Behavior.scala @@ -64,7 +64,7 @@ abstract class Behavior[T](private[pekko] val _tag: Int) { behavior => * provided as an alternative to the universally available `asInstanceOf`, which * casts the entire type rather than just the type parameter. 
* Typically used to upcast a type, for instance from `Nothing` to some type `U`. - * Use it with caution, it may lead to a [[ClassCastException]] when you send a message + * Use it with caution, it may lead to a [[java.lang.ClassCastException]] when you send a message * to the resulting [[Behavior[U]]]. */ @InternalApi private[pekko] final def unsafeCast[U]: Behavior[U] = this.asInstanceOf[Behavior[U]] diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Scheduler.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Scheduler.scala index f05a85911ce..e0b75376548 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Scheduler.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/Scheduler.scala @@ -34,7 +34,7 @@ trait Scheduler { * Scala API: Schedules a Runnable to be run once with a delay, i.e. a time period that * has to pass before the runnable is executed. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` or `ActorContext.scheduleOnce` should be preferred. @@ -45,7 +45,7 @@ trait Scheduler { * Java API: Schedules a Runnable to be run once with a delay, i.e. a time period that * has to pass before the runnable is executed. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` or `ActorContext.scheduleOnce` should be preferred. @@ -67,7 +67,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. 
* - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` should be preferred. @@ -91,7 +91,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling in actors `Behaviors.withTimers` should be preferred. @@ -128,7 +128,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` should be preferred. @@ -162,7 +162,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling in actors `Behaviors.withTimers` should be preferred. 
diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala index a72a5da3802..bbe094f3c21 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/delivery/WorkPullingProducerController.scala @@ -45,7 +45,7 @@ import pekko.util.OptionConverters._ * messages may be routed to two different workers and processed independent of each other. * * A worker actor (consumer) and its `ConsumerController` is dynamically registered to the - * `WorkPullingProducerController` via a [[ServiceKey]]. It will register itself to the + * `WorkPullingProducerController` via a [[pekko.actor.typed.receptionist.ServiceKey]]. It will register itself to the * * [[pekko.actor.typed.receptionist.Receptionist]], and the `WorkPullingProducerController` * subscribes to the same key to find active workers. In this way workers can be dynamically * added or removed from any node in the cluster. diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/ActorContext.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/ActorContext.scala index d069ae7ffa8..7dab2e674f8 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/ActorContext.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/ActorContext.scala @@ -173,7 +173,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi * *Warning*: This method is not thread-safe and must not be accessed from threads other * than the ordinary actor message processing thread, such as [[java.util.concurrent.CompletionStage]] callbacks. 
* - * @throws IllegalArgumentException if the given actor ref is not a direct child of this actor + * @throws java.lang.IllegalArgumentException if the given actor ref is not a direct child of this actor */ def stop[U](child: ActorRef[U]): Unit @@ -184,7 +184,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi * * `watch` is idempotent if it is not mixed with `watchWith`. * - * It will fail with an [[IllegalStateException]] if the same subject was watched before using `watchWith`. + * It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watchWith`. * To clear the termination message, unwatch first. * * *Warning*: This method is not thread-safe and must not be accessed from threads other @@ -199,7 +199,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi * * `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`. * - * It will fail with an [[IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with + * It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with * another termination message. To change the termination message, unwatch first. * * *Warning*: This method is not thread-safe and must not be accessed from threads other diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/StashBuffer.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/StashBuffer.scala index ccd71545cc5..f63d7d5c17c 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/StashBuffer.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/javadsl/StashBuffer.scala @@ -78,7 +78,7 @@ import java.util.function.{ Function => JFunction, Predicate } * Return the first element of the message buffer without removing it. 
* * @return the first element or throws `NoSuchElementException` if the buffer is empty - * @throws `NoSuchElementException` if the buffer is empty + * @throws java.util.NoSuchElementException if the buffer is empty */ def head: T diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/ActorContext.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/ActorContext.scala index 76d8d520faf..b8448cc2a75 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/ActorContext.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/ActorContext.scala @@ -156,7 +156,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi * *Warning*: This method is not thread-safe and must not be accessed from threads other * than the ordinary actor message processing thread, such as [[scala.concurrent.Future]] callbacks. * - * @throws IllegalArgumentException if the given actor ref is not a direct child of this actor + * @throws java.lang.IllegalArgumentException if the given actor ref is not a direct child of this actor */ def stop[U](child: ActorRef[U]): Unit @@ -167,7 +167,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi * * `watch` is idempotent if it is not mixed with `watchWith`. * - * It will fail with an [[IllegalStateException]] if the same subject was watched before using `watchWith`. + * It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watchWith`. * To clear the termination message, unwatch first. * * *Warning*: This method is not thread-safe and must not be accessed from threads other @@ -182,7 +182,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi * * `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`. 
* - * It will fail with an [[IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with + * It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with * another termination message. To change the termination message, unwatch first. * * *Warning*: This method is not thread-safe and must not be accessed from threads other diff --git a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/StashBuffer.scala b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/StashBuffer.scala index e36c93c8d6f..acc28f52d30 100644 --- a/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/StashBuffer.scala +++ b/actor-typed/src/main/scala/org/apache/pekko/actor/typed/scaladsl/StashBuffer.scala @@ -90,7 +90,7 @@ import pekko.annotation.{ DoNotInherit, InternalApi } * Return the first element of the message buffer without removing it. * * @return the first element or throws `NoSuchElementException` if the buffer is empty - * @throws `NoSuchElementException` if the buffer is empty + * @throws java.util.NoSuchElementException if the buffer is empty */ def head: T diff --git a/actor/src/main/java/org/apache/pekko/japi/pf/DeciderBuilder.java b/actor/src/main/java/org/apache/pekko/japi/pf/DeciderBuilder.java index 950fdc88c2b..35f342c5eec 100644 --- a/actor/src/main/java/org/apache/pekko/japi/pf/DeciderBuilder.java +++ b/actor/src/main/java/org/apache/pekko/japi/pf/DeciderBuilder.java @@ -16,7 +16,7 @@ import static org.apache.pekko.actor.SupervisorStrategy.Directive; /** - * Used for building a partial function for {@link org.apache.pekko.actor.Actor#supervisorStrategy() + * Used for building a partial function for {@link org.apache.pekko.actor.Actor#supervisorStrategy * Actor.supervisorStrategy()}. * Inside an actor you can use it like this with Java 8 to define * your supervisorStrategy. 
* diff --git a/actor/src/main/java/org/apache/pekko/japi/pf/FSMStopBuilder.java b/actor/src/main/java/org/apache/pekko/japi/pf/FSMStopBuilder.java index d6291c79217..6baeb9c839c 100644 --- a/actor/src/main/java/org/apache/pekko/japi/pf/FSMStopBuilder.java +++ b/actor/src/main/java/org/apache/pekko/japi/pf/FSMStopBuilder.java @@ -28,7 +28,7 @@ public class FSMStopBuilder { private UnitPFBuilder> builder = new UnitPFBuilder>(); /** - * Add a case statement that matches on an {@link FSM.Reason}. + * Add a case statement that matches on an {@link org.apache.pekko.actor.FSM.Reason}. * * @param reason the reason for the termination * @param apply an action to apply to the event and state data if there is a match diff --git a/actor/src/main/java/org/apache/pekko/japi/pf/Match.java b/actor/src/main/java/org/apache/pekko/japi/pf/Match.java index ef1e0dc949f..f18f4699a91 100644 --- a/actor/src/main/java/org/apache/pekko/japi/pf/Match.java +++ b/actor/src/main/java/org/apache/pekko/japi/pf/Match.java @@ -126,7 +126,7 @@ public static final Match create(PFBuilder builder) { * * @param i the argument to apply the match to * @return the result of the application - * @throws MatchError if there is no match + * @throws scala.MatchError if there is no match */ public R match(I i) throws MatchError { return statements.apply(i); diff --git a/actor/src/main/java/org/apache/pekko/japi/pf/ReceiveBuilder.java b/actor/src/main/java/org/apache/pekko/japi/pf/ReceiveBuilder.java index 48d7fd8cc26..3e337fcf073 100644 --- a/actor/src/main/java/org/apache/pekko/japi/pf/ReceiveBuilder.java +++ b/actor/src/main/java/org/apache/pekko/japi/pf/ReceiveBuilder.java @@ -19,7 +19,7 @@ /** * Used for building a partial function for {@link - * org.apache.pekko.actor.AbstractActor#createReceive() AbstractActor.createReceive()}. + * org.apache.pekko.actor.AbstractActor#createReceive AbstractActor.createReceive()}. * *

There is both a match on type only, and a match on type and predicate. * diff --git a/actor/src/main/scala/org/apache/pekko/actor/AbstractFSM.scala b/actor/src/main/scala/org/apache/pekko/actor/AbstractFSM.scala index 4d899fe882c..d9b63c6a3e2 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/AbstractFSM.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/AbstractFSM.scala @@ -127,8 +127,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { } /** - * Set initial state. Call this method from the constructor before the [[#initialize]] method. - * If different state is needed after a restart this method, followed by [[#initialize]], can + * Set initial state. Call this method from the constructor before the [[initialize]] method. + * If different state is needed after a restart this method, followed by [[initialize]], can * be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]]. * * @param stateName initial state designator @@ -138,8 +138,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { startWith(stateName, stateData, null: FiniteDuration) /** - * Set initial state. Call this method from the constructor before the [[#initialize]] method. - * If different state is needed after a restart this method, followed by [[#initialize]], can + * Set initial state. Call this method from the constructor before the [[initialize]] method. + * If different state is needed after a restart this method, followed by [[initialize]], can * be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]]. * * @param stateName initial state designator @@ -150,8 +150,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { super.startWith(stateName, stateData, Option(timeout)) /** - * Set initial state. Call this method from the constructor before the [[#initialize]] method. 
- * If different state is needed after a restart this method, followed by [[#initialize]], can + * Set initial state. Call this method from the constructor before the [[initialize]] method. + * If different state is needed after a restart this method, followed by [[initialize]], can * be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]]. * * @param stateName initial state designator diff --git a/actor/src/main/scala/org/apache/pekko/actor/ActorCell.scala b/actor/src/main/scala/org/apache/pekko/actor/ActorCell.scala index 986b2182e0b..b6a085bacdd 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/ActorCell.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/ActorCell.scala @@ -201,7 +201,7 @@ trait ActorContext extends ActorRefFactory with ClassicActorContextProvider { * * `watch` is idempotent if it is not mixed with `watchWith`. * - * It will fail with an [[IllegalStateException]] if the same subject was watched before using `watchWith`. + * It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watchWith`. * To clear the termination message, unwatch first. * * *Warning*: This method is not thread-safe and must not be accessed from threads other @@ -218,7 +218,7 @@ trait ActorContext extends ActorRefFactory with ClassicActorContextProvider { * * `watchWith` is idempotent if it is called with the same `msg` and not mixed with `watch`. * - * It will fail with an [[IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with + * It will fail with an [[java.lang.IllegalStateException]] if the same subject was watched before using `watch` or `watchWith` with * another termination message. To change the termination message, unwatch first. 
* * *Warning*: This method is not thread-safe and must not be accessed from threads other diff --git a/actor/src/main/scala/org/apache/pekko/actor/ActorRefProvider.scala b/actor/src/main/scala/org/apache/pekko/actor/ActorRefProvider.scala index 14b4c528cb2..79c0e7c811e 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/ActorRefProvider.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/ActorRefProvider.scala @@ -224,7 +224,7 @@ trait ActorRefFactory { * * @throws pekko.ConfigurationException if deployment, dispatcher * or mailbox configuration is wrong - * @throws UnsupportedOperationException if invoked on an ActorSystem that + * @throws java.lang.UnsupportedOperationException if invoked on an ActorSystem that * uses a custom user guardian */ def actorOf(props: Props): ActorRef @@ -240,7 +240,7 @@ trait ActorRefFactory { * invalid or already in use * @throws pekko.ConfigurationException if deployment, dispatcher * or mailbox configuration is wrong - * @throws UnsupportedOperationException if invoked on an ActorSystem that + * @throws java.lang.UnsupportedOperationException if invoked on an ActorSystem that * uses a custom user guardian */ def actorOf(props: Props, name: String): ActorRef diff --git a/actor/src/main/scala/org/apache/pekko/actor/FSM.scala b/actor/src/main/scala/org/apache/pekko/actor/FSM.scala index 79ae1e0f4fe..7ee801a2476 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/FSM.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/FSM.scala @@ -476,8 +476,8 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { register(stateName, stateFunction, Option(stateTimeout)) /** - * Set initial state. Call this method from the constructor before the [[#initialize]] method. - * If different state is needed after a restart this method, followed by [[#initialize]], can + * Set initial state. Call this method from the constructor before the [[initialize]] method. 
+ * If different state is needed after a restart this method, followed by [[initialize]], can * be used in the actor life cycle hooks [[pekko.actor.Actor#preStart]] and [[pekko.actor.Actor#postRestart]]. * * @param stateName initial state designator @@ -941,7 +941,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { } /** - * By default [[FSM.Failure]] is logged at error level and other reason + * By default [[pekko.actor.FSM.Failure]] is logged at error level and other reason * types are not logged. It is possible to override this behavior. */ protected def logTermination(reason: Reason): Unit = reason match { diff --git a/actor/src/main/scala/org/apache/pekko/actor/Scheduler.scala b/actor/src/main/scala/org/apache/pekko/actor/Scheduler.scala index e71d7bfbc92..ee8b63ae343 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/Scheduler.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/Scheduler.scala @@ -75,7 +75,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `with Timers` should be preferred. @@ -116,7 +116,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `AbstractActorWithTimers` should be preferred. @@ -215,7 +215,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. 
the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `with Timers` should be preferred. @@ -251,7 +251,7 @@ trait Scheduler { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `AbstractActorWithTimers` should be preferred. @@ -415,7 +415,7 @@ trait Scheduler { * Scala API: Schedules a message to be sent once with a delay, i.e. a time period that has * to pass before the message is sent. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `with Timers` should be preferred. @@ -433,7 +433,7 @@ trait Scheduler { * Java API: Schedules a message to be sent once with a delay, i.e. a time period that has * to pass before the message is sent. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `AbstractActorWithTimers` should be preferred. @@ -452,7 +452,7 @@ trait Scheduler { * Scala API: Schedules a function to be run once with a delay, i.e. a time period that has * to pass before the function is run. 
* - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `with Timers` should be preferred. @@ -466,7 +466,7 @@ trait Scheduler { * Scala API: Schedules a Runnable to be run once with a delay, i.e. a time period that * has to pass before the runnable is executed. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `with Timers` should be preferred. @@ -477,7 +477,7 @@ trait Scheduler { * Java API: Schedules a Runnable to be run once with a delay, i.e. a time period that * has to pass before the runnable is executed. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `AbstractActorWithTimers` should be preferred. diff --git a/actor/src/main/scala/org/apache/pekko/actor/Stash.scala b/actor/src/main/scala/org/apache/pekko/actor/Stash.scala index a503878e59b..a56f9bfc497 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/Stash.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/Stash.scala @@ -176,8 +176,8 @@ private[pekko] trait StashSupport { * Adds the current message (the message that the actor received last) to the * actor's stash. 
* - * @throws StashOverflowException in case of a stash capacity violation - * @throws IllegalStateException if the same message is stashed more than once + * @throws pekko.actor.StashOverflowException in case of a stash capacity violation + * @throws java.lang.IllegalStateException if the same message is stashed more than once */ def stash(): Unit = { val currMsg = actorCell.currentMessage diff --git a/actor/src/main/scala/org/apache/pekko/actor/dungeon/DeathWatch.scala b/actor/src/main/scala/org/apache/pekko/actor/dungeon/DeathWatch.scala index 26eac3b3da7..4c67e848b9d 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/dungeon/DeathWatch.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/dungeon/DeathWatch.scala @@ -23,7 +23,7 @@ import pekko.util.unused private[pekko] trait DeathWatch { this: ActorCell => /** - * This map holds a [[None]] for actors for which we send a [[Terminated]] notification on termination, + * This map holds a [[scala.None]] for actors for which we send a [[Terminated]] notification on termination, * ``Some(message)`` for actors for which we send a custom termination message. 
*/ private var watching: Map[ActorRef, Option[Any]] = Map.empty diff --git a/actor/src/main/scala/org/apache/pekko/actor/setup/ActorSystemSetup.scala b/actor/src/main/scala/org/apache/pekko/actor/setup/ActorSystemSetup.scala index dd01a021caa..db019283793 100644 --- a/actor/src/main/scala/org/apache/pekko/actor/setup/ActorSystemSetup.scala +++ b/actor/src/main/scala/org/apache/pekko/actor/setup/ActorSystemSetup.scala @@ -23,15 +23,15 @@ import pekko.annotation.InternalApi import pekko.util.OptionConverters._ /** - * Marker supertype for a setup part that can be put inside [[ActorSystemSetup]], if a specific concrete setup + * Marker supertype for a setup part that can be put inside [[pekko.actor.setup.ActorSystemSetup]], if a specific concrete setup * is not specified in the actor system setup that means defaults are used (usually from the config file) - no concrete - * setup instance should be mandatory in the [[ActorSystemSetup]] that an actor system is created with. + * setup instance should be mandatory in the [[pekko.actor.setup.ActorSystemSetup]] that an actor system is created with. */ abstract class Setup { /** - * Construct an [[ActorSystemSetup]] with this setup combined with another one. Allows for - * fluent creation of settings. If `other` is a setting of the same concrete [[Setup]] as this + * Construct an [[pekko.actor.setup.ActorSystemSetup]] with this setup combined with another one. Allows for + * fluent creation of settings. If `other` is a setting of the same concrete [[pekko.actor.setup.Setup]] as this * it will replace this. 
*/ final def and(other: Setup): ActorSystemSetup = ActorSystemSetup(this, other) @@ -43,13 +43,13 @@ object ActorSystemSetup { val empty = new ActorSystemSetup(Map.empty) /** - * Scala API: Create an [[ActorSystemSetup]] containing all the provided settings + * Scala API: Create an [[pekko.actor.setup.ActorSystemSetup]] containing all the provided settings */ def apply(settings: Setup*): ActorSystemSetup = new ActorSystemSetup(settings.map(s => s.getClass -> s).toMap) /** - * Java API: Create an [[ActorSystemSetup]] containing all the provided settings + * Java API: Create an [[pekko.actor.setup.ActorSystemSetup]] containing all the provided settings */ @varargs def create(settings: Setup*): ActorSystemSetup = apply(settings: _*) @@ -58,20 +58,20 @@ object ActorSystemSetup { /** * A set of setup settings for programmatic configuration of the actor system. * - * Constructor is *Internal API*. Use the factory methods [[ActorSystemSetup#create]] and [[pekko.actor.Actor#apply]] to create + * Constructor is *Internal API*. Use the factory methods [[ActorSystemSetup#create]] and [[ActorSystemSetup#apply]] to create * instances. */ final class ActorSystemSetup private[pekko] (@InternalApi private[pekko] val setups: Map[Class[_], AnyRef]) { /** - * Java API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings. + * Java API: Extract a concrete [[pekko.actor.setup.Setup]] of type `T` if it is defined in the settings. */ def get[T <: Setup](clazz: Class[T]): Optional[T] = { setups.get(clazz).map(_.asInstanceOf[T]).toJava } /** - * Scala API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings. + * Scala API: Extract a concrete [[pekko.actor.setup.Setup]] of type `T` if it is defined in the settings. */ def get[T <: Setup: ClassTag]: Option[T] = { val clazz = implicitly[ClassTag[T]].runtimeClass @@ -79,7 +79,7 @@ final class ActorSystemSetup private[pekko] (@InternalApi private[pekko] val set } /** - * Add a concrete [[Setup]]. 
If a setting of the same concrete [[Setup]] already is + * Add a concrete [[pekko.actor.setup.Setup]]. If a setting of the same concrete [[pekko.actor.setup.Setup]] already is * present it will be replaced. */ def withSetup[T <: Setup](t: T): ActorSystemSetup = { @@ -88,7 +88,7 @@ final class ActorSystemSetup private[pekko] (@InternalApi private[pekko] val set /** * alias for `withSetup` allowing for fluent combination of settings: `a and b and c`, where `a`, `b` and `c` are - * concrete [[Setup]] instances. If a setting of the same concrete [[Setup]] already is + * concrete [[pekko.actor.setup.Setup]] instances. If a setting of the same concrete [[pekko.actor.setup.Setup]] already is * present it will be replaced. */ def and[T <: Setup](t: T): ActorSystemSetup = withSetup(t) diff --git a/actor/src/main/scala/org/apache/pekko/dispatch/affinity/AffinityPool.scala b/actor/src/main/scala/org/apache/pekko/dispatch/affinity/AffinityPool.scala index aa82be903e8..f83b5a456ae 100644 --- a/actor/src/main/scala/org/apache/pekko/dispatch/affinity/AffinityPool.scala +++ b/actor/src/main/scala/org/apache/pekko/dispatch/affinity/AffinityPool.scala @@ -384,7 +384,7 @@ trait QueueSelector { /** * Must be deterministic—return the same value for the same input. * @return given a `Runnable` a number between 0 .. 
`queues` (exclusive) - * @throws NullPointerException when `command` is `null` + * @throws java.lang.NullPointerException when `command` is `null` */ def getQueue(command: Runnable, queues: Int): Int } diff --git a/actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala b/actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala index 38bcbf190ad..9e4270950d8 100644 --- a/actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala +++ b/actor/src/main/scala/org/apache/pekko/io/dns/DnsProtocol.scala @@ -57,7 +57,7 @@ object DnsProtocol { def srvRequestType(): RequestType = Srv /** - * Sending this to the [[AsyncDnsManager]] will either lead to a [[Resolved]] or a [[pekko.actor.Status.Failure]] response. + * Sending this to the [[internal.AsyncDnsManager]] will either lead to a [[Resolved]] or a [[pekko.actor.Status.Failure]] response. * If request type are both, both resolutions must succeed or the response is a failure. */ final case class Resolve(name: String, requestType: RequestType) extends ConsistentHashable { @@ -108,7 +108,8 @@ object DnsProtocol { /** * Return the host, taking into account the "java.net.preferIPv6Addresses" system property. - * @throws UnknownHostException + * + * @throws java.net.UnknownHostException */ @throws[UnknownHostException] def address(): InetAddress = _address match { diff --git a/actor/src/main/scala/org/apache/pekko/pattern/Patterns.scala b/actor/src/main/scala/org/apache/pekko/pattern/Patterns.scala index dd179a93d40..09e90526e0c 100644 --- a/actor/src/main/scala/org/apache/pekko/pattern/Patterns.scala +++ b/actor/src/main/scala/org/apache/pekko/pattern/Patterns.scala @@ -623,7 +623,7 @@ object Patterns { * Return an empty [[Optional]] instance for no delay. * A scheduler (eg context.system.scheduler) must be provided to delay each retry. 
 * You could provide a function to generate the next delay duration after first attempt, - * this function should never return `null`, otherwise an [[IllegalArgumentException]] will be through. + * this function should never return `null`, otherwise an [[java.lang.IllegalArgumentException]] will be thrown. * * If attempts are exhausted the returned future is simply the result of invoking attempt. * Note that the attempt function will be invoked on the given execution context for subsequent tries and diff --git a/actor/src/main/scala/org/apache/pekko/pattern/RetrySupport.scala b/actor/src/main/scala/org/apache/pekko/pattern/RetrySupport.scala index 09c4cdd0ef2..95bad97a73d 100644 --- a/actor/src/main/scala/org/apache/pekko/pattern/RetrySupport.scala +++ b/actor/src/main/scala/org/apache/pekko/pattern/RetrySupport.scala @@ -123,10 +123,10 @@ trait RetrySupport { * Given a function from Unit to Future, returns an internally retrying Future. * The first attempt will be made immediately, each subsequent attempt will be made after * the 'delay' return by `delayFunction`(the input next attempt count start from 1). - * Returns [[None]] for no delay. + * Returns [[scala.None]] for no delay. * A scheduler (eg context.system.scheduler) must be provided to delay each retry. * You could provide a function to generate the next delay duration after first attempt, - * this function should never return `null`, otherwise an [[IllegalArgumentException]] will be through. + * this function should never return `null`, otherwise an [[java.lang.IllegalArgumentException]] will be thrown. * * If attempts are exhausted the returned future is simply the result of invoking attempt. 
* Note that the attempt function will be invoked on the given execution context for subsequent diff --git a/actor/src/main/scala/org/apache/pekko/pattern/StatusReply.scala b/actor/src/main/scala/org/apache/pekko/pattern/StatusReply.scala index 7a614dba89e..e31bd108f33 100644 --- a/actor/src/main/scala/org/apache/pekko/pattern/StatusReply.scala +++ b/actor/src/main/scala/org/apache/pekko/pattern/StatusReply.scala @@ -92,7 +92,7 @@ object StatusReply { def error[T](errorMessage: String): StatusReply[T] = Error(errorMessage) /** - * Java API: Create an error response with a user defined [[Throwable]]. + * Java API: Create an error response with a user defined [[java.lang.Throwable]]. * * Prefer the string based error response over this one when possible to avoid tightly coupled logic across * actors and passing internal failure details on to callers that can not do much to handle them. @@ -150,7 +150,7 @@ object StatusReply { def apply[T](errorMessage: String): StatusReply[T] = error(new ErrorMessage(errorMessage)) /** - * Scala API: Create an error response with a user defined [[Throwable]]. + * Scala API: Create an error response with a user defined [[java.lang.Throwable]]. * * Prefer the string based error response over this one when possible to avoid tightly coupled logic across * actors and passing internal failure details on to callers that can not do much to handle them. diff --git a/actor/src/main/scala/org/apache/pekko/routing/Router.scala b/actor/src/main/scala/org/apache/pekko/routing/Router.scala index b6ea48583f9..8a8dc0fee92 100644 --- a/actor/src/main/scala/org/apache/pekko/routing/Router.scala +++ b/actor/src/main/scala/org/apache/pekko/routing/Router.scala @@ -35,7 +35,7 @@ trait RoutingLogic extends NoSerializationVerificationNeeded { /** * Pick the destination for a given message. Normally it picks one of the * passed `routees`, but in the end it is up to the implementation to - * return whatever [[Routee]] to use for sending a specific message. 
+ * return whatever [[pekko.routing.Routee]] to use for sending a specific message. * * When implemented from Java it can be good to know that * `routees.apply(index)` can be used to get an element diff --git a/actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala b/actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala index 76b5c2901df..e1c45503723 100644 --- a/actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala +++ b/actor/src/main/scala/org/apache/pekko/serialization/Serialization.scala @@ -128,7 +128,7 @@ object Serialization { * local actor refs, or if serializer library e.g. custom serializer/deserializer * in Jackson need access to the current `ActorSystem`. * - * @throws IllegalStateException if the information was not set + * @throws java.lang.IllegalStateException if the information was not set */ def getCurrentTransportInformation(): Information = { Serialization.currentTransportInformation.value match { @@ -541,7 +541,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { } /** - * @throws `NoSuchElementException` if no serializer with given `id` + * @throws java.util.NoSuchElementException if no serializer with given `id` */ private def getSerializerById(id: Int): Serializer = { if (0 <= id && id < quickSerializerByIdentity.length) { diff --git a/actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala b/actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala index fed4232ad9a..de8fc3943f4 100644 --- a/actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala +++ b/actor/src/main/scala/org/apache/pekko/serialization/Serializer.scala @@ -249,7 +249,7 @@ trait BaseSerializer extends Serializer { /** * Globally unique serialization identifier configured in the `reference.conf`. * - * See [[Serializer.identifier]]. + * See [[pekko.serialization.Serializer.identifier]]. 
*/ override val identifier: Int = identifierFromConfig diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedEntityProvider.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedEntityProvider.scala index 17f901c8012..cf99fc3ce7d 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedEntityProvider.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ReplicatedEntityProvider.scala @@ -34,7 +34,7 @@ object ReplicatedEntityProvider { /** * Java API: * - * Provides full control over the [[ReplicatedEntity]] and the [[Entity]] + * Provides full control over the [[ReplicatedEntity]] and the [[javadsl.Entity]] * Most use cases can use the [[createPerDataCenter]] and [[createPerRole]] * * @tparam M The type of messages the replicated entity accepts @@ -53,10 +53,10 @@ object ReplicatedEntityProvider { /** * Scala API: * - * Provides full control over the [[ReplicatedEntity]] and the [[Entity]] + * Provides full control over the [[ReplicatedEntity]] and the [[scaladsl.Entity]] * Most use cases can use the [[perDataCenter]] and [[perRole]] * - * @param typeName The type name used in the [[EntityTypeKey]] + * @param typeName The type name used in the [[scaladsl.EntityTypeKey]] * @tparam M The type of messages the replicated entity accepts */ def apply[M: ClassTag](typeName: String, allReplicaIds: Set[ReplicaId])( @@ -75,7 +75,7 @@ object ReplicatedEntityProvider { /** * Scala API * - * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in + * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in * ClusterSharding. A replica will be run per data center. 
*/ def perDataCenter[M: ClassTag, E](typeName: String, allReplicaIds: Set[ReplicaId])( @@ -91,7 +91,7 @@ object ReplicatedEntityProvider { /** * Scala API * - * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in + * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in * ClusterSharding. The replicas in allReplicaIds should be roles used by nodes. A replica for each * entity will run on each role. */ @@ -108,7 +108,7 @@ object ReplicatedEntityProvider { /** * Java API * - * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in + * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in * ClusterSharding. A replica will be run per data center. */ def createPerDataCenter[M]( @@ -128,7 +128,7 @@ object ReplicatedEntityProvider { /** * Java API * - * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[Entity]] when running in + * Create a [[ReplicatedEntityProvider]] that uses the defaults for [[scaladsl.Entity]] when running in * ClusterSharding. * * Map replicas to roles and then there will be a replica per role e.g. to match to availability zones/racks diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardingMessageExtractor.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardingMessageExtractor.scala index 8c327657b84..f9e70b03c43 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardingMessageExtractor.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/ShardingMessageExtractor.scala @@ -118,7 +118,7 @@ abstract class HashCodeNoEnvelopeMessageExtractor[M](val numberOfShards: Int) ex * * @param entityId The business domain identifier of the entity. * @param message The message to be send to the entity. 
- * @throws `InvalidMessageException` if message is null. + * @throws pekko.actor.InvalidMessageException if message is null. */ final case class ShardingEnvelope[M](entityId: String, message: M) extends WrappedMessage { if (message == null) throw InvalidMessageException("[null] is not an allowed message") diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala index 46f7e008872..1e58675bc00 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingConsumerController.scala @@ -33,13 +33,13 @@ import pekko.cluster.sharding.typed.delivery.internal.ShardingConsumerController * `ShardingConsumerController` is the entity that is initialized in `ClusterSharding`. It will manage * the lifecycle and message delivery to the destination consumer actor. * - * The destination consumer actor will start the flow by sending an initial [[ConsumerController.Start]] + * The destination consumer actor will start the flow by sending an initial [[pekko.actor.typed.delivery.ConsumerController.Start]] * message via the `ActorRef` provided in the factory function of the consumer `Behavior`. * The `ActorRef` in the `Start` message is typically constructed as a message adapter to map the - * [[ConsumerController.Delivery]] to the protocol of the consumer actor. + * [[pekko.actor.typed.delivery.ConsumerController.Delivery]] to the protocol of the consumer actor. * - * Received messages from the producer are wrapped in [[ConsumerController.Delivery]] when sent to the consumer, - * which is supposed to reply with [[ConsumerController.Confirmed]] when it has processed the message. 
+ * Received messages from the producer are wrapped in [[pekko.actor.typed.delivery.ConsumerController.Delivery]] when sent to the consumer, + * which is supposed to reply with [[pekko.actor.typed.delivery.ConsumerController.Confirmed]] when it has processed the message. * Next message from a specific producer is not delivered until the previous is confirmed. However, since * there can be several producers, e.g. one per node, sending to the same destination entity there can be * several `Delivery` in flight at the same time. diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala index 4cb6e486b2e..b38d954277d 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/delivery/ShardingProducerController.scala @@ -76,7 +76,7 @@ import pekko.util.OptionConverters._ * Until sent messages have been confirmed the `ShardingProducerController` keeps them in memory to be able to * resend them. If the JVM of the `ShardingProducerController` crashes those unconfirmed messages are lost. * To make sure the messages can be delivered also in that scenario the `ShardingProducerController` can be - * used with a [[DurableProducerQueue]]. Then the unconfirmed messages are stored in a durable way so + * used with a [[pekko.actor.typed.delivery.DurableProducerQueue]]. Then the unconfirmed messages are stored in a durable way so * that they can be redelivered when the producer is started again. An implementation of the * `DurableProducerQueue` is provided by `EventSourcedProducerQueue` in `pekko-persistence-typed`. 
* diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala index 52ce3ede2ea..450a075e580 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/javadsl/ClusterSharding.scala @@ -439,13 +439,13 @@ object EntityTypeKey { /** * A reference to an sharded Entity, which allows `ActorRef`-like usage. * - * An [[EntityRef]] is NOT an [[ActorRef]]–by design–in order to be explicit about the fact that the life-cycle + * An [[EntityRef]] is NOT an [[pekko.actor.typed.ActorRef ActorRef]]–by design–in order to be explicit about the fact that the life-cycle * of a sharded Entity is very different than a plain Actor. Most notably, this is shown by features of Entities * such as re-balancing (an active Entity to a different node) or passivation. Both of which are aimed to be completely * transparent to users of such Entity. In other words, if this were to be a plain ActorRef, it would be possible to * apply DeathWatch to it, which in turn would then trigger when the sharded Actor stopped, breaking the illusion that * Entity refs are "always there". Please note that while not encouraged, it is possible to expose an Actor's `self` - * [[ActorRef]] and watch it in case such notification is desired. + * [[pekko.actor.typed.ActorRef ActorRef]] and watch it in case such notification is desired. * * Not for user extension. 
*/ diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala index 768db09f350..ed1908bd7e0 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/scaladsl/ClusterSharding.scala @@ -416,13 +416,13 @@ object EntityTypeKey { /** * A reference to an sharded Entity, which allows `ActorRef`-like usage. * - * An [[EntityRef]] is NOT an [[ActorRef]]–by design–in order to be explicit about the fact that the life-cycle + * An [[EntityRef]] is NOT an [[pekko.actor.typed.ActorRef ActorRef]]–by design–in order to be explicit about the fact that the life-cycle * of a sharded Entity is very different than a plain Actors. Most notably, this is shown by features of Entities * such as re-balancing (an active Entity to a different node) or passivation. Both of which are aimed to be completely * transparent to users of such Entity. In other words, if this were to be a plain ActorRef, it would be possible to * apply DeathWatch to it, which in turn would then trigger when the sharded Actor stopped, breaking the illusion that * Entity refs are "always there". Please note that while not encouraged, it is possible to expose an Actor's `self` - * [[ActorRef]] and watch it in case such notification is desired. + * [[pekko.actor.typed.ActorRef ActorRef]] and watch it in case such notification is desired. * Not for user extension. 
*/ @DoNotInherit trait EntityRef[-M] extends RecipientRef[M] { this: InternalRecipientRef[M] => @@ -544,7 +544,7 @@ object ClusterShardingSetup { } /** - * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]] + * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]] * to replace the default implementation of the [[ClusterSharding]] extension. Intended * for tests that need to replace extension with stub/mock implementations. */ diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/javadsl/EntityRef.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/javadsl/EntityRef.scala index 171159d28a6..ff77a8e3356 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/javadsl/EntityRef.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/javadsl/EntityRef.scala @@ -20,7 +20,7 @@ import pekko.cluster.sharding.typed.javadsl.EntityRef import pekko.cluster.sharding.typed.javadsl.EntityTypeKey /** - * For testing purposes this `EntityRef` can be used in place of a real [[EntityRef]]. + * For testing purposes this `EntityRef` can be used in place of a real [[pekko.cluster.sharding.typed.javadsl.EntityRef]]. * It forwards all messages to the `probe`. 
*/ object TestEntityRef { diff --git a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/scaladsl/EntityRef.scala b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/scaladsl/EntityRef.scala index 9f3ff61798d..d56542290d0 100644 --- a/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/scaladsl/EntityRef.scala +++ b/cluster-sharding-typed/src/main/scala/org/apache/pekko/cluster/sharding/typed/testkit/scaladsl/EntityRef.scala @@ -20,7 +20,7 @@ import pekko.cluster.sharding.typed.scaladsl.EntityRef import pekko.cluster.sharding.typed.scaladsl.EntityTypeKey /** - * For testing purposes this `EntityRef` can be used in place of a real [[EntityRef]]. + * For testing purposes this `EntityRef` can be used in place of a real [[pekko.cluster.sharding.typed.scaladsl.EntityRef]]. * It forwards all messages to the `probe`. */ object TestEntityRef { diff --git a/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala b/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala index 8070f97d1a1..d4db740609e 100644 --- a/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala +++ b/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/ShardRegion.scala @@ -304,7 +304,7 @@ object ShardRegion { * Intended for testing purpose to see when cluster sharding is "ready" or to monitor * the state of the shard regions. * - * For the statistics for the entire cluster, see [[GetClusterShardingStats$]]. + * For the statistics for the entire cluster, see [[GetClusterShardingStats]]. 
*/ @SerialVersionUID(1L) case object GetShardRegionStats extends ShardRegionQuery with ClusterShardingSerializable diff --git a/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala b/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala index 7d3b6cc6194..dcac16f8101 100644 --- a/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala +++ b/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala @@ -31,7 +31,7 @@ import pekko.cluster.sharding.external.ShardLocations trait ExternalShardAllocationClient { /** - * Update the given shard's location. The [[Address]] should + * Update the given shard's location. The [[pekko.actor.Address]] should * match one of the nodes in the cluster. If the node has not joined * the cluster yet it will be moved to that node after the first cluster * sharding rebalance. @@ -44,7 +44,7 @@ trait ExternalShardAllocationClient { /** * Update all of the provided ShardLocations. - * The [[Address]] should match one of the nodes in the cluster. If the node has not joined + * The [[pekko.actor.Address]] should match one of the nodes in the cluster. If the node has not joined * the cluster yet it will be moved to that node after the first cluster * sharding rebalance it does. 
* diff --git a/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala b/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala index ce15c492236..6f57fda3cd2 100644 --- a/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala +++ b/cluster-sharding/src/main/scala/org/apache/pekko/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala @@ -31,7 +31,7 @@ import pekko.cluster.sharding.external.ShardLocations trait ExternalShardAllocationClient { /** - * Update the given shard's location. The [[Address]] should + * Update the given shard's location. The [[pekko.actor.Address]] should * match one of the nodes in the cluster. If the node has not joined * the cluster yet it will be moved to that node after the first cluster * sharding rebalance it does. @@ -44,7 +44,7 @@ trait ExternalShardAllocationClient { /** * Update all of the provided ShardLocations. - * The [[Address]] should match one of the nodes in the cluster. If the node has not joined + * The [[pekko.actor.Address]] should match one of the nodes in the cluster. If the node has not joined * the cluster yet it will be moved to that node after the first cluster * sharding rebalance it does. 
* diff --git a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala index 9a7b98bc146..7c8ce8400ac 100644 --- a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala +++ b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/DistributedData.scala @@ -114,7 +114,7 @@ object DistributedDataSetup { } /** - * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]] + * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]] * to replace the default implementation of the [[DistributedData]] extension. Intended * for tests that need to replace extension with stub/mock implementations. */ diff --git a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorMessageAdapter.scala b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorMessageAdapter.scala index 14b732346f6..d6cf1e39d7e 100644 --- a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorMessageAdapter.scala +++ b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/javadsl/ReplicatorMessageAdapter.scala @@ -47,7 +47,7 @@ import pekko.util.Timeout * than the ordinary actor message processing thread, such as [[java.util.concurrent.CompletionStage]] * callbacks. It must not be shared between several actor instances. * - * @param context The [[ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can + * @param context The [[pekko.actor.typed.javadsl.ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can * only be used in this actor. * @param replicator The replicator to interact with, typically `DistributedData.get(system).replicator`. 
* @param unexpectedAskTimeout The timeout to use for `ask` operations. This should be longer than diff --git a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/Replicator.scala b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/Replicator.scala index a6abb0a6492..36cf2f13cfa 100644 --- a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/Replicator.scala +++ b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/Replicator.scala @@ -103,7 +103,7 @@ object Replicator { extends Command /** - * Reply from `Get`. The data value is retrieved with [[dd.Replicator.GetSuccess.get]] using the typed key. + * Reply from `Get`. The data value is retrieved with [[pekko.cluster.ddata.Replicator.GetSuccess.get]] using the typed key. */ type GetResponse[A <: ReplicatedData] = dd.Replicator.GetResponse[A] object GetSuccess { @@ -117,7 +117,7 @@ object Replicator { /** * The [[Get]] request could not be fulfill according to the given - * [[ReadConsistency consistency level]] and [[ReadConsistency#timeout timeout]]. + * [[ReadConsistency consistency level]] and [[pekko.cluster.ddata.Replicator.ReadConsistency#timeout timeout]]. */ type GetFailure[A <: ReplicatedData] = dd.Replicator.GetFailure[A] object GetFailure { @@ -199,7 +199,7 @@ object Replicator { /** * The direct replication of the [[Update]] could not be fulfill according to * the given [[WriteConsistency consistency level]] and - * [[WriteConsistency#timeout timeout]]. + * [[pekko.cluster.ddata.Replicator.WriteConsistency#timeout timeout]]. * * The `Update` was still performed locally and possibly replicated to some nodes. * It will eventually be disseminated to other replicas, unless the local replica @@ -277,7 +277,7 @@ object Replicator { type SubscribeResponse[A <: ReplicatedData] = dd.Replicator.SubscribeResponse[A] /** - * The data value is retrieved with [[dd.Replicator.Changed.get]] using the typed key. 
+ * The data value is retrieved with [[pekko.cluster.ddata.Replicator.Changed.get]] using the typed key. * * @see [[Subscribe]] */ @@ -286,7 +286,7 @@ object Replicator { } /** - * The data value is retrieved with [[dd.Replicator.Changed.get]] using the typed key. + * The data value is retrieved with [[pekko.cluster.ddata.Replicator.Changed.get]] using the typed key. * * @see [[Subscribe]] */ diff --git a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorMessageAdapter.scala b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorMessageAdapter.scala index 2711d74ce14..2544515e80d 100644 --- a/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorMessageAdapter.scala +++ b/cluster-typed/src/main/scala/org/apache/pekko/cluster/ddata/typed/scaladsl/ReplicatorMessageAdapter.scala @@ -50,7 +50,7 @@ object ReplicatorMessageAdapter { * than the ordinary actor message processing thread, such as [[scala.concurrent.Future]] callbacks. * It must not be shared between several actor instances. * - * @param context The [[ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can + * @param context The [[pekko.actor.typed.scaladsl.ActorContext]] of the requesting actor. The `ReplicatorMessageAdapter` can * only be used in this actor. * @param replicator The replicator to interact with, typically `DistributedData(system).replicator`. * @param unexpectedAskTimeout The timeout to use for `ask` operations. 
This should be longer than diff --git a/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/Cluster.scala b/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/Cluster.scala index 346f7b38fe3..e2fdf08a765 100644 --- a/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/Cluster.scala +++ b/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/Cluster.scala @@ -229,7 +229,7 @@ object ClusterSetup { } /** - * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]] + * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]] * to replace the default implementation of the [[Cluster]] extension. Intended * for tests that need to replace extension with stub/mock implementations. */ diff --git a/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala b/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala index 861d87fee7f..92e2cbbcfeb 100644 --- a/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala +++ b/cluster-typed/src/main/scala/org/apache/pekko/cluster/typed/ClusterSingleton.scala @@ -223,7 +223,7 @@ abstract class ClusterSingleton extends Extension { * Start if needed and provide a proxy to a named singleton * * If there already is a manager running for the given `singletonName` on this node, no additional manager is started. - * If there already is a proxy running for the given `singletonName` on this node, an [[ActorRef]] to that is returned. + * If there already is a proxy running for the given `singletonName` on this node, an [[pekko.actor.typed.ActorRef]] to that is returned. 
* * @return A proxy actor that can be used to communicate with the singleton in the cluster */ @@ -349,7 +349,7 @@ object ClusterSingletonSetup { } /** - * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[ActorSystem]] + * Can be used in [[pekko.actor.setup.ActorSystemSetup]] when starting the [[pekko.actor.typed.ActorSystem]] * to replace the default implementation of the [[ClusterSingleton]] extension. Intended * for tests that need to replace extension with stub/mock implementations. */ diff --git a/cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala b/cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala index 81d7cf1051d..d92b2ebb2fa 100644 --- a/cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala +++ b/cluster/src/main/scala/org/apache/pekko/cluster/JoinConfigCompatChecker.scala @@ -102,7 +102,7 @@ object JoinConfigCompatChecker { * INTERNAL API * Builds a new Config object containing only the required entries defined by `requiredKeys` * - * This method is used from the joining side to prepare the [[Config]] instance that will be sent over the wire. + * This method is used from the joining side to prepare the [[com.typesafe.config.Config]] instance that will be sent over the wire. * We don't send the full config to avoid unnecessary data transfer, but also to avoid leaking any sensitive * information that users may have added to their configuration. 
*/ diff --git a/discovery/src/main/scala/org/apache/pekko/discovery/ServiceDiscovery.scala b/discovery/src/main/scala/org/apache/pekko/discovery/ServiceDiscovery.scala index ff544f5a50f..aa9b0961d08 100644 --- a/discovery/src/main/scala/org/apache/pekko/discovery/ServiceDiscovery.scala +++ b/discovery/src/main/scala/org/apache/pekko/discovery/ServiceDiscovery.scala @@ -318,7 +318,7 @@ abstract class ServiceDiscovery { * the passed `resolveTimeout` should never be exceeded, as it signals the application's * eagerness to wait for a result for this specific lookup. * - * The returned future should be failed once resolveTimeout has passed with a [[DiscoveryTimeoutException]]. + * The returned future should be failed once resolveTimeout has passed with a [[ServiceDiscovery.DiscoveryTimeoutException]]. */ def lookup(query: Lookup, resolveTimeout: java.time.Duration): CompletionStage[Resolved] = { import pekko.util.FutureConverters._ diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala index ddb97b079f1..b29d996800c 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala @@ -24,7 +24,7 @@ import pekko.stream.javadsl.Source trait CurrentEventsByPersistenceIdQuery extends ReadJournal { /** - * Same type of query as [[EventsByPersistenceIdQuery#eventsByPersistenceId]] + * Same type of query as [[pekko.persistence.query.javadsl.EventsByPersistenceIdQuery#eventsByPersistenceId]] * but the event stream is completed immediately when it reaches the end of * the "result set". Events that are stored after the query is completed are * not included in the event stream. 
diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByTagQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByTagQuery.scala index 2c6ea827709..cccbd0ad09f 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByTagQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/CurrentEventsByTagQuery.scala @@ -24,7 +24,7 @@ import pekko.stream.javadsl.Source trait CurrentEventsByTagQuery extends ReadJournal { /** - * Same type of query as [[EventsByTagQuery#eventsByTag]] but the event stream + * Same type of query as [[pekko.persistence.query.javadsl.EventsByTagQuery#eventsByTag]] but the event stream * is completed immediately when it reaches the end of the "result set". Depending * on journal implementation, this may mean all events up to when the query is * started, or it may include events that are persisted while the query is still diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/DurableStateStoreQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/DurableStateStoreQuery.scala index 61f416ba07e..826840403cc 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/DurableStateStoreQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/DurableStateStoreQuery.scala @@ -36,7 +36,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] { * This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to * objects made since materialization are not guaranteed to be included in the results. 
* - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. * * @param tag The tag to get changes for. @@ -58,7 +58,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] { * in quick succession are likely to be skipped, with only the last update resulting in a change from this * source. * - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. * * @param tag The tag to get changes for. diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/EventsByPersistenceIdQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/EventsByPersistenceIdQuery.scala index b32bc3c0e06..4b65d036ec7 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/EventsByPersistenceIdQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/javadsl/EventsByPersistenceIdQuery.scala @@ -35,7 +35,7 @@ trait EventsByPersistenceIdQuery extends ReadJournal { * The stream is not completed when it reaches the end of the currently stored events, * but it continues to push new events when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently - * stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]]. + * stored events is provided by [[pekko.persistence.query.javadsl.CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]]. 
*/ def eventsByPersistenceId( persistenceId: String, diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala index 793fc45b495..afd53db72d2 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala @@ -24,7 +24,7 @@ import pekko.stream.scaladsl.Source trait CurrentEventsByPersistenceIdQuery extends ReadJournal { /** - * Same type of query as [[EventsByPersistenceIdQuery#eventsByPersistenceId]] + * Same type of query as [[pekko.persistence.query.scaladsl.EventsByPersistenceIdQuery#eventsByPersistenceId]] * but the event stream is completed immediately when it reaches the end of * the "result set". Events that are stored after the query is completed are * not included in the event stream. diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByTagQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByTagQuery.scala index 83e5ea28ccf..f62a766865d 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByTagQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/CurrentEventsByTagQuery.scala @@ -24,7 +24,7 @@ import pekko.stream.scaladsl.Source trait CurrentEventsByTagQuery extends ReadJournal { /** - * Same type of query as [[EventsByTagQuery#eventsByTag]] but the event stream + * Same type of query as [[pekko.persistence.query.scaladsl.EventsByTagQuery#eventsByTag]] but the event stream * is completed immediately when it reaches the end of the "result set". 
Depending * on journal implementation, this may mean all events up to when the query is * started, or it may include events that are persisted while the query is still diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala index 6327235477c..dec7bb0dbe4 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala @@ -29,7 +29,7 @@ trait DurableStateStorePagedPersistenceIdsQuery[A] extends DurableStateStore[A] * Not all plugins may support in database paging, and may simply use drop/take Pekko streams operators * to manipulate the result set according to the paging parameters. * - * @param afterId The ID to start returning results from, or [[None]] to return all ids. This should be an id + * @param afterId The ID to start returning results from, or [[scala.None]] to return all ids. This should be an id * returned from a previous invocation of this command. Callers should not assume that ids are * returned in sorted order. * @param limit The maximum results to return. Use Long.MaxValue to return all results. Must be greater than zero. 
diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStoreQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStoreQuery.scala index 39485e1bf2d..13155bf65a5 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStoreQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/DurableStateStoreQuery.scala @@ -36,7 +36,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] { * This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to * objects made since materialization are not guaranteed to be included in the results. * - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. * * @param tag The tag to get changes for. @@ -58,7 +58,7 @@ trait DurableStateStoreQuery[A] extends DurableStateStore[A] { * in quick succession are likely to be skipped, with only the last update resulting in a change from this * source. * - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. * * @param tag The tag to get changes for. 
diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala index a56a6f752e4..20a5ec03a46 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala @@ -35,7 +35,7 @@ trait EventsByPersistenceIdQuery extends ReadJournal { * The stream is not completed when it reaches the end of the currently stored events, * but it continues to push new events when new events are persisted. * Corresponding query that is completed when it reaches the end of the currently - * stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]]. + * stored events is provided by [[pekko.persistence.query.scaladsl.CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]]. */ def eventsByPersistenceId( persistenceId: String, diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala index 02ae836bff4..7759e5751f2 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala @@ -28,7 +28,7 @@ trait PagedPersistenceIdsQuery extends ReadJournal { * Not all plugins may support in database paging, and may simply use drop/take Pekko streams operators * to manipulate the result set according to the paging parameters. * - * @param afterId The ID to start returning results from, or [[None]] to return all ids. 
This should be an id + * @param afterId The ID to start returning results from, or [[scala.None]] to return all ids. This should be an id * returned from a previous invocation of this command. Callers should not assume that ids are * returned in sorted order. * @param limit The maximum results to return. Use Long.MaxValue to return all results. Must be greater than zero. diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala index 5968c9c44c2..553146061b5 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala @@ -31,7 +31,7 @@ import pekko.stream.javadsl.Source trait CurrentEventsBySliceQuery extends ReadJournal { /** - * Same type of query as [[EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it + * Same type of query as [[pekko.persistence.query.typed.javadsl.EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it * reaches the end of the "result set". Depending on journal implementation, this may mean all events up to when the * query is started, or it may include events that are persisted while the query is still streaming results. For * eventually consistent stores, it may only include all events up to some point before the query is started. 
diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/DurableStateStoreBySliceQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/DurableStateStoreBySliceQuery.scala index f3bb2c45793..f061a1decbf 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/DurableStateStoreBySliceQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/javadsl/DurableStateStoreBySliceQuery.scala @@ -44,7 +44,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] { * This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to * objects made since materialization are not guaranteed to be included in the results. * - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. */ def currentChangesBySlices( @@ -66,7 +66,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] { * change for each object since the offset will be emitted. In particular, multiple updates to a given object in quick * succession are likely to be skipped, with only the last update resulting in a change from this source. * - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. 
*/ def changesBySlices( diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala index 904477e85fe..4dda4b4315d 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala @@ -32,7 +32,7 @@ import pekko.stream.scaladsl.Source trait CurrentEventsBySliceQuery extends ReadJournal { /** - * Same type of query as [[EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it + * Same type of query as [[pekko.persistence.query.typed.scaladsl.EventsBySliceQuery.eventsBySlices]] but the event stream is completed immediately when it * reaches the end of the "result set". Depending on journal implementation, this may mean all events up to when the * query is started, or it may include events that are persisted while the query is still streaming results. For * eventually consistent stores, it may only include all events up to some point before the query is started. 
diff --git a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/DurableStateStoreBySliceQuery.scala b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/DurableStateStoreBySliceQuery.scala index 9c11e97643d..fcf49d2355b 100644 --- a/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/DurableStateStoreBySliceQuery.scala +++ b/persistence-query/src/main/scala/org/apache/pekko/persistence/query/typed/scaladsl/DurableStateStoreBySliceQuery.scala @@ -45,7 +45,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] { * This will return changes that occurred up to when the `Source` returned by this call is materialized. Changes to * objects made since materialization are not guaranteed to be included in the results. * - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. */ def currentChangesBySlices( @@ -67,7 +67,7 @@ trait DurableStateStoreBySliceQuery[A] extends DurableStateStore[A] { * change for each object since the offset will be emitted. In particular, multiple updates to a given object in quick * succession are likely to be skipped, with only the last update resulting in a change from this source. * - * The [[DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or + * The [[pekko.persistence.query.DurableStateChange]] elements can be [[pekko.persistence.query.UpdatedDurableState]] or * [[pekko.persistence.query.DeletedDurableState]]. 
*/ def changesBySlices( diff --git a/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/PersistenceTestKit.scala b/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/PersistenceTestKit.scala index 0bad4d02797..aaf7fcca5f5 100644 --- a/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/PersistenceTestKit.scala +++ b/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/PersistenceTestKit.scala @@ -371,7 +371,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) { * * NOTE! Also clears sequence numbers in storage! * - * @see [[PersistenceTestKit.clearAllPreservingSeqNumbers()]] + * @see [[clearAllPreservingSeqNumbers]] */ def clearAll(): Unit = scalaTestkit.clearAll() @@ -380,21 +380,21 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) { * * NOTE! Also clears sequence number in storage! * - * @see [[PersistenceTestKit.clearByIdPreservingSeqNumbers()]] + * @see [[clearByIdPreservingSeqNumbers]] */ def clearByPersistenceId(persistenceId: String): Unit = scalaTestkit.clearByPersistenceId(persistenceId) /** * Clear all data in storage preserving sequence numbers. * - * @see [[PersistenceTestKit.clearAll()]] + * @see [[clearAll]] */ def clearAllPreservingSeqNumbers(): Unit = scalaTestkit.clearAllPreservingSeqNumbers() /** * Clear all data in storage for particular persistence id preserving sequence numbers. * - * @see [[PersistenceTestKit.clearByPersistenceId()]] + * @see [[clearByPersistenceId]] */ def clearByIdPreservingSeqNumbers(persistenceId: String): Unit = scalaTestkit.clearByIdPreservingSeqNumbers(persistenceId) @@ -439,7 +439,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) { } /** - * Returns default policy if it was changed by [[PersistenceTestKit.withPolicy()]]. + * Returns default policy if it was changed by [[withPolicy]]. 
*/ def resetPolicy(): Unit = scalaTestkit.resetPolicy() diff --git a/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/SnapshotTestKit.scala b/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/SnapshotTestKit.scala index f74971423ab..96c70dd6cbf 100644 --- a/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/SnapshotTestKit.scala +++ b/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/javadsl/SnapshotTestKit.scala @@ -266,7 +266,7 @@ class SnapshotTestKit(scalaTestkit: ScalaTestKit) { } /** - * Returns default policy if it was changed by [[SnapshotTestKit.withPolicy()]]. + * Returns default policy if it was changed by [[withPolicy]]. */ def resetPolicy(): Unit = scalaTestkit.resetPolicy() diff --git a/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala b/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala index acd12f033c7..859f731e53a 100644 --- a/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala +++ b/persistence-testkit/src/main/scala/org/apache/pekko/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala @@ -149,7 +149,7 @@ final class PersistenceTestKitReadJournal(system: ExtendedActorSystem, @unused c * Not all plugins may support in database paging, and may simply use drop/take Pekko streams operators * to manipulate the result set according to the paging parameters. * - * @param afterId The ID to start returning results from, or [[None]] to return all ids. This should be an id + * @param afterId The ID to start returning results from, or [[scala.None]] to return all ids. This should be an id * returned from a previous invocation of this command. Callers should not assume that ids are * returned in sorted order. 
* @param limit The maximum results to return. Use Long.MaxValue to return all results. Must be greater than zero. diff --git a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/PersistenceId.scala b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/PersistenceId.scala index 6b0113e8f85..08bba0f44cd 100644 --- a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/PersistenceId.scala +++ b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/PersistenceId.scala @@ -41,7 +41,7 @@ object PersistenceId { * in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity` * you should use `""` as the separator. * - * @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|` + * @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|` */ def apply(entityTypeHint: String, entityId: String): PersistenceId = apply(entityTypeHint, entityId, DefaultSeparator) @@ -65,7 +65,7 @@ object PersistenceId { * in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity` * you should use `""` as the separator. * - * @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator` + * @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator` */ def apply(entityTypeHint: String, entityId: String, separator: String): PersistenceId = { if (separator.nonEmpty) { @@ -99,7 +99,7 @@ object PersistenceId { * in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity` * you should use `""` as the separator. 
* - * @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|` + * @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `|` */ def of(entityTypeHint: String, entityId: String): PersistenceId = apply(entityTypeHint, entityId) @@ -123,7 +123,7 @@ object PersistenceId { * in Lagom's `javadsl.PersistentEntity`. For compatibility with Lagom's `javadsl.PersistentEntity` * you should use `""` as the separator. * - * @throws IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator` + * @throws java.lang.IllegalArgumentException if the `entityTypeHint` or `entityId` contains `separator` */ def of(entityTypeHint: String, entityId: String, separator: String): PersistenceId = apply(entityTypeHint, entityId, separator) diff --git a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala index 68c0138ad8c..8af725db33c 100644 --- a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala +++ b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/delivery/EventSourcedProducerQueue.scala @@ -35,7 +35,7 @@ import pekko.persistence.typed.scaladsl.RetentionCriteria import pekko.util.JavaDurationConverters._ /** - * [[DurableProducerQueue]] that can be used with [[pekko.actor.typed.delivery.ProducerController]] + * [[pekko.actor.typed.delivery.DurableProducerQueue]] that can be used with [[pekko.actor.typed.delivery.ProducerController]] * for reliable delivery of messages. It is implemented with Event Sourcing and stores one * event before sending the message to the destination and one event for the confirmation * that the message has been delivered and processed. 
diff --git a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehavior.scala b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehavior.scala index 8218cf3614c..c133bcbdbdb 100644 --- a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehavior.scala +++ b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/javadsl/EventSourcedBehavior.scala @@ -287,7 +287,7 @@ abstract class EventSourcedBehaviorWithEnforcedReplies[Command, Event, State]( * Use [[EventSourcedBehaviorWithEnforcedReplies#newCommandHandlerWithReplyBuilder]] instead, or * extend [[EventSourcedBehavior]] instead of [[EventSourcedBehaviorWithEnforcedReplies]]. * - * @throws UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead + * @throws java.lang.UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead */ override protected def newCommandHandlerBuilder(): CommandHandlerBuilder[Command, Event, State] = throw new UnsupportedOperationException("Use newCommandHandlerWithReplyBuilder instead") diff --git a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/javadsl/DurableStateBehavior.scala b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/javadsl/DurableStateBehavior.scala index a4bf546e7e7..7aebd4422bc 100644 --- a/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/javadsl/DurableStateBehavior.scala +++ b/persistence-typed/src/main/scala/org/apache/pekko/persistence/typed/state/javadsl/DurableStateBehavior.scala @@ -202,7 +202,7 @@ abstract class DurableStateBehaviorWithEnforcedReplies[Command, State]( * Use [[DurableStateBehaviorWithEnforcedReplies#newCommandHandlerWithReplyBuilder]] instead, or * extend [[DurableStateBehavior]] instead of [[DurableStateBehaviorWithEnforcedReplies]]. 
* - * @throws UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead + * @throws java.lang.UnsupportedOperationException use newCommandHandlerWithReplyBuilder instead */ override protected def newCommandHandlerBuilder(): CommandHandlerBuilder[Command, State] = throw new UnsupportedOperationException("Use newCommandHandlerWithReplyBuilder instead") diff --git a/persistence/src/main/scala/org/apache/pekko/persistence/Eventsourced.scala b/persistence/src/main/scala/org/apache/pekko/persistence/Eventsourced.scala index f7f546c2f3b..8ce0562bcd3 100644 --- a/persistence/src/main/scala/org/apache/pekko/persistence/Eventsourced.scala +++ b/persistence/src/main/scala/org/apache/pekko/persistence/Eventsourced.scala @@ -371,7 +371,7 @@ private[persistence] trait Eventsourced /** * Recovery handler that receives persisted events during recovery. If a state snapshot - * has been captured and saved, this handler will receive a [[SnapshotOffer]] message + * has been captured and saved, this handler will receive a [[pekko.persistence.SnapshotOffer]] message * followed by events that are younger than the offered snapshot. * * This handler must not have side-effects other than changing persistent actor state i.e. it @@ -381,7 +381,7 @@ private[persistence] trait Eventsourced * If there is a problem with recovering the state of the actor from the journal, the error * will be logged and the actor will be stopped. * - * @see [[Recovery]] + * @see [[pekko.persistence.Recovery]] */ def receiveRecover: Receive @@ -519,11 +519,11 @@ private[persistence] trait Eventsourced /** * Permanently deletes all persistent messages with sequence numbers less than or equal `toSequenceNr`. * - * If the delete is successful a [[DeleteMessagesSuccess]] will be sent to the actor. - * If the delete fails a [[DeleteMessagesFailure]] will be sent to the actor. + * If the delete is successful a [[pekko.persistence.DeleteMessagesSuccess]] will be sent to the actor. 
+ * If the delete fails a [[pekko.persistence.DeleteMessagesFailure]] will be sent to the actor. * * The given `toSequenceNr` must be less than or equal to [[Eventsourced#lastSequenceNr]], otherwise - * [[DeleteMessagesFailure]] is sent to the actor without performing the delete. All persistent + * [[pekko.persistence.DeleteMessagesFailure]] is sent to the actor without performing the delete. All persistent * messages may be deleted without specifying the actual sequence number by using `Long.MaxValue` * as the `toSequenceNr`. * diff --git a/persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala b/persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala index ff2136c5e00..9efbb112bc2 100644 --- a/persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala +++ b/persistence/src/main/scala/org/apache/pekko/persistence/Persistence.scala @@ -104,7 +104,7 @@ trait PersistenceRecovery { // #persistence-recovery /** * Called when the persistent actor is started for the first time. - * The returned [[Recovery]] object defines how the Actor will recover its persistent state before + * The returned [[pekko.persistence.Recovery]] object defines how the Actor will recover its persistent state before * handling the first incoming message. * * To skip recovery completely return `Recovery.none`. @@ -117,7 +117,7 @@ trait PersistenceRecovery { trait PersistenceStash extends Stash with StashFactory { /** - * The returned [[StashOverflowStrategy]] object determines how to handle the message failed to stash + * The returned [[pekko.persistence.StashOverflowStrategy]] object determines how to handle the message failed to stash * when the internal Stash capacity exceeded. */ def internalStashOverflowStrategy: StashOverflowStrategy = @@ -129,9 +129,9 @@ trait RuntimePluginConfig { /** * Additional configuration of the journal plugin servicing this persistent actor. 
* When empty, the whole configuration of the journal plugin will be taken from the [[com.typesafe.config.Config]] loaded into the - * [[ActorSystem]]. + * [[pekko.actor.ActorSystem]]. * When configured, the journal plugin configuration will be taken from this [[com.typesafe.config.Config]] merged with the [[com.typesafe.config.Config]] - * loaded into the [[ActorSystem]]. + * loaded into the [[pekko.actor.ActorSystem]]. * * @return an additional configuration used to configure the journal plugin. */ @@ -140,9 +140,9 @@ trait RuntimePluginConfig { /** * Additional configuration of the snapshot plugin servicing this persistent actor. * When empty, the whole configuration of the snapshot plugin will be taken from the [[com.typesafe.config.Config]] loaded into the - * [[ActorSystem]]. + * [[pekko.actor.ActorSystem]]. * When configured, the snapshot plugin configuration will be taken from this [[com.typesafe.config.Config]] merged with the [[com.typesafe.config.Config]] - * loaded into the [[ActorSystem]]. + * loaded into the [[pekko.actor.ActorSystem]]. * * @return an additional configuration used to configure the snapshot plugin. 
*/ @@ -178,7 +178,7 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { /** * INTERNAL API - * @throws IllegalArgumentException if config path for the `pluginId` doesn't exist + * @throws java.lang.IllegalArgumentException if config path for the `pluginId` doesn't exist */ @InternalApi private[pekko] def verifyPluginConfigExists( config: Config, @@ -190,7 +190,7 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { /** * INTERNAL API - * @throws IllegalArgumentException if `pluginId` is empty (undefined) + * @throws java.lang.IllegalArgumentException if `pluginId` is empty (undefined) */ @InternalApi private[pekko] def verifyPluginConfigIsDefined(pluginId: String, pluginType: String): Unit = { if (isEmpty(pluginId)) @@ -282,13 +282,13 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { }) /** - * @throws IllegalArgumentException if `configPath` doesn't exist + * @throws java.lang.IllegalArgumentException if `configPath` doesn't exist */ private def verifyJournalPluginConfigExists(pluginConfig: Config, configPath: String): Unit = verifyPluginConfigExists(pluginConfig.withFallback(system.settings.config), configPath, "Journal") /** - * @throws IllegalArgumentException if `configPath` doesn't exist + * @throws java.lang.IllegalArgumentException if `configPath` doesn't exist */ private def verifySnapshotPluginConfigExists(pluginConfig: Config, configPath: String): Unit = verifyPluginConfigExists(pluginConfig.withFallback(system.settings.config), configPath, "Snapshot store") diff --git a/persistence/src/main/scala/org/apache/pekko/persistence/Snapshotter.scala b/persistence/src/main/scala/org/apache/pekko/persistence/Snapshotter.scala index 8a1a6bba2b0..a52713a25ed 100644 --- a/persistence/src/main/scala/org/apache/pekko/persistence/Snapshotter.scala +++ b/persistence/src/main/scala/org/apache/pekko/persistence/Snapshotter.scala @@ -36,8 +36,8 @@ trait Snapshotter extends Actor { def 
snapshotSequenceNr: Long /** - * Instructs the snapshot store to load the specified snapshot and send it via an [[SnapshotOffer]] - * to the running [[PersistentActor]]. + * Instructs the snapshot store to load the specified snapshot and send it via an [[pekko.persistence.SnapshotOffer SnapshotOffer]] + * to the running [[pekko.persistence.PersistentActor PersistentActor]]. */ def loadSnapshot(persistenceId: String, criteria: SnapshotSelectionCriteria, toSequenceNr: Long): Unit = snapshotStore ! LoadSnapshot(persistenceId, criteria, toSequenceNr) @@ -45,8 +45,8 @@ trait Snapshotter extends Actor { /** * Saves a `snapshot` of this snapshotter's state. * - * The [[PersistentActor]] will be notified about the success or failure of this - * via an [[SaveSnapshotSuccess]] or [[SaveSnapshotFailure]] message. + * The [[pekko.persistence.PersistentActor PersistentActor]] will be notified about the success or failure of this + * via an [[pekko.persistence.SaveSnapshotSuccess SaveSnapshotSuccess]] or [[pekko.persistence.SaveSnapshotFailure SaveSnapshotFailure]] message. */ def saveSnapshot(snapshot: Any): Unit = { snapshotStore ! SaveSnapshot(SnapshotMetadata(snapshotterId, snapshotSequenceNr), snapshot) @@ -55,8 +55,8 @@ trait Snapshotter extends Actor { /** * Deletes the snapshot identified by `sequenceNr`. * - * The [[PersistentActor]] will be notified about the status of the deletion - * via an [[DeleteSnapshotSuccess]] or [[DeleteSnapshotFailure]] message. + * The [[pekko.persistence.PersistentActor PersistentActor]] will be notified about the status of the deletion + * via an [[pekko.persistence.DeleteSnapshotSuccess DeleteSnapshotSuccess]] or [[pekko.persistence.DeleteSnapshotFailure DeleteSnapshotFailure]] message. */ def deleteSnapshot(sequenceNr: Long): Unit = { snapshotStore ! DeleteSnapshot(SnapshotMetadata(snapshotterId, sequenceNr)) @@ -65,8 +65,8 @@ trait Snapshotter extends Actor { /** * Deletes all snapshots matching `criteria`. 
* - * The [[PersistentActor]] will be notified about the status of the deletion - * via an [[DeleteSnapshotsSuccess]] or [[DeleteSnapshotsFailure]] message. + * The [[pekko.persistence.PersistentActor PersistentActor]] will be notified about the status of the deletion + * via an [[pekko.persistence.DeleteSnapshotsSuccess DeleteSnapshotsSuccess]] or [[pekko.persistence.DeleteSnapshotsFailure DeleteSnapshotsFailure]] message. */ def deleteSnapshots(criteria: SnapshotSelectionCriteria): Unit = { snapshotStore ! DeleteSnapshots(snapshotterId, criteria) diff --git a/persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala b/persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala index 443c7ee9892..2ea3a3081d0 100644 --- a/persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala +++ b/persistence/src/main/scala/org/apache/pekko/persistence/fsm/PersistentFSM.scala @@ -514,14 +514,14 @@ abstract class AbstractPersistentFSM[S <: FSMState, D, E] data => action.accept(data) /** - * Adapter from Java [[Class]] to [[scala.reflect.ClassTag]] + * Adapter from Java [[java.lang.Class]] to [[scala.reflect.ClassTag]] * @return domain event [[scala.reflect.ClassTag]] */ final override def domainEventClassTag: ClassTag[E] = ClassTag(domainEventClass) /** - * Domain event's [[Class]] + * Domain event's [[java.lang.Class]] * Used for identifying domain events during recovery */ def domainEventClass: Class[E] diff --git a/persistence/src/main/scala/org/apache/pekko/persistence/journal/AsyncRecovery.scala b/persistence/src/main/scala/org/apache/pekko/persistence/journal/AsyncRecovery.scala index 2ad5c3f8ce6..eecc43370d5 100644 --- a/persistence/src/main/scala/org/apache/pekko/persistence/journal/AsyncRecovery.scala +++ b/persistence/src/main/scala/org/apache/pekko/persistence/journal/AsyncRecovery.scala @@ -52,7 +52,7 @@ trait AsyncRecovery { * @param recoveryCallback called to replay a single message. 
Can be called from any * thread. * - * @see [[AsyncWriteJournal]] + * @see [[pekko.persistence.journal.AsyncWriteJournal]] */ def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( recoveryCallback: PersistentRepr => Unit): Future[Unit] diff --git a/persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreProvider.scala b/persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreProvider.scala index efcbced8b3b..b765645313d 100644 --- a/persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreProvider.scala +++ b/persistence/src/main/scala/org/apache/pekko/persistence/state/DurableStateStoreProvider.scala @@ -25,13 +25,13 @@ trait DurableStateStoreProvider { /** * The `ReadJournal` implementation for the Scala API. - * This corresponds to the instance that is returned by [[DurableStateStoreRegistry#durableStateStoreFor]]. + * This corresponds to the instance that is returned by [[org.apache.pekko.persistence.state.DurableStateStoreRegistry.durableStateStoreFor DurableStateStoreRegistry#durableStateStoreFor]]. */ def scaladslDurableStateStore(): scaladsl.DurableStateStore[Any] /** * The `DurableStateStore` implementation for the Java API. - * This corresponds to the instance that is returned by [[DurableStateStoreRegistry#getDurableStateStoreFor]]. + * This corresponds to the instance that is returned by [[org.apache.pekko.persistence.state.DurableStateStoreRegistry.getDurableStateStoreFor DurableStateStoreRegistry#getDurableStateStoreFor]]. 
*/ def javadslDurableStateStore(): javadsl.DurableStateStore[AnyRef] } diff --git a/stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala b/stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala index 2e4ea4e05ca..520c11cf69a 100644 --- a/stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala +++ b/stream-testkit/src/main/scala/org/apache/pekko/stream/testkit/StreamTestKit.scala @@ -491,14 +491,14 @@ object TestSubscriber { } /** - * Expect and return the signalled [[Throwable]]. + * Expect and return the signalled [[java.lang.Throwable]]. */ def expectError(): Throwable = probe.expectMsgType[OnError].cause /** * Fluent DSL * - * Expect given [[Throwable]]. + * Expect given [[java.lang.Throwable]]. */ def expectError(cause: Throwable): Self = { probe.expectMsg(OnError(cause)) @@ -510,7 +510,7 @@ object TestSubscriber { * * By default `1` demand will be signalled in order to wake up a possibly lazy upstream. * - * See also [[#expectSubscriptionAndError(Boolean)]] if no demand should be signalled. + * See also [[#expectSubscriptionAndError(signalDemand:Boolean)* #expectSubscriptionAndError(signalDemand: Boolean)]] if no demand should be signalled. */ def expectSubscriptionAndError(): Throwable = { expectSubscriptionAndError(true) @@ -522,7 +522,7 @@ object TestSubscriber { * Depending on the `signalDemand` parameter demand may be signalled immediately after obtaining the subscription * in order to wake up a possibly lazy upstream. You can disable this by setting the `signalDemand` parameter to `false`. * - * See also [[#expectSubscriptionAndError]]. + * See also [[#expectSubscriptionAndError()* #expectSubscriptionAndError()]]. */ def expectSubscriptionAndError(signalDemand: Boolean): Throwable = { val sub = expectSubscription() @@ -537,7 +537,7 @@ object TestSubscriber { * * By default `1` demand will be signalled in order to wake up a possibly lazy upstream. 
* - * See also [[#expectSubscriptionAndComplete(cause: Throwable, signalDemand: Boolean)]] if no demand should be signalled. + * See also [[#expectSubscriptionAndError(cause:Throwable,signalDemand:Boolean)* #expectSubscriptionAndError(cause: Throwable, signalDemand: Boolean)]] if no demand should be signalled. */ def expectSubscriptionAndError(cause: Throwable): Self = expectSubscriptionAndError(cause, signalDemand = true) @@ -548,7 +548,7 @@ object TestSubscriber { * Expect subscription followed by immediate stream completion. * By default `1` demand will be signalled in order to wake up a possibly lazy upstream * - * See also [[#expectSubscriptionAndError(cause: Throwable)]]. + * See also [[#expectSubscriptionAndError(cause:Throwable)* #expectSubscriptionAndError(cause: Throwable)]]. */ def expectSubscriptionAndError(cause: Throwable, signalDemand: Boolean): Self = { val sub = expectSubscription() @@ -563,7 +563,7 @@ object TestSubscriber { * Expect subscription followed by immediate stream completion. * By default `1` demand will be signalled in order to wake up a possibly lazy upstream * - * See also [[#expectSubscriptionAndComplete(signalDemand: Boolean)]] if no demand should be signalled. + * See also [[#expectSubscriptionAndComplete(signalDemand:Boolean)* #expectSubscriptionAndComplete(signalDemand: Boolean)]] if no demand should be signalled. */ def expectSubscriptionAndComplete(): Self = expectSubscriptionAndComplete(true) @@ -576,7 +576,7 @@ object TestSubscriber { * Depending on the `signalDemand` parameter demand may be signalled immediately after obtaining the subscription * in order to wake up a possibly lazy upstream. You can disable this by setting the `signalDemand` parameter to `false`. * - * See also [[#expectSubscriptionAndComplete]]. + * See also [[#expectSubscriptionAndComplete()* #expectSubscriptionAndComplete]]. 
*/ def expectSubscriptionAndComplete(signalDemand: Boolean): Self = { val sub = expectSubscription() diff --git a/stream-typed/src/main/scala/org/apache/pekko/stream/typed/javadsl/ActorSource.scala b/stream-typed/src/main/scala/org/apache/pekko/stream/typed/javadsl/ActorSource.scala index a387363564f..0bb36679313 100644 --- a/stream-typed/src/main/scala/org/apache/pekko/stream/typed/javadsl/ActorSource.scala +++ b/stream-typed/src/main/scala/org/apache/pekko/stream/typed/javadsl/ActorSource.scala @@ -87,7 +87,7 @@ object ActorSource { * if the stream is to drained before completion or should complete immediately. * * A message that is matched by `failureMatcher` fails the stream. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). * @@ -126,7 +126,7 @@ object ActorSource { * The stream will complete with failure if a message is sent before the acknowledgement has been replied back. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). 
* diff --git a/stream-typed/src/main/scala/org/apache/pekko/stream/typed/scaladsl/ActorSource.scala b/stream-typed/src/main/scala/org/apache/pekko/stream/typed/scaladsl/ActorSource.scala index 394ab120630..b86083b7b02 100644 --- a/stream-typed/src/main/scala/org/apache/pekko/stream/typed/scaladsl/ActorSource.scala +++ b/stream-typed/src/main/scala/org/apache/pekko/stream/typed/scaladsl/ActorSource.scala @@ -44,7 +44,7 @@ object ActorSource { * completion. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). * @@ -79,7 +79,7 @@ object ActorSource { * if the stream is to drained before completion or should complete immediately. * * A message that is matched by `failureMatcher` fails the stream. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). 
* @@ -91,7 +91,7 @@ object ActorSource { * @param completionMatcher a partial function applied to the messages received materialized actor, * a matching message will complete the stream with the return [[CompletionStrategy]] * @param failureMatcher a partial function applied to the messages received materialized actor, - * a matching message will fail the stream with the returned [[Throwable]] + * a matching message will fail the stream with the returned [[java.lang.Throwable]] */ def actorRefWithBackpressure[T, Ack]( ackTo: ActorRef[Ack], @@ -113,7 +113,7 @@ object ActorSource { * The stream will complete with failure if a message is sent before the acknowledgement has been replied back. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). * diff --git a/stream/src/main/scala/org/apache/pekko/stream/Graph.scala b/stream/src/main/scala/org/apache/pekko/stream/Graph.scala index 37db675507a..f6cb2aca97e 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/Graph.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/Graph.scala @@ -53,7 +53,7 @@ trait Graph[+S <: Shape, +M] { /** * Specifies the name of the Graph. - * If the name is null or empty the name is ignored, i.e. [[#none]] is returned. + * If the name is null or empty the name is ignored, i.e. [[Attributes.none]] is returned. 
*/ def named(name: String): Graph[S, M] = addAttributes(Attributes.name(name)) diff --git a/stream/src/main/scala/org/apache/pekko/stream/IOResult.scala b/stream/src/main/scala/org/apache/pekko/stream/IOResult.scala index 43193a8fa57..71d48c8e768 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/IOResult.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/IOResult.scala @@ -49,7 +49,7 @@ final case class IOResult( def wasSuccessful: Boolean = status.isSuccess /** - * Java API: If the IO operation resulted in an error, returns the corresponding [[Throwable]] + * Java API: If the IO operation resulted in an error, returns the corresponding [[java.lang.Throwable]] * or throws [[UnsupportedOperationException]] otherwise. */ @deprecated("status is always set to Success(Done)", "Akka 2.6.0") diff --git a/stream/src/main/scala/org/apache/pekko/stream/Materializer.scala b/stream/src/main/scala/org/apache/pekko/stream/Materializer.scala index 47a9e1cca52..1ca121276b8 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/Materializer.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/Materializer.scala @@ -100,7 +100,7 @@ abstract class Materializer { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * supported by the `Scheduler`. * * @return A [[pekko.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event @@ -136,7 +136,7 @@ abstract class Materializer { * If the `Runnable` throws an exception the repeated scheduling is aborted, * i.e. the function will not be invoked any more. * - * @throws IllegalArgumentException if the given delays exceed the maximum + * @throws java.lang.IllegalArgumentException if the given delays exceed the maximum * supported by the `Scheduler`. 
* * @return A [[pekko.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event diff --git a/stream/src/main/scala/org/apache/pekko/stream/StreamRefs.scala b/stream/src/main/scala/org/apache/pekko/stream/StreamRefs.scala index 13364388959..a5c56274857 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/StreamRefs.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/StreamRefs.scala @@ -32,12 +32,12 @@ import pekko.stream.scaladsl.{ Sink, Source } */ object SinkRef { - /** Implicitly converts a [[SinkRef]] to a [[Sink]]. The same can be achieved by calling `.sink` on the reference. */ + /** Implicitly converts a [[SinkRef]] to a [[scaladsl.Sink]]. The same can be achieved by calling `.sink` on the reference. */ implicit def convertRefToSink[T](sinkRef: SinkRef[T]): Sink[T, NotUsed] = sinkRef.sink() } /** - * A [[SinkRef]] allows sharing a "reference" to a [[Sink]] with others, with the main purpose of crossing a network boundary. + * A [[SinkRef]] allows sharing a "reference" to a [[scaladsl.Sink]] with others, with the main purpose of crossing a network boundary. * Usually obtaining a SinkRef would be done via Actor messaging, in which one system asks a remote one, * to accept some data from it, and the remote one decides to accept the request to send data in a back-pressured * streaming fashion -- using a sink ref. @@ -54,7 +54,7 @@ object SinkRef { @DoNotInherit trait SinkRef[In] { - /** Scala API: Get [[Sink]] underlying to this source ref. */ + /** Scala API: Get [[scaladsl.Sink]] underlying to this source ref. */ def sink(): Sink[In, NotUsed] /** Java API: Get [[javadsl.Sink]] underlying to this source ref. 
*/ diff --git a/stream/src/main/scala/org/apache/pekko/stream/impl/JsonObjectParser.scala b/stream/src/main/scala/org/apache/pekko/stream/impl/JsonObjectParser.scala index 4b87c356bd0..860ecc2c4b0 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/impl/JsonObjectParser.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/impl/JsonObjectParser.scala @@ -50,7 +50,7 @@ import pekko.util.ByteString /** * INTERNAL API: Use [[pekko.stream.scaladsl.JsonFraming]] instead. * - * **Mutable** framing implementation that given any number of [[ByteString]] chunks, can emit JSON objects contained within them. + * **Mutable** framing implementation that given any number of [[pekko.util.ByteString]] chunks, can emit JSON objects contained within them. * Typically JSON objects are separated by new-lines or commas, however a top-level JSON Array can also be understood and chunked up * into valid JSON objects by this framing implementation. * diff --git a/stream/src/main/scala/org/apache/pekko/stream/javadsl/Compression.scala b/stream/src/main/scala/org/apache/pekko/stream/javadsl/Compression.scala index 2527059fab2..8821e0bba41 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/javadsl/Compression.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/javadsl/Compression.scala @@ -23,7 +23,7 @@ object Compression { /** * Creates a Flow that decompresses gzip-compressed stream of data. * - * @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk. + * @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk. */ def gunzip(maxBytesPerChunk: Int): Flow[ByteString, ByteString, NotUsed] = scaladsl.Compression.gunzip(maxBytesPerChunk).asJava @@ -31,7 +31,7 @@ object Compression { /** * Creates a Flow that decompresses deflate-compressed stream of data. * - * @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk. + * @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk. 
*/ def inflate(maxBytesPerChunk: Int): Flow[ByteString, ByteString, NotUsed] = inflate(maxBytesPerChunk, false) @@ -39,7 +39,7 @@ object Compression { /** * Same as [[inflate]] with configurable maximum output length and nowrap * - * @param maxBytesPerChunk Maximum length of the output [[ByteString]] chunk. + * @param maxBytesPerChunk Maximum length of the output [[pekko.util.ByteString]] chunk. * @param nowrap if true then use GZIP compatible decompression */ def inflate(maxBytesPerChunk: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] = @@ -47,7 +47,7 @@ object Compression { /** * Creates a flow that gzip-compresses a stream of ByteStrings. Note that the compressor - * will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]] + * will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]] * coming out of the flow can be fully decompressed without waiting for additional data. This may * come at a compression performance cost for very small chunks. */ @@ -64,7 +64,7 @@ object Compression { /** * Creates a flow that deflate-compresses a stream of ByteString. Note that the compressor - * will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]] + * will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]] * coming out of the flow can be fully decompressed without waiting for additional data. This may * come at a compression performance cost for very small chunks. 
*/ diff --git a/stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala b/stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala index bd0bf4cbf63..b76a4067189 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/javadsl/FileIO.scala @@ -31,7 +31,7 @@ import pekko.stream.scaladsl.SourceToCompletionStage object FileIO { /** - * Creates a Sink that writes incoming [[ByteString]] elements to the given file. + * Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file. * Overwrites existing files by truncating their contents, if you want to append to an existing file use * [[toFile(File, util.Set[OpenOption])]] with [[java.nio.file.StandardOpenOption.APPEND]]. * @@ -47,7 +47,7 @@ object FileIO { def toFile(f: File): javadsl.Sink[ByteString, CompletionStage[IOResult]] = toPath(f.toPath) /** - * Creates a Sink that writes incoming [[ByteString]] elements to the given file path. + * Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path. * Overwrites existing files by truncating their contents, if you want to append to an existing file * [[toPath(Path, util.Set[OpenOption])]] with [[java.nio.file.StandardOpenOption.APPEND]]. * @@ -69,7 +69,7 @@ object FileIO { new Sink(scaladsl.FileIO.toPath(f).toCompletionStage()) /** - * Creates a Sink that writes incoming [[ByteString]] elements to the given file. + * Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file. * * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. @@ -85,7 +85,7 @@ object FileIO { toPath(f.toPath) /** - * Creates a Sink that writes incoming [[ByteString]] elements to the given file path. 
+ * Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path. * * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. @@ -106,7 +106,7 @@ object FileIO { new Sink(scaladsl.FileIO.toPath(f, options.asScala.toSet).toCompletionStage()) /** - * Creates a Sink that writes incoming [[ByteString]] elements to the given file path. + * Creates a Sink that writes incoming [[pekko.util.ByteString]] elements to the given file path. * * Materializes a [[java.util.concurrent.CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. @@ -132,7 +132,7 @@ object FileIO { /** * Creates a Source from a files contents. - * Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes, + * Emitted elements are [[pekko.util.ByteString]] elements, chunked by default by 8192 bytes, * except the last element, which will be up to 8192 in size. * * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or @@ -149,7 +149,7 @@ object FileIO { /** * Creates a Source from a files contents. - * Emitted elements are [[ByteString]] elements, chunked by default by 8192 bytes, + * Emitted elements are [[pekko.util.ByteString]] elements, chunked by default by 8192 bytes, * except the last element, which will be up to 8192 in size. * * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or @@ -165,7 +165,7 @@ object FileIO { /** * Creates a synchronous Source from a files contents. 
- * Emitted elements are `chunkSize` sized [[ByteString]] elements, + * Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements, * except the last element, which will be up to `chunkSize` in size. * * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or @@ -184,7 +184,7 @@ object FileIO { /** * Creates a synchronous Source from a files contents. - * Emitted elements are `chunkSize` sized [[ByteString]] elements, + * Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements, * except the last element, which will be up to `chunkSize` in size. * * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or @@ -202,7 +202,7 @@ object FileIO { /** * Creates a synchronous Source from a files contents. - * Emitted elements are `chunkSize` sized [[ByteString]] elements, + * Emitted elements are `chunkSize` sized [[pekko.util.ByteString]] elements, * except the last element, which will be up to `chunkSize` in size. * * You can configure the default dispatcher for this Source by changing the `pekko.stream.materializer.blocking-io-dispatcher` or diff --git a/stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala b/stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala index 416652e5b71..b87c5399c74 100755 --- a/stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/javadsl/Source.scala @@ -432,7 +432,7 @@ object Source { * completion. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. 
In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). * @@ -528,7 +528,7 @@ object Source { * The stream will complete with failure if a message is sent before the acknowledgement has been replied back. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). * @@ -562,7 +562,7 @@ object Source { * The stream will complete with failure if a message is sent before the acknowledgement has been replied back. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). 
* diff --git a/stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala b/stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala index dc14d489de4..6ef3945d666 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/javadsl/StreamConverters.scala @@ -36,7 +36,7 @@ import pekko.stream.scaladsl.SourceToCompletionStage object StreamConverters { /** - * Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function. + * Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function. * * Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. @@ -55,7 +55,7 @@ object StreamConverters { fromOutputStream(f, autoFlush = false) /** - * Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function. + * Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function. * * Materializes a [[CompletionStage]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. diff --git a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Compression.scala b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Compression.scala index 81f26774d1a..b219c069fcf 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Compression.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Compression.scala @@ -25,7 +25,7 @@ object Compression { /** * Creates a flow that gzip-compresses a stream of ByteStrings. 
Note that the compressor - * will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]] + * will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]] * coming out of the flow can be fully decompressed without waiting for additional data. This may * come at a compression performance cost for very small chunks. * @@ -44,14 +44,14 @@ object Compression { /** * Creates a Flow that decompresses a gzip-compressed stream of data. * - * @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk. + * @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk. */ def gunzip(maxBytesPerChunk: Int = MaxBytesPerChunkDefault): Flow[ByteString, ByteString, NotUsed] = Flow[ByteString].via(new GzipDecompressor(maxBytesPerChunk)).named("gunzip") /** * Creates a flow that deflate-compresses a stream of ByteString. Note that the compressor - * will SYNC_FLUSH after every [[ByteString]] so that it is guaranteed that every [[ByteString]] + * will SYNC_FLUSH after every [[pekko.util.ByteString]] so that it is guaranteed that every [[pekko.util.ByteString]] * coming out of the flow can be fully decompressed without waiting for additional data. This may * come at a compression performance cost for very small chunks. * @@ -71,7 +71,7 @@ object Compression { /** * Creates a Flow that decompresses a deflate-compressed stream of data. * - * @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk. + * @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk. */ def inflate(maxBytesPerChunk: Int = MaxBytesPerChunkDefault): Flow[ByteString, ByteString, NotUsed] = inflate(maxBytesPerChunk, false) @@ -79,7 +79,7 @@ object Compression { /** * Creates a Flow that decompresses a deflate-compressed stream of data. * - * @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk. 
+ * @param maxBytesPerChunk Maximum length of an output [[pekko.util.ByteString]] chunk. * @param nowrap if true then use GZIP compatible decompression */ def inflate(maxBytesPerChunk: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] = diff --git a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala index a12b1525647..7aa24b65bb9 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/FileIO.scala @@ -87,7 +87,7 @@ object FileIO { Source.fromGraph(new FileSource(f, chunkSize, startPosition)).withAttributes(DefaultAttributes.fileSource) /** - * Creates a Sink which writes incoming [[ByteString]] elements to the given file. Overwrites existing files + * Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file. Overwrites existing files * by truncating their contents as default. * * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, @@ -106,7 +106,7 @@ object FileIO { toPath(f.toPath, options) /** - * Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files + * Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file path. Overwrites existing files * by truncating their contents as default. * * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, @@ -130,7 +130,7 @@ object FileIO { toPath(f, options, startPosition = 0) /** - * Creates a Sink which writes incoming [[ByteString]] elements to the given file path. Overwrites existing files + * Creates a Sink which writes incoming [[pekko.util.ByteString]] elements to the given file path. Overwrites existing files * by truncating their contents as default. 
* * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, diff --git a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/JsonFraming.scala b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/JsonFraming.scala index 2d4534a8236..931daad04c4 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/JsonFraming.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/JsonFraming.scala @@ -24,7 +24,7 @@ import pekko.util.ByteString import scala.util.control.NonFatal -/** Provides JSON framing operators that can separate valid JSON objects from incoming [[ByteString]] objects. */ +/** Provides JSON framing operators that can separate valid JSON objects from incoming [[pekko.util.ByteString]] objects. */ object JsonFraming { /** Thrown if upstream completes with a partial object in the buffer. */ diff --git a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Sink.scala b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Sink.scala index e4666dfd8b8..7e379a350be 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Sink.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Sink.scala @@ -219,7 +219,7 @@ object Sink { /** * A `Sink` that materializes into a `Future` of the optional first value received. - * If the stream completes before signaling at least a single element, the value of the Future will be [[None]]. + * If the stream completes before signaling at least a single element, the value of the Future will be [[scala.None]]. * If the stream signals an error errors before signaling at least a single element, the Future will be failed with the streams exception. * * See also [[head]]. @@ -243,7 +243,7 @@ object Sink { /** * A `Sink` that materializes into a `Future` of the optional last value received. - * If the stream completes before signaling at least a single element, the value of the Future will be [[None]]. 
+ * If the stream completes before signaling at least a single element, the value of the Future will be [[scala.None]]. * If the stream signals an error, the Future will be failed with the stream's exception. * * See also [[last]], [[takeLast]]. diff --git a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala index 0c55691475a..8635a42ad01 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/Source.scala @@ -648,7 +648,7 @@ object Source { * completion. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). * @@ -738,7 +738,7 @@ object Source { * The stream will complete with failure if a message is sent before the acknowledgement has been replied back. * * The stream can be completed with failure by sending a message that is matched by `failureMatcher`. The extracted - * [[Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received + * [[java.lang.Throwable]] will be used to fail the stream. In case the Actor is still draining its internal buffer (after having received * a message matched by `completionMatcher`) before signaling completion and it receives a message matched by `failureMatcher`, * the failure will be signaled downstream immediately (instead of the completion signal). 
* diff --git a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala index c733fdf8b28..7b1a743889a 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/StreamConverters.scala @@ -74,7 +74,7 @@ object StreamConverters { Source.fromGraph(new OutputStreamSourceStage(writeTimeout)) /** - * Creates a Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function. + * Creates a Sink which writes incoming [[pekko.util.ByteString]]s to an [[OutputStream]] created by the given function. * * Materializes a [[Future]] of [[IOResult]] that will be completed with the size of the file (in bytes) at the streams completion, * and a possible exception if IO operation was not completed successfully. diff --git a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/package.scala b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/package.scala index a01dac9cccc..b1dac9ce36c 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/scaladsl/package.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/scaladsl/package.scala @@ -53,7 +53,7 @@ import scala.concurrent.Future * elements a given transformation step might buffer before handing elements * downstream, which means that transformation functions may be invoked more * often than for corresponding transformations on strict collections like - * [[List]]. *An important consequence* is that elements that were produced + * [[scala.collection.immutable.List]]. *An important consequence* is that elements that were produced * into a stream may be discarded by later processors, e.g. when using the * [[#take]] operator. 
* diff --git a/stream/src/main/scala/org/apache/pekko/stream/stage/GraphStage.scala b/stream/src/main/scala/org/apache/pekko/stream/stage/GraphStage.scala index dc0f51187a6..b18630843fc 100644 --- a/stream/src/main/scala/org/apache/pekko/stream/stage/GraphStage.scala +++ b/stream/src/main/scala/org/apache/pekko/stream/stage/GraphStage.scala @@ -203,7 +203,7 @@ object GraphStageLogic { /** * Minimal actor to work with other actors and watch them in a synchronous ways * - * Not for user instantiation, use [[#getStageActor]]. + * Not for user instantiation, use [[GraphStageLogic.getStageActor]]. */ final class StageActor @InternalApi() private[pekko] ( materializer: Materializer, @@ -1158,9 +1158,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: /** * Obtain a callback object that can be used asynchronously to re-enter the - * current [[GraphStage]] with an asynchronous notification. The [[invoke]] method of the returned + * current [[GraphStage]] with an asynchronous notification. The [[AsyncCallback.invoke]] method of the returned * [[AsyncCallback]] is safe to be called from other threads. It will in the background thread-safely - * delegate to the passed callback function. I.e. [[invoke]] will be called by other thread and + * delegate to the passed callback function. I.e. [[AsyncCallback.invoke]] will be called by other thread and * the passed handler will be invoked eventually in a thread-safe way by the execution environment. * * In case stream is not yet materialized [[AsyncCallback]] will buffer events until stream is available. @@ -1268,9 +1268,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: /** * Java API: Obtain a callback object that can be used asynchronously to re-enter the - * current [[GraphStage]] with an asynchronous notification. The [[invoke]] method of the returned + * current [[GraphStage]] with an asynchronous notification. 
The [[AsyncCallback.invoke]] method of the returned * [[AsyncCallback]] is safe to be called from other threads. It will in the background thread-safely - * delegate to the passed callback function. I.e. [[invoke]] will be called by other thread and + * delegate to the passed callback function. I.e. [[AsyncCallback.invoke]] will be called by other thread and * the passed handler will be invoked eventually in a thread-safe way by the execution environment. * * [[AsyncCallback.invokeWithFeedback]] has an internal promise that will be failed if event cannot be processed due to stream completion. @@ -1310,18 +1310,18 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: _subInletsAndOutlets -= outlet /** - * Initialize a [[StageActorRef]] which can be used to interact with from the outside world "as-if" an [[Actor]]. + * Initialize a [[GraphStageLogic.StageActorRef]] which can be used to interact with from the outside world "as-if" an [[pekko.actor.Actor]]. * The messages are looped through the [[getAsyncCallback]] mechanism of [[GraphStage]] so they are safe to modify * internal state of this operator. * * This method must (the earliest) be called after the [[GraphStageLogic]] constructor has finished running, * for example from the [[preStart]] callback the graph operator logic provides. * - * Created [[StageActorRef]] to get messages and watch other actors in synchronous way. + * Created [[GraphStageLogic.StageActorRef]] to get messages and watch other actors in synchronous way. * - * The [[StageActorRef]]'s lifecycle is bound to the operator, in other words when the operator is finished, - * the Actor will be terminated as well. The entity backing the [[StageActorRef]] is not a real Actor, - * but the [[GraphStageLogic]] itself, therefore it does not react to [[PoisonPill]]. 
+ * The [[GraphStageLogic.StageActorRef]]'s lifecycle is bound to the operator, in other words when the operator is finished, + * the Actor will be terminated as well. The entity backing the [[GraphStageLogic.StageActorRef]] is not a real Actor, + * but the [[GraphStageLogic]] itself, therefore it does not react to [[pekko.actor.PoisonPill]]. * * To be thread safe this method must only be called from either the constructor of the graph operator during * materialization or one of the methods invoked by the graph operator machinery, such as `onPush` and `onPull`. diff --git a/testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala b/testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala index 97307a19cd9..1b43f585011 100644 --- a/testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala +++ b/testkit/src/main/scala/org/apache/pekko/testkit/TestKit.scala @@ -255,7 +255,7 @@ trait TestKitBase { /** * Obtain time remaining for execution of the innermost enclosing `within` - * block or throw an [[AssertionError]] if no `within` block surrounds this + * block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this * call. */ def remaining: FiniteDuration = end match { diff --git a/testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala b/testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala index 096fd838da7..b295655bd37 100644 --- a/testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala +++ b/testkit/src/main/scala/org/apache/pekko/testkit/javadsl/TestKit.scala @@ -149,7 +149,7 @@ class TestKit(system: ActorSystem) { /** * Obtain time remaining for execution of the innermost enclosing `within` - * block or throw an [[AssertionError]] if no `within` block surrounds this + * block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this * call. 
*/ @deprecated("Use getRemaining which returns java.time.Duration instead.", since = "Akka 2.5.12") @@ -157,7 +157,7 @@ class TestKit(system: ActorSystem) { /** * Obtain time remaining for execution of the innermost enclosing `within` - * block or throw an [[AssertionError]] if no `within` block surrounds this + * block or throw an [[java.lang.AssertionError]] if no `within` block surrounds this * call. */ def getRemaining: java.time.Duration = tp.remaining.asJava