From 2feb13dee29bcac38049735b4c5f1e1e9ebae863 Mon Sep 17 00:00:00 2001 From: pablf Date: Sat, 19 Oct 2024 18:05:38 +0200 Subject: [PATCH 01/23] add rate limiter --- .../scala/ox/resilience/RateLimiter.scala | 73 ++ .../ox/resilience/RateLimiterConfig.scala | 290 +++++++ core/src/main/scala/ox/unsupervised.scala | 10 +- .../scala/ox/resilience/RateLimiterTest.scala | 714 ++++++++++++++++++ 4 files changed, 1082 insertions(+), 5 deletions(-) create mode 100644 core/src/main/scala/ox/resilience/RateLimiter.scala create mode 100644 core/src/main/scala/ox/resilience/RateLimiterConfig.scala create mode 100644 core/src/test/scala/ox/resilience/RateLimiterTest.scala diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala new file mode 100644 index 00000000..d0f5a902 --- /dev/null +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -0,0 +1,73 @@ +package ox.resilience + +import scala.concurrent.duration.* +import RateLimiterConfig.* + +/** Configurable rate limiter + */ +case class RateLimiter( + config: RateLimiterConfig +): + /** Limits the rate of execution of the given operation + */ + def apply[T](operation: => T): Option[T] = + if config.blockingPolicy.isUnblocked then + if config.algorithm.isUnblocked then + if config.isReady then + config.acceptOperation + val result = operation + Some(result) + else + config.algorithm.rejectOperation + config.block(operation) + else config.block(operation) + else config.block(operation) +end RateLimiter + +object RateLimiter: + + def leakyBucket( + capacity: Int, + leakInterval: FiniteDuration, + blocks: Boolean = true + ): RateLimiter = + val algorithm = RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval) + val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) + val config = RateLimiterConfig(blockingPolicy, algorithm) + RateLimiter(config) + end leakyBucket + + def tokenBucket( + maxTokens: Int, + refillInterval: FiniteDuration, + blocks: Boolean = true + ): RateLimiter = + val algorithm = RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval) + val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) + val config = RateLimiterConfig(blockingPolicy, algorithm) + RateLimiter(config) + end tokenBucket + + def fixedRate( + maxRequests: Int, + windowSize: FiniteDuration, + blocks: Boolean = true + ): RateLimiter = + val algorithm = RateLimiterAlgorithm.FixedRate(maxRequests, windowSize) + val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) + val config = RateLimiterConfig(blockingPolicy, algorithm) + RateLimiter(config) + end fixedRate + + def slidingWindow( + maxRequests: Int, + windowSize: FiniteDuration, + blocks: Boolean = true + ): RateLimiter = + val algorithm = RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize) + val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) + val config = RateLimiterConfig(blockingPolicy, algorithm) + RateLimiter(config) + end slidingWindow + +end RateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiterConfig.scala b/core/src/main/scala/ox/resilience/RateLimiterConfig.scala new file mode 100644 index 00000000..9c9f7ab8 --- /dev/null +++ b/core/src/main/scala/ox/resilience/RateLimiterConfig.scala @@ -0,0 +1,290 @@ +package ox.resilience + +import ox.* +import ox.resilience.RateLimiterConfig.* +import scala.concurrent.duration.* +import ox.scheduling.* +import java.util.concurrent.atomic.{AtomicInteger, AtomicBoolean, AtomicLong} +import scala.concurrent.ExecutionContext.Implicits.global 
+import scala.concurrent.* +import scala.util.{Try, Success, Failure} +import javax.swing.text.html.HTML.Tag +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.locks.ReentrantLock + +/** Configuration for a rate limiter + * + * @param blockingPolicy + * blocking policy to apply when the rate limiter is full + * @param algorithm + * algorithm to use for the rate limiter + */ +final case class RateLimiterConfig( + blockingPolicy: BlockingPolicy, + algorithm: RateLimiterAlgorithm +): + def isUnblocked: Boolean = + algorithm.isUnblocked && blockingPolicy.isUnblocked + + def isReady: Boolean = + algorithm.isReady + + def acceptOperation: Unit = + algorithm.acceptOperation + + def block[T](operation: => T): Option[T] = + blockingPolicy.block(algorithm, operation) +end RateLimiterConfig + +object RateLimiterConfig: + /** Determines the policy to apply when the rate limiter is full + */ + trait BlockingPolicy: + /** This method is called when a new operation can't be readily accepted by the rate limiter. Return None for discarded operations, or + * Some(result) for result of operations after unblocking. Implementations should execute the operation only if the algorithm and the + * BlockingPolicy are both unblocked and they are responsible for checking when the algorithm is ready to accept a new operation, + * unblocking it and updating its internal state. + */ + def block[T](algorithm: RateLimiterAlgorithm, operation: => T): Option[T] + + /** Returns whether a new operation will be the first one to be passed to the RateLimiterAlgorithm after unblocking + */ + def isUnblocked: Boolean + end BlockingPolicy + + object BlockingPolicy: + + def apply(blocks: Boolean): BlockingPolicy = + if blocks then Block() else Drop() + + /** Block rejected operations until the rate limiter is ready to accept them + */ + case class Block() extends BlockingPolicy: + + def isUnblocked: Boolean = + block.peek() == null + + val block = new ConcurrentLinkedQueue[Promise[Unit]]() + + def block[T](algorithm: RateLimiterAlgorithm, operation: => T): Option[T] = + // blocks until it can accept current operation and returns next time it will be unblocked + blockUntilReady(algorithm, Duration.Inf) + + // updating internal state of algorithm + algorithm.tryUnblock + algorithm.acceptOperation + block.poll() + + // fulfilling next promise in queue after waiting time given by algorithm + fulfillNextPromise(algorithm, FiniteDuration(algorithm.getNextTime(), "nanoseconds")) + + val result = operation + Some(result) + end block + + private def blockUntilReady(algorithm: RateLimiterAlgorithm, timeout: Duration): Unit = + // creates a promise for the current operation and waits until fulfilled + val waitTime = + if block.peek() == null then Some((algorithm.getNextTime())) + else None + val promise = Promise[Unit]() + + block.add(promise) + val future = promise.future + // if it's not the first promise, it will be fulfilled later + waitTime.map { wt => + fulfillNextPromise(algorithm, FiniteDuration(wt, "nanoseconds")) + } + + Await.ready(future, timeout) + end blockUntilReady + + private def fulfillNextPromise(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = + // sleeps waitTime and fulfills next promise in queue + if block.peek() != null then + val p = block.peek() + if waitTime.toNanos != 0 then + Future { + val wt1 = waitTime.toMillis + val wt2 = waitTime.toNanos - wt1 * 1000000 + blocking(Thread.sleep(wt1, wt2.toInt)) + }.onComplete { _ 
=> + p.success(()) + } + else p.success(()) + end if + end Block + + /** Drop rejected operations + */ + case class Drop() extends BlockingPolicy: + def isUnblocked: Boolean = true + def block[T](algorithm: RateLimiterAlgorithm, operation: => T): Option[T] = + if algorithm.tryUnblock && algorithm.isReady then + algorithm.acceptOperation + val result = operation + Some(result) + else None + end Drop + end BlockingPolicy + + /** Determines the algorithm to use for the rate limiter + */ + trait RateLimiterAlgorithm: + + val blocked = new AtomicBoolean(false) + def isUnblocked: Boolean = !blocked.get() || tryUnblock + + /** Update internal state to check whether the algorithm can be unblocked. + */ + def tryUnblock: Boolean + + /** Determines if the operation can be accepted. Implementations should update internal state only to determine if the operation can be + * accepted, e.g., updating availability after time elapsed. `acceptOperation` and `rejectOperation` are used for updating internal + * state after accepting or rejecting an operation. + */ + def isReady: Boolean + + /** Modifies internal state to mark that an operation has been accepted. + */ + def acceptOperation: Unit + + /** Modifies internal state to mark that an operation has been rejected. + */ + def rejectOperation: Unit + + /** Returns the time until the next operation can be accepted to be used by the BlockingPolicy + */ + def getNextTime(): Long + end RateLimiterAlgorithm + + object RateLimiterAlgorithm: + /** Fixed rate algorithm + */ + case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: + private val counter = new AtomicInteger(0) + private lazy val lastUpdate = new AtomicLong(System.nanoTime()) + + def tryUnblock: Boolean = + if lastUpdate.get() + per.toNanos < System.nanoTime() then + reset + true + else false + + def isReady: Boolean = + lastUpdate.get() + counter.get() < rate + + def rejectOperation: Unit = + blocked.set(true) + + def acceptOperation: Unit = + counter.incrementAndGet() + + def getNextTime(): Long = + if isReady then 0 + else lastUpdate.get() + per.toNanos - System.nanoTime() + + private def reset: Unit = + lastUpdate.set(System.nanoTime()) + counter.set(0) + blocked.set(false) + end FixedRate + + /** Sliding window algorithm + */ + case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: + private val counter = new AtomicInteger(0) + private val log = new ConcurrentLinkedQueue[Long]() + + def tryUnblock: Boolean = + val now = System.nanoTime() + while counter.get() > 0 && log.peek() < now - per.toNanos do + log.poll() + counter.decrementAndGet() + isReady + + def isReady: Boolean = + counter.get() < rate + + def rejectOperation: Unit = () + + def acceptOperation: Unit = + val now = System.nanoTime() + log.add(now) + counter.incrementAndGet() + + def getNextTime(): Long = + if isReady then 0 + else log.peek() + per.toNanos - System.nanoTime() + end SlidingWindow + + /** Token bucket algorithm + */ + case class TokenBucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: + private val maxTokens = rate + private val refillInterval = per.toNanos + private val tokens = new AtomicInteger(1) + private val lastRefillTime = new AtomicLong(System.nanoTime()) + + def tryUnblock: Boolean = + isReady || refillTokens > 0 + + def isReady: Boolean = + tokens.get() > 0 + + def rejectOperation: Unit = () + + def acceptOperation: Unit = + tokens.decrementAndGet() + + private def refillTokens: Int = + val now = System.nanoTime() + val elapsed = now 
- lastRefillTime.get() + val newTokens = elapsed / refillInterval + tokens.set(Math.min(tokens.get() + newTokens.toInt, maxTokens)) + lastRefillTime.set(newTokens * refillInterval + lastRefillTime.get()) + newTokens.toInt + + def getNextTime(): Long = + if isReady then 0 + else lastRefillTime.get() + refillInterval - System.nanoTime() + + /** Leaky bucket algorithm + */ + end TokenBucket + case class LeakyBucket(capacity: Int, leakRate: FiniteDuration) extends RateLimiterAlgorithm: + private val counter = new AtomicReference[Double](0.0) + private val leakInterval = leakRate.toNanos + private val lastLeakTime = new AtomicLong(System.nanoTime()) + + def tryUnblock: Boolean = + val leaking = leak + isReady || leaking > 0.0 + + def isReady: Boolean = + counter.get() <= capacity - 1.0 + + def rejectOperation: Unit = () + + def acceptOperation: Unit = + counter.getAndUpdate(_ + 1.0) + + private def leak: Double = + val now = System.nanoTime() + val lastLeak = lastLeakTime.get() + val elapsed = now - lastLeak + val leaking: Double = (elapsed.toDouble / leakInterval.toDouble) + counter.set(Math.max(counter.get() - leaking, 0)) + lastLeakTime.set(now) // lastLeak + leaking * leakInterval) + leaking + end leak + + def getNextTime(): Long = + if isReady then 0 + else lastLeakTime.get() + leakInterval - System.nanoTime() + end LeakyBucket + end RateLimiterAlgorithm + +end RateLimiterConfig diff --git a/core/src/main/scala/ox/unsupervised.scala b/core/src/main/scala/ox/unsupervised.scala index 5086ed47..e97a75d0 100644 --- a/core/src/main/scala/ox/unsupervised.scala +++ b/core/src/main/scala/ox/unsupervised.scala @@ -53,11 +53,11 @@ private[ox] def scopedWithCapability[T](capability: Ox)(f: Ox ?=> T): T = try val t = try - try f(using capability) - finally - scope.shutdown() - scope.join().discard - // join might have been interrupted + try f(using capability) + finally + scope.shutdown() + scope.join().discard + // join might have been interrupted finally scope.close() // running the finalizers only once we are sure that all child threads have been terminated, so that no new diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala new file mode 100644 index 00000000..1116c97d --- /dev/null +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -0,0 +1,714 @@ +package ox.resilience + +import ox.* +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.{EitherValues, TryValues} +import ox.util.ElapsedTime +import scala.concurrent.duration._ + +class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: + + behavior of "fixed rate RateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + + it should "restart rate limiter after given duration" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + 
val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val before = System.nanoTime() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "block operation when rate limit is exceeded" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val before = System.currentTimeMillis() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + val after = System.currentTimeMillis() + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe Some(0) + (after-before) should be >= 1000L + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result6 = operationN(6) + val result7 = operationN(7) // blocks until 3 seconds elapsed + val time4 = System.currentTimeMillis() + val result8 = operationN(8) + val result9 = operationN(9) // blocks until 4 seconds elapsed + val time5 = System.currentTimeMillis() + + + result1 shouldBe Some(1) + result2 shouldBe Some(2) + result3 shouldBe Some(3) + result4 shouldBe Some(4) + result5 shouldBe Some(5) + result6 shouldBe Some(6) + result7 shouldBe Some(7) + result8 shouldBe Some(8) + result9 shouldBe Some(9) + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time4-time1) should be >= 3000L - 10 + (time5-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + (time4-time1) should be <= 3200L + (time5-time1) should be <= 4200L + order should be (List(9, 8,7,6,5,4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + var time3 = 0L + var time4 = 0L + + + val time1 = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + forkUser: + sleep(200.millis) + operationN(5) + time3 = System.currentTimeMillis + 
forkUser: + sleep(250.millis) + operationN(6) + forkUser: + sleep(300.millis) + operationN(7) + time4 = System.currentTimeMillis + forkUser: + sleep(350.millis) + operationN(8) + forkUser: + sleep(400.millis) + operationN(9) + } + val time5 = System.currentTimeMillis() + + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time4-time1) should be >= 3000L - 10 + (time5-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + (time4-time1) should be <= 3200L + (time5-time1) should be <= 4200L + order should be (List(9, 8,7,6,5,4, 3,2,1)) + } + + + + behavior of "sliding window RateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + + it should "restart rate limiter after given duration" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "block operation when rate limit is exceeded" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var executions = 0 + def operation = { + + executions +=1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe Some(0) + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + ox.sleep(500.millis) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed + + + result1 shouldBe Some(1) + result2 shouldBe Some(2) + result3 shouldBe Some(3) + result4 shouldBe Some(4) + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 1500L - 10 + (time2-time1) should be <= 1200L + 
(time3-time1) should be <= 1700L + order should be (List(4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + var time3 = 0L + + + val time1 = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(300.millis) + operationN(2) + forkUser: + sleep(400.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(700.millis) + operationN(4) + time3 = System.currentTimeMillis + } + + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 1300L - 10 + (time2-time1) should be <= 1100L + (time3-time1) should be <= 1400L + order should be (List(4, 3,2,1)) + } + + behavior of "token bucket RateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + executions shouldBe 1 + } + + it should "refill token after time elapsed from last refill and not before" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(600.millis) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe Some(0) + executions shouldBe 2 + } + + it should "block operation when rate limit is exceeded" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + + val r3 = rateLimiter(operation) + (r1, r2, r3) + } + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val time2 = System.currentTimeMillis() // 1 second + sleep(2.seconds) //add 2 tokens + val result3 = operationN(3) //blocks until 1 second elapsed + val result4 = operationN(4) // 
blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time4 = System.currentTimeMillis() + + result1 shouldBe Some(1) + result2 shouldBe Some(2) + result3 shouldBe Some(3) + result4 shouldBe Some(4) + result5 shouldBe Some(5) + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 3000L - 10 + (time4-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 3200L + (time4-time1) should be <= 4200L + order should be (List(5, 4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + + val time1 = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + } + val time3 = System.currentTimeMillis() + + (time2-time1) should be >= 2000L - 10 + (time3-time1) should be >= 3000L - 10 + (time2-time1) should be <= 2200L + (time3-time1) should be <= 3200L + order should be (List(4, 3,2,1)) + } + + behavior of "leaky bucket RateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + + it should "reject operation before leaking and accepting after it" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(400.millis) + val result3 = rateLimiter(operation) + ox.sleep(101.millis) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "restart rate limiter after given duration" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Drop(), + RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "block operation when rate limit is exceeded" in { + val config = 
RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var executions = 0 + def operation = { + + executions +=1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe Some(0) + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + + result1 shouldBe Some(1) + result2 shouldBe Some(2) + result3 shouldBe Some(3) + result4 shouldBe Some(4) + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + order should be (List(4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val config = RateLimiterConfig( + RateLimiterConfig.BlockingPolicy.Block(), + RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + val rateLimiter = RateLimiter(config) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + + val time1 = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + } + val time3 = System.currentTimeMillis() + + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + order should be (List(4, 3,2,1)) + } + +end RateLimiterTest \ No newline at end of file From 81c2827d8601f1e03003b952c015b96241b375e3 Mon Sep 17 00:00:00 2001 From: pablf Date: Sat, 19 Oct 2024 18:17:17 +0200 Subject: [PATCH 02/23] fmt --- .../src/main/scala/ox/resilience/RateLimiterConfig.scala | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/core/src/main/scala/ox/resilience/RateLimiterConfig.scala b/core/src/main/scala/ox/resilience/RateLimiterConfig.scala index 9c9f7ab8..00ae728c 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterConfig.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterConfig.scala @@ -248,12 +248,13 @@ object RateLimiterConfig: newTokens.toInt def getNextTime(): Long = - if isReady then 0 - else lastRefillTime.get() + refillInterval - System.nanoTime() + if isReady then 0 + else lastRefillTime.get() + refillInterval - System.nanoTime() - /** Leaky bucket algorithm - */ end TokenBucket + + /** Leaky bucket algorithm + */ case class LeakyBucket(capacity: Int, leakRate: FiniteDuration) extends 
RateLimiterAlgorithm: private val counter = new AtomicReference[Double](0.0) private val leakInterval = leakRate.toNanos From 57f1903d1a07611d5bd308676ce6f0d5b9e11978 Mon Sep 17 00:00:00 2001 From: pablf Date: Tue, 22 Oct 2024 17:26:36 +0200 Subject: [PATCH 03/23] review --- .../ox/resilience/GenericRateLimiter.scala | 192 +++++ .../scala/ox/resilience/RateLimiter.scala | 61 +- .../ox/resilience/RateLimiterAlgorithm.scala | 172 +++++ .../ox/resilience/RateLimiterConfig.scala | 291 -------- core/src/main/scala/ox/unsupervised.scala | 10 +- .../resilience/GenericRateLimiterTest.scala | 689 ++++++++++++++++++ .../scala/ox/resilience/RateLimiterTest.scala | 684 +---------------- 7 files changed, 1112 insertions(+), 987 deletions(-) create mode 100644 core/src/main/scala/ox/resilience/GenericRateLimiter.scala create mode 100644 core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala delete mode 100644 core/src/main/scala/ox/resilience/RateLimiterConfig.scala create mode 100644 core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala new file mode 100644 index 00000000..2d4e5bc0 --- /dev/null +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -0,0 +1,192 @@ +package ox.resilience + +import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.locks.ReentrantLock +import scala.concurrent.{Await, Future, Promise} +import scala.concurrent.duration.{Duration, FiniteDuration} +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.blocking +import GenericRateLimiter.* + +case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( + executor: Executor[Returns], + algorithm: RateLimiterAlgorithm +): + + import GenericRateLimiter.Strategy.given + + /** Limits the rate of execution of the given operation + */ + def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = + executor.lock.lock() + if executor.isUnblocked then + if algorithm.isUnblocked then + if algorithm.isReady then + algorithm.acceptOperation + executor.lock.unlock() + executor.run(operation) + else + algorithm.rejectOperation + executor.lock.unlock() + executor.block(algorithm, operation) + else + executor.lock.unlock() + executor.block(algorithm, operation) + else + executor.lock.unlock() + executor.block(algorithm, operation) + end if + end apply +end GenericRateLimiter + +object GenericRateLimiter: + type Id[A] = A + sealed trait Strategy[F[*]]: + def run[T](operation: => T): F[T] + + object Strategy: + sealed trait Blocking[F[*]] extends Strategy[F] + sealed trait Dropping[F[*]] extends Strategy[F] + // def drop[T]: F[T] + sealed trait BlockOrDrop[F[*]] extends Strategy[F] + + case class Block() extends Blocking[Id] with BlockOrDrop[Id]: + def run[T](operation: => T): T = operation + + case class Drop() extends Dropping[Option] with BlockOrDrop[Option]: + def run[T](operation: => T): Option[T] = Some(operation) + + given Blocking[Id] = Block() + given Dropping[Option] = Drop() + end Strategy + + /** Determines the policy to apply when the rate limiter is full + */ + trait Executor[Returns[_[_]] <: Strategy[_]]: + + val lock = new ReentrantLock() + + /** This method is called when a new operation can't be readily accepted by the rate limiter. 
Implementations should execute the + * operation only if the algorithm and the Executor are both unblocked and they are responsible for checking when the algorithm is + * ready to accept a new operation, unblocking it and updating its internal state. + */ + def block[T, Result[_]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T] + + /** Runs the operation and returns the result using the given strategy + */ + def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] = + cfg.run(operation).asInstanceOf[Result[T]] + + /** Returns whether a new operation will be the first one to be passed to the RateLimiterAlgorithm after unblocking + */ + def isUnblocked: Boolean + end Executor + + object Executor: + /** Block rejected operations until the rate limiter is ready to accept them + */ + case class Block() extends Executor[Strategy.Blocking]: + + def isUnblocked: Boolean = + block.peek() == null + + val block = new ConcurrentLinkedQueue[Promise[Unit]]() + val queueLock = new ReentrantLock() + + def block[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Result[T] = + // blocks until it can accept current operation and returns next time it will be unblocked + blockUntilReady(algorithm) + + // updating internal state of algorithm + lock.lock() + algorithm.tryUnblock + lock.unlock() + algorithm.acceptOperation + block.poll() + + // fulfilling next promise in queue after waiting time given by algorithm + fulfillNextPromise(FiniteDuration(algorithm.getNextTime(), "nanoseconds")) + + run(operation) + end block + + private def blockUntilReady(algorithm: RateLimiterAlgorithm): Unit = + // creates a promise for the current operation and waits until fulfilled + queueLock.lock() + val waitTime = + if block.peek() == null then Some((algorithm.getNextTime())) + else None + + val promise = Promise[Unit]() + + block.add(promise) + queueLock.unlock() + + val future = promise.future + // if it's not the first promise, it will be fulfilled later + waitTime.map { wt => + fulfillNextPromise(FiniteDuration(wt, "nanoseconds")) + } + + Await.ready(future, Duration.Inf) + end blockUntilReady + + private def fulfillNextPromise(waitTime: FiniteDuration): Unit = + // sleeps waitTime and fulfills next promise in queue + queueLock.lock() + if block.peek() != null then + val p = block.peek() + queueLock.unlock() + if waitTime.toNanos != 0 then + Future { + val wt1 = waitTime.toMillis + val wt2 = waitTime.toNanos - wt1 * 1000000 + blocking(Thread.sleep(wt1, wt2.toInt)) + }.onComplete { _ => + p.success(()) + } + else p.success(()) + end if + else queueLock.unlock() + end if + end fulfillNextPromise + end Block + + /** Drop rejected operations + */ + case class Drop() extends Executor[Strategy.Dropping]: + def isUnblocked: Boolean = true + def block[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Result[T] = + lock.lock() + if algorithm.tryUnblock && algorithm.isReady then + algorithm.acceptOperation + lock.unlock() + cfg.run(operation) + else + lock.unlock() + None.asInstanceOf[Result[T]] + end block + end Drop + + /** Block rejected operations until the rate limiter is ready to accept them + */ + case class BlockOrDrop() extends Executor[Strategy.BlockOrDrop]: + + val blockExecutor = Block() + val dropExecutor = Drop() + + def isUnblocked: Boolean = + blockExecutor.isUnblocked + + def block[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => 
T)(using cfg: Strategy.BlockOrDrop[Result]): Result[T] = + cfg match + case cfg: Strategy.Block => + blockExecutor.block(algorithm, operation)(using cfg) + case cfg: Strategy.Drop => + dropExecutor.block(algorithm, operation)(using cfg) + + end block + end BlockOrDrop + + end Executor +end GenericRateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index d0f5a902..f3c5ec5a 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -1,73 +1,54 @@ package ox.resilience import scala.concurrent.duration.* -import RateLimiterConfig.* -/** Configurable rate limiter +/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an operation. */ case class RateLimiter( - config: RateLimiterConfig + algorithm: RateLimiterAlgorithm ): - /** Limits the rate of execution of the given operation + import GenericRateLimiter.* + + private val rateLimiter = GenericRateLimiter(Executor.BlockOrDrop(), algorithm) + + /** Blocks the operation until the rate limiter allows it. */ - def apply[T](operation: => T): Option[T] = - if config.blockingPolicy.isUnblocked then - if config.algorithm.isUnblocked then - if config.isReady then - config.acceptOperation - val result = operation - Some(result) - else - config.algorithm.rejectOperation - config.block(operation) - else config.block(operation) - else config.block(operation) + def runBlocking[T](operation: => T): T = rateLimiter(operation)(using Strategy.Block()) + + /** Drops the operation if not allowed by the rate limiter. + */ + def runOrDrop[T](operation: => T): Option[T] = rateLimiter(operation)(using Strategy.Drop()) + end RateLimiter object RateLimiter: def leakyBucket( capacity: Int, - leakInterval: FiniteDuration, - blocks: Boolean = true + leakInterval: FiniteDuration ): RateLimiter = - val algorithm = RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval) - val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) - val config = RateLimiterConfig(blockingPolicy, algorithm) - RateLimiter(config) + RateLimiter(RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval)) end leakyBucket def tokenBucket( maxTokens: Int, - refillInterval: FiniteDuration, - blocks: Boolean = true + refillInterval: FiniteDuration ): RateLimiter = - val algorithm = RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval) - val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) - val config = RateLimiterConfig(blockingPolicy, algorithm) - RateLimiter(config) + RateLimiter(RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval)) end tokenBucket def fixedRate( maxRequests: Int, - windowSize: FiniteDuration, - blocks: Boolean = true + windowSize: FiniteDuration ): RateLimiter = - val algorithm = RateLimiterAlgorithm.FixedRate(maxRequests, windowSize) - val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) - val config = RateLimiterConfig(blockingPolicy, algorithm) - RateLimiter(config) + RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) end fixedRate def slidingWindow( maxRequests: Int, - windowSize: FiniteDuration, - blocks: Boolean = true + windowSize: FiniteDuration ): RateLimiter = - val algorithm = RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize) - val blockingPolicy = RateLimiterConfig.BlockingPolicy(blocks) - val config = RateLimiterConfig(blockingPolicy, algorithm) - RateLimiter(config) + 
RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) end slidingWindow end RateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala new file mode 100644 index 00000000..65e14e26 --- /dev/null +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -0,0 +1,172 @@ +package ox.resilience + +import ox.* +import ox.resilience.RateLimiterAlgorithm.* +import scala.concurrent.duration.* +import ox.scheduling.* +import java.util.concurrent.atomic.{AtomicInteger, AtomicBoolean, AtomicLong} +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.* +import scala.util.{Try, Success, Failure} +import javax.swing.text.html.HTML.Tag +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.locks.ReentrantLock + +/** Determines the algorithm to use for the rate limiter + */ +trait RateLimiterAlgorithm: + + val blocked = new AtomicBoolean(false) + def isUnblocked: Boolean = !blocked.get() || tryUnblock + + /** Update internal state to check whether the algorithm can be unblocked. + */ + def tryUnblock: Boolean + + /** Determines if the operation can be accepted. Implementations should not update internal state. `acceptOperation` and `rejectOperation` + * are used for updating internal state after accepting or rejecting an operation. + */ + def isReady: Boolean + + /** Modifies internal state to mark that an operation has been accepted. + */ + def acceptOperation: Unit + + /** Modifies internal state to mark that an operation has been rejected. + */ + def rejectOperation: Unit + + /** Returns the time until the next operation can be accepted to be used by the BlockingPolicy. 
It should not modify internal state + */ + def getNextTime(): Long +end RateLimiterAlgorithm + +object RateLimiterAlgorithm: + /** Fixed rate algorithm + */ + case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: + private val counter = new AtomicInteger(0) + private lazy val lastUpdate = new AtomicLong(System.nanoTime()) + + def tryUnblock: Boolean = + if lastUpdate.get() + per.toNanos < System.nanoTime() then + reset + true + else false + + def isReady: Boolean = + lastUpdate.get() + counter.get() < rate + + def rejectOperation: Unit = + blocked.set(true) + + def acceptOperation: Unit = + counter.incrementAndGet() + + def getNextTime(): Long = + if isReady then 0 + else lastUpdate.get() + per.toNanos - System.nanoTime() + + private def reset: Unit = + lastUpdate.set(System.nanoTime()) + counter.set(0) + blocked.set(false) + end FixedRate + + /** Sliding window algorithm + */ + case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: + private val counter = new AtomicInteger(0) + private val log = new ConcurrentLinkedQueue[Long]() + + def tryUnblock: Boolean = + val now = System.nanoTime() + while counter.get() > 0 && log.peek() < now - per.toNanos do + log.poll() + counter.decrementAndGet() + isReady + + def isReady: Boolean = + counter.get() < rate + + def rejectOperation: Unit = () + + def acceptOperation: Unit = + val now = System.nanoTime() + log.add(now) + counter.incrementAndGet() + + def getNextTime(): Long = + if isReady then 0 + else log.peek() + per.toNanos - System.nanoTime() + end SlidingWindow + + /** Token bucket algorithm + */ + case class TokenBucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: + private val maxTokens = rate + private val refillInterval = per.toNanos + private val tokens = new AtomicInteger(1) + private val lastRefillTime = new AtomicLong(System.nanoTime()) + + def tryUnblock: Boolean = + isReady || refillTokens > 0 + + def isReady: Boolean = + tokens.get() > 0 + + def rejectOperation: Unit = () + + def acceptOperation: Unit = + tokens.decrementAndGet() + + private def refillTokens: Int = + val now = System.nanoTime() + val elapsed = now - lastRefillTime.get() + val newTokens = elapsed / refillInterval + tokens.set(Math.min(tokens.get() + newTokens.toInt, maxTokens)) + lastRefillTime.set(newTokens * refillInterval + lastRefillTime.get()) + newTokens.toInt + + def getNextTime(): Long = + if isReady then 0 + else lastRefillTime.get() + refillInterval - System.nanoTime() + + end TokenBucket + + /** Leaky bucket algorithm + */ + case class LeakyBucket(capacity: Int, leakRate: FiniteDuration) extends RateLimiterAlgorithm: + private val counter = new AtomicReference[Double](0.0) + private val leakInterval = leakRate.toNanos + private val lastLeakTime = new AtomicLong(System.nanoTime()) + + def tryUnblock: Boolean = + val leaking = leak + isReady || leaking > 0.0 + + def isReady: Boolean = + counter.get() <= capacity - 1.0 + + def rejectOperation: Unit = () + + def acceptOperation: Unit = + counter.getAndUpdate(_ + 1.0) + + private def leak: Double = + val now = System.nanoTime() + val lastLeak = lastLeakTime.get() + val elapsed = now - lastLeak + val leaking: Double = (elapsed.toDouble / leakInterval.toDouble) + counter.set(Math.max(counter.get() - leaking, 0)) + lastLeakTime.set(now) + leaking + end leak + + def getNextTime(): Long = + if isReady then 0 + else lastLeakTime.get() + leakInterval - System.nanoTime() + end LeakyBucket +end RateLimiterAlgorithm diff --git 
a/core/src/main/scala/ox/resilience/RateLimiterConfig.scala b/core/src/main/scala/ox/resilience/RateLimiterConfig.scala deleted file mode 100644 index 00ae728c..00000000 --- a/core/src/main/scala/ox/resilience/RateLimiterConfig.scala +++ /dev/null @@ -1,291 +0,0 @@ -package ox.resilience - -import ox.* -import ox.resilience.RateLimiterConfig.* -import scala.concurrent.duration.* -import ox.scheduling.* -import java.util.concurrent.atomic.{AtomicInteger, AtomicBoolean, AtomicLong} -import scala.concurrent.ExecutionContext.Implicits.global -import scala.concurrent.* -import scala.util.{Try, Success, Failure} -import javax.swing.text.html.HTML.Tag -import java.util.concurrent.atomic.AtomicReference -import java.util.concurrent.ConcurrentLinkedQueue -import java.util.concurrent.locks.ReentrantLock - -/** Configuration for a rate limiter - * - * @param blockingPolicy - * blocking policy to apply when the rate limiter is full - * @param algorithm - * algorithm to use for the rate limiter - */ -final case class RateLimiterConfig( - blockingPolicy: BlockingPolicy, - algorithm: RateLimiterAlgorithm -): - def isUnblocked: Boolean = - algorithm.isUnblocked && blockingPolicy.isUnblocked - - def isReady: Boolean = - algorithm.isReady - - def acceptOperation: Unit = - algorithm.acceptOperation - - def block[T](operation: => T): Option[T] = - blockingPolicy.block(algorithm, operation) -end RateLimiterConfig - -object RateLimiterConfig: - /** Determines the policy to apply when the rate limiter is full - */ - trait BlockingPolicy: - /** This method is called when a new operation can't be readily accepted by the rate limiter. Return None for discarded operations, or - * Some(result) for result of operations after unblocking. Implementations should execute the operation only if the algorithm and the - * BlockingPolicy are both unblocked and they are responsible for checking when the algorithm is ready to accept a new operation, - * unblocking it and updating its internal state. 
- */ - def block[T](algorithm: RateLimiterAlgorithm, operation: => T): Option[T] - - /** Returns whether a new operation will be the first one to be passed to the RateLimiterAlgorithm after unblocking - */ - def isUnblocked: Boolean - end BlockingPolicy - - object BlockingPolicy: - - def apply(blocks: Boolean): BlockingPolicy = - if blocks then Block() else Drop() - - /** Block rejected operations until the rate limiter is ready to accept them - */ - case class Block() extends BlockingPolicy: - - def isUnblocked: Boolean = - block.peek() == null - - val block = new ConcurrentLinkedQueue[Promise[Unit]]() - - def block[T](algorithm: RateLimiterAlgorithm, operation: => T): Option[T] = - // blocks until it can accept current operation and returns next time it will be unblocked - blockUntilReady(algorithm, Duration.Inf) - - // updating internal state of algorithm - algorithm.tryUnblock - algorithm.acceptOperation - block.poll() - - // fulfilling next promise in queue after waiting time given by algorithm - fulfillNextPromise(algorithm, FiniteDuration(algorithm.getNextTime(), "nanoseconds")) - - val result = operation - Some(result) - end block - - private def blockUntilReady(algorithm: RateLimiterAlgorithm, timeout: Duration): Unit = - // creates a promise for the current operation and waits until fulfilled - val waitTime = - if block.peek() == null then Some((algorithm.getNextTime())) - else None - val promise = Promise[Unit]() - - block.add(promise) - val future = promise.future - // if it's not the first promise, it will be fulfilled later - waitTime.map { wt => - fulfillNextPromise(algorithm, FiniteDuration(wt, "nanoseconds")) - } - - Await.ready(future, timeout) - end blockUntilReady - - private def fulfillNextPromise(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = - // sleeps waitTime and fulfills next promise in queue - if block.peek() != null then - val p = block.peek() - if waitTime.toNanos != 0 then - Future { - val wt1 = waitTime.toMillis - val wt2 = waitTime.toNanos - wt1 * 1000000 - blocking(Thread.sleep(wt1, wt2.toInt)) - }.onComplete { _ => - p.success(()) - } - else p.success(()) - end if - end Block - - /** Drop rejected operations - */ - case class Drop() extends BlockingPolicy: - def isUnblocked: Boolean = true - def block[T](algorithm: RateLimiterAlgorithm, operation: => T): Option[T] = - if algorithm.tryUnblock && algorithm.isReady then - algorithm.acceptOperation - val result = operation - Some(result) - else None - end Drop - end BlockingPolicy - - /** Determines the algorithm to use for the rate limiter - */ - trait RateLimiterAlgorithm: - - val blocked = new AtomicBoolean(false) - def isUnblocked: Boolean = !blocked.get() || tryUnblock - - /** Update internal state to check whether the algorithm can be unblocked. - */ - def tryUnblock: Boolean - - /** Determines if the operation can be accepted. Implementations should update internal state only to determine if the operation can be - * accepted, e.g., updating availability after time elapsed. `acceptOperation` and `rejectOperation` are used for updating internal - * state after accepting or rejecting an operation. - */ - def isReady: Boolean - - /** Modifies internal state to mark that an operation has been accepted. - */ - def acceptOperation: Unit - - /** Modifies internal state to mark that an operation has been rejected. 
- */ - def rejectOperation: Unit - - /** Returns the time until the next operation can be accepted to be used by the BlockingPolicy - */ - def getNextTime(): Long - end RateLimiterAlgorithm - - object RateLimiterAlgorithm: - /** Fixed rate algorithm - */ - case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val counter = new AtomicInteger(0) - private lazy val lastUpdate = new AtomicLong(System.nanoTime()) - - def tryUnblock: Boolean = - if lastUpdate.get() + per.toNanos < System.nanoTime() then - reset - true - else false - - def isReady: Boolean = - lastUpdate.get() - counter.get() < rate - - def rejectOperation: Unit = - blocked.set(true) - - def acceptOperation: Unit = - counter.incrementAndGet() - - def getNextTime(): Long = - if isReady then 0 - else lastUpdate.get() + per.toNanos - System.nanoTime() - - private def reset: Unit = - lastUpdate.set(System.nanoTime()) - counter.set(0) - blocked.set(false) - end FixedRate - - /** Sliding window algorithm - */ - case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val counter = new AtomicInteger(0) - private val log = new ConcurrentLinkedQueue[Long]() - - def tryUnblock: Boolean = - val now = System.nanoTime() - while counter.get() > 0 && log.peek() < now - per.toNanos do - log.poll() - counter.decrementAndGet() - isReady - - def isReady: Boolean = - counter.get() < rate - - def rejectOperation: Unit = () - - def acceptOperation: Unit = - val now = System.nanoTime() - log.add(now) - counter.incrementAndGet() - - def getNextTime(): Long = - if isReady then 0 - else log.peek() + per.toNanos - System.nanoTime() - end SlidingWindow - - /** Token bucket algorithm - */ - case class TokenBucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val maxTokens = rate - private val refillInterval = per.toNanos - private val tokens = new AtomicInteger(1) - private val lastRefillTime = new AtomicLong(System.nanoTime()) - - def tryUnblock: Boolean = - isReady || refillTokens > 0 - - def isReady: Boolean = - tokens.get() > 0 - - def rejectOperation: Unit = () - - def acceptOperation: Unit = - tokens.decrementAndGet() - - private def refillTokens: Int = - val now = System.nanoTime() - val elapsed = now - lastRefillTime.get() - val newTokens = elapsed / refillInterval - tokens.set(Math.min(tokens.get() + newTokens.toInt, maxTokens)) - lastRefillTime.set(newTokens * refillInterval + lastRefillTime.get()) - newTokens.toInt - - def getNextTime(): Long = - if isReady then 0 - else lastRefillTime.get() + refillInterval - System.nanoTime() - - end TokenBucket - - /** Leaky bucket algorithm - */ - case class LeakyBucket(capacity: Int, leakRate: FiniteDuration) extends RateLimiterAlgorithm: - private val counter = new AtomicReference[Double](0.0) - private val leakInterval = leakRate.toNanos - private val lastLeakTime = new AtomicLong(System.nanoTime()) - - def tryUnblock: Boolean = - val leaking = leak - isReady || leaking > 0.0 - - def isReady: Boolean = - counter.get() <= capacity - 1.0 - - def rejectOperation: Unit = () - - def acceptOperation: Unit = - counter.getAndUpdate(_ + 1.0) - - private def leak: Double = - val now = System.nanoTime() - val lastLeak = lastLeakTime.get() - val elapsed = now - lastLeak - val leaking: Double = (elapsed.toDouble / leakInterval.toDouble) - counter.set(Math.max(counter.get() - leaking, 0)) - lastLeakTime.set(now) // lastLeak + leaking * leakInterval) - leaking - end leak - - def getNextTime(): Long = - if isReady then 
0 - else lastLeakTime.get() + leakInterval - System.nanoTime() - end LeakyBucket - end RateLimiterAlgorithm - -end RateLimiterConfig diff --git a/core/src/main/scala/ox/unsupervised.scala b/core/src/main/scala/ox/unsupervised.scala index e97a75d0..5086ed47 100644 --- a/core/src/main/scala/ox/unsupervised.scala +++ b/core/src/main/scala/ox/unsupervised.scala @@ -53,11 +53,11 @@ private[ox] def scopedWithCapability[T](capability: Ox)(f: Ox ?=> T): T = try val t = try - try f(using capability) - finally - scope.shutdown() - scope.join().discard - // join might have been interrupted + try f(using capability) + finally + scope.shutdown() + scope.join().discard + // join might have been interrupted finally scope.close() // running the finalizers only once we are sure that all child threads have been terminated, so that no new diff --git a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala new file mode 100644 index 00000000..25337ed7 --- /dev/null +++ b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala @@ -0,0 +1,689 @@ +package ox.resilience + +import ox.* +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.{EitherValues, TryValues} +import ox.util.ElapsedTime +import scala.concurrent.duration._ + +class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: + + behavior of "fixed rate GenericRateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val before = System.nanoTime() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val before = System.currentTimeMillis() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + val after = System.currentTimeMillis() + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + (after-before) should be >= 1000L + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + 
+ var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result6 = operationN(6) + val result7 = operationN(7) // blocks until 3 seconds elapsed + val time4 = System.currentTimeMillis() + val result8 = operationN(8) + val result9 = operationN(9) // blocks until 4 seconds elapsed + val time5 = System.currentTimeMillis() + + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + result6 shouldBe 6 + result7 shouldBe 7 + result8 shouldBe 8 + result9 shouldBe 9 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time4-time1) should be >= 3000L - 10 + (time5-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + (time4-time1) should be <= 3200L + (time5-time1) should be <= 4200L + order should be (List(9, 8,7,6,5,4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + var time3 = 0L + var time4 = 0L + + + val time1 = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + forkUser: + sleep(200.millis) + operationN(5) + time3 = System.currentTimeMillis + forkUser: + sleep(250.millis) + operationN(6) + forkUser: + sleep(300.millis) + operationN(7) + time4 = System.currentTimeMillis + forkUser: + sleep(350.millis) + operationN(8) + forkUser: + sleep(400.millis) + operationN(9) + } + val time5 = System.currentTimeMillis() + + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time4-time1) should be >= 3000L - 10 + (time5-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + (time4-time1) should be <= 3200L + (time5-time1) should be <= 4200L + order should be (List(9, 8,7,6,5,4, 3,2,1)) + } + + + + behavior of "sliding window GenericRateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val 
result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + + executions +=1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + ox.sleep(500.millis) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed + + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 1500L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 1700L + order should be (List(4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + var time3 = 0L + + + val time1 = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(300.millis) + operationN(2) + forkUser: + sleep(400.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(700.millis) + operationN(4) + time3 = System.currentTimeMillis + } + + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 1300L - 10 + (time2-time1) should be <= 1100L + (time3-time1) should be <= 1400L + order should be (List(4, 3,2,1)) + } + + behavior of "token bucket GenericRateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + executions shouldBe 1 + } + + it should "refill token after time elapsed from last refill and not before" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + 
val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(600.millis) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe Some(0) + executions shouldBe 2 + } + + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + + val r3 = rateLimiter(operation) + (r1, r2, r3) + } + + result1 shouldBe 0 + result2 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val time2 = System.currentTimeMillis() // 1 second + sleep(2.seconds) //add 2 tokens + val result3 = operationN(3) //blocks until 1 second elapsed + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time4 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 3000L - 10 + (time4-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 3200L + (time4-time1) should be <= 4200L + order should be (List(5, 4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + + val time1 = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + } + val time3 = System.currentTimeMillis() + + (time2-time1) should be >= 2000L - 10 + (time3-time1) should be >= 3000L - 10 + (time2-time1) should be <= 2200L + (time3-time1) should be <= 3200L + order should be (List(4, 3,2,1)) + } + + behavior of "leaky bucket GenericRateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + + it should "reject operation before leaking and accepting after it" in { + val rateLimiter = GenericRateLimiter( + 
GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(400.millis) + val result3 = rateLimiter(operation) + ox.sleep(101.millis) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } + + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + + executions +=1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + order should be (List(4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } + + var time2 = 0L + + val time1 = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + } + val time3 = System.currentTimeMillis() + + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + order should be (List(4, 3,2,1)) + } + +end GenericRateLimiterTest \ 
No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 1116c97d..79b1c4ed 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -9,706 +9,88 @@ import scala.concurrent.duration._ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: - behavior of "fixed rate RateLimiter" - - it should "drop operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } - - it should "restart rate limiter after given duration" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - + behavior of "RateLimiter" + it should "drop or block operation depending on method used for fixed rate algorithm" in { + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + var executions = 0 def operation = { executions +=1 0 } - val before = System.nanoTime() - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None - result4 shouldBe Some(0) + result4 shouldBe 0 executions shouldBe 3 } - it should "block operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val before = System.currentTimeMillis() - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - val after = System.currentTimeMillis() - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe Some(0) - (after-before) should be >= 1000L - executions shouldBe 3 - } - - it should "respect queueing order when blocking" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time3 = 
System.currentTimeMillis() - val result6 = operationN(6) - val result7 = operationN(7) // blocks until 3 seconds elapsed - val time4 = System.currentTimeMillis() - val result8 = operationN(8) - val result9 = operationN(9) // blocks until 4 seconds elapsed - val time5 = System.currentTimeMillis() - - - result1 shouldBe Some(1) - result2 shouldBe Some(2) - result3 shouldBe Some(3) - result4 shouldBe Some(4) - result5 shouldBe Some(5) - result6 shouldBe Some(6) - result7 shouldBe Some(7) - result8 shouldBe Some(8) - result9 shouldBe Some(9) - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time4-time1) should be >= 3000L - 10 - (time5-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - (time4-time1) should be <= 3200L - (time5-time1) should be <= 4200L - order should be (List(9, 8,7,6,5,4, 3,2,1)) - } - - it should "respect queueing order when blocking concurrently" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - var time2 = 0L - var time3 = 0L - var time4 = 0L - - - val time1 = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) - forkUser: - sleep(200.millis) - operationN(5) - time3 = System.currentTimeMillis - forkUser: - sleep(250.millis) - operationN(6) - forkUser: - sleep(300.millis) - operationN(7) - time4 = System.currentTimeMillis - forkUser: - sleep(350.millis) - operationN(8) - forkUser: - sleep(400.millis) - operationN(9) - } - val time5 = System.currentTimeMillis() - - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time4-time1) should be >= 3000L - 10 - (time5-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - (time4-time1) should be <= 3200L - (time5-time1) should be <= 4200L - order should be (List(9, 8,7,6,5,4, 3,2,1)) - } - - - - behavior of "sliding window RateLimiter" - - it should "drop operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } - - it should "restart rate limiter after given duration" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - + it should "drop or block operation depending on method used for sliding window algorithm" in { + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + var executions = 0 def operation = { executions +=1 0 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = 
rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None - result4 shouldBe Some(0) + result4 shouldBe 0 executions shouldBe 3 } - it should "block operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var executions = 0 - def operation = { + it should "drop or block operation depending on method used for token bucket algorithm" in { + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - executions +=1 - 0 - } - - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe Some(0) - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } - - it should "respect queueing order when blocking" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - ox.sleep(500.millis) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed - - - result1 shouldBe Some(1) - result2 shouldBe Some(2) - result3 shouldBe Some(3) - result4 shouldBe Some(4) - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 1500L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 1700L - order should be (List(4, 3,2,1)) - } - - it should "respect queueing order when blocking concurrently" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - var time2 = 0L - var time3 = 0L - - - val time1 = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(300.millis) - operationN(2) - forkUser: - sleep(400.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(700.millis) - operationN(4) - time3 = System.currentTimeMillis - } - - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 1300L - 10 - (time2-time1) should be <= 1100L - (time3-time1) should be <= 1400L - order should be (List(4, 3,2,1)) - } - - behavior of "token bucket RateLimiter" - - it should "drop operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = 
RateLimiter(config) - - var executions = 0 def operation = { executions +=1 0 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe None - executions shouldBe 1 - } - - it should "refill token after time elapsed from last refill and not before" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(600.millis) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe Some(0) - executions shouldBe 2 - } - - it should "block operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } - - it should "respect queueing order when blocking" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val time2 = System.currentTimeMillis() // 1 second - sleep(2.seconds) //add 2 tokens - val result3 = operationN(3) //blocks until 1 second elapsed - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time4 = System.currentTimeMillis() - - result1 shouldBe Some(1) - result2 shouldBe Some(2) - result3 shouldBe Some(3) - result4 shouldBe Some(4) - result5 shouldBe Some(5) - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 3000L - 10 - (time4-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 3200L - (time4-time1) should be <= 4200L - order should be (List(5, 4, 3,2,1)) - } - - it should "respect queueing order when blocking concurrently" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - var time2 = 0L - - val time1 = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) - } - val time3 = System.currentTimeMillis() - - 
(time2-time1) should be >= 2000L - 10 - (time3-time1) should be >= 3000L - 10 - (time2-time1) should be <= 2200L - (time3-time1) should be <= 3200L - order should be (List(4, 3,2,1)) - } - - behavior of "leaky bucket RateLimiter" - - it should "drop operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } - - it should "reject operation before leaking and accepting after it" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(400.millis) - val result3 = rateLimiter(operation) - ox.sleep(101.millis) - val result4 = rateLimiter(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None - result4 shouldBe Some(0) + result4 shouldBe 0 executions shouldBe 3 } - it should "restart rate limiter after given duration" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Drop(), - RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - + it should "drop or block operation depending on method used for leaky bucker algorithm" in { + val rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second")) + var executions = 0 def operation = { executions +=1 0 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } - - it should "block operation when rate limit is exceeded" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var executions = 0 - def operation = { - - executions +=1 - 0 - } - - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe Some(0) - timeElapsed.toMillis should be >= 1000L - 10 + result4 shouldBe 0 executions shouldBe 3 - } - - it should "respect queueing order when blocking" in { - val config = RateLimiterConfig( - 
RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - - result1 shouldBe Some(1) - result2 shouldBe Some(2) - result3 shouldBe Some(3) - result4 shouldBe Some(4) - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - order should be (List(4, 3,2,1)) - } - - it should "respect queueing order when blocking concurrently" in { - val config = RateLimiterConfig( - RateLimiterConfig.BlockingPolicy.Block(), - RateLimiterConfig.RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - val rateLimiter = RateLimiter(config) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - var time2 = 0L - - val time1 = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) - } - val time3 = System.currentTimeMillis() - - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - order should be (List(4, 3,2,1)) - } - -end RateLimiterTest \ No newline at end of file + } \ No newline at end of file From f4088a1735140a4b9edcf02193a5afb9b7a2b381 Mon Sep 17 00:00:00 2001 From: pablf Date: Tue, 22 Oct 2024 17:27:42 +0200 Subject: [PATCH 04/23] review --- core/src/main/scala/ox/resilience/GenericRateLimiter.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index 2d4e5bc0..b86e7d81 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -15,7 +15,7 @@ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( import GenericRateLimiter.Strategy.given - /** Limits the rate of execution of the given operation + /** Limits the rate of execution of the given operation with custom Result type */ def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = executor.lock.lock() @@ -47,7 +47,6 @@ object GenericRateLimiter: object Strategy: sealed trait Blocking[F[*]] extends Strategy[F] sealed trait Dropping[F[*]] extends Strategy[F] - // def drop[T]: F[T] sealed trait BlockOrDrop[F[*]] extends Strategy[F] case class Block() extends Blocking[Id] with BlockOrDrop[Id]: From febd526b7429e8ca6066c3a45ad4e0db55e6e746 Mon Sep 17 00:00:00 2001 From: pablf Date: Tue, 22 Oct 2024 17:30:57 +0200 Subject: [PATCH 05/23] fix warnings --- .../scala/ox/resilience/RateLimiterAlgorithm.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index 
65e14e26..65816795 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -3,15 +3,10 @@ package ox.resilience import ox.* import ox.resilience.RateLimiterAlgorithm.* import scala.concurrent.duration.* -import ox.scheduling.* import java.util.concurrent.atomic.{AtomicInteger, AtomicBoolean, AtomicLong} -import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.* -import scala.util.{Try, Success, Failure} -import javax.swing.text.html.HTML.Tag import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.ConcurrentLinkedQueue -import java.util.concurrent.locks.ReentrantLock /** Determines the algorithm to use for the rate limiter */ @@ -64,6 +59,7 @@ object RateLimiterAlgorithm: def acceptOperation: Unit = counter.incrementAndGet() + () def getNextTime(): Long = if isReady then 0 @@ -86,6 +82,7 @@ object RateLimiterAlgorithm: while counter.get() > 0 && log.peek() < now - per.toNanos do log.poll() counter.decrementAndGet() + () isReady def isReady: Boolean = @@ -97,6 +94,7 @@ object RateLimiterAlgorithm: val now = System.nanoTime() log.add(now) counter.incrementAndGet() + () def getNextTime(): Long = if isReady then 0 @@ -121,6 +119,7 @@ object RateLimiterAlgorithm: def acceptOperation: Unit = tokens.decrementAndGet() + () private def refillTokens: Int = val now = System.nanoTime() @@ -154,6 +153,7 @@ object RateLimiterAlgorithm: def acceptOperation: Unit = counter.getAndUpdate(_ + 1.0) + () private def leak: Double = val now = System.nanoTime() From 77ef1051771614b15d90262c6846d109b279bc5a Mon Sep 17 00:00:00 2001 From: pablf Date: Sun, 27 Oct 2024 14:51:20 +0100 Subject: [PATCH 06/23] use semaphore for implementation --- .../ox/resilience/GenericRateLimiter.scala | 159 +++++++--------- .../ox/resilience/RateLimiterAlgorithm.scala | 174 ++++++++---------- 2 files changed, 147 insertions(+), 186 deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index b86e7d81..293d9948 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -1,12 +1,12 @@ package ox.resilience -import java.util.concurrent.ConcurrentLinkedQueue -import java.util.concurrent.locks.ReentrantLock +import java.util.concurrent.{ConcurrentLinkedQueue, Semaphore} import scala.concurrent.{Await, Future, Promise} import scala.concurrent.duration.{Duration, FiniteDuration} import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.blocking import GenericRateLimiter.* +import ox.resilience.GenericRateLimiter.Strategy.Blocking case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( executor: Executor[Returns], @@ -18,29 +18,18 @@ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( /** Limits the rate of execution of the given operation with custom Result type */ def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = - executor.lock.lock() - if executor.isUnblocked then - if algorithm.isUnblocked then - if algorithm.isReady then - algorithm.acceptOperation - executor.lock.unlock() - executor.run(operation) - else - algorithm.rejectOperation - executor.lock.unlock() - executor.block(algorithm, operation) - else - executor.lock.unlock() - executor.block(algorithm, operation) - else - executor.lock.unlock() - executor.block(algorithm, operation) - end if + val future = 
executor.add(algorithm, operation) + executor.execute(algorithm, operation) + Await.result(future, Duration.Inf) end apply end GenericRateLimiter object GenericRateLimiter: + type Id[A] = A + + /** Describe the execution strategy that must be used by the rate limiter in a given operation + */ sealed trait Strategy[F[*]]: def run[T](operation: => T): F[T] @@ -63,22 +52,19 @@ object GenericRateLimiter: */ trait Executor[Returns[_[_]] <: Strategy[_]]: - val lock = new ReentrantLock() + /** Returns a future that will be completed when the operation is executed + */ + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Future[Result[T]] - /** This method is called when a new operation can't be readily accepted by the rate limiter. Implementations should execute the - * operation only if the algorithm and the Executor are both unblocked and they are responsible for checking when the algorithm is - * ready to accept a new operation, unblocking it and updating its internal state. - */ - def block[T, Result[_]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T] + /** Ensures that the future returned by `add` is completed + */ + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit /** Runs the operation and returns the result using the given strategy */ def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] = cfg.run(operation).asInstanceOf[Result[T]] - /** Returns whether a new operation will be the first one to be passed to the RateLimiterAlgorithm after unblocking - */ - def isUnblocked: Boolean end Executor object Executor: @@ -86,85 +72,67 @@ object GenericRateLimiter: */ case class Block() extends Executor[Strategy.Blocking]: - def isUnblocked: Boolean = - block.peek() == null - val block = new ConcurrentLinkedQueue[Promise[Unit]]() - val queueLock = new ReentrantLock() - - def block[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Result[T] = - // blocks until it can accept current operation and returns next time it will be unblocked - blockUntilReady(algorithm) - - // updating internal state of algorithm - lock.lock() - algorithm.tryUnblock - lock.unlock() - algorithm.acceptOperation - block.poll() - - // fulfilling next promise in queue after waiting time given by algorithm - fulfillNextPromise(FiniteDuration(algorithm.getNextTime(), "nanoseconds")) - - run(operation) - end block - - private def blockUntilReady(algorithm: RateLimiterAlgorithm): Unit = - // creates a promise for the current operation and waits until fulfilled - queueLock.lock() - val waitTime = - if block.peek() == null then Some((algorithm.getNextTime())) - else None + val schedule = new Semaphore(1) + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Future[Result[T]] = { val promise = Promise[Unit]() - block.add(promise) - queueLock.unlock() - - val future = promise.future - // if it's not the first promise, it will be fulfilled later - waitTime.map { wt => - fulfillNextPromise(FiniteDuration(wt, "nanoseconds")) + promise.future.map { _ => + run(operation) } + } - Await.ready(future, Duration.Inf) - end blockUntilReady + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Unit = { + // can't be called with empty queue + if algorithm.tryAcquire then + val p = block.poll() + 
p.success(()) + else + schedule.acquire() + val wt = algorithm.getNextTime() + fulfillNext(algorithm, FiniteDuration(wt, "nanoseconds")) + } - private def fulfillNextPromise(waitTime: FiniteDuration): Unit = + private def fulfillNext(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = // sleeps waitTime and fulfills next promise in queue - queueLock.lock() - if block.peek() != null then - val p = block.peek() - queueLock.unlock() + val p = block.poll() if waitTime.toNanos != 0 then Future { val wt1 = waitTime.toMillis val wt2 = waitTime.toNanos - wt1 * 1000000 blocking(Thread.sleep(wt1, wt2.toInt)) }.onComplete { _ => + algorithm.acquire p.success(()) + schedule.release() } - else p.success(()) - end if - else queueLock.unlock() - end if - end fulfillNextPromise + else + algorithm.acquire + p.success(()) + schedule.release() + end fulfillNext end Block /** Drop rejected operations */ case class Drop() extends Executor[Strategy.Dropping]: - def isUnblocked: Boolean = true - def block[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Result[T] = - lock.lock() - if algorithm.tryUnblock && algorithm.isReady then - algorithm.acceptOperation - lock.unlock() - cfg.run(operation) - else - lock.unlock() - None.asInstanceOf[Result[T]] - end block + + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Future[Result[T]] = { + val promise = Promise[Unit]() + val f = promise.future + if algorithm.tryAcquire then promise.success(()) + else promise.failure(new Exception("Rate limiter is full")) + f.map { + _ => cfg.run(operation) + }.recover { + case e: Exception => None.asInstanceOf[Result[T]] + } + } + + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Unit = + () + end Drop /** Block rejected operations until the rate limiter is ready to accept them @@ -174,17 +142,22 @@ object GenericRateLimiter: val blockExecutor = Block() val dropExecutor = Drop() - def isUnblocked: Boolean = - blockExecutor.isUnblocked + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Future[Result[T]] = { - def block[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Result[T] = cfg match case cfg: Strategy.Block => - blockExecutor.block(algorithm, operation)(using cfg) + blockExecutor.add(algorithm, operation)(using cfg) case cfg: Strategy.Drop => - dropExecutor.block(algorithm, operation)(using cfg) + dropExecutor.add(algorithm, operation)(using cfg) + } - end block + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Unit = { + cfg match + case cfg: Strategy.Block => + blockExecutor.execute(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) + case cfg: Strategy.Drop => + dropExecutor.execute(algorithm, operation)(using cfg) + } end BlockOrDrop end Executor diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index 65816795..3342bf52 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -3,170 +3,158 @@ package ox.resilience import ox.* import ox.resilience.RateLimiterAlgorithm.* import scala.concurrent.duration.* -import 
java.util.concurrent.atomic.{AtomicInteger, AtomicBoolean, AtomicLong} +import java.util.concurrent.atomic.AtomicLong import scala.concurrent.* -import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.Semaphore /** Determines the algorithm to use for the rate limiter */ trait RateLimiterAlgorithm: - - val blocked = new AtomicBoolean(false) - def isUnblocked: Boolean = !blocked.get() || tryUnblock - - /** Update internal state to check whether the algorithm can be unblocked. + /** Acquire a permit to execute the operation. This method should block until a permit is available. */ - def tryUnblock: Boolean + def acquire: Unit - /** Determines if the operation can be accepted. Implementations should not update internal state. `acceptOperation` and `rejectOperation` - * are used for updating internal state after accepting or rejecting an operation. + /** Try to acquire a permit to execute the operation. This method should not block. */ - def isReady: Boolean + def tryAcquire: Boolean - /** Modifies internal state to mark that an operation has been accepted. + /** Returns whether the rate limiter is ready to accept a new operation without modifying internal state */ - def acceptOperation: Unit - - /** Modifies internal state to mark that an operation has been rejected. + def isReady: Boolean + /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. IT should not modify internal state */ - def rejectOperation: Unit + def getNextTime(): Long = + if isReady then 0 + else computeNextTime() - /** Returns the time until the next operation can be accepted to be used by the BlockingPolicy. IT should not modify internal state + /** Compute the time until the next operation can be accepted. 
*/ - def getNextTime(): Long + def computeNextTime(): Long end RateLimiterAlgorithm object RateLimiterAlgorithm: /** Fixed rate algorithm */ case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val counter = new AtomicInteger(0) private lazy val lastUpdate = new AtomicLong(System.nanoTime()) + private val semaphore = new Semaphore(rate) - def tryUnblock: Boolean = - if lastUpdate.get() + per.toNanos < System.nanoTime() then - reset - true - else false + def acquire: Unit = + tryUnblock + semaphore.acquire() + + def tryAcquire: Boolean = + tryUnblock + semaphore.tryAcquire() def isReady: Boolean = lastUpdate.get() - counter.get() < rate - - def rejectOperation: Unit = - blocked.set(true) - - def acceptOperation: Unit = - counter.incrementAndGet() - () + semaphore.availablePermits() > 0 - def getNextTime(): Long = - if isReady then 0 - else lastUpdate.get() + per.toNanos - System.nanoTime() + def computeNextTime(): Long = + lastUpdate.get() + per.toNanos - System.nanoTime() - private def reset: Unit = - lastUpdate.set(System.nanoTime()) - counter.set(0) - blocked.set(false) + private def tryUnblock: Unit = + if lastUpdate.get() + per.toNanos < System.nanoTime() then + lastUpdate.set(System.nanoTime()) + semaphore.release(rate) + end FixedRate /** Sliding window algorithm */ case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val counter = new AtomicInteger(0) private val log = new ConcurrentLinkedQueue[Long]() + private val semaphore = new Semaphore(rate) - def tryUnblock: Boolean = + def acquire: Unit = + tryUnblock + semaphore.acquire() val now = System.nanoTime() - while counter.get() > 0 && log.peek() < now - per.toNanos do - log.poll() - counter.decrementAndGet() - () - isReady + log.add(now) + () + + def tryAcquire: Boolean = + tryUnblock + if semaphore.tryAcquire() then + val now = System.nanoTime() + log.add(now) + true + else false def isReady: Boolean = - counter.get() < rate + semaphore.availablePermits() > 0 - def rejectOperation: Unit = () + def computeNextTime(): Long = + log.peek() + per.toNanos - System.nanoTime() - def acceptOperation: Unit = + private def tryUnblock: Unit = val now = System.nanoTime() - log.add(now) - counter.incrementAndGet() - () - - def getNextTime(): Long = - if isReady then 0 - else log.peek() + per.toNanos - System.nanoTime() + while semaphore.availablePermits() < rate && log.peek() < now - per.toNanos do + log.poll() + semaphore.release() + () end SlidingWindow /** Token bucket algorithm */ case class TokenBucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val maxTokens = rate private val refillInterval = per.toNanos - private val tokens = new AtomicInteger(1) private val lastRefillTime = new AtomicLong(System.nanoTime()) + private val semaphore = new Semaphore(1) - def tryUnblock: Boolean = - isReady || refillTokens > 0 + def acquire: Unit = + refillTokens + semaphore.acquire() - def isReady: Boolean = - tokens.get() > 0 + def tryAcquire: Boolean = + refillTokens + semaphore.tryAcquire() - def rejectOperation: Unit = () + def isReady: Boolean = + semaphore.availablePermits() > 0 - def acceptOperation: Unit = - tokens.decrementAndGet() - () + def computeNextTime(): Long = + lastRefillTime.get() + refillInterval - System.nanoTime() - private def refillTokens: Int = + private def refillTokens: Unit = val now = System.nanoTime() val elapsed = now - lastRefillTime.get() val newTokens = elapsed / refillInterval - 
tokens.set(Math.min(tokens.get() + newTokens.toInt, maxTokens)) lastRefillTime.set(newTokens * refillInterval + lastRefillTime.get()) - newTokens.toInt - - def getNextTime(): Long = - if isReady then 0 - else lastRefillTime.get() + refillInterval - System.nanoTime() + semaphore.release(newTokens.toInt) end TokenBucket /** Leaky bucket algorithm */ case class LeakyBucket(capacity: Int, leakRate: FiniteDuration) extends RateLimiterAlgorithm: - private val counter = new AtomicReference[Double](0.0) private val leakInterval = leakRate.toNanos private val lastLeakTime = new AtomicLong(System.nanoTime()) + private val semaphore = new Semaphore(capacity) - def tryUnblock: Boolean = - val leaking = leak - isReady || leaking > 0.0 + def acquire: Unit = + leak + semaphore.acquire() - def isReady: Boolean = - counter.get() <= capacity - 1.0 + def tryAcquire: Boolean = + leak + semaphore.tryAcquire() - def rejectOperation: Unit = () + def isReady: Boolean = + semaphore.availablePermits() > 0 - def acceptOperation: Unit = - counter.getAndUpdate(_ + 1.0) - () + def computeNextTime(): Long = + lastLeakTime.get() + leakInterval - System.nanoTime() - private def leak: Double = + private def leak: Unit = val now = System.nanoTime() val lastLeak = lastLeakTime.get() val elapsed = now - lastLeak - val leaking: Double = (elapsed.toDouble / leakInterval.toDouble) - counter.set(Math.max(counter.get() - leaking, 0)) - lastLeakTime.set(now) - leaking - end leak - - def getNextTime(): Long = - if isReady then 0 - else lastLeakTime.get() + leakInterval - System.nanoTime() + val leaking = elapsed / leakInterval + val newTime = leaking * leakInterval + lastLeak + semaphore.release(leaking.toInt) + lastLeakTime.set(newTime) end LeakyBucket end RateLimiterAlgorithm From 093c5c2d45d3348cb2274de0bc1c895f2ed68b1b Mon Sep 17 00:00:00 2001 From: pablf Date: Sun, 27 Oct 2024 14:59:49 +0100 Subject: [PATCH 07/23] lint --- .../ox/resilience/GenericRateLimiter.scala | 68 +++++++++---------- .../ox/resilience/RateLimiterAlgorithm.scala | 7 +- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index 293d9948..c41e7e34 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -25,9 +25,9 @@ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( end GenericRateLimiter object GenericRateLimiter: - + type Id[A] = A - + /** Describe the execution strategy that must be used by the rate limiter in a given operation */ sealed trait Strategy[F[*]]: @@ -53,11 +53,11 @@ object GenericRateLimiter: trait Executor[Returns[_[_]] <: Strategy[_]]: /** Returns a future that will be completed when the operation is executed - */ - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Future[Result[T]] + */ + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Future[Result[T]] - /** Ensures that the future returned by `add` is completed - */ + /** Ensures that the future returned by `add` is completed + */ def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit /** Runs the operation and returns the result using the given strategy @@ -75,15 +75,14 @@ object GenericRateLimiter: val block = new ConcurrentLinkedQueue[Promise[Unit]]() val schedule = new Semaphore(1) - def add[T, 
Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Future[Result[T]] = { + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Future[Result[T]] = val promise = Promise[Unit]() block.add(promise) promise.future.map { _ => run(operation) } - } - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Unit = { + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Unit = // can't be called with empty queue if algorithm.tryAcquire then val p = block.poll() @@ -91,44 +90,44 @@ object GenericRateLimiter: else schedule.acquire() val wt = algorithm.getNextTime() - fulfillNext(algorithm, FiniteDuration(wt, "nanoseconds")) - } + releaseNext(algorithm, FiniteDuration(wt, "nanoseconds")) - private def fulfillNext(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = + private def releaseNext(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = // sleeps waitTime and fulfills next promise in queue - val p = block.poll() - if waitTime.toNanos != 0 then - Future { - val wt1 = waitTime.toMillis - val wt2 = waitTime.toNanos - wt1 * 1000000 - blocking(Thread.sleep(wt1, wt2.toInt)) - }.onComplete { _ => - algorithm.acquire - p.success(()) - schedule.release() - } - else + val p = block.poll() + if waitTime.toNanos != 0 then + Future { + val wt1 = waitTime.toMillis + val wt2 = waitTime.toNanos - wt1 * 1000000 + blocking(Thread.sleep(wt1, wt2.toInt)) + }.onComplete { _ => algorithm.acquire p.success(()) schedule.release() - end fulfillNext + } + else + algorithm.acquire + p.success(()) + schedule.release() + end if + end releaseNext end Block /** Drop rejected operations */ case class Drop() extends Executor[Strategy.Dropping]: - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Future[Result[T]] = { + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Future[Result[T]] = val promise = Promise[Unit]() val f = promise.future if algorithm.tryAcquire then promise.success(()) else promise.failure(new Exception("Rate limiter is full")) - f.map { - _ => cfg.run(operation) - }.recover { - case e: Exception => None.asInstanceOf[Result[T]] + f.map { _ => + cfg.run(operation) + }.recover { case e: Exception => + None.asInstanceOf[Result[T]] } - } + end add def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Unit = () @@ -142,22 +141,19 @@ object GenericRateLimiter: val blockExecutor = Block() val dropExecutor = Drop() - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Future[Result[T]] = { - + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Future[Result[T]] = cfg match case cfg: Strategy.Block => blockExecutor.add(algorithm, operation)(using cfg) case cfg: Strategy.Drop => dropExecutor.add(algorithm, operation)(using cfg) - } - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Unit = { + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Unit = cfg match case cfg: Strategy.Block => 
blockExecutor.execute(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) case cfg: Strategy.Drop => dropExecutor.execute(algorithm, operation)(using cfg) - } end BlockOrDrop end Executor diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index 3342bf52..58d1befe 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -22,7 +22,9 @@ trait RateLimiterAlgorithm: /** Returns whether the rate limiter is ready to accept a new operation without modifying internal state */ def isReady: Boolean - /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. IT should not modify internal state + + /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. IT should not modify + * internal state */ def getNextTime(): Long = if isReady then 0 @@ -59,7 +61,7 @@ object RateLimiterAlgorithm: if lastUpdate.get() + per.toNanos < System.nanoTime() then lastUpdate.set(System.nanoTime()) semaphore.release(rate) - + end FixedRate /** Sliding window algorithm @@ -156,5 +158,6 @@ object RateLimiterAlgorithm: val newTime = leaking * leakInterval + lastLeak semaphore.release(leaking.toInt) lastLeakTime.set(newTime) + end leak end LeakyBucket end RateLimiterAlgorithm From 212687f349013f99f1929335e0986ac4ba44d371 Mon Sep 17 00:00:00 2001 From: pablf Date: Tue, 29 Oct 2024 18:54:04 +0100 Subject: [PATCH 08/23] modify thread of execution --- .../ox/resilience/GenericRateLimiter.scala | 63 +++++++++------ .../resilience/GenericRateLimiterTest.scala | 27 +++---- .../scala/ox/resilience/RateLimiterTest.scala | 8 +- doc/custom-rate-limiter.md | 81 +++++++++++++++++++ doc/index.md | 2 + doc/rate-limiter.md | 46 +++++++++++ 6 files changed, 185 insertions(+), 42 deletions(-) create mode 100644 doc/custom-rate-limiter.md create mode 100644 doc/rate-limiter.md diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index c41e7e34..34fbd3b3 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -19,8 +19,9 @@ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( */ def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = val future = executor.add(algorithm, operation) + executor.schedule(algorithm, operation) + future.map(f => Await.result(f, Duration.Inf)) executor.execute(algorithm, operation) - Await.result(future, Duration.Inf) end apply end GenericRateLimiter @@ -52,13 +53,17 @@ object GenericRateLimiter: */ trait Executor[Returns[_[_]] <: Strategy[_]]: - /** Returns a future that will be completed when the operation is executed + /** Returns a future that will be completed when the operation is execute. It can be used for queueing mechanisms. 
*/ - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Future[Result[T]] + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Option[Future[Unit]] - /** Ensures that the future returned by `add` is completed + /** Ensures that the future returned by `add` can be completed */ - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit + + /** Executes the operation and returns the expected result depending on the strategy + */ + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T] /** Runs the operation and returns the result using the given strategy */ @@ -75,14 +80,17 @@ object GenericRateLimiter: val block = new ConcurrentLinkedQueue[Promise[Unit]]() val schedule = new Semaphore(1) - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Future[Result[T]] = + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using + cfg: Strategy.Blocking[Result[*]] + ): Option[Future[Unit]] = val promise = Promise[Unit]() block.add(promise) - promise.future.map { _ => - run(operation) - } + Some(promise.future) + + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result]): Result[T] = + cfg.run(operation) - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Unit = + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Unit = // can't be called with empty queue if algorithm.tryAcquire then val p = block.poll() @@ -117,21 +125,19 @@ object GenericRateLimiter: */ case class Drop() extends Executor[Strategy.Dropping]: - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Future[Result[T]] = - val promise = Promise[Unit]() - val f = promise.future - if algorithm.tryAcquire then promise.success(()) - else promise.failure(new Exception("Rate limiter is full")) - f.map { _ => - cfg.run(operation) - }.recover { case e: Exception => - None.asInstanceOf[Result[T]] - } + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using + cfg: Strategy.Dropping[Result[*]] + ): Option[Future[Unit]] = + None end add - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Unit = + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Unit = () + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Result[T] = + if algorithm.tryAcquire then cfg.run(operation) + else None.asInstanceOf[Result[T]] + end Drop /** Block rejected operations until the rate limiter is ready to accept them @@ -141,19 +147,28 @@ object GenericRateLimiter: val blockExecutor = Block() val dropExecutor = Drop() - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Future[Result[T]] = + def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using + cfg: Strategy.BlockOrDrop[Result] + ): Option[Future[Unit]] = cfg match case 
cfg: Strategy.Block => - blockExecutor.add(algorithm, operation)(using cfg) + blockExecutor.add(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) case cfg: Strategy.Drop => dropExecutor.add(algorithm, operation)(using cfg) - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Unit = + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Result[T] = cfg match case cfg: Strategy.Block => blockExecutor.execute(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) case cfg: Strategy.Drop => dropExecutor.execute(algorithm, operation)(using cfg) + + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Unit = + cfg match + case cfg: Strategy.Block => + blockExecutor.schedule(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) + case cfg: Strategy.Drop => + dropExecutor.schedule(algorithm, operation)(using cfg) end BlockOrDrop end Executor diff --git a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala index 25337ed7..f30bde65 100644 --- a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala @@ -6,6 +6,7 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.{EitherValues, TryValues} import ox.util.ElapsedTime import scala.concurrent.duration._ +import java.util.concurrent.atomic.AtomicReference class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: @@ -46,7 +47,6 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues 0 } - val before = System.nanoTime() val result1 = rateLimiter(operation) val result2 = rateLimiter(operation) val result3 = rateLimiter(operation) @@ -141,10 +141,10 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) ) - var order = List.empty[Int] + val order = new AtomicReference(List.empty[Int]) def operationN(n: Int) = { rateLimiter { - order = n :: order + order.updateAndGet(ord => n :: ord) n } } @@ -196,7 +196,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues (time3-time1) should be <= 2200L (time4-time1) should be <= 3200L (time5-time1) should be <= 4200L - order should be (List(9, 8,7,6,5,4, 3,2,1)) + order.get() should be (List(9, 8,7,6,5,4, 3,2,1)) } @@ -319,10 +319,10 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) ) - var order = List.empty[Int] + val order = new AtomicReference(List.empty[Int]) def operationN(n: Int) = { rateLimiter { - order = n :: order + order.updateAndGet(ord => n :: ord) n } } @@ -352,7 +352,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues (time3-time1) should be >= 1300L - 10 (time2-time1) should be <= 1100L (time3-time1) should be <= 1400L - order should be (List(4, 3,2,1)) + order.get() should be (List(4, 3,2,1)) } behavior of "token bucket GenericRateLimiter" @@ -371,7 +371,6 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues val result1 = rateLimiter(operation) val result2 = rateLimiter(operation) - val result3 = 
rateLimiter(operation) result1 shouldBe Some(0) result2 shouldBe None @@ -474,10 +473,10 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) ) - var order = List.empty[Int] + val order = new AtomicReference(List.empty[Int]) def operationN(n: Int) = { rateLimiter { - order = n :: order + order.updateAndGet(ord => n :: ord) n } } @@ -505,7 +504,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues (time3-time1) should be >= 3000L - 10 (time2-time1) should be <= 2200L (time3-time1) should be <= 3200L - order should be (List(4, 3,2,1)) + order.get() should be (List(4, 3,2,1)) } behavior of "leaky bucket GenericRateLimiter" @@ -652,10 +651,10 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) ) - var order = List.empty[Int] + val order = new AtomicReference(List.empty[Int]) def operationN(n: Int) = { rateLimiter { - order = n :: order + order.updateAndGet(ord => n :: ord) n } } @@ -683,7 +682,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues (time3-time1) should be >= 2000L - 10 (time2-time1) should be <= 1200L (time3-time1) should be <= 2200L - order should be (List(4, 3,2,1)) + order.get() should be (List(4, 3,2,1)) } end GenericRateLimiterTest \ No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 79b1c4ed..24167f3b 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -33,7 +33,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T } it should "drop or block operation depending on method used for sliding window algorithm" in { - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) var executions = 0 def operation = { @@ -54,7 +54,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T } it should "drop or block operation depending on method used for token bucket algorithm" in { - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) var executions = 0 def operation = { @@ -68,10 +68,10 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T val result4 = rateLimiter.runBlocking(operation) result1 shouldBe Some(0) - result2 shouldBe Some(0) + result2 shouldBe None result3 shouldBe None result4 shouldBe 0 - executions shouldBe 3 + executions shouldBe 2 } it should "drop or block operation depending on method used for leaky bucker algorithm" in { diff --git a/doc/custom-rate-limiter.md b/doc/custom-rate-limiter.md new file mode 100644 index 00000000..6e08c74c --- /dev/null +++ b/doc/custom-rate-limiter.md @@ -0,0 +1,81 @@ +# Custom rate limiter +A rate limiter depends on an algorithm controlling whether an operation can be executed and a executor controlling the exact behaviour after rejecting an operation. The `RateLimiterAlgorithm` can be modified and used with the existing `RateLimiter` API. The executor can also be customized by using a different API: `GenericRateLimiter`. 
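To make the first of these customization points concrete, here is a minimal sketch (not part of the library) of what a hand-rolled algorithm plugged into the existing `RateLimiter` API could look like. The `SingleOperationPerInterval` name and its exact member set are illustrative assumptions only — the actual interface is described in the sections below, and additional members (for example `reset`) may be required depending on the version of the trait:

```scala
import java.util.concurrent.Semaphore
import java.util.concurrent.atomic.AtomicLong
import scala.concurrent.duration.*
import ox.resilience.*

// Illustrative only: allows at most one operation per `interval`, mirroring the
// semaphore-based style of the built-in algorithms.
case class SingleOperationPerInterval(interval: FiniteDuration) extends RateLimiterAlgorithm:
  private val semaphore = new Semaphore(1)
  private val lastUpdate = new AtomicLong(System.nanoTime())

  // blocks until a permit is available
  def acquire: Unit =
    tryUnblock
    semaphore.acquire()

  // tries to acquire a permit without blocking
  def tryAcquire: Boolean =
    tryUnblock
    semaphore.tryAcquire()

  // must not modify internal state
  def isReady: Boolean =
    semaphore.availablePermits() > 0

  // time (in nanoseconds) until the next operation could be accepted
  def computeNextTime(): Long =
    lastUpdate.get() + interval.toNanos - System.nanoTime()

  // restores the permit once the interval has elapsed
  def reset: Unit =
    lastUpdate.set(System.nanoTime())
    if semaphore.availablePermits() == 0 then semaphore.release(1)

  private def tryUnblock: Unit =
    if lastUpdate.get() + interval.toNanos < System.nanoTime() then reset

// the custom algorithm can then be used through the regular rate limiter API
val limiter = RateLimiter(SingleOperationPerInterval(1.second))
val result: Option[Int] = limiter.runOrDrop(42)
```

The sections below describe the pieces this sketch relies on in more detail.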
+
+## Generic rate limiter
+The generic rate limiter API provides the means to build rate limiters with custom execution policies. This can be useful for implementing more complex policies, such as throttling of operations.
+
+The basic syntax for generic rate limiters is:
+
+```scala
+val executor = GenericRateLimiter.Executor.Drop()
+val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "seconds"))
+val rateLimiter = GenericRateLimiter(executor, algorithm)
+type T
+def operation: T = ???
+
+val result: Option[T] = rateLimiter(operation)
+```
+
+You can also specify the desired execution strategy:
+
+```scala
+val executor = GenericRateLimiter.Executor.BlockOrDrop()
+val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "seconds"))
+val rateLimiter = GenericRateLimiter(executor, algorithm)
+type T
+def operation: T = ???
+
+// This doesn't work because the rate limiter doesn't know which strategy to choose for the current executor
+//val result = rateLimiter(operation)
+val resultDrop: Option[T] = rateLimiter(operation)(using GenericRateLimiter.Strategy.Drop())
+val resultBlock: T = rateLimiter(operation)(using GenericRateLimiter.Strategy.Block())
+```
+
+Note that, by changing the strategy used, it's possible to obtain different return types from the same rate limiter.
+
+## Executor
+
+A `GenericRateLimiter` is defined by its executor and by its algorithm:
+
+```scala
+case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]](
+  executor: Executor[Returns],
+  algorithm: RateLimiterAlgorithm
+):
+  def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T]
+```
+
+The `Executor` and `Strategy` API are as follows:
+
+```scala
+sealed trait Strategy[F[*]]:
+  def run[T](operation: => T): F[T]
+
+trait Executor[Returns[_[_]] <: Strategy[_]]:
+  def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Option[Future[Unit]]
+  def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit
+  def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T]
+  def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] // calls Strategy.run
+end Executor
+```
+
+To create a custom executor, you need to define a custom `Returns` higher-kinded trait extending `Strategy` that codifies the possible behaviours of the executor, or use one of the predefined ones. The operations in `Executor` then use the leaf classes extending your custom `Returns` to implement the behaviour. When calling your custom rate limiter, you will need to make the desired execution strategy available through implicits; it can be changed per operation.
+
+### Predefined strategies
+`Strategy` is extended by three traits:
+- Trait `Strategy.Blocking` gives the same return type as the operation to be executed.
+- Trait `Strategy.Dropping` gives an `Option[T]` for an operation of type `=> T`.
+- Trait `Strategy.BlockOrDrop` allows both types of return depending on the subclass employed.
+
+The traits are implemented by `Strategy.Drop` and `Strategy.Block`.
+
+### Custom strategies
+Custom strategies allow not only specifying the return type, but also passing other information to the `Executor`. A possible use could be to add a timeout.
The strategy could be defined like this:
+```scala
+type Id[A] = A
+sealed trait CustomStrategy[F[*]] extends Strategy[F]
+case class DropAfterTimeout(timeout: Long) extends CustomStrategy[Option]
+case class RunAfterTimeout(timeout: Long) extends CustomStrategy[Id]
+```
+
+## Rate limiter algorithm
+The `RateLimiterAlgorithm` employed by `RateLimiter` and `GenericRateLimiter` can be extended to implement new algorithms or to modify existing ones. Its interface is modelled like that of a `Semaphore`, although the underlying implementation could be different.
\ No newline at end of file
diff --git a/doc/index.md b/doc/index.md
index 9017660b..7957f41a 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -62,6 +62,8 @@ In addition to this documentation, ScalaDocs can be browsed at [https://javadoc.
oxapp
retries
+ rate-limiter
+ custom-rate-limiter
repeat
scheduled
resources
diff --git a/doc/rate-limiter.md b/doc/rate-limiter.md
new file mode 100644
index 00000000..6f57c4dc
--- /dev/null
+++ b/doc/rate-limiter.md
@@ -0,0 +1,46 @@
+# Rate limiter
+The rate limiter mechanism allows controlling the rate at which operations are executed. It ensures that a certain number of operations are performed within a specified time frame, preventing system overload and ensuring fair resource usage.
+
+## API
+
+The basic syntax for rate limiters is:
+
+```scala
+import ox.resilience.*
+import scala.concurrent.duration.*
+
+val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "seconds"))
+val rateLimiter = RateLimiter(algorithm)
+
+type T
+def operation: T = ???
+
+val blockedOperation: T = rateLimiter.runBlocking(operation)
+val droppedOperation: Option[T] = rateLimiter.runOrDrop(operation)
+```
+
+`blockedOperation` will block until the algorithm allows the operation to be executed; therefore, its return type is the same as that of the operation. On the other hand, if the algorithm doesn't allow the execution of more operations, `runOrDrop` will drop the operation and return `None`, wrapping the result in `Some` when the operation is executed successfully.
+The `RateLimiter` API uses the `GenericRateLimiter` API underneath. See [custom rate limiters](custom-rate-limiter.md) for more details.
+
+## Operation definition
+
+The `operation` can be provided directly using a by-name parameter, i.e. `f: => T`.
+
+## Configuration
+
+The configuration of a `RateLimiter` depends on an underlying algorithm that controls whether an operation can be executed or not. The following algorithms are available:
+- `RateLimiterAlgorithm.FixedRate(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in consecutive segments of duration `dur`, starting from the execution of the first operation.
+- `RateLimiterAlgorithm.SlidingWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed within a sliding window of duration `dur`.
+- `RateLimiterAlgorithm.TokenBucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum number of tokens available in the token bucket algorithm; one token is added every `dur`.
+- `RateLimiterAlgorithm.LeakyBucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity available in the leaky bucket algorithm; one unit of capacity leaks away every `dur`.
+
+It's possible to define your own algorithm. See [custom rate limiters](custom-rate-limiter.md) for more details.
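For illustration, a small sketch of how two of the algorithms listed above differ in practice (the counts, the durations and the `callExternalService` stand-in are arbitrary assumptions, not part of the library):

```scala
import ox.resilience.*
import scala.concurrent.duration.*

// at most 10 operations per second, counted over a sliding window
val slidingLimiter = RateLimiter(RateLimiterAlgorithm.SlidingWindow(10, 1.second))

// a bucket of up to 100 tokens, with one token refilled every 100 milliseconds
val bucketLimiter = RateLimiter(RateLimiterAlgorithm.TokenBucket(100, 100.millis))

def callExternalService(): String = ??? // stand-in for the rate-limited operation

// drop the call if the window is already full...
val maybeResponse: Option[String] = slidingLimiter.runOrDrop(callExternalService())

// ...or block until a token becomes available
val response: String = bucketLimiter.runBlocking(callExternalService())
```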
+### API shorthands + +You can use one of the following shorthands to define a Rate Limiter with the corresponding algorithm: + +- `RateLimiter.fixedRate(rate: Int, dur: FiniteDuration)`, +- `RateLimiter.slidingWindow(rate: Int, dur: FiniteDuration)`, +- `RateLimiter.tokenBucket(maximum: Int, dur: FiniteDuration)`, +- `RateLimiter.leakyBucket(maximum: Int, dur: FiniteDuration)`. + +See the tests in `ox.resilience.*` for more. From fe5aafc51f968d0dd2b11138872655f18027bac5 Mon Sep 17 00:00:00 2001 From: pablf Date: Wed, 30 Oct 2024 00:02:37 +0100 Subject: [PATCH 09/23] add fairness --- .../ox/resilience/GenericRateLimiter.scala | 59 +- .../scala/ox/resilience/RateLimiter.scala | 25 +- .../ox/resilience/RateLimiterAlgorithm.scala | 28 +- .../resilience/GenericRateLimiterTest.scala | 1160 +++++++++-------- .../scala/ox/resilience/RateLimiterTest.scala | 160 +-- doc/utils/rate-limiter.md | 6 +- 6 files changed, 752 insertions(+), 686 deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index 34fbd3b3..20e01db7 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -20,7 +20,7 @@ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = val future = executor.add(algorithm, operation) executor.schedule(algorithm, operation) - future.map(f => Await.result(f, Duration.Inf)) + future.map(f => Await.ready(f, Duration.Inf)) executor.execute(algorithm, operation) end apply end GenericRateLimiter @@ -75,7 +75,7 @@ object GenericRateLimiter: object Executor: /** Block rejected operations until the rate limiter is ready to accept them */ - case class Block() extends Executor[Strategy.Blocking]: + case class Block(fairness: Boolean = false) extends Executor[Strategy.Blocking]: val block = new ConcurrentLinkedQueue[Promise[Unit]]() val schedule = new Semaphore(1) @@ -83,22 +83,53 @@ object GenericRateLimiter: def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]] ): Option[Future[Unit]] = - val promise = Promise[Unit]() - block.add(promise) - Some(promise.future) + if fairness then + val promise = Promise[Unit]() + block.add(promise) + Some(promise.future) + else None def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result]): Result[T] = cfg.run(operation) def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Unit = // can't be called with empty queue - if algorithm.tryAcquire then - val p = block.poll() - p.success(()) - else - schedule.acquire() - val wt = algorithm.getNextTime() - releaseNext(algorithm, FiniteDuration(wt, "nanoseconds")) + if fairness then + if algorithm.tryAcquire then + val p = block.poll() + p.success(()) + else + schedule.acquire() + val wt = algorithm.getNextTime() + releaseNext(algorithm, FiniteDuration(wt, "nanoseconds")) + else if !algorithm.tryAcquire then + if schedule.tryAcquire() then + val wt = algorithm.getNextTime() + releaseUnfair(algorithm, FiniteDuration(wt, "nanoseconds")) + + algorithm.acquire + schedule.release() + + // schedules next release + val wt2 = algorithm.getNextTime() + releaseUnfair(algorithm, FiniteDuration(wt2, "nanoseconds")) + else + algorithm.acquire + // schedules next release + val wt = algorithm.getNextTime() + 
releaseUnfair(algorithm, FiniteDuration(wt, "nanoseconds")) + + private def releaseUnfair(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = + if waitTime.toNanos != 0 then + Future { + val wt1 = waitTime.toMillis + val wt2 = waitTime.toNanos - wt1 * 1000000 + blocking(Thread.sleep(wt1, wt2.toInt)) + }.onComplete { _ => + algorithm.reset + } + end if + end releaseUnfair private def releaseNext(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = // sleeps waitTime and fulfills next promise in queue @@ -142,9 +173,9 @@ object GenericRateLimiter: /** Block rejected operations until the rate limiter is ready to accept them */ - case class BlockOrDrop() extends Executor[Strategy.BlockOrDrop]: + case class BlockOrDrop(fairness: Boolean = false) extends Executor[Strategy.BlockOrDrop]: - val blockExecutor = Block() + val blockExecutor = Block(fairness) val dropExecutor = Drop() def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index f3c5ec5a..8371439b 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -5,11 +5,12 @@ import scala.concurrent.duration.* /** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an operation. */ case class RateLimiter( - algorithm: RateLimiterAlgorithm + algorithm: RateLimiterAlgorithm, + fairness: Boolean = false ): import GenericRateLimiter.* - private val rateLimiter = GenericRateLimiter(Executor.BlockOrDrop(), algorithm) + private val rateLimiter = GenericRateLimiter(Executor.BlockOrDrop(fairness), algorithm) /** Blocks the operation until the rate limiter allows it. 
*/ @@ -25,30 +26,34 @@ object RateLimiter: def leakyBucket( capacity: Int, - leakInterval: FiniteDuration + leakInterval: FiniteDuration, + fairness: Boolean = false ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval)) + RateLimiter(RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval), fairness) end leakyBucket def tokenBucket( maxTokens: Int, - refillInterval: FiniteDuration + refillInterval: FiniteDuration, + fairness: Boolean = false ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval)) + RateLimiter(RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval), fairness) end tokenBucket def fixedRate( maxRequests: Int, - windowSize: FiniteDuration + windowSize: FiniteDuration, + fairness: Boolean = false ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) + RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize), fairness) end fixedRate def slidingWindow( maxRequests: Int, - windowSize: FiniteDuration + windowSize: FiniteDuration, + fairness: Boolean = false ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) + RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize), fairness) end slidingWindow end RateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index 58d1befe..9a31afdf 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -11,6 +11,7 @@ import java.util.concurrent.Semaphore /** Determines the algorithm to use for the rate limiter */ trait RateLimiterAlgorithm: + /** Acquire a permit to execute the operation. This method should block until a permit is available. */ def acquire: Unit @@ -19,12 +20,16 @@ trait RateLimiterAlgorithm: */ def tryAcquire: Boolean - /** Returns whether the rate limiter is ready to accept a new operation without modifying internal state + /** Returns whether the rate limiter is ready to accept a new operation without modifying internal state. */ def isReady: Boolean - /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. IT should not modify - * internal state + /** Updates the internal state of the rate limiter to check whether new operations can be accepted. + */ + def reset: Unit + + /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. It should not modify + * internal state. 
*/ def getNextTime(): Long = if isReady then 0 @@ -57,10 +62,12 @@ object RateLimiterAlgorithm: def computeNextTime(): Long = lastUpdate.get() + per.toNanos - System.nanoTime() + def reset: Unit = + lastUpdate.set(System.nanoTime()) + semaphore.release(rate) + private def tryUnblock: Unit = - if lastUpdate.get() + per.toNanos < System.nanoTime() then - lastUpdate.set(System.nanoTime()) - semaphore.release(rate) + if lastUpdate.get() + per.toNanos < System.nanoTime() then reset end FixedRate @@ -91,6 +98,9 @@ object RateLimiterAlgorithm: def computeNextTime(): Long = log.peek() + per.toNanos - System.nanoTime() + def reset: Unit = + tryUnblock + private def tryUnblock: Unit = val now = System.nanoTime() while semaphore.availablePermits() < rate && log.peek() < now - per.toNanos do @@ -120,6 +130,9 @@ object RateLimiterAlgorithm: def computeNextTime(): Long = lastRefillTime.get() + refillInterval - System.nanoTime() + def reset: Unit = + refillTokens + private def refillTokens: Unit = val now = System.nanoTime() val elapsed = now - lastRefillTime.get() @@ -150,6 +163,9 @@ object RateLimiterAlgorithm: def computeNextTime(): Long = lastLeakTime.get() + leakInterval - System.nanoTime() + def reset: Unit = + leak + private def leak: Unit = val now = System.nanoTime() val lastLeak = lastLeakTime.get() diff --git a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala index f30bde65..088f66e5 100644 --- a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala @@ -10,679 +10,687 @@ import java.util.concurrent.atomic.AtomicReference class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: - behavior of "fixed rate GenericRateLimiter" + + List(false, true).foreach { fairness => + + behavior of s"fixed rate GenericRateLimiter with fairness=$fairness" + + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() + + var executions = 0 + def operation = { + executions +=1 + 0 + } - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) - var executions = 0 - def operation = { - executions +=1 - 0 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) - it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) + var 
executions = 0 + def operation = { + executions +=1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + executions +=1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val before = System.currentTimeMillis() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + val after = System.currentTimeMillis() + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + (after-before) should be >= 1000L + executions shouldBe 3 } - val before = System.currentTimeMillis() - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - val after = System.currentTimeMillis() - - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - (after-before) should be >= 1000L - executions shouldBe 3 - } - - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } } - } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result6 = operationN(6) - val result7 = operationN(7) // blocks until 3 seconds elapsed - val time4 = System.currentTimeMillis() - val result8 = operationN(8) - val result9 = operationN(9) // blocks until 4 seconds elapsed - val time5 = System.currentTimeMillis() - - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - result6 shouldBe 6 - result7 shouldBe 7 - result8 shouldBe 8 - result9 shouldBe 9 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time4-time1) should be >= 3000L - 10 - (time5-time1) should be >= 
4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - (time4-time1) should be <= 3200L - (time5-time1) should be <= 4200L - order should be (List(9, 8,7,6,5,4, 3,2,1)) - } + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result6 = operationN(6) + val result7 = operationN(7) // blocks until 3 seconds elapsed + val time4 = System.currentTimeMillis() + val result8 = operationN(8) + val result9 = operationN(9) // blocks until 4 seconds elapsed + val time5 = System.currentTimeMillis() + + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + result6 shouldBe 6 + result7 shouldBe 7 + result8 shouldBe 8 + result9 shouldBe 9 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time4-time1) should be >= 3000L - 10 + (time5-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + (time4-time1) should be <= 3200L + (time5-time1) should be <= 4200L + order should be (List(9, 8,7,6,5,4, 3,2,1)) + } - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } + } - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + var time2 = 0L + var time3 = 0L + var time4 = 0L + + + val time1 = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + forkUser: + sleep(200.millis) + operationN(5) + time3 = System.currentTimeMillis + forkUser: + sleep(250.millis) + operationN(6) + forkUser: + sleep(300.millis) + operationN(7) + time4 = System.currentTimeMillis + forkUser: + sleep(350.millis) + operationN(8) + forkUser: + sleep(400.millis) + operationN(9) } + val time5 = System.currentTimeMillis() + + if fairness then + order.get() should be (List(9, 8,7,6,5,4, 3,2,1)) + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time4-time1) should be >= 3000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + (time4-time1) should be <= 3200L + (time5-time1) should be >= 4000L - 10 + (time5-time1) should be <= 4200L } - var time2 = 0L - var time3 = 0L - var time4 = 0L - - - val time1 = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) - forkUser: - sleep(200.millis) - operationN(5) - time3 = 
System.currentTimeMillis - forkUser: - sleep(250.millis) - operationN(6) - forkUser: - sleep(300.millis) - operationN(7) - time4 = System.currentTimeMillis - forkUser: - sleep(350.millis) - operationN(8) - forkUser: - sleep(400.millis) - operationN(9) - } - val time5 = System.currentTimeMillis() - - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time4-time1) should be >= 3000L - 10 - (time5-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - (time4-time1) should be <= 3200L - (time5-time1) should be <= 4200L - order.get() should be (List(9, 8,7,6,5,4, 3,2,1)) - } + behavior of s"sliding window GenericRateLimiter with fairness=$fairness" - behavior of "sliding window GenericRateLimiter" + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + executions +=1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 - } + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) - it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + executions +=1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } - - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + + executions +=1 + 0 + } - var executions = 0 - def operation = { - - executions +=1 - 0 - } + val ((result1, result2, 
result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 } - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + ox.sleep(500.millis) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 1500L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 1700L + order should be (List(4, 3,2,1)) } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - ox.sleep(500.millis) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed - - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 1500L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 1700L - order should be (List(4, 3,2,1)) - } - - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } + } - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + var time2 = 0L + var time3 = 0L + + + val time1 = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(300.millis) + operationN(2) + forkUser: + sleep(400.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(700.millis) + operationN(4) + 
time3 = System.currentTimeMillis } + + if fairness then + order.get() should be (List(4, 3,2,1)) + (time2-time1) should be >= 1000L - 10 + (time2-time1) should be <= 1100L + (time3-time1) should be >= 1300L - 10 + (time3-time1) should be <= 1400L } - var time2 = 0L - var time3 = 0L - - - val time1 = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(300.millis) - operationN(2) - forkUser: - sleep(400.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(700.millis) - operationN(4) - time3 = System.currentTimeMillis - } - - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 1300L - 10 - (time2-time1) should be <= 1100L - (time3-time1) should be <= 1400L - order.get() should be (List(4, 3,2,1)) - } + behavior of s"token bucket GenericRateLimiter with fairness=$fairness" - behavior of "token bucket GenericRateLimiter" + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + executions +=1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + executions shouldBe 1 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) + it should "refill token after time elapsed from last refill and not before" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) - result1 shouldBe Some(0) - result2 shouldBe None - executions shouldBe 1 - } - it should "refill token after time elapsed from last refill and not before" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + executions +=1 + 0 + } + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(600.millis) + val result3 = rateLimiter(operation) - var executions = 0 - def operation = { - executions +=1 - 0 + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe Some(0) + executions shouldBe 2 } - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(600.millis) - val result3 = rateLimiter(operation) + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe Some(0) - executions shouldBe 2 - } + var executions = 0 + def operation = { + executions +=1 + 0 + } - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + + val r3 = 
rateLimiter(operation) + (r1, r2, r3) + } - var executions = 0 - def operation = { - executions +=1 - 0 + result1 shouldBe 0 + result2 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 } - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } + } - val r3 = rateLimiter(operation) - (r1, r2, r3) + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val time2 = System.currentTimeMillis() // 1 second + sleep(2.seconds) //add 2 tokens + val result3 = operationN(3) //blocks until 1 second elapsed + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time4 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 3000L - 10 + (time4-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 3200L + (time4-time1) should be <= 4200L + order should be (List(5, 4, 3,2,1)) } - result1 shouldBe 0 - result2 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } - - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } + } - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n + var time2 = 0L + + val time1 = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) } + val time3 = System.currentTimeMillis() + + if fairness then + order.get() should be (List(4, 3,2,1)) + (time2-time1) should be >= 2000L - 10 + (time2-time1) should be <= 2200L + (time3-time1) should be >= 3000L - 10 + (time3-time1) should be <= 3200L } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val time2 = System.currentTimeMillis() // 1 second - sleep(2.seconds) //add 2 tokens - val result3 = operationN(3) //blocks until 1 second elapsed - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time4 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 3000L - 10 - 
(time4-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 3200L - (time4-time1) should be <= 4200L - order should be (List(5, 4, 3,2,1)) - } + behavior of s"leaky bucket GenericRateLimiter with fairness=$fairness" - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) ) - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + var executions = 0 + def operation = { + executions +=1 + 0 } - } - - var time2 = 0L - - val time1 = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) - } - val time3 = System.currentTimeMillis() - - (time2-time1) should be >= 2000L - 10 - (time3-time1) should be >= 3000L - 10 - (time2-time1) should be <= 2200L - (time3-time1) should be <= 3200L - order.get() should be (List(4, 3,2,1)) - } - behavior of "leaky bucket GenericRateLimiter" + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions +=1 - 0 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } + it should "reject operation before leaking and accepting after it" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - it should "reject operation before leaking and accepting after it" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + executions +=1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(400.millis) + val result3 = rateLimiter(operation) + ox.sleep(101.millis) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(400.millis) - val result3 = rateLimiter(operation) - ox.sleep(101.millis) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } + it should "restart rate limiter after given duration" in { + val rateLimiter = 
GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + executions +=1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + var executions = 0 + def operation = { + + executions +=1 + 0 + } - var executions = 0 - def operation = { - - executions +=1 - 0 - } + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 } - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + order should be (List(4, 3,2,1)) } - val time1 = System.currentTimeMillis() 
// 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - order should be (List(4, 3,2,1)) - } + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } } - } - var time2 = 0L - - val time1 = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) + var time2 = 0L + + val time1 = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + time2 = System.currentTimeMillis + forkUser: + sleep(150.millis) + operationN(4) + } + val time3 = System.currentTimeMillis() + + if fairness then + order.get() should be (List(4, 3,2,1)) + (time2-time1) should be >= 1000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be >= 2000L - 10 + (time3-time1) should be <= 2200L } - val time3 = System.currentTimeMillis() - - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - order.get() should be (List(4, 3,2,1)) } end GenericRateLimiterTest \ No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 24167f3b..13e53865 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -9,88 +9,92 @@ import scala.concurrent.duration._ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: - behavior of "RateLimiter" - - it should "drop or block operation depending on method used for fixed rate algorithm" in { - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + List(false, true).foreach { fairness => + + behavior of s"RateLimiter with fairness=$fairness" + + it should "drop or block operation depending on method used for fixed rate algorithm" in { + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val 
result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 - } - - it should "drop or block operation depending on method used for sliding window algorithm" in { - val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 - } - - it should "drop or block operation depending on method used for token bucket algorithm" in { - val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 + it should "drop or block operation depending on method used for sliding window algorithm" in { + val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 2 - } - - it should "drop or block operation depending on method used for leaky bucker algorithm" in { - val rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 + it should "drop or block operation depending on method used for token bucket algorithm" in { + val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 2 } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 - } \ No newline at end of file + it should "drop or block operation depending on method used for leaky bucker algorithm" in { + val rateLimiter = RateLimiter.leakyBucket(2, 
FiniteDuration(1, "second"))
+
+      var executions = 0
+      def operation = {
+        executions += 1
+        0
+      }
+
+      val result1 = rateLimiter.runOrDrop(operation)
+      val result2 = rateLimiter.runOrDrop(operation)
+      val result3 = rateLimiter.runOrDrop(operation)
+      val result4 = rateLimiter.runBlocking(operation)
+
+      result1 shouldBe Some(0)
+      result2 shouldBe Some(0)
+      result3 shouldBe None
+      result4 shouldBe 0
+      executions shouldBe 3
+    }
+  }
+  
\ No newline at end of file
diff --git a/doc/utils/rate-limiter.md b/doc/utils/rate-limiter.md
index 6f57c4dc..93f7dc86 100644
--- a/doc/utils/rate-limiter.md
+++ b/doc/utils/rate-limiter.md
@@ -10,6 +10,8 @@ import ox.resilience.*
 val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "seconds"))
 val rateLimiter = RateLimiter(algorithm)
+//val fairness = true
+//val rateLimiter = RateLimiter(algorithm, fairness)
 type T
 def operation: T = ???
@@ -18,7 +20,7 @@ val blockedOperation: T = rateLimiter.runBlocking(operation)
 val droppedOperation: Some[T] = rateLimiter.runOrDrop(operation)
 ```
-`blockedOperation` will block the operation until the algorithm allows it to be executed. Therefore, the return type is the same as the operation. On the other hand, if the algorithm doesn't allow execution of more operations, `runOrDrop` will drop the operation returning `None` and wrapping the result in `Some` when the operation is successfully executed.
+`blockedOperation` blocks until the algorithm allows the operation to be executed, so its return type is the same as the operation's. On the other hand, if the algorithm doesn't allow more operations to be executed, `runOrDrop` drops the operation and returns `None`, wrapping the result in `Some` when the operation is executed successfully. The fairness policy applied when blocking can be specified per rate limiter and defaults to `false`. If the rate limiter is fair, blocked calls are executed in order of arrival; otherwise, the first blocked operation might not be the first one executed after unblocking.
 The `RateLimiter` API uses the `GenericRateLimiter` API underneath. See [custom rate limiters](custom-rate-limiter.md) for more details.
 ## Operation definition
@@ -33,7 +35,7 @@ The configuration of a `RateLimiter` depends on an underlying algorithm that con
 - `RateLimiterAlgorithm.TokenBucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity of tokens available in the token bucket algorithm and one token is added after `dur`.
 - `RateLimiterAlgorithm.LeakyBucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity available in the leaky bucket algorithm and 0 capacity is achieved after `dur` duration.
-It's possible to define your own algorithm. See [custom rate limiters](custom-rate-limiter.md) for more details.
+Fairness can also be specified in each of these methods. It's also possible to define your own algorithm. See [custom rate limiters](custom-rate-limiter.md) for more details.
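For illustration, here is a minimal sketch (not part of the patch itself) of how the algorithms and the fairness flag described above can be combined; it assumes the `RateLimiter(algorithm, fairness)` constructor and the `RateLimiterAlgorithm` variants documented here, and `fetchPage` is a hypothetical operation:

```scala
import ox.resilience.*
import scala.concurrent.duration.*

// Each limiter allows at most 10 operations per 1-second interval,
// using the algorithm constructors described above.
val fixed   = RateLimiter(RateLimiterAlgorithm.FixedRate(10, 1.second))
val sliding = RateLimiter(RateLimiterAlgorithm.SlidingWindow(10, 1.second))
val tokens  = RateLimiter(RateLimiterAlgorithm.TokenBucket(10, 1.second))
val leaky   = RateLimiter(RateLimiterAlgorithm.LeakyBucket(10, 1.second))

// A fair limiter executes blocked calls in order of arrival.
val fairLimiter = RateLimiter(RateLimiterAlgorithm.FixedRate(10, 1.second), fairness = true)

def fetchPage(): String = ??? // hypothetical operation

// Blocks until allowed; the return type is the same as the operation's.
val page: String = fairLimiter.runBlocking(fetchPage())

// Drops the call when not allowed; the result is wrapped in an Option.
val maybePage: Option[String] = fixed.runOrDrop(fetchPage())
```

With `fairness = true`, blocked `runBlocking` calls on the same limiter are released in the order they arrived; with the default unfair policy, release order after unblocking is not guaranteed.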
### API shorthands You can use one of the following shorthands to define a Rate Limiter with the corresponding algorithm: From 85c1a3668e84fc96e04e9049cae87fad247a40e9 Mon Sep 17 00:00:00 2001 From: pablf Date: Wed, 30 Oct 2024 00:23:08 +0100 Subject: [PATCH 10/23] fairness fixes --- .../ox/resilience/GenericRateLimiter.scala | 36 +++++++------------ 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index 20e01db7..29c1af3d 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -77,7 +77,7 @@ object GenericRateLimiter: */ case class Block(fairness: Boolean = false) extends Executor[Strategy.Blocking]: - val block = new ConcurrentLinkedQueue[Promise[Unit]]() + lazy val queue = new ConcurrentLinkedQueue[Promise[Unit]]() val schedule = new Semaphore(1) def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using @@ -85,7 +85,7 @@ object GenericRateLimiter: ): Option[Future[Unit]] = if fairness then val promise = Promise[Unit]() - block.add(promise) + queue.add(promise) Some(promise.future) else None @@ -96,44 +96,34 @@ object GenericRateLimiter: // can't be called with empty queue if fairness then if algorithm.tryAcquire then - val p = block.poll() + val p = queue.poll() p.success(()) else schedule.acquire() val wt = algorithm.getNextTime() releaseNext(algorithm, FiniteDuration(wt, "nanoseconds")) else if !algorithm.tryAcquire then - if schedule.tryAcquire() then - val wt = algorithm.getNextTime() - releaseUnfair(algorithm, FiniteDuration(wt, "nanoseconds")) - - algorithm.acquire - schedule.release() - - // schedules next release - val wt2 = algorithm.getNextTime() - releaseUnfair(algorithm, FiniteDuration(wt2, "nanoseconds")) - else - algorithm.acquire - // schedules next release - val wt = algorithm.getNextTime() - releaseUnfair(algorithm, FiniteDuration(wt, "nanoseconds")) + if schedule.tryAcquire() then releaseUnfair(algorithm, true) + algorithm.acquire + releaseUnfair(algorithm, false) - private def releaseUnfair(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = - if waitTime.toNanos != 0 then + private def releaseUnfair(algorithm: RateLimiterAlgorithm, releaseSchedule: Boolean): Unit = + val waitTime = algorithm.getNextTime() + if waitTime != 0 then Future { - val wt1 = waitTime.toMillis - val wt2 = waitTime.toNanos - wt1 * 1000000 + val wt1 = waitTime / 1000000 + val wt2 = waitTime - wt1 * 1000000 blocking(Thread.sleep(wt1, wt2.toInt)) }.onComplete { _ => algorithm.reset + if releaseSchedule then schedule.release() } end if end releaseUnfair private def releaseNext(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = // sleeps waitTime and fulfills next promise in queue - val p = block.poll() + val p = queue.poll() if waitTime.toNanos != 0 then Future { val wt1 = waitTime.toMillis From 1ac6caaeb9a06a65c8222246e80bc3f35ac1f910 Mon Sep 17 00:00:00 2001 From: pablf Date: Mon, 4 Nov 2024 14:06:18 +0100 Subject: [PATCH 11/23] fix --- .../ox/resilience/GenericRateLimiter.scala | 146 +-- .../scala/ox/resilience/RateLimiter.scala | 30 +- .../ox/resilience/RateLimiterAlgorithm.scala | 152 +-- .../resilience/GenericRateLimiterTest.scala | 1122 ++++++++--------- .../scala/ox/resilience/RateLimiterTest.scala | 160 ++- doc/utils/custom-rate-limiter.md | 4 +- doc/utils/rate-limiter.md | 2 +- 7 files changed, 764 insertions(+), 852 
deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index 29c1af3d..6bc1ade7 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -1,13 +1,14 @@ package ox.resilience -import java.util.concurrent.{ConcurrentLinkedQueue, Semaphore} -import scala.concurrent.{Await, Future, Promise} -import scala.concurrent.duration.{Duration, FiniteDuration} -import scala.concurrent.ExecutionContext.Implicits.global -import scala.concurrent.blocking +import java.util.concurrent.Semaphore import GenericRateLimiter.* import ox.resilience.GenericRateLimiter.Strategy.Blocking +import ox.* +/** Rate limiter which allows to pass a configuration value to the execution. This can include both runtime and compile time information, + * allowing for customization of return types and runtime behavior. If the only behavior needed is to block or drop operations, the + * `RateLimiter` class provides a simpler interface. + */ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( executor: Executor[Returns], algorithm: RateLimiterAlgorithm @@ -15,12 +16,10 @@ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( import GenericRateLimiter.Strategy.given - /** Limits the rate of execution of the given operation with custom Result type + /** Limits the rate of execution of the given operation with a custom Result type */ def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = - val future = executor.add(algorithm, operation) executor.schedule(algorithm, operation) - future.map(f => Await.ready(f, Duration.Inf)) executor.execute(algorithm, operation) end apply end GenericRateLimiter @@ -29,7 +28,8 @@ object GenericRateLimiter: type Id[A] = A - /** Describe the execution strategy that must be used by the rate limiter in a given operation + /** Describe the execution strategy that must be used by the rate limiter in a given operation. It allows the encoding of return types and + * custom runtime behavior. */ sealed trait Strategy[F[*]]: def run[T](operation: => T): F[T] @@ -49,23 +49,22 @@ object GenericRateLimiter: given Dropping[Option] = Drop() end Strategy - /** Determines the policy to apply when the rate limiter is full + /** Determines the policy to apply when the rate limiter is full. The executor is responsible of managing the inner state of the algorithm + * employed. In particular, it must ensure that operations are executed only if allowed and that the algorithm is updated. */ trait Executor[Returns[_[_]] <: Strategy[_]]: - /** Returns a future that will be completed when the operation is execute. It can be used for queueing mechanisms. + /** Performs any tasks needed to delay the operation or alter the execution mode. Usually, this will involve using `acquire` or + * `tryAcquire` methods from the algorithm and taking care of updating it. */ - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Option[Future[Unit]] + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Returns[Result]): Unit - /** Ensures that the future returned by `add` can be completed + /** Executes the operation and returns the expected result depending on the strategy. It might perform scheduling tasks if they are not + * independent from the execution. 
*/ - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Returns[Result]): Result[T] - /** Executes the operation and returns the expected result depending on the strategy - */ - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T] - - /** Runs the operation and returns the result using the given strategy + /** Runs the operation and returns the result using the given strategy. */ def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] = cfg.run(operation).asInstanceOf[Result[T]] @@ -73,110 +72,61 @@ object GenericRateLimiter: end Executor object Executor: - /** Block rejected operations until the rate limiter is ready to accept them + /** Block rejected operations until the rate limiter is ready to accept them. */ - case class Block(fairness: Boolean = false) extends Executor[Strategy.Blocking]: + case class Block() extends Executor[Strategy.Blocking]: - lazy val queue = new ConcurrentLinkedQueue[Promise[Unit]]() - val schedule = new Semaphore(1) + val updateLock = new Semaphore(0) - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using - cfg: Strategy.Blocking[Result[*]] - ): Option[Future[Unit]] = - if fairness then - val promise = Promise[Unit]() - queue.add(promise) - Some(promise.future) - else None + val schedule = new Semaphore(1) def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result]): Result[T] = cfg.run(operation) - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result[*]]): Unit = - // can't be called with empty queue - if fairness then - if algorithm.tryAcquire then - val p = queue.poll() - p.success(()) - else - schedule.acquire() - val wt = algorithm.getNextTime() - releaseNext(algorithm, FiniteDuration(wt, "nanoseconds")) - else if !algorithm.tryAcquire then - if schedule.tryAcquire() then releaseUnfair(algorithm, true) + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Strategy.Blocking[Result[*]]): Unit = + if !algorithm.tryAcquire then + // starts scheduler if not already running + if schedule.tryAcquire() then + supervised: + val _ = forkUser: + runScheduler(algorithm) + () algorithm.acquire - releaseUnfair(algorithm, false) - private def releaseUnfair(algorithm: RateLimiterAlgorithm, releaseSchedule: Boolean): Unit = + private def runScheduler(algorithm: RateLimiterAlgorithm): Unit = val waitTime = algorithm.getNextTime() - if waitTime != 0 then - Future { - val wt1 = waitTime / 1000000 - val wt2 = waitTime - wt1 * 1000000 - blocking(Thread.sleep(wt1, wt2.toInt)) - }.onComplete { _ => - algorithm.reset - if releaseSchedule then schedule.release() - } - end if - end releaseUnfair - - private def releaseNext(algorithm: RateLimiterAlgorithm, waitTime: FiniteDuration): Unit = - // sleeps waitTime and fulfills next promise in queue - val p = queue.poll() - if waitTime.toNanos != 0 then - Future { - val wt1 = waitTime.toMillis - val wt2 = waitTime.toNanos - wt1 * 1000000 - blocking(Thread.sleep(wt1, wt2.toInt)) - }.onComplete { _ => - algorithm.acquire - p.success(()) - schedule.release() - } - else - algorithm.acquire - p.success(()) - schedule.release() - end if - end releaseNext + algorithm.update + if waitTime > 0 then + val millis = waitTime / 1000000 + val nanos = waitTime % 
1000000 + Thread.sleep(millis, nanos.toInt) + runScheduler(algorithm) + else schedule.release() + end runScheduler + end Block - /** Drop rejected operations + /** Drops rejected operations */ case class Drop() extends Executor[Strategy.Dropping]: - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using - cfg: Strategy.Dropping[Result[*]] - ): Option[Future[Unit]] = - None - end add - - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Unit = + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Strategy.Dropping[Result[*]]): Unit = () def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Result[T] = + algorithm.update if algorithm.tryAcquire then cfg.run(operation) else None.asInstanceOf[Result[T]] end Drop - /** Block rejected operations until the rate limiter is ready to accept them + /** Blocks rejected operations until the rate limiter is ready to accept them or drops them depending on the choosen strategy. */ - case class BlockOrDrop(fairness: Boolean = false) extends Executor[Strategy.BlockOrDrop]: + case class BlockOrDrop() extends Executor[Strategy.BlockOrDrop]: - val blockExecutor = Block(fairness) + val blockExecutor = Block() val dropExecutor = Drop() - def add[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using - cfg: Strategy.BlockOrDrop[Result] - ): Option[Future[Unit]] = - cfg match - case cfg: Strategy.Block => - blockExecutor.add(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) - case cfg: Strategy.Drop => - dropExecutor.add(algorithm, operation)(using cfg) - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Result[T] = cfg match case cfg: Strategy.Block => @@ -184,8 +134,8 @@ object GenericRateLimiter: case cfg: Strategy.Drop => dropExecutor.execute(algorithm, operation)(using cfg) - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Unit = - cfg match + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Strategy.BlockOrDrop[Result]): Unit = + implicitly[Strategy.BlockOrDrop[Result]] match case cfg: Strategy.Block => blockExecutor.schedule(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) case cfg: Strategy.Drop => diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index 8371439b..d1fef5f6 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -1,22 +1,24 @@ package ox.resilience import scala.concurrent.duration.* +import ox.* /** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an operation. */ case class RateLimiter( - algorithm: RateLimiterAlgorithm, - fairness: Boolean = false + algorithm: RateLimiterAlgorithm ): import GenericRateLimiter.* - private val rateLimiter = GenericRateLimiter(Executor.BlockOrDrop(fairness), algorithm) + private val rateLimiter = + supervised: + GenericRateLimiter(Executor.BlockOrDrop(), algorithm) /** Blocks the operation until the rate limiter allows it. */ def runBlocking[T](operation: => T): T = rateLimiter(operation)(using Strategy.Block()) - /** Drops the operation if not allowed by the rate limiter. 
+ /** Drops the operation if not allowed by the rate limiter returning `None`. */ def runOrDrop[T](operation: => T): Option[T] = rateLimiter(operation)(using Strategy.Drop()) @@ -26,34 +28,30 @@ object RateLimiter: def leakyBucket( capacity: Int, - leakInterval: FiniteDuration, - fairness: Boolean = false + leakInterval: FiniteDuration ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval), fairness) + RateLimiter(RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval)) end leakyBucket def tokenBucket( maxTokens: Int, - refillInterval: FiniteDuration, - fairness: Boolean = false + refillInterval: FiniteDuration ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval), fairness) + RateLimiter(RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval)) end tokenBucket def fixedRate( maxRequests: Int, - windowSize: FiniteDuration, - fairness: Boolean = false + windowSize: FiniteDuration ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize), fairness) + RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) end fixedRate def slidingWindow( maxRequests: Int, - windowSize: FiniteDuration, - fairness: Boolean = false + windowSize: FiniteDuration ): RateLimiter = - RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize), fairness) + RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) end slidingWindow end RateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index 9a31afdf..f4304e05 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -5,8 +5,9 @@ import ox.resilience.RateLimiterAlgorithm.* import scala.concurrent.duration.* import java.util.concurrent.atomic.AtomicLong import scala.concurrent.* -import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.Semaphore +import java.util.{LinkedList, Queue} /** Determines the algorithm to use for the rate limiter */ @@ -20,93 +21,109 @@ trait RateLimiterAlgorithm: */ def tryAcquire: Boolean - /** Returns whether the rate limiter is ready to accept a new operation without modifying internal state. - */ - def isReady: Boolean - /** Updates the internal state of the rate limiter to check whether new operations can be accepted. */ - def reset: Unit + def update: Unit - /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. It should not modify - * internal state. + /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. It should return 0 only if + * there is no need of rescheduling an update in the future. It should not modify internal state. */ - def getNextTime(): Long = - if isReady then 0 - else computeNextTime() + def getNextTime(): Long - /** Compute the time until the next operation can be accepted. 
- */ - def computeNextTime(): Long end RateLimiterAlgorithm object RateLimiterAlgorithm: /** Fixed rate algorithm */ case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private lazy val lastUpdate = new AtomicLong(System.nanoTime()) + private val lastUpdate = new AtomicLong(System.nanoTime()) private val semaphore = new Semaphore(rate) + val lock = new java.util.concurrent.locks.ReentrantLock() def acquire: Unit = - tryUnblock semaphore.acquire() def tryAcquire: Boolean = - tryUnblock semaphore.tryAcquire() - def isReady: Boolean = - lastUpdate.get() - semaphore.availablePermits() > 0 - - def computeNextTime(): Long = - lastUpdate.get() + per.toNanos - System.nanoTime() + def getNextTime(): Long = + val waitTime = lastUpdate.get() + per.toNanos - System.nanoTime() + val q = semaphore.getQueueLength() + if waitTime > 0 then waitTime + else if q > 0 then per.toNanos + else 0L - def reset: Unit = - lastUpdate.set(System.nanoTime()) - semaphore.release(rate) - - private def tryUnblock: Unit = - if lastUpdate.get() + per.toNanos < System.nanoTime() then reset + def update: Unit = + val now = System.nanoTime() + lastUpdate.updateAndGet { time => + if time + per.toNanos < now then + semaphore.drainPermits() + semaphore.release(rate) + now + else time + } + () + end update end FixedRate /** Sliding window algorithm */ case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val log = new ConcurrentLinkedQueue[Long]() + private val log = new AtomicReference[Queue[Long]](new LinkedList[Long]()) private val semaphore = new Semaphore(rate) def acquire: Unit = - tryUnblock semaphore.acquire() val now = System.nanoTime() - log.add(now) + log.updateAndGet { q => + q.add(now) + q + } () + end acquire def tryAcquire: Boolean = - tryUnblock if semaphore.tryAcquire() then val now = System.nanoTime() - log.add(now) + log.updateAndGet { q => + q.add(now) + q + } true else false - def isReady: Boolean = - semaphore.availablePermits() > 0 - - def computeNextTime(): Long = - log.peek() + per.toNanos - System.nanoTime() - - def reset: Unit = - tryUnblock - - private def tryUnblock: Unit = + def getNextTime(): Long = + val furtherLog = log.get().peek() + if null eq furtherLog then + if semaphore.getQueueLength() > 0 then per.toNanos + else 0L + else + val waitTime = log.get().peek() + per.toNanos - System.nanoTime() + val q = semaphore.getQueueLength() + if waitTime > 0 then waitTime + else if q > 0 then + update + getNextTime() + else 0L + end if + end getNextTime + + def update: Unit = val now = System.nanoTime() - while semaphore.availablePermits() < rate && log.peek() < now - per.toNanos do - log.poll() - semaphore.release() - () + while semaphore.availablePermits() < rate && log + .updateAndGet { q => + if q.peek() < now - per.toNanos then + q.poll() + semaphore.release() + q + else q + } + .peek() < now - per.toNanos + do () + end while + end update + end SlidingWindow /** Token bucket algorithm @@ -117,23 +134,19 @@ object RateLimiterAlgorithm: private val semaphore = new Semaphore(1) def acquire: Unit = - refillTokens semaphore.acquire() def tryAcquire: Boolean = - refillTokens semaphore.tryAcquire() - def isReady: Boolean = - semaphore.availablePermits() > 0 - - def computeNextTime(): Long = - lastRefillTime.get() + refillInterval - System.nanoTime() + def getNextTime(): Long = + val waitTime = lastRefillTime.get() + refillInterval - System.nanoTime() + val q = semaphore.getQueueLength() + if waitTime > 0 then waitTime + else if q > 0 
then refillInterval + else 0L - def reset: Unit = - refillTokens - - private def refillTokens: Unit = + def update: Unit = val now = System.nanoTime() val elapsed = now - lastRefillTime.get() val newTokens = elapsed / refillInterval @@ -150,23 +163,19 @@ object RateLimiterAlgorithm: private val semaphore = new Semaphore(capacity) def acquire: Unit = - leak semaphore.acquire() def tryAcquire: Boolean = - leak semaphore.tryAcquire() - def isReady: Boolean = - semaphore.availablePermits() > 0 - - def computeNextTime(): Long = - lastLeakTime.get() + leakInterval - System.nanoTime() + def getNextTime(): Long = + val waitTime = lastLeakTime.get() + leakInterval - System.nanoTime() + val q = semaphore.getQueueLength() + if waitTime > 0 then waitTime + else if q > 0 then leakInterval + else 0L - def reset: Unit = - leak - - private def leak: Unit = + def update: Unit = val now = System.nanoTime() val lastLeak = lastLeakTime.get() val elapsed = now - lastLeak @@ -174,6 +183,7 @@ object RateLimiterAlgorithm: val newTime = leaking * leakInterval + lastLeak semaphore.release(leaking.toInt) lastLeakTime.set(newTime) - end leak + end update + end LeakyBucket end RateLimiterAlgorithm diff --git a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala index 088f66e5..6a1efe40 100644 --- a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala @@ -9,688 +9,646 @@ import scala.concurrent.duration._ import java.util.concurrent.atomic.AtomicReference class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: + behavior of "fixed rate GenericRateLimiter" - - List(false, true).foreach { fairness => + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() - behavior of s"fixed rate GenericRateLimiter with fairness=$fairness" + var executions = 0 + def operation = { + executions +=1 + 0 + } - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) - var executions = 0 - def operation = { - executions +=1 - 0 - } + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) + val result1 = rateLimiter(operation) + val 
result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } - var executions = 0 - def operation = { - executions +=1 - 0 - } + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + val before = System.currentTimeMillis() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + val after = System.currentTimeMillis() + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + (after-before) should be >= 1000L + executions shouldBe 3 + } - val before = System.currentTimeMillis() - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - val after = System.currentTimeMillis() - - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - (after-before) should be >= 1000L - executions shouldBe 3 - } + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result6 = operationN(6) - val result7 = operationN(7) // blocks until 3 seconds elapsed - val time4 = System.currentTimeMillis() - val result8 = operationN(8) - val result9 = operationN(9) // blocks until 4 seconds elapsed - val time5 = System.currentTimeMillis() - - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - result6 shouldBe 6 - result7 shouldBe 7 - result8 shouldBe 8 - result9 shouldBe 9 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time4-time1) should be >= 3000L - 10 - (time5-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - (time4-time1) should be 
<= 3200L - (time5-time1) should be <= 4200L - order should be (List(9, 8,7,6,5,4, 3,2,1)) } - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } - } + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result6 = operationN(6) + val result7 = operationN(7) // blocks until 3 seconds elapsed + val time4 = System.currentTimeMillis() + val result8 = operationN(8) + val result9 = operationN(9) // blocks until 4 seconds elapsed + val time5 = System.currentTimeMillis() + + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + result6 shouldBe 6 + result7 shouldBe 7 + result8 shouldBe 8 + result9 shouldBe 9 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time4-time1) should be >= 3000L - 10 + (time5-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + (time4-time1) should be <= 3200L + (time5-time1) should be <= 4200L + order should be (List(9, 8,7,6,5,4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) - var time2 = 0L - var time3 = 0L - var time4 = 0L - - - val time1 = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) - forkUser: - sleep(200.millis) - operationN(5) - time3 = System.currentTimeMillis - forkUser: - sleep(250.millis) - operationN(6) - forkUser: - sleep(300.millis) - operationN(7) - time4 = System.currentTimeMillis - forkUser: - sleep(350.millis) - operationN(8) - forkUser: - sleep(400.millis) - operationN(9) + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n } - val time5 = System.currentTimeMillis() - - if fairness then - order.get() should be (List(9, 8,7,6,5,4, 3,2,1)) - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time4-time1) should be >= 3000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - (time4-time1) should be <= 3200L - (time5-time1) should be >= 4000L - 10 - (time5-time1) should be <= 4200L } + val before = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) + forkUser: + sleep(200.millis) + operationN(5) + forkUser: + sleep(250.millis) + operationN(6) + forkUser: + sleep(300.millis) + operationN(7) + forkUser: + sleep(350.millis) + operationN(8) + forkUser: + sleep(400.millis) + operationN(9) + } + val after = 
System.currentTimeMillis() + + (after-before) should be >= 4000L - 10 + (after-before) should be <= 4200L + } - behavior of s"sliding window GenericRateLimiter with fairness=$fairness" - - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - var executions = 0 - def operation = { - executions +=1 - 0 - } + behavior of "sliding window GenericRateLimiter" - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - - executions +=1 - 0 - } + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - ox.sleep(500.millis) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = 
System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + var executions = 0 + def operation = { - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 1500L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 1700L - order should be (List(4, 3,2,1)) + executions +=1 + 0 } - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } - } + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) - var time2 = 0L - var time3 = 0L - - - val time1 = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(300.millis) - operationN(2) - forkUser: - sleep(400.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(700.millis) - operationN(4) - time3 = System.currentTimeMillis + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n } - - if fairness then - order.get() should be (List(4, 3,2,1)) - (time2-time1) should be >= 1000L - 10 - (time2-time1) should be <= 1100L - (time3-time1) should be >= 1300L - 10 - (time3-time1) should be <= 1400L } - behavior of s"token bucket GenericRateLimiter with fairness=$fairness" + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + ox.sleep(500.millis) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed + + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 1500L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 1700L + order should be (List(4, 3,2,1)) + } - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) - var executions = 0 - def operation = { - executions +=1 - 0 + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + 
rateLimiter { + order.updateAndGet(ord => n :: ord) + n } + } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) + val before = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(300.millis) + operationN(2) + forkUser: + sleep(400.millis) + operationN(3) + forkUser: + sleep(700.millis) + operationN(4) + } + val after = System.currentTimeMillis + + (after-before) should be >= 1300L - 10 + (after-before) should be <= 1400L + } + + behavior of "token bucket GenericRateLimiter" + + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) - result1 shouldBe Some(0) - result2 shouldBe None - executions shouldBe 1 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "refill token after time elapsed from last refill and not before" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + result1 shouldBe Some(0) + result2 shouldBe None + executions shouldBe 1 + } - var executions = 0 - def operation = { - executions +=1 - 0 - } + it should "refill token after time elapsed from last refill and not before" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(600.millis) - val result3 = rateLimiter(operation) - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe Some(0) - executions shouldBe 2 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(600.millis) + val result3 = rateLimiter(operation) - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe Some(0) + executions shouldBe 2 + } - val r3 = rateLimiter(operation) - (r1, r2, r3) - } + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) - result1 shouldBe 0 - result2 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) - val time1 = System.currentTimeMillis() // 0 seconds - val 
result1 = operationN(1) - val result2 = operationN(2) - val time2 = System.currentTimeMillis() // 1 second - sleep(2.seconds) //add 2 tokens - val result3 = operationN(3) //blocks until 1 second elapsed - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time4 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 3000L - 10 - (time4-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 3200L - (time4-time1) should be <= 4200L - order should be (List(5, 4, 3,2,1)) + val r3 = rateLimiter(operation) + (r1, r2, r3) } - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } - } + result1 shouldBe 0 + result2 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + ) - var time2 = 0L - - val time1 = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n } - val time3 = System.currentTimeMillis() - - if fairness then - order.get() should be (List(4, 3,2,1)) - (time2-time1) should be >= 2000L - 10 - (time2-time1) should be <= 2200L - (time3-time1) should be >= 3000L - 10 - (time3-time1) should be <= 3200L } - behavior of s"leaky bucket GenericRateLimiter with fairness=$fairness" + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val time2 = System.currentTimeMillis() // 1 second + sleep(2.seconds) //add 2 tokens + val result3 = operationN(3) //blocks until 1 second elapsed + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time4 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 3000L - 10 + (time4-time1) should be >= 4000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 3200L + (time4-time1) should be <= 4200L + order should be (List(5, 4, 3,2,1)) + } - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) ) - var executions = 0 - def operation = { - 
executions +=1 - 0 + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n } + } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 + val before = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) } + val after = System.currentTimeMillis() + + (after-before) should be >= 3000L - 10 + (after-before) should be <= 3200L + } - it should "reject operation before leaking and accepting after it" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - + behavior of "leaky bucket GenericRateLimiter" - var executions = 0 - def operation = { - executions +=1 - 0 - } + it should "drop operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(400.millis) - val result3 = rateLimiter(operation) - ox.sleep(101.millis) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + + it should "reject operation before leaking and accepting after it" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - var executions = 0 - def operation = { - executions +=1 - 0 - } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(400.millis) + val result3 = rateLimiter(operation) + ox.sleep(101.millis) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } - var executions = 0 - def operation = { - - executions +=1 - 0 - } + it should "restart rate limiter after given duration" in { + val rateLimiter = GenericRateLimiter( + 
GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 + } - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } + it should "block operation when rate limit is exceeded" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() + var executions = 0 + def operation = { - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - order should be (List(4, 3,2,1)) + executions +=1 + 0 } - it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + + it should "respect queueing order when blocking" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n } + } - var time2 = 0L - - val time1 = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - time2 = System.currentTimeMillis - forkUser: - sleep(150.millis) - operationN(4) + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) // blocks until 2 
seconds elapsed + val time3 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2-time1) should be >= 1000L - 10 + (time3-time1) should be >= 2000L - 10 + (time2-time1) should be <= 1200L + (time3-time1) should be <= 2200L + order should be (List(4, 3,2,1)) + } + + it should "respect queueing order when blocking concurrently" in { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n } - val time3 = System.currentTimeMillis() - - if fairness then - order.get() should be (List(4, 3,2,1)) - (time2-time1) should be >= 1000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be >= 2000L - 10 - (time3-time1) should be <= 2200L } + + + val before = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) + } + val after = System.currentTimeMillis() + + (after-before) should be >= 2000L - 10 + (after-before) should be <= 2200L } end GenericRateLimiterTest \ No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 13e53865..5240c0eb 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -4,97 +4,93 @@ import ox.* import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import org.scalatest.{EitherValues, TryValues} -import ox.util.ElapsedTime import scala.concurrent.duration._ -class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: +class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues: + behavior of "RateLimiter" - List(false, true).foreach { fairness => - - behavior of s"RateLimiter with fairness=$fairness" - - it should "drop or block operation depending on method used for fixed rate algorithm" in { - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 + it should "drop or block operation depending on method used for fixed rate algorithm" in { + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "drop or block operation depending on method used for sliding window algorithm" in { - val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions 
shouldBe 3 + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 + } + + it should "drop or block operation depending on method used for sliding window algorithm" in { + val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "drop or block operation depending on method used for token bucket algorithm" in { - val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 + } + + it should "drop or block operation depending on method used for token bucket algorithm" in { + val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 2 + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 2 + } + + it should "drop or block operation depending on method used for leaky bucker algorithm" in { + val rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 } - it should "drop or block operation depending on method used for leaky bucker algorithm" in { - val rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 + } - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 - } - } - \ No newline at end of file + \ No newline at end of file diff --git a/doc/utils/custom-rate-limiter.md b/doc/utils/custom-rate-limiter.md index 6e08c74c..75b96626 100644 --- a/doc/utils/custom-rate-limiter.md +++ b/doc/utils/custom-rate-limiter.md @@ -52,8 +52,8 @@ sealed trait Strategy[F[*]]: def run[T](operation: => T): F[T] trait Executor[Returns[_[_]] <: Strategy[_]]: - def add[T, 
Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Future[Result[T]] - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit + def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T] def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] // calls Strategy.run end Executor ``` diff --git a/doc/utils/rate-limiter.md b/doc/utils/rate-limiter.md index 93f7dc86..f4e09cbf 100644 --- a/doc/utils/rate-limiter.md +++ b/doc/utils/rate-limiter.md @@ -1,5 +1,5 @@ # Rate limiter -The rate limiter mechanism allows controlling the rate at which operations are executed. It ensures that a certain number of operations are performed within a specified time frame, preventing system overload and ensuring fair resource usage. +The rate limiter mechanism allows controlling the rate at which operations are executed. It ensures that a certain number of operations are performed within a specified time frame, preventing system overload and ensuring fair resource usage. Note that the implemented limiting mechanism within `Ox` only take into account the start of execution and not the whole execution of an operation. This could be tweaked customizing the rate limiter executor and algorithm employed. ## API From e288c4dd8e47d54fa6de53cc7ebb288a16713c68 Mon Sep 17 00:00:00 2001 From: pablf Date: Tue, 5 Nov 2024 13:33:59 +0100 Subject: [PATCH 12/23] change forking place --- .../ox/resilience/GenericRateLimiter.scala | 17 +- .../scala/ox/resilience/RateLimiter.scala | 13 +- .../resilience/GenericRateLimiterTest.scala | 1018 +++++++++-------- .../scala/ox/resilience/RateLimiterTest.scala | 132 +-- 4 files changed, 611 insertions(+), 569 deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index 6bc1ade7..59557f27 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -12,10 +12,13 @@ import ox.* case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( executor: Executor[Returns], algorithm: RateLimiterAlgorithm -): +)(using Ox): import GenericRateLimiter.Strategy.given + val _ = fork: + executor.update(algorithm) + /** Limits the rate of execution of the given operation with a custom Result type */ def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = @@ -58,6 +61,7 @@ object GenericRateLimiter: * `tryAcquire` methods from the algorithm and taking care of updating it. */ def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Returns[Result]): Unit + def update(algorithm: RateLimiterAlgorithm): Unit = () /** Executes the operation and returns the expected result depending on the strategy. It might perform scheduling tasks if they are not * independent from the execution. 
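Since this patch makes construction require an `Ox` capability (the limiter forks the executor's update loop), a `GenericRateLimiter` now has to be created inside a `supervised` scope. A minimal sketch of the resulting call pattern, mirroring the test changes further down in this patch; the 2-per-second window and the returned values are illustrative only:

```scala
import ox.*
import ox.resilience.*
import scala.concurrent.duration.*

supervised {
  // construction needs the enclosing Ox scope, because the executor's update loop is forked here
  val limiter = GenericRateLimiter(
    GenericRateLimiter.Executor.Drop(),
    RateLimiterAlgorithm.FixedRate(2, 1.second)
  )
  given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop()

  // with the Drop executor every call yields an Option: two operations fit the window, the third is rejected
  val r1 = limiter { 1 } // Some(1)
  val r2 = limiter { 2 } // Some(2)
  val r3 = limiter { 3 } // None, dropped within the same 1-second window
}
```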
@@ -88,11 +92,15 @@ object GenericRateLimiter: // starts scheduler if not already running if schedule.tryAcquire() then supervised: - val _ = forkUser: - runScheduler(algorithm) + updateLock.release() () algorithm.acquire + override def update(algorithm: RateLimiterAlgorithm): Unit = + updateLock.acquire() + runScheduler(algorithm) + update(algorithm) + private def runScheduler(algorithm: RateLimiterAlgorithm): Unit = val waitTime = algorithm.getNextTime() algorithm.update @@ -127,6 +135,9 @@ object GenericRateLimiter: val blockExecutor = Block() val dropExecutor = Drop() + override def update(algorithm: RateLimiterAlgorithm): Unit = + blockExecutor.update(algorithm) + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Result[T] = cfg match case cfg: Strategy.Block => diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index d1fef5f6..ee03bcd9 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -7,12 +7,11 @@ import ox.* */ case class RateLimiter( algorithm: RateLimiterAlgorithm -): +)(using Ox): import GenericRateLimiter.* private val rateLimiter = - supervised: - GenericRateLimiter(Executor.BlockOrDrop(), algorithm) + GenericRateLimiter(Executor.BlockOrDrop(), algorithm) /** Blocks the operation until the rate limiter allows it. */ @@ -29,28 +28,28 @@ object RateLimiter: def leakyBucket( capacity: Int, leakInterval: FiniteDuration - ): RateLimiter = + )(using Ox): RateLimiter = RateLimiter(RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval)) end leakyBucket def tokenBucket( maxTokens: Int, refillInterval: FiniteDuration - ): RateLimiter = + )(using Ox): RateLimiter = RateLimiter(RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval)) end tokenBucket def fixedRate( maxRequests: Int, windowSize: FiniteDuration - ): RateLimiter = + )(using Ox): RateLimiter = RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) end fixedRate def slidingWindow( maxRequests: Int, windowSize: FiniteDuration - ): RateLimiter = + )(using Ox): RateLimiter = RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) end slidingWindow diff --git a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala index 6a1efe40..6cbf3425 100644 --- a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala @@ -12,643 +12,671 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues behavior of "fixed rate GenericRateLimiter" it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() - - var executions = 0 - def operation = { - executions +=1 - 0 - } + supervised: + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() + + var executions = 0 + def operation = { + executions +=1 + 0 + } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) + val result1 = 
rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 } it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + supervised: + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions +=1 + 0 + } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) ) - var executions = 0 - def operation = { - executions +=1 - 0 - } + var executions = 0 + def operation = { + executions += 1 + 0 + } - val before = System.currentTimeMillis() - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - val after = System.currentTimeMillis() - - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - (after-before) should be >= 1000L - executions shouldBe 3 + val before = System.currentTimeMillis() + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + val after = System.currentTimeMillis() + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + (after - before) should be >= 1000L + executions shouldBe 3 + } } it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) ) - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } } - } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result6 = operationN(6) - val 
result7 = operationN(7) // blocks until 3 seconds elapsed - val time4 = System.currentTimeMillis() - val result8 = operationN(8) - val result9 = operationN(9) // blocks until 4 seconds elapsed - val time5 = System.currentTimeMillis() - - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - result6 shouldBe 6 - result7 shouldBe 7 - result8 shouldBe 8 - result9 shouldBe 9 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time4-time1) should be >= 3000L - 10 - (time5-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - (time4-time1) should be <= 3200L - (time5-time1) should be <= 4200L - order should be (List(9, 8,7,6,5,4, 3,2,1)) + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) // blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result6 = operationN(6) + val result7 = operationN(7) // blocks until 3 seconds elapsed + val time4 = System.currentTimeMillis() + val result8 = operationN(8) + val result9 = operationN(9) // blocks until 4 seconds elapsed + val time5 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + result6 shouldBe 6 + result7 shouldBe 7 + result8 shouldBe 8 + result9 shouldBe 9 + (time2 - time1) should be >= 1000L - 10 + (time3 - time1) should be >= 2000L - 10 + (time4 - time1) should be >= 3000L - 10 + (time5 - time1) should be >= 4000L - 10 + (time2 - time1) should be <= 1200L + (time3 - time1) should be <= 2200L + (time4 - time1) should be <= 3200L + (time5 - time1) should be <= 4200L + order should be(List(9, 8, 7, 6, 5, 4, 3, 2, 1)) + } } it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) ) - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } } - } - val before = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - forkUser: - sleep(150.millis) - operationN(4) - forkUser: - sleep(200.millis) - operationN(5) - forkUser: - sleep(250.millis) - operationN(6) - forkUser: - sleep(300.millis) - operationN(7) - forkUser: - sleep(350.millis) - operationN(8) - forkUser: - sleep(400.millis) - operationN(9) + val before = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) + forkUser: + sleep(200.millis) + operationN(5) + forkUser: + sleep(250.millis) + operationN(6) + forkUser: + sleep(300.millis) + operationN(7) + forkUser: + sleep(350.millis) + operationN(8) + 
forkUser: + sleep(400.millis) + operationN(9) + } + val after = System.currentTimeMillis() + + (after - before) should be >= 4000L - 10 + (after - before) should be <= 4200L } - val after = System.currentTimeMillis() - - (after-before) should be >= 4000L - 10 - (after-before) should be <= 4200L } behavior of "sliding window GenericRateLimiter" - it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) ) - var executions = 0 - def operation = { - executions +=1 - 0 - } + var executions = 0 + def operation = { + executions += 1 + 0 + } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } } it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) ) + var executions = 0 + def operation = { + executions += 1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 } it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) ) - var executions = 0 - def operation = { - - executions +=1 - 0 - } + var executions = 0 + def operation = { + executions += 1 + 0 + } - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions 
shouldBe 3 + } } it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) ) - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } } - } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - ox.sleep(500.millis) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed - - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 1500L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 1700L - order should be (List(4, 3,2,1)) + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + ox.sleep(500.millis) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2 - time1) should be >= 1000L - 10 + (time3 - time1) should be >= 1500L - 10 + (time2 - time1) should be <= 1200L + (time3 - time1) should be <= 1700L + order should be(List(4, 3, 2, 1)) + } } it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) ) - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } } - } - val before = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(300.millis) - operationN(2) - forkUser: - sleep(400.millis) - operationN(3) - forkUser: - sleep(700.millis) - operationN(4) + val before = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(300.millis) + operationN(2) + forkUser: + sleep(400.millis) + operationN(3) + forkUser: + sleep(700.millis) + operationN(4) + } + val after = System.currentTimeMillis + + (after - before) should be >= 1300L - 10 + (after - before) should be <= 1400L } - val after = System.currentTimeMillis - - (after-before) should be >= 1300L - 10 - (after-before) should be <= 1400L } behavior of "token bucket GenericRateLimiter" it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = 
GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) ) - var executions = 0 - def operation = { - executions +=1 - 0 - } + var executions = 0 + def operation = { + executions += 1 + 0 + } - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) - result1 shouldBe Some(0) - result2 shouldBe None - executions shouldBe 1 + result1 shouldBe Some(0) + result2 shouldBe None + executions shouldBe 1 + } } it should "refill token after time elapsed from last refill and not before" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) ) + var executions = 0 + def operation = { + executions += 1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(600.millis) - val result3 = rateLimiter(operation) + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(600.millis) + val result3 = rateLimiter(operation) - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe Some(0) - executions shouldBe 2 + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe Some(0) + executions shouldBe 2 + } } it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) ) - var executions = 0 - def operation = { - executions +=1 - 0 - } + var executions = 0 + def operation = { + executions += 1 + 0 + } - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } - val r3 = rateLimiter(operation) - (r1, r2, r3) + result1 shouldBe 0 + result2 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 } - - result1 shouldBe 0 - result2 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 } it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) ) - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } } - } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val time2 = System.currentTimeMillis() // 1 second - sleep(2.seconds) //add 2 tokens - val result3 = operationN(3) //blocks until 1 second 
elapsed - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time4 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 3000L - 10 - (time4-time1) should be >= 4000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 3200L - (time4-time1) should be <= 4200L - order should be (List(5, 4, 3,2,1)) + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val time2 = System.currentTimeMillis() // 1 second + sleep(2.seconds) // add 2 tokens + val result3 = operationN(3) // blocks until 1 second elapsed + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time4 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + (time2 - time1) should be >= 1000L - 10 + (time3 - time1) should be >= 3000L - 10 + (time4 - time1) should be >= 4000L - 10 + (time2 - time1) should be <= 1200L + (time3 - time1) should be <= 3200L + (time4 - time1) should be <= 4200L + order should be(List(5, 4, 3, 2, 1)) + } } it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) ) - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } } - } + val before = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) + } + val after = System.currentTimeMillis() - val before = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - forkUser: - sleep(150.millis) - operationN(4) + (after - before) should be >= 3000L - 10 + (after - before) should be <= 3200L } - val after = System.currentTimeMillis() - - (after-before) should be >= 3000L - 10 - (after-before) should be <= 3200L } behavior of "leaky bucket GenericRateLimiter" it should "drop operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) + var executions = 0 + def operation = { + executions += 1 + 0 + } + + val result1 = rateLimiter(operation) + val result2 = 
rateLimiter(operation) + val result3 = rateLimiter(operation) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } } it should "reject operation before leaking and accepting after it" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + var executions = 0 + def operation = { + executions += 1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + ox.sleep(500.millis) + val result2 = rateLimiter(operation) + ox.sleep(400.millis) + val result3 = rateLimiter(operation) + ox.sleep(101.millis) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } - - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(400.millis) - val result3 = rateLimiter(operation) - ox.sleep(101.millis) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 } it should "restart rate limiter after given duration" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Drop(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + var executions = 0 + def operation = { + executions += 1 + 0 + } - var executions = 0 - def operation = { - executions +=1 - 0 + val result1 = rateLimiter(operation) + val result2 = rateLimiter(operation) + val result3 = rateLimiter(operation) + ox.sleep(1.second) + val result4 = rateLimiter(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe Some(0) + executions shouldBe 3 } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 } it should "block operation when rate limit is exceeded" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - - executions +=1 - 0 - } + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } + var executions = 0 + def operation = { + executions += 1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter(operation) + val r2 = rateLimiter(operation) + val r3 = rateLimiter(operation) + (r1, r2, r3) + } - result1 shouldBe 0 - result2 shouldBe 0 - 
result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } } it should "respect queueing order when blocking" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter { + order = n :: order + n + } } - } - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2-time1) should be >= 1000L - 10 - (time3-time1) should be >= 2000L - 10 - (time2-time1) should be <= 1200L - (time3-time1) should be <= 2200L - order should be (List(4, 3,2,1)) + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) // blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2 - time1) should be >= 1000L - 10 + (time3 - time1) should be >= 2000L - 10 + (time2 - time1) should be <= 1200L + (time3 - time1) should be <= 2200L + order should be(List(4, 3, 2, 1)) + } } it should "respect queueing order when blocking concurrently" in { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n + supervised { + val rateLimiter = GenericRateLimiter( + GenericRateLimiter.Executor.Block(), + RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter { + order.updateAndGet(ord => n :: ord) + n + } } - } + val before = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) + } + val after = System.currentTimeMillis() - val before = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - forkUser: - sleep(150.millis) - operationN(4) + (after - before) should be >= 2000L - 10 + (after - before) should be <= 2200L } - val after = System.currentTimeMillis() - - (after-before) should be >= 2000L - 10 - (after-before) should be <= 2200L } end GenericRateLimiterTest \ No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala 
b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 5240c0eb..07247444 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -10,87 +10,91 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T behavior of "RateLimiter" it should "drop or block operation depending on method used for fixed rate algorithm" in { - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + supervised: + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 } it should "drop or block operation depending on method used for sliding window algorithm" in { - val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + supervised: + val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 } it should "drop or block operation depending on method used for token bucket algorithm" in { - val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + supervised: + val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 2 + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 2 } it should "drop or block operation depending on method used for leaky bucker algorithm" in { - val 
rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + supervised: + val rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + executions shouldBe 3 } \ No newline at end of file From 0cf20496c99a2e00f3fb4153ba0100659f2004a7 Mon Sep 17 00:00:00 2001 From: pablf Date: Sat, 9 Nov 2024 22:56:20 +0100 Subject: [PATCH 13/23] separate update mechanism and detail docs --- .../ox/resilience/GenericRateLimiter.scala | 79 ++----- .../scala/ox/resilience/RateLimiter.scala | 47 ++-- .../ox/resilience/RateLimiterAlgorithm.scala | 183 ++++++--------- .../resilience/GenericRateLimiterTest.scala | 209 ++---------------- .../scala/ox/resilience/RateLimiterTest.scala | 80 ++++--- doc/utils/custom-rate-limiter.md | 52 ++++- doc/utils/rate-limiter.md | 15 +- 7 files changed, 234 insertions(+), 431 deletions(-) diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala index 59557f27..8beaab5d 100644 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala @@ -1,8 +1,6 @@ package ox.resilience -import java.util.concurrent.Semaphore import GenericRateLimiter.* -import ox.resilience.GenericRateLimiter.Strategy.Blocking import ox.* /** Rate limiter which allows to pass a configuration value to the execution. This can include both runtime and compile time information, @@ -16,25 +14,34 @@ case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( import GenericRateLimiter.Strategy.given - val _ = fork: - executor.update(algorithm) + val _ = + fork: + update() /** Limits the rate of execution of the given operation with a custom Result type */ def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = - executor.schedule(algorithm, operation) executor.execute(algorithm, operation) end apply + + private def update(): Unit = + val waitTime = algorithm.getNextUpdate + val millis = waitTime / 1000000 + val nanos = waitTime % 1000000 + Thread.sleep(millis, nanos.toInt) + algorithm.update + update() + end update end GenericRateLimiter object GenericRateLimiter: type Id[A] = A - /** Describe the execution strategy that must be used by the rate limiter in a given operation. It allows the encoding of return types and - * custom runtime behavior. + /** Describes the execution strategy that must be used by the rate limiter in a given operation. It allows the encoding of return types + * and custom runtime behavior. */ - sealed trait Strategy[F[*]]: + trait Strategy[F[*]]: def run[T](operation: => T): F[T] object Strategy: @@ -56,13 +63,6 @@ object GenericRateLimiter: * employed. In particular, it must ensure that operations are executed only if allowed and that the algorithm is updated. 
*/ trait Executor[Returns[_[_]] <: Strategy[_]]: - - /** Performs any tasks needed to delay the operation or alter the execution mode. Usually, this will involve using `acquire` or - * `tryAcquire` methods from the algorithm and taking care of updating it. - */ - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Returns[Result]): Unit - def update(algorithm: RateLimiterAlgorithm): Unit = () - /** Executes the operation and returns the expected result depending on the strategy. It might perform scheduling tasks if they are not * independent from the execution. */ @@ -79,53 +79,18 @@ object GenericRateLimiter: /** Block rejected operations until the rate limiter is ready to accept them. */ case class Block() extends Executor[Strategy.Blocking]: - - val updateLock = new Semaphore(0) - - val schedule = new Semaphore(1) - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result]): Result[T] = - cfg.run(operation) - - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Strategy.Blocking[Result[*]]): Unit = - if !algorithm.tryAcquire then - // starts scheduler if not already running - if schedule.tryAcquire() then - supervised: - updateLock.release() - () - algorithm.acquire - - override def update(algorithm: RateLimiterAlgorithm): Unit = - updateLock.acquire() - runScheduler(algorithm) - update(algorithm) - - private def runScheduler(algorithm: RateLimiterAlgorithm): Unit = - val waitTime = algorithm.getNextTime() - algorithm.update - if waitTime > 0 then - val millis = waitTime / 1000000 - val nanos = waitTime % 1000000 - Thread.sleep(millis, nanos.toInt) - runScheduler(algorithm) - else schedule.release() - end runScheduler + algorithm.acquire + run(operation) end Block /** Drops rejected operations */ case class Drop() extends Executor[Strategy.Dropping]: - - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Strategy.Dropping[Result[*]]): Unit = - () - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Result[T] = - algorithm.update if algorithm.tryAcquire then cfg.run(operation) else None.asInstanceOf[Result[T]] - end Drop /** Blocks rejected operations until the rate limiter is ready to accept them or drops them depending on the choosen strategy. 
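After this simplification the only obligation an `Executor` has left is `execute`: consult the algorithm and run the operation through the strategy in scope. As a rough, hypothetical sketch (not part of this patch), a Drop-style executor that additionally counts rejected operations could mirror the `Drop` implementation above:

```scala
import java.util.concurrent.atomic.AtomicLong
import ox.resilience.{GenericRateLimiter, RateLimiterAlgorithm}
import GenericRateLimiter.Strategy

// illustrative only: drops rejected operations like Drop, but also keeps a counter of rejections
case class CountingDrop() extends GenericRateLimiter.Executor[Strategy.Dropping]:
  val rejected = new AtomicLong(0)

  def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Result[T] =
    if algorithm.tryAcquire then cfg.run(operation)
    else
      val _ = rejected.incrementAndGet()
      None.asInstanceOf[Result[T]]
```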
@@ -135,22 +100,12 @@ object GenericRateLimiter: val blockExecutor = Block() val dropExecutor = Drop() - override def update(algorithm: RateLimiterAlgorithm): Unit = - blockExecutor.update(algorithm) - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Result[T] = cfg match case cfg: Strategy.Block => blockExecutor.execute(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) case cfg: Strategy.Drop => dropExecutor.execute(algorithm, operation)(using cfg) - - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Strategy.BlockOrDrop[Result]): Unit = - implicitly[Strategy.BlockOrDrop[Result]] match - case cfg: Strategy.Block => - blockExecutor.schedule(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) - case cfg: Strategy.Drop => - dropExecutor.schedule(algorithm, operation)(using cfg) end BlockOrDrop end Executor diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index ee03bcd9..b2fe98ab 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -1,9 +1,9 @@ package ox.resilience -import scala.concurrent.duration.* -import ox.* +import scala.concurrent.duration.FiniteDuration +import ox.Ox -/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an operation. +/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. */ case class RateLimiter( algorithm: RateLimiterAlgorithm @@ -25,20 +25,13 @@ end RateLimiter object RateLimiter: - def leakyBucket( - capacity: Int, - leakInterval: FiniteDuration - )(using Ox): RateLimiter = - RateLimiter(RateLimiterAlgorithm.LeakyBucket(capacity, leakInterval)) - end leakyBucket - - def tokenBucket( - maxTokens: Int, - refillInterval: FiniteDuration - )(using Ox): RateLimiter = - RateLimiter(RateLimiterAlgorithm.TokenBucket(maxTokens, refillInterval)) - end tokenBucket - + /** Rate limiter with fixed rate algorithm with possibility to drop or block an operation if not allowed to run + * + * @param maxRequests + * Maximum number of requests per consecutive window + * @param windowSize + * Interval of time to pass before reset of the rate limiter + */ def fixedRate( maxRequests: Int, windowSize: FiniteDuration @@ -46,6 +39,13 @@ object RateLimiter: RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) end fixedRate + /** Rate limiter with sliding window algorithm with possibility to drop or block an operation if not allowed to run + * + * @param maxRequests + * Maximum number of requests in any window of time + * @param windowSize + * Size of the window + */ def slidingWindow( maxRequests: Int, windowSize: FiniteDuration @@ -53,4 +53,17 @@ object RateLimiter: RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) end slidingWindow + /** Rate limiter with token/leaky bucket algorithm with possibility to drop or block an operation if not allowed to run + * + * @param maxTokens + * Max capacity of tokens in the algorithm + * @param refillInterval + * Interval of time after which a token is added + */ + def bucket( + maxTokens: Int, + refillInterval: FiniteDuration + )(using Ox): RateLimiter = + RateLimiter(RateLimiterAlgorithm.Bucket(maxTokens, refillInterval)) + end bucket end RateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala 
b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index f4304e05..07239795 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -1,10 +1,7 @@ package ox.resilience -import ox.* -import ox.resilience.RateLimiterAlgorithm.* -import scala.concurrent.duration.* +import scala.concurrent.duration.FiniteDuration import java.util.concurrent.atomic.AtomicLong -import scala.concurrent.* import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.Semaphore import java.util.{LinkedList, Queue} @@ -13,177 +10,141 @@ import java.util.{LinkedList, Queue} */ trait RateLimiterAlgorithm: - /** Acquire a permit to execute the operation. This method should block until a permit is available. + /** Acquires a permit to execute the operation. This method should block until a permit is available. */ - def acquire: Unit + final def acquire: Unit = + acquire(1) - /** Try to acquire a permit to execute the operation. This method should not block. + /** Acquires permits to execute the operation. This method should block until a permit is available. */ - def tryAcquire: Boolean + def acquire(permits: Int): Unit + + /** Tries to acquire a permit to execute the operation. This method should not block. + */ + final def tryAcquire: Boolean = + tryAcquire(1) + + /** Tries to acquire permits to execute the operation. This method should not block. + */ + def tryAcquire(permits: Int): Boolean /** Updates the internal state of the rate limiter to check whether new operations can be accepted. */ def update: Unit - /** Returns the time until the next operation can be accepted to be used by the `GenericRateLimiter.Executor`. It should return 0 only if - * there is no need of rescheduling an update in the future. It should not modify internal state. + /** Returns the time in nanoseconds that needs to elapse until the next update. It should not modify internal state. */ - def getNextTime(): Long + def getNextUpdate: Long end RateLimiterAlgorithm object RateLimiterAlgorithm: - /** Fixed rate algorithm + /** Fixed rate algorithm It allows starting at most `rate` operations in consecutively segments of duration `per`. */ case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: private val lastUpdate = new AtomicLong(System.nanoTime()) private val semaphore = new Semaphore(rate) - val lock = new java.util.concurrent.locks.ReentrantLock() - def acquire: Unit = - semaphore.acquire() + def acquire(permits: Int): Unit = + semaphore.acquire(permits) - def tryAcquire: Boolean = - semaphore.tryAcquire() + def tryAcquire(permits: Int): Boolean = + semaphore.tryAcquire(permits) - def getNextTime(): Long = + def getNextUpdate: Long = val waitTime = lastUpdate.get() + per.toNanos - System.nanoTime() - val q = semaphore.getQueueLength() - if waitTime > 0 then waitTime - else if q > 0 then per.toNanos - else 0L + if waitTime > 0 then waitTime else 0L def update: Unit = val now = System.nanoTime() - lastUpdate.updateAndGet { time => - if time + per.toNanos < now then - semaphore.drainPermits() - semaphore.release(rate) - now - else time - } - () + lastUpdate.set(now) + semaphore.release(rate - semaphore.availablePermits()) end update end FixedRate - /** Sliding window algorithm + /** Sliding window algorithm It allows to start at most `rate` operations in the lapse of `per` before current time. 
*/ case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: - private val log = new AtomicReference[Queue[Long]](new LinkedList[Long]()) + // stores the timestamp and the number of permits acquired after calling acquire or tryAcquire succesfully + private val log = new AtomicReference[Queue[(Long, Int)]](new LinkedList[(Long, Int)]()) private val semaphore = new Semaphore(rate) - def acquire: Unit = - semaphore.acquire() + def acquire(permits: Int): Unit = + semaphore.acquire(permits) + // adds timestamp to log val now = System.nanoTime() log.updateAndGet { q => - q.add(now) + q.add((now, permits)) q } () end acquire - def tryAcquire: Boolean = - if semaphore.tryAcquire() then + def tryAcquire(permits: Int): Boolean = + if semaphore.tryAcquire(permits) then + // adds timestamp to log val now = System.nanoTime() log.updateAndGet { q => - q.add(now) + q.add((now, permits)) q } true else false - def getNextTime(): Long = - val furtherLog = log.get().peek() - if null eq furtherLog then - if semaphore.getQueueLength() > 0 then per.toNanos - else 0L + def getNextUpdate: Long = + if log.get().size() == 0 then + // no logs so no need to update until `per` has passed + per.toNanos else - val waitTime = log.get().peek() + per.toNanos - System.nanoTime() - val q = semaphore.getQueueLength() - if waitTime > 0 then waitTime - else if q > 0 then - update - getNextTime() - else 0L - end if - end getNextTime + // oldest log provides the new updating point + val waitTime = log.get().peek()._1 + per.toNanos - System.nanoTime() + if waitTime > 0 then waitTime else 0L + end getNextUpdate def update: Unit = val now = System.nanoTime() - while semaphore.availablePermits() < rate && log - .updateAndGet { q => - if q.peek() < now - per.toNanos then - q.poll() - semaphore.release() - q - else q - } - .peek() < now - per.toNanos - do () - end while + // retrieving current queue to append it later if some elements were added concurrently + val q = log.getAndUpdate(_ => new LinkedList[(Long, Int)]()) + // remove records older than window size + while semaphore.availablePermits() < rate && q.peek()._1 + per.toNanos < now + do + val (_, permits) = q.poll() + semaphore.release(permits) + // merge old records with the ones concurrently added + val _ = log.updateAndGet(q2 => + val qBefore = q + while q2.size() > 0 + do + qBefore.add(q2.poll()) + () + qBefore + ) end update end SlidingWindow - /** Token bucket algorithm + /** Token/leaky bucket algorithm It adds a token to start an new operation each `per` with a maximum number of tokens of `rate`. 
*/ - case class TokenBucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: + case class Bucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: private val refillInterval = per.toNanos private val lastRefillTime = new AtomicLong(System.nanoTime()) private val semaphore = new Semaphore(1) - def acquire: Unit = - semaphore.acquire() + def acquire(permits: Int): Unit = + semaphore.acquire(permits) - def tryAcquire: Boolean = - semaphore.tryAcquire() + def tryAcquire(permits: Int): Boolean = + semaphore.tryAcquire(permits) - def getNextTime(): Long = + def getNextUpdate: Long = val waitTime = lastRefillTime.get() + refillInterval - System.nanoTime() - val q = semaphore.getQueueLength() - if waitTime > 0 then waitTime - else if q > 0 then refillInterval - else 0L - - def update: Unit = - val now = System.nanoTime() - val elapsed = now - lastRefillTime.get() - val newTokens = elapsed / refillInterval - lastRefillTime.set(newTokens * refillInterval + lastRefillTime.get()) - semaphore.release(newTokens.toInt) - - end TokenBucket - - /** Leaky bucket algorithm - */ - case class LeakyBucket(capacity: Int, leakRate: FiniteDuration) extends RateLimiterAlgorithm: - private val leakInterval = leakRate.toNanos - private val lastLeakTime = new AtomicLong(System.nanoTime()) - private val semaphore = new Semaphore(capacity) - - def acquire: Unit = - semaphore.acquire() - - def tryAcquire: Boolean = - semaphore.tryAcquire() - - def getNextTime(): Long = - val waitTime = lastLeakTime.get() + leakInterval - System.nanoTime() - val q = semaphore.getQueueLength() - if waitTime > 0 then waitTime - else if q > 0 then leakInterval - else 0L + if waitTime > 0 then waitTime else 0L def update: Unit = val now = System.nanoTime() - val lastLeak = lastLeakTime.get() - val elapsed = now - lastLeak - val leaking = elapsed / leakInterval - val newTime = leaking * leakInterval + lastLeak - semaphore.release(leaking.toInt) - lastLeakTime.set(newTime) - end update + lastRefillTime.set(now) + if semaphore.availablePermits() < rate then semaphore.release() - end LeakyBucket + end Bucket end RateLimiterAlgorithm diff --git a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala index 6cbf3425..3bcc5c72 100644 --- a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala @@ -1,11 +1,11 @@ package ox.resilience import ox.* +import ox.util.ElapsedTime import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import org.scalatest.{EitherValues, TryValues} -import ox.util.ElapsedTime -import scala.concurrent.duration._ +import scala.concurrent.duration.* import java.util.concurrent.atomic.AtomicReference class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: @@ -88,7 +88,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - it should "respect queueing order when blocking" in { + it should "respect time constraints when blocking" in { supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Block(), @@ -139,7 +139,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - it should "respect queueing order when blocking concurrently" in { + it should "respect time constraints when blocking concurrently" in { supervised { val rateLimiter = GenericRateLimiter( 
GenericRateLimiter.Executor.Block(), @@ -272,7 +272,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - it should "respect queueing order when blocking" in { + it should "respect time constraints when blocking" in { supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Block(), @@ -308,7 +308,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - it should "respect queueing order when blocking concurrently" in { + it should "respect time constraints when blocking concurrently" in { supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Block(), @@ -344,13 +344,13 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - behavior of "token bucket GenericRateLimiter" + behavior of "bucket GenericRateLimiter" it should "drop operation when rate limit is exceeded" in { supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) ) var executions = 0 @@ -372,7 +372,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) ) var executions = 0 @@ -398,7 +398,7 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) ) var executions = 0 @@ -421,11 +421,11 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - it should "respect queueing order when blocking" in { + it should "respect time constraints when blocking" in { supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) ) var order = List.empty[Int] @@ -462,11 +462,11 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - it should "respect queueing order when blocking concurrently" in { + it should "respect time constraints when blocking concurrently" in { supervised { val rateLimiter = GenericRateLimiter( GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.TokenBucket(2, FiniteDuration(1, "second")) + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) ) val order = new AtomicReference(List.empty[Int]) @@ -498,185 +498,4 @@ class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues } } - behavior of "leaky bucket GenericRateLimiter" - - it should "drop operation when rate limit is exceeded" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } - } - - it should "reject operation before 
leaking and accepting after it" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(400.millis) - val result3 = rateLimiter(operation) - ox.sleep(101.millis) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } - } - - it should "restart rate limiter after given duration" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } - } - - it should "block operation when rate limit is exceeded" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } - } - - it should "respect queueing order when blocking" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) // blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2 - time1) should be >= 1000L - 10 - (time3 - time1) should be >= 2000L - 10 - (time2 - time1) should be <= 1200L - (time3 - time1) should be <= 2200L - order should be(List(4, 3, 2, 1)) - } - } - - it should "respect queueing order when blocking concurrently" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } - } - - val before = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - forkUser: - sleep(150.millis) - operationN(4) - } - val after = System.currentTimeMillis() - - (after - before) should be >= 2000L - 10 - (after - 
before) should be <= 2200L - } - } - end GenericRateLimiterTest \ No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 07247444..97984d04 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -4,7 +4,7 @@ import ox.* import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import org.scalatest.{EitherValues, TryValues} -import scala.concurrent.duration._ +import scala.concurrent.duration.* class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues: behavior of "RateLimiter" @@ -23,12 +23,16 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T val result2 = rateLimiter.runOrDrop(operation) val result3 = rateLimiter.runOrDrop(operation) val result4 = rateLimiter.runBlocking(operation) + val result5 = rateLimiter.runBlocking(operation) + val result6 = rateLimiter.runOrDrop(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None result4 shouldBe 0 - executions shouldBe 3 + result5 shouldBe 0 + result6 shouldBe None + executions shouldBe 4 } it should "drop or block operation depending on method used for sliding window algorithm" in { @@ -45,17 +49,21 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T val result2 = rateLimiter.runOrDrop(operation) val result3 = rateLimiter.runOrDrop(operation) val result4 = rateLimiter.runBlocking(operation) + val result5 = rateLimiter.runBlocking(operation) + val result6 = rateLimiter.runOrDrop(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None result4 shouldBe 0 - executions shouldBe 3 + result5 shouldBe 0 + result6 shouldBe None + executions shouldBe 4 } - it should "drop or block operation depending on method used for token bucket algorithm" in { + it should "drop or block operation depending on method used for bucket algorithm" in { supervised: - val rateLimiter = RateLimiter.tokenBucket(2, FiniteDuration(1, "second")) + val rateLimiter = RateLimiter.bucket(2, FiniteDuration(1, "second")) var executions = 0 def operation = { @@ -67,34 +75,56 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T val result2 = rateLimiter.runOrDrop(operation) val result3 = rateLimiter.runOrDrop(operation) val result4 = rateLimiter.runBlocking(operation) + val result5 = rateLimiter.runBlocking(operation) + val result6 = rateLimiter.runOrDrop(operation) result1 shouldBe Some(0) result2 shouldBe None result3 shouldBe None result4 shouldBe 0 - executions shouldBe 2 + result5 shouldBe 0 + result6 shouldBe None + executions shouldBe 3 } - it should "drop or block operation depending on method used for leaky bucker algorithm" in { - supervised: - val rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second")) - - var executions = 0 - def operation = { - executions +=1 - 0 - } + it should "drop or block operation concurrently with BlockOrDrop executor" in { + supervised: + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - val result1 = rateLimiter.runOrDrop(operation) - val result2 = rateLimiter.runOrDrop(operation) - val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) + def operation = 0 - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe 0 - executions shouldBe 3 - 
} + var result1: Option[Int] = Some(-1) + var result2: Option[Int] = Some(-1) + var result3: Option[Int] = Some(-1) + var result4: Int = -1 + var result5: Int = -1 + var result6: Int = -1 + + // run two operations to block the rate limiter + rateLimiter.runOrDrop(operation) + rateLimiter.runOrDrop(operation) + + // operations with runOrDrop should be dropped while operations with runBlocking should wait + supervised: + forkUser: + result1 = rateLimiter.runOrDrop(operation) + forkUser: + result2 = rateLimiter.runOrDrop(operation) + forkUser: + result3 = rateLimiter.runOrDrop(operation) + forkUser: + result4 = rateLimiter.runBlocking(operation) + forkUser: + result5 = rateLimiter.runBlocking(operation) + forkUser: + result6 = rateLimiter.runBlocking(operation) + + result1 shouldBe None + result2 shouldBe None + result3 shouldBe None + result4 shouldBe 0 + result5 shouldBe 0 + result6 shouldBe 0 + } \ No newline at end of file diff --git a/doc/utils/custom-rate-limiter.md b/doc/utils/custom-rate-limiter.md index 75b96626..0391b2b1 100644 --- a/doc/utils/custom-rate-limiter.md +++ b/doc/utils/custom-rate-limiter.md @@ -1,10 +1,10 @@ # Custom rate limiter -A rate limiter depends on an algorithm controlling whether an operation can be executed and a executor controlling the exact behaviour after rejecting an operation. The `RateLimiterAlgorithm` can be modified and used with the existing `RateLimiter` API. The executor can also be customized by using a different API: `GenericRateLimiter`. +A rate limiter depends on an algorithm controlling whether an operation can be executed and a executor controlling the exact behaviour after rejecting an operation. Algorithms can be customized and used with the existing `RateLimiter` API. The executor can also be customized by using a different API: `GenericRateLimiter`. ## Generic rate limiter -The generic rate limiter API provides utility to build rate limiters with custom execution policies. This can be useful to implement more complex policies like throttling of operations. +The generic rate limiter API provides utilities to build rate limiters with custom execution policies. This can be useful to implement more complex policies like throttling of operations or passing parameters to modify the behavior of the rate limiter per operation. -The basic syntax for generic rate limiters is: +The basic syntax for generic rate limiters is the following: ```scala val executor = GenericRateLimiter.Executor.Drop() @@ -13,6 +13,8 @@ val rateLimiter = GenericRateLimiter(algorithm, rateLimiter) type T def operation: T = ??? +// internally, existing strategies are imported as implicits +// so you don't need to specify a strategy if there is only one for the given executor val result: Some[T] = rateLimiter(operation) ``` @@ -27,11 +29,12 @@ def operation: T = ??? // This doesn't work because the rate limiter doesn't know which strategy to choose for the current executor //val result: Some[T] = rateLimiter(operation) + val resultDrop: Some[T] = rateLimiter(operation)(using GenericRateLimiter.Strategy.Drop()) val resultBlock: T = rateLimiter(operation)(using GenericRateLimiter.Strategy.Block()) ``` -Note that modifying the strategy used, it's possible to have different return types for the same rate limiter. +Note that customizing the strategies employed, it's possible to have different return types for the same rate limiter. 
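As a rough illustration of this point, here is a minimal sketch (not part of the patch itself): the `quote` operation, the `callWithPriority` helper and the `highPriority` flag are hypothetical names, and the limiter is assumed to be a `GenericRateLimiter` built with the `Executor.BlockOrDrop()` executor defined in the sources above. The strategy is chosen per call, yielding either a plain result or an optional one:

```scala
import ox.resilience.*

def quote(): String = "42" // hypothetical operation to be rate limited

// With a BlockOrDrop executor, the strategy passed per call decides the return type:
// Strategy.Block() yields the plain result, Strategy.Drop() yields an Option.
def callWithPriority(
    limiter: GenericRateLimiter[GenericRateLimiter.Strategy.BlockOrDrop],
    highPriority: Boolean
): Option[String] =
  if highPriority then
    // block until a permit is available; the call returns a plain String
    Some(limiter(quote())(using GenericRateLimiter.Strategy.Block()))
  else
    // drop if no permit is available; the call returns an Option[String]
    limiter(quote())(using GenericRateLimiter.Strategy.Drop())
```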
## Executor @@ -52,13 +55,14 @@ sealed trait Strategy[F[*]]: def run[T](operation: => T): F[T] trait Executor[Returns[_[_]] <: Strategy[_]]: - def schedule[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Unit def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T] - def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] // calls Strategy.run + def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] // calls Strategy.run by default end Executor ``` -To create a custom executor, you need to define a custom `Returns` higher-kinded trait extending `Strategy` that will codify the possible behaviours of the executor or use one of the predefined. Then, the operations in `Executor` will use the leaf classes extending your custom `Returns` to implement the behaviour. When calling your custom rate limiter, you will need to make available through implicits your desired execution strategy, which can be changed per operation. +To create a custom executor, you need to define a custom `Returns` higher-kinded trait extending `Strategy` that will codify the possible behaviours of the executor, or use one of the predefined ones. Then, the operations in `Executor` will use the leaf classes extending your custom `Returns` to implement the behaviour. When calling your custom rate limiter, you will need to make available through implicits your desired execution strategy, which can be changed per operation. + +Note that your custom executor should not handle the updating of the `RateLimiterAlgorithm`. This is done automatically by the `GenericRateLimiter`. ### Predefined strategies `Strategy` is extended by three traits: @@ -66,16 +70,40 @@ To create a custom executor, you need to define a custom `Returns` higher-kinded - Trait `Strategy.Dropping` gives an `Option[T]` for an operation of type `=> T`. - Trait `Strategy.BlockOrDrop` allows both types of return depending on the subclass employed. -The traits are implemented by `Strategy.Drop` and `Strategy.Block`. +The traits are implemented by `Strategy.Drop()` and `Strategy.Block()`. ### Custom strategies -Custom strategies allow not only the specification of the type, but also to add other information to the `Executor`. A possible use could be to add a timeout. The strategy could be defined like this: +Custom strategies allow not only for the specification of the return type, but also for adding other information to the `Executor`. A possible use could be to add a timeout. The strategy could be defined like this: ```scala type Id[A] = A sealed trait CustomStrategy[F[*]] extends Strategy[F] -case class DropAfterTimeout(timeout: Long) extends CustomStrategy[Option] -case class RunAfterTimeout(timeout: Long) extends CustomStrategy[Id] +case class DropAfterTimeout(timeout: Long) extends CustomStrategy[Option]: + def run[T](operation: => T): Option[T] = Some(operation) +case class RunAfterTimeout(timeout: Long) extends CustomStrategy[Id]: + def run[T](operation: => T): T = operation +``` + +Another common example is to specify the "size" of an operation. This is useful when execution depends on the size of the data packets received. 
+```scala +type Id[A] = A +sealed trait CustomStrategy[F[*]] extends Strategy[F] +case class RunWithSize(size: Int) extends CustomStrategy[Id]: + def run[T](operation: => T): T = operation +``` + +After defining a custom strategy, you need to create a corresponding executor to use the `GenericRateLimiter` API. A possible implementation for the last example would be: +```scala +case class CustomExecutor() extends Executor[CustomStrategy]: + def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: CustomStrategy[Result]): Result[T] = + cfg match + case RunWithSize(permitsToAcquire) => + algorithm.acquire(permitsToAcquire) + run(operation) ``` ## Rate limiter algorithm -The `RateLimiterAlgorithm` employed by `RateLimiter` and `GenericRateLimiter` can be extended to implement new algorithms or modify existing ones. Its interface is modelled like that of a `Semaphore` although the underlying implementation could be different. \ No newline at end of file +The `RateLimiterAlgorithm` employed by `RateLimiter` and `GenericRateLimiter` can be extended to implement new algorithms or modify existing ones. Its interface is modelled like that of a `Semaphore` although the underlying implementation could be different. For best compatibility with existing executors, the methods `acquire` and `tryAcquire` should offer the same guarantees as Java's `Semaphore`. + +Additionally, there are two methods employed by the `GenericRateLimiter` for updating its internal state automatically: +- `def update: Unit`: Updates the internal state of the rate limiter to reflect its current situation. +- `def getNextUpdate: Long`: Returns the time in nanoseconds after which a new `update` needs to be called. diff --git a/doc/utils/rate-limiter.md b/doc/utils/rate-limiter.md index f4e09cbf..14fa37da 100644 --- a/doc/utils/rate-limiter.md +++ b/doc/utils/rate-limiter.md @@ -10,8 +10,6 @@ import ox.resilience.* val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDurationt(1, "seconds")) val rateLimiter = RateLimiter(algorithm) -//val fairness = true -//val rateLimiter = RateLimiter(algorithm, fairness) type T def operation: T = ??? @@ -20,8 +18,9 @@ val blockedOperation: T = rateLimiter.runBlocking(operation) val droppedOperation: Some[T] = rateLimiter.runOrDrop(operation) ``` -`blockedOperation` will block the operation until the algorithm allows it to be executed. Therefore, the return type is the same as the operation. On the other hand, if the algorithm doesn't allow execution of more operations, `runOrDrop` will drop the operation returning `None` and wrapping the result in `Some` when the operation is successfully executed. The fairness policy when blocking an operation can be specified per rate limiter and defaults to false. If the rate limiter is fair, blocked calls will be executed in order of arrival. Otherwise, the first blocked operation might not be the first to be executed after unblocking. -The `RateLimiter` API uses the `GenericRateLimiter` API underneath. See [custom rate limiters](custom-rate-limiter.md) for more details. +`blockedOperation` will block the operation until the algorithm allows it to be executed. Therefore, the return type is the same as the operation. On the other hand, if the algorithm doesn't allow execution of more operations, `runOrDrop` will drop the operation returning `None` and wrapping the result in `Some` when the operation is successfully executed. + +The `RateLimiter` API uses the `GenericRateLimiter` API underneath. 
This API allows customizing the behaviour of rate limiters. See [custom rate limiters](custom-rate-limiter.md) for more details. ## Operation definition @@ -32,17 +31,15 @@ The `operation` can be provided directly using a by-name parameter, i.e. `f: => The configuration of a `RateLimiter` depends on an underlying algorithm that controls whether an operation can be executed or not. The following algorithms are available: - `RateLimiterAlgorithm.FixedRate(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in segments of `dur` duration after the execution of the first operation. - `RateLimiterAlgorithm.SlidingWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in a window of time of duration `dur`. -- `RateLimiterAlgorithm.TokenBucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity of tokens availables in the token bucket algorithm and one token is added after `dur`. -- `RateLimiterAlgorithm.LeakyBucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity availables in the leaky bucket algorithm and 0 capacity is achieved after `dur` duration. +- `RateLimiterAlgorithm.Bucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity of tokens available in the bucket algorithm and one token is added after `dur`. It can represent both the leaky bucket and the token bucket algorithms. -It's also possible to specify fairness in each of these methods. It's possible to define your own algorithm. See [custom rate limiters](custom-rate-limiter.md) for more details. +It's possible to define your own algorithm and executor. This allows specifying custom configuration per operation, for example the "size" that an operation occupies. See [custom rate limiters](custom-rate-limiter.md) for more details. ### API shorthands You can use one of the following shorthands to define a Rate Limiter with the corresponding algorithm: - `RateLimiter.fixedRate(rate: Int, dur: FiniteDuration)`, - `RateLimiter.slidingWindow(rate: Int, dur: FiniteDuration)`, -- `RateLimiter.tokenBucket(maximum: Int, dur: FiniteDuration)`, -- `RateLimiter.leakyBucket(maximum: Int, dur: FiniteDuration)`. +- `RateLimiter.bucket(maximum: Int, dur: FiniteDuration)`. See the tests in `ox.resilience.*` for more. 
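As a minimal sketch of the shorthands above (assuming the `supervised` scope from ox, since the constructors require an `Ox` capability in scope; the arithmetic operations and the entry-point name are arbitrary placeholders), a fixed rate limiter could be used like this:

```scala
import ox.*
import ox.resilience.*
import scala.concurrent.duration.*

@main def rateLimiterExample(): Unit =
  supervised:
    // at most 2 operations per second
    val rateLimiter = RateLimiter.fixedRate(2, 1.second)

    // blocks until the algorithm allows the operation, returning its result directly
    val blocked: Int = rateLimiter.runBlocking(1 + 1)

    // returns Some(result) if allowed, or None if the limit is currently exceeded
    val dropped: Option[Int] = rateLimiter.runOrDrop(2 + 2)

    println((blocked, dropped))
```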
From 789a1b0323e02e909d447da9fe5d8231492aaece Mon Sep 17 00:00:00 2001 From: pablf Date: Tue, 12 Nov 2024 12:15:54 +0100 Subject: [PATCH 14/23] change interface --- .../ox/resilience/GenericRateLimiter.scala | 112 ---- .../scala/ox/resilience/RateLimiter.scala | 25 +- .../ox/resilience/RateLimiterAlgorithm.scala | 53 +- .../resilience/GenericRateLimiterTest.scala | 501 ------------------ .../resilience/RateLimiterInterfaceTest.scala | 130 +++++ .../scala/ox/resilience/RateLimiterTest.scala | 501 +++++++++++++++--- doc/utils/custom-rate-limiter.md | 109 ---- doc/utils/rate-limiter.md | 12 +- 8 files changed, 613 insertions(+), 830 deletions(-) delete mode 100644 core/src/main/scala/ox/resilience/GenericRateLimiter.scala delete mode 100644 core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala create mode 100644 core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala delete mode 100644 doc/utils/custom-rate-limiter.md diff --git a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala b/core/src/main/scala/ox/resilience/GenericRateLimiter.scala deleted file mode 100644 index 8beaab5d..00000000 --- a/core/src/main/scala/ox/resilience/GenericRateLimiter.scala +++ /dev/null @@ -1,112 +0,0 @@ -package ox.resilience - -import GenericRateLimiter.* -import ox.* - -/** Rate limiter which allows to pass a configuration value to the execution. This can include both runtime and compile time information, - * allowing for customization of return types and runtime behavior. If the only behavior needed is to block or drop operations, the - * `RateLimiter` class provides a simpler interface. - */ -case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( - executor: Executor[Returns], - algorithm: RateLimiterAlgorithm -)(using Ox): - - import GenericRateLimiter.Strategy.given - - val _ = - fork: - update() - - /** Limits the rate of execution of the given operation with a custom Result type - */ - def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] = - executor.execute(algorithm, operation) - end apply - - private def update(): Unit = - val waitTime = algorithm.getNextUpdate - val millis = waitTime / 1000000 - val nanos = waitTime % 1000000 - Thread.sleep(millis, nanos.toInt) - algorithm.update - update() - end update -end GenericRateLimiter - -object GenericRateLimiter: - - type Id[A] = A - - /** Describes the execution strategy that must be used by the rate limiter in a given operation. It allows the encoding of return types - * and custom runtime behavior. - */ - trait Strategy[F[*]]: - def run[T](operation: => T): F[T] - - object Strategy: - sealed trait Blocking[F[*]] extends Strategy[F] - sealed trait Dropping[F[*]] extends Strategy[F] - sealed trait BlockOrDrop[F[*]] extends Strategy[F] - - case class Block() extends Blocking[Id] with BlockOrDrop[Id]: - def run[T](operation: => T): T = operation - - case class Drop() extends Dropping[Option] with BlockOrDrop[Option]: - def run[T](operation: => T): Option[T] = Some(operation) - - given Blocking[Id] = Block() - given Dropping[Option] = Drop() - end Strategy - - /** Determines the policy to apply when the rate limiter is full. The executor is responsible of managing the inner state of the algorithm - * employed. In particular, it must ensure that operations are executed only if allowed and that the algorithm is updated. - */ - trait Executor[Returns[_[_]] <: Strategy[_]]: - /** Executes the operation and returns the expected result depending on the strategy. 
It might perform scheduling tasks if they are not - * independent from the execution. - */ - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using Returns[Result]): Result[T] - - /** Runs the operation and returns the result using the given strategy. - */ - def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] = - cfg.run(operation).asInstanceOf[Result[T]] - - end Executor - - object Executor: - /** Block rejected operations until the rate limiter is ready to accept them. - */ - case class Block() extends Executor[Strategy.Blocking]: - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Blocking[Result]): Result[T] = - algorithm.acquire - run(operation) - - end Block - - /** Drops rejected operations - */ - case class Drop() extends Executor[Strategy.Dropping]: - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.Dropping[Result[*]]): Result[T] = - if algorithm.tryAcquire then cfg.run(operation) - else None.asInstanceOf[Result[T]] - end Drop - - /** Blocks rejected operations until the rate limiter is ready to accept them or drops them depending on the choosen strategy. - */ - case class BlockOrDrop() extends Executor[Strategy.BlockOrDrop]: - - val blockExecutor = Block() - val dropExecutor = Drop() - - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Strategy.BlockOrDrop[Result]): Result[T] = - cfg match - case cfg: Strategy.Block => - blockExecutor.execute(algorithm, operation)(using cfg.asInstanceOf[Strategy.Blocking[Result]]) - case cfg: Strategy.Drop => - dropExecutor.execute(algorithm, operation)(using cfg) - end BlockOrDrop - - end Executor -end GenericRateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index b2fe98ab..9e40d6df 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -1,25 +1,38 @@ package ox.resilience import scala.concurrent.duration.FiniteDuration -import ox.Ox +import ox.* /** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. */ case class RateLimiter( algorithm: RateLimiterAlgorithm )(using Ox): - import GenericRateLimiter.* - private val rateLimiter = - GenericRateLimiter(Executor.BlockOrDrop(), algorithm) + val _ = + fork: + update() + + private def update(): Unit = + val waitTime = algorithm.getNextUpdate + val millis = waitTime / 1000000 + val nanos = waitTime % 1000000 + Thread.sleep(millis, nanos.toInt) + algorithm.update + update() + end update /** Blocks the operation until the rate limiter allows it. */ - def runBlocking[T](operation: => T): T = rateLimiter(operation)(using Strategy.Block()) + def runBlocking[T](operation: => T): T = + algorithm.acquire + operation /** Drops the operation if not allowed by the rate limiter returning `None`. 
*/ - def runOrDrop[T](operation: => T): Option[T] = rateLimiter(operation)(using Strategy.Drop()) + def runOrDrop[T](operation: => T): Option[T] = + if algorithm.tryAcquire then Some(operation) + else None end RateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index 07239795..03f361cc 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -1,10 +1,10 @@ package ox.resilience import scala.concurrent.duration.FiniteDuration +import scala.collection.immutable.Queue import java.util.concurrent.atomic.AtomicLong import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.Semaphore -import java.util.{LinkedList, Queue} /** Determines the algorithm to use for the rate limiter */ @@ -67,7 +67,7 @@ object RateLimiterAlgorithm: */ case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: // stores the timestamp and the number of permits acquired after calling acquire or tryAcquire succesfully - private val log = new AtomicReference[Queue[(Long, Int)]](new LinkedList[(Long, Int)]()) + private val log = new AtomicReference[Queue[(Long, Int)]](Queue[(Long, Int)]()) private val semaphore = new Semaphore(rate) def acquire(permits: Int): Unit = @@ -75,8 +75,7 @@ object RateLimiterAlgorithm: // adds timestamp to log val now = System.nanoTime() log.updateAndGet { q => - q.add((now, permits)) - q + q.enqueue((now, permits)) } () end acquire @@ -86,42 +85,46 @@ object RateLimiterAlgorithm: // adds timestamp to log val now = System.nanoTime() log.updateAndGet { q => - q.add((now, permits)) - q + q.enqueue((now, permits)) } true else false def getNextUpdate: Long = - if log.get().size() == 0 then - // no logs so no need to update until `per` has passed - per.toNanos - else - // oldest log provides the new updating point - val waitTime = log.get().peek()._1 + per.toNanos - System.nanoTime() - if waitTime > 0 then waitTime else 0L + log.get().headOption match + case None => + // no logs so no need to update until `per` has passed + per.toNanos + case Some(record) => + // oldest log provides the new updating point + val waitTime = record._1 + per.toNanos - System.nanoTime() + if waitTime > 0 then waitTime else 0L end getNextUpdate def update: Unit = val now = System.nanoTime() // retrieving current queue to append it later if some elements were added concurrently - val q = log.getAndUpdate(_ => new LinkedList[(Long, Int)]()) + val q = log.getAndUpdate(_ => Queue[(Long, Int)]()) // remove records older than window size - while semaphore.availablePermits() < rate && q.peek()._1 + per.toNanos < now - do - val (_, permits) = q.poll() - semaphore.release(permits) + val qUpdated = removeRecords(q, now) // merge old records with the ones concurrently added - val _ = log.updateAndGet(q2 => - val qBefore = q - while q2.size() > 0 - do - qBefore.add(q2.poll()) - () - qBefore + val _ = log.updateAndGet(qNew => + qNew.foldLeft(qUpdated) { case (queue, record) => + queue.enqueue(record) + } ) end update + private def removeRecords(q: Queue[(Long, Int)], now: Long): Queue[(Long, Int)] = + q.dequeueOption match + case None => q + case Some((head, tail)) => + if semaphore.availablePermits() < rate && head._1 + per.toNanos < now then + val (_, permits) = head + semaphore.release(permits) + removeRecords(tail, now) + else q + end SlidingWindow /** Token/leaky bucket algorithm It adds a token to start 
an new operation each `per` with a maximum number of tokens of `rate`. diff --git a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala b/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala deleted file mode 100644 index 3bcc5c72..00000000 --- a/core/src/test/scala/ox/resilience/GenericRateLimiterTest.scala +++ /dev/null @@ -1,501 +0,0 @@ -package ox.resilience - -import ox.* -import ox.util.ElapsedTime -import org.scalatest.flatspec.AnyFlatSpec -import org.scalatest.matchers.should.Matchers -import org.scalatest.{EitherValues, TryValues} -import scala.concurrent.duration.* -import java.util.concurrent.atomic.AtomicReference - -class GenericRateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: - behavior of "fixed rate GenericRateLimiter" - - it should "drop operation when rate limit is exceeded" in { - supervised: - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - given GenericRateLimiter.Strategy.Drop = GenericRateLimiter.Strategy.Drop() - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } - - it should "restart rate limiter after given duration" in { - supervised: - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions +=1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } - - it should "block operation when rate limit is exceeded" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val before = System.currentTimeMillis() - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - val after = System.currentTimeMillis() - - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - (after - before) should be >= 1000L - executions shouldBe 3 - } - } - - it should "respect time constraints when blocking" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val result3 = operationN(3) // blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result6 = operationN(6) - val result7 = operationN(7) // blocks until 3 seconds elapsed - val time4 = System.currentTimeMillis() - val result8 = operationN(8) - val result9 = operationN(9) // 
blocks until 4 seconds elapsed - val time5 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - result6 shouldBe 6 - result7 shouldBe 7 - result8 shouldBe 8 - result9 shouldBe 9 - (time2 - time1) should be >= 1000L - 10 - (time3 - time1) should be >= 2000L - 10 - (time4 - time1) should be >= 3000L - 10 - (time5 - time1) should be >= 4000L - 10 - (time2 - time1) should be <= 1200L - (time3 - time1) should be <= 2200L - (time4 - time1) should be <= 3200L - (time5 - time1) should be <= 4200L - order should be(List(9, 8, 7, 6, 5, 4, 3, 2, 1)) - } - } - - it should "respect time constraints when blocking concurrently" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } - } - - val before = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - forkUser: - sleep(150.millis) - operationN(4) - forkUser: - sleep(200.millis) - operationN(5) - forkUser: - sleep(250.millis) - operationN(6) - forkUser: - sleep(300.millis) - operationN(7) - forkUser: - sleep(350.millis) - operationN(8) - forkUser: - sleep(400.millis) - operationN(9) - } - val after = System.currentTimeMillis() - - (after - before) should be >= 4000L - 10 - (after - before) should be <= 4200L - } - } - - - - behavior of "sliding window GenericRateLimiter" - it should "drop operation when rate limit is exceeded" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - executions shouldBe 2 - } - } - - it should "restart rate limiter after given duration" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - val result3 = rateLimiter(operation) - ox.sleep(1.second) - val result4 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe Some(0) - result3 shouldBe None - result4 shouldBe Some(0) - executions shouldBe 3 - } - } - - it should "block operation when rate limit is exceeded" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - - result1 shouldBe 0 - result2 shouldBe 0 - result3 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } - } - - it should "respect time constraints when blocking" in { - supervised { - val 
rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - ox.sleep(500.millis) - val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed - val time2 = System.currentTimeMillis() // 1 second - val result4 = operationN(4) - val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - (time2 - time1) should be >= 1000L - 10 - (time3 - time1) should be >= 1500L - 10 - (time2 - time1) should be <= 1200L - (time3 - time1) should be <= 1700L - order should be(List(4, 3, 2, 1)) - } - } - - it should "respect time constraints when blocking concurrently" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } - } - - val before = System.currentTimeMillis() // 0 seconds - supervised { - forkUser: - operationN(1) - forkUser: - sleep(300.millis) - operationN(2) - forkUser: - sleep(400.millis) - operationN(3) - forkUser: - sleep(700.millis) - operationN(4) - } - val after = System.currentTimeMillis - - (after - before) should be >= 1300L - 10 - (after - before) should be <= 1400L - } - } - - behavior of "bucket GenericRateLimiter" - - it should "drop operation when rate limit is exceeded" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val result1 = rateLimiter(operation) - val result2 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe None - executions shouldBe 1 - } - } - - it should "refill token after time elapsed from last refill and not before" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Drop(), - RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val result1 = rateLimiter(operation) - ox.sleep(500.millis) - val result2 = rateLimiter(operation) - ox.sleep(600.millis) - val result3 = rateLimiter(operation) - - result1 shouldBe Some(0) - result2 shouldBe None - result3 shouldBe Some(0) - executions shouldBe 2 - } - } - - it should "block operation when rate limit is exceeded" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) - ) - - var executions = 0 - def operation = { - executions += 1 - 0 - } - - val ((result1, result2, result3), timeElapsed) = measure { - val r1 = rateLimiter(operation) - val r2 = rateLimiter(operation) - val r3 = rateLimiter(operation) - (r1, r2, r3) - } - - result1 shouldBe 0 - result2 shouldBe 0 - timeElapsed.toMillis should be >= 1000L - 10 - executions shouldBe 3 - } - } - - it should "respect time constraints when blocking" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.Bucket(2, 
FiniteDuration(1, "second")) - ) - - var order = List.empty[Int] - def operationN(n: Int) = { - rateLimiter { - order = n :: order - n - } - } - - val time1 = System.currentTimeMillis() // 0 seconds - val result1 = operationN(1) - val result2 = operationN(2) - val time2 = System.currentTimeMillis() // 1 second - sleep(2.seconds) // add 2 tokens - val result3 = operationN(3) // blocks until 1 second elapsed - val result4 = operationN(4) // blocks until 2 seconds elapsed - val time3 = System.currentTimeMillis() - val result5 = operationN(5) // blocks until 2 seconds elapsed - val time4 = System.currentTimeMillis() - - result1 shouldBe 1 - result2 shouldBe 2 - result3 shouldBe 3 - result4 shouldBe 4 - result5 shouldBe 5 - (time2 - time1) should be >= 1000L - 10 - (time3 - time1) should be >= 3000L - 10 - (time4 - time1) should be >= 4000L - 10 - (time2 - time1) should be <= 1200L - (time3 - time1) should be <= 3200L - (time4 - time1) should be <= 4200L - order should be(List(5, 4, 3, 2, 1)) - } - } - - it should "respect time constraints when blocking concurrently" in { - supervised { - val rateLimiter = GenericRateLimiter( - GenericRateLimiter.Executor.Block(), - RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) - ) - - val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { - rateLimiter { - order.updateAndGet(ord => n :: ord) - n - } - } - - val before = System.currentTimeMillis() - supervised { - forkUser: - operationN(1) - forkUser: - sleep(50.millis) - operationN(2) - forkUser: - sleep(100.millis) - operationN(3) - forkUser: - sleep(150.millis) - operationN(4) - } - val after = System.currentTimeMillis() - - (after - before) should be >= 3000L - 10 - (after - before) should be <= 3200L - } - } - -end GenericRateLimiterTest \ No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala new file mode 100644 index 00000000..dab903f3 --- /dev/null +++ b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala @@ -0,0 +1,130 @@ +package ox.resilience + +import ox.* +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers +import org.scalatest.{EitherValues, TryValues} +import scala.concurrent.duration.* + +class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValues with TryValues: + behavior of "RateLimiter interface" + + it should "drop or block operation depending on method used for fixed rate algorithm" in { + supervised: + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + val result5 = rateLimiter.runBlocking(operation) + val result6 = rateLimiter.runOrDrop(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + result5 shouldBe 0 + result6 shouldBe None + executions shouldBe 4 + } + + it should "drop or block operation depending on method used for sliding window algorithm" in { + supervised: + val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val 
result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + val result5 = rateLimiter.runBlocking(operation) + val result6 = rateLimiter.runOrDrop(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) + result3 shouldBe None + result4 shouldBe 0 + result5 shouldBe 0 + result6 shouldBe None + executions shouldBe 4 + } + + it should "drop or block operation depending on method used for bucket algorithm" in { + supervised: + val rateLimiter = RateLimiter.bucket(2, FiniteDuration(1, "second")) + + var executions = 0 + def operation = { + executions +=1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + val result4 = rateLimiter.runBlocking(operation) + val result5 = rateLimiter.runBlocking(operation) + val result6 = rateLimiter.runOrDrop(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe None + result4 shouldBe 0 + result5 shouldBe 0 + result6 shouldBe None + executions shouldBe 3 + } + + it should "drop or block operation concurrently" in { + supervised: + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + + def operation = 0 + + var result1: Option[Int] = Some(-1) + var result2: Option[Int] = Some(-1) + var result3: Option[Int] = Some(-1) + var result4: Int = -1 + var result5: Int = -1 + var result6: Int = -1 + + // run two operations to block the rate limiter + rateLimiter.runOrDrop(operation) + rateLimiter.runOrDrop(operation) + + // operations with runOrDrop should be dropped while operations with runBlocking should wait + supervised: + forkUser: + result1 = rateLimiter.runOrDrop(operation) + forkUser: + result2 = rateLimiter.runOrDrop(operation) + forkUser: + result3 = rateLimiter.runOrDrop(operation) + forkUser: + result4 = rateLimiter.runBlocking(operation) + forkUser: + result5 = rateLimiter.runBlocking(operation) + forkUser: + result6 = rateLimiter.runBlocking(operation) + + result1 shouldBe None + result2 shouldBe None + result3 shouldBe None + result4 shouldBe 0 + result5 shouldBe 0 + result6 shouldBe 0 + } + + \ No newline at end of file diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 97984d04..5530eb11 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -1,18 +1,22 @@ package ox.resilience import ox.* +import ox.util.ElapsedTime import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import org.scalatest.{EitherValues, TryValues} import scala.concurrent.duration.* +import java.util.concurrent.atomic.AtomicReference -class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues: - behavior of "RateLimiter" +class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with TryValues with ElapsedTime: + behavior of "fixed rate RateLimiter" - it should "drop or block operation depending on method used for fixed rate algorithm" in { + it should "drop operation when rate limit is exceeded" in { supervised: - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + var executions = 0 def operation = { executions +=1 @@ -22,23 +26,19 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T val 
result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - val result5 = rateLimiter.runBlocking(operation) - val result6 = rateLimiter.runOrDrop(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None - result4 shouldBe 0 - result5 shouldBe 0 - result6 shouldBe None - executions shouldBe 4 + executions shouldBe 2 } - it should "drop or block operation depending on method used for sliding window algorithm" in { + it should "restart rate limiter after given duration" in { supervised: - val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) - + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + var executions = 0 def operation = { executions +=1 @@ -48,83 +48,438 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - val result5 = rateLimiter.runBlocking(operation) - val result6 = rateLimiter.runOrDrop(operation) + ox.sleep(1.second) + val result4 = rateLimiter.runOrDrop(operation) result1 shouldBe Some(0) result2 shouldBe Some(0) result3 shouldBe None - result4 shouldBe 0 - result5 shouldBe 0 - result6 shouldBe None - executions shouldBe 4 + result4 shouldBe Some(0) + executions shouldBe 3 } - it should "drop or block operation depending on method used for bucket algorithm" in { - supervised: - val rateLimiter = RateLimiter.bucket(2, FiniteDuration(1, "second")) - + it should "block operation when rate limit is exceeded" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + var executions = 0 def operation = { - executions +=1 + executions += 1 + 0 + } + + val before = System.currentTimeMillis() + val result1 = rateLimiter.runBlocking(operation) + val result2 = rateLimiter.runBlocking(operation) + val result3 = rateLimiter.runBlocking(operation) + val after = System.currentTimeMillis() + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + (after - before) should be >= 1000L + executions shouldBe 3 + } + } + + it should "respect time constraints when blocking" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter.runBlocking { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val result3 = operationN(3) // blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result6 = operationN(6) + val result7 = operationN(7) // blocks until 3 seconds elapsed + val time4 = System.currentTimeMillis() + val result8 = operationN(8) + val result9 = operationN(9) // blocks until 4 seconds elapsed + val time5 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + result6 shouldBe 6 + result7 shouldBe 7 + result8 shouldBe 8 + result9 shouldBe 9 + (time2 - time1) should be >= 1000L - 10 + (time3 - 
time1) should be >= 2000L - 10 + (time4 - time1) should be >= 3000L - 10 + (time5 - time1) should be >= 4000L - 10 + (time2 - time1) should be <= 1200L + (time3 - time1) should be <= 2200L + (time4 - time1) should be <= 3200L + (time5 - time1) should be <= 4200L + order should be(List(9, 8, 7, 6, 5, 4, 3, 2, 1)) + } + } + + it should "respect time constraints when blocking concurrently" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter.runBlocking { + order.updateAndGet(ord => n :: ord) + n + } + } + + val before = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) + forkUser: + sleep(200.millis) + operationN(5) + forkUser: + sleep(250.millis) + operationN(6) + forkUser: + sleep(300.millis) + operationN(7) + forkUser: + sleep(350.millis) + operationN(8) + forkUser: + sleep(400.millis) + operationN(9) + } + val after = System.currentTimeMillis() + + (after - before) should be >= 4000L - 10 + (after - before) should be <= 4200L + } + } + + + + behavior of "sliding window RateLimiter" + it should "drop operation when rate limit is exceeded" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions += 1 0 } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) val result3 = rateLimiter.runOrDrop(operation) - val result4 = rateLimiter.runBlocking(operation) - val result5 = rateLimiter.runBlocking(operation) - val result6 = rateLimiter.runOrDrop(operation) result1 shouldBe Some(0) - result2 shouldBe None + result2 shouldBe Some(0) + result3 shouldBe None + executions shouldBe 2 + } + } + + it should "restart rate limiter after given duration" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions += 1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + val result3 = rateLimiter.runOrDrop(operation) + ox.sleep(1.second) + val result4 = rateLimiter.runOrDrop(operation) + + result1 shouldBe Some(0) + result2 shouldBe Some(0) result3 shouldBe None - result4 shouldBe 0 - result5 shouldBe 0 - result6 shouldBe None + result4 shouldBe Some(0) executions shouldBe 3 + } } - it should "drop or block operation concurrently with BlockOrDrop executor" in { - supervised: - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - - def operation = 0 - - var result1: Option[Int] = Some(-1) - var result2: Option[Int] = Some(-1) - var result3: Option[Int] = Some(-1) - var result4: Int = -1 - var result5: Int = -1 - var result6: Int = -1 - - // run two operations to block the rate limiter - rateLimiter.runOrDrop(operation) - rateLimiter.runOrDrop(operation) - - // operations with runOrDrop should be dropped while operations with runBlocking should wait - supervised: - forkUser: - result1 = rateLimiter.runOrDrop(operation) - forkUser: - result2 = rateLimiter.runOrDrop(operation) - forkUser: - result3 = rateLimiter.runOrDrop(operation) - forkUser: - result4 = rateLimiter.runBlocking(operation) - forkUser: - result5 = 
rateLimiter.runBlocking(operation) - forkUser: - result6 = rateLimiter.runBlocking(operation) - - result1 shouldBe None - result2 shouldBe None - result3 shouldBe None - result4 shouldBe 0 - result5 shouldBe 0 - result6 shouldBe 0 + it should "block operation when rate limit is exceeded" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions += 1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter.runBlocking(operation) + val r2 = rateLimiter.runBlocking(operation) + val r3 = rateLimiter.runBlocking(operation) + (r1, r2, r3) + } + + result1 shouldBe 0 + result2 shouldBe 0 + result3 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 } + } + + it should "respect time constraints when blocking" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter.runBlocking { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + ox.sleep(500.millis) + val result2 = operationN(2) + val result3 = operationN(3) //blocks until 1 second elapsed + val time2 = System.currentTimeMillis() // 1 second + val result4 = operationN(4) + val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + (time2 - time1) should be >= 1000L - 10 + (time3 - time1) should be >= 1500L - 10 + (time2 - time1) should be <= 1200L + (time3 - time1) should be <= 1700L + order should be(List(4, 3, 2, 1)) + } + } + + it should "respect time constraints when blocking concurrently" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.SlidingWindow(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter.runBlocking { + order.updateAndGet(ord => n :: ord) + n + } + } + + val before = System.currentTimeMillis() // 0 seconds + supervised { + forkUser: + operationN(1) + forkUser: + sleep(300.millis) + operationN(2) + forkUser: + sleep(400.millis) + operationN(3) + forkUser: + sleep(700.millis) + operationN(4) + } + val after = System.currentTimeMillis + + (after - before) should be >= 1300L - 10 + (after - before) should be <= 1400L + } + } + + behavior of "bucket RateLimiter" + + it should "drop operation when rate limit is exceeded" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions += 1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + val result2 = rateLimiter.runOrDrop(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + executions shouldBe 1 + } + } + + it should "refill token after time elapsed from last refill and not before" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions += 1 + 0 + } + + val result1 = rateLimiter.runOrDrop(operation) + ox.sleep(500.millis) + val result2 = rateLimiter.runOrDrop(operation) + ox.sleep(600.millis) + val result3 = rateLimiter.runOrDrop(operation) + + result1 shouldBe Some(0) + result2 shouldBe None + result3 shouldBe Some(0) + executions 
shouldBe 2 + } + } + + it should "block operation when rate limit is exceeded" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) + ) + + var executions = 0 + def operation = { + executions += 1 + 0 + } + + val ((result1, result2, result3), timeElapsed) = measure { + val r1 = rateLimiter.runBlocking(operation) + val r2 = rateLimiter.runBlocking(operation) + val r3 = rateLimiter.runBlocking(operation) + (r1, r2, r3) + } + + result1 shouldBe 0 + result2 shouldBe 0 + timeElapsed.toMillis should be >= 1000L - 10 + executions shouldBe 3 + } + } + + it should "respect time constraints when blocking" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) + ) + + var order = List.empty[Int] + def operationN(n: Int) = { + rateLimiter.runBlocking { + order = n :: order + n + } + } + + val time1 = System.currentTimeMillis() // 0 seconds + val result1 = operationN(1) + val result2 = operationN(2) + val time2 = System.currentTimeMillis() // 1 second + sleep(2.seconds) // add 2 tokens + val result3 = operationN(3) // blocks until 1 second elapsed + val result4 = operationN(4) // blocks until 2 seconds elapsed + val time3 = System.currentTimeMillis() + val result5 = operationN(5) // blocks until 2 seconds elapsed + val time4 = System.currentTimeMillis() + + result1 shouldBe 1 + result2 shouldBe 2 + result3 shouldBe 3 + result4 shouldBe 4 + result5 shouldBe 5 + (time2 - time1) should be >= 1000L - 10 + (time3 - time1) should be >= 3000L - 10 + (time4 - time1) should be >= 4000L - 10 + (time2 - time1) should be <= 1200L + (time3 - time1) should be <= 3200L + (time4 - time1) should be <= 4200L + order should be(List(5, 4, 3, 2, 1)) + } + } + + it should "respect time constraints when blocking concurrently" in { + supervised { + val rateLimiter = RateLimiter( + RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second")) + ) + + val order = new AtomicReference(List.empty[Int]) + def operationN(n: Int) = { + rateLimiter.runBlocking { + order.updateAndGet(ord => n :: ord) + n + } + } + + val before = System.currentTimeMillis() + supervised { + forkUser: + operationN(1) + forkUser: + sleep(50.millis) + operationN(2) + forkUser: + sleep(100.millis) + operationN(3) + forkUser: + sleep(150.millis) + operationN(4) + } + val after = System.currentTimeMillis() + + (after - before) should be >= 3000L - 10 + (after - before) should be <= 3200L + } + } - \ No newline at end of file +end RateLimiterTest \ No newline at end of file diff --git a/doc/utils/custom-rate-limiter.md b/doc/utils/custom-rate-limiter.md deleted file mode 100644 index 0391b2b1..00000000 --- a/doc/utils/custom-rate-limiter.md +++ /dev/null @@ -1,109 +0,0 @@ -# Custom rate limiter -A rate limiter depends on an algorithm controlling whether an operation can be executed and a executor controlling the exact behaviour after rejecting an operation. Algorithms can be customized and used with the existing `RateLimiter` API. The executor can also be customized by using a different API: `GenericRateLimiter`. - -## Generic rate limiter -The generic rate limiter API provides utilities to build rate limiters with custom execution policies. This can be useful to implement more complex policies like throttling of operations or passing parameters to modify the behavior of the rate limiter per operation. 
- -The basic syntax for generic rate limiters is the following: - -```scala -val executor = GenericRateLimiter.Executor.Drop() -val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "seconds")) -val rateLimiter = GenericRateLimiter(algorithm, rateLimiter) -type T -def operation: T = ??? - -// internally, existing strategies are imported as implicits -// so you don't need to specify a strategy if there is only one for the given executor -val result: Some[T] = rateLimiter(operation) -``` - -You can also specify the desired execution strategy: - -```scala -val executor = GenericRateLimiter.Executor.BlockOrDrop() -val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "seconds")) -val rateLimiter = GenericRateLimiter(algorithm, rateLimiter) -type T -def operation: T = ??? - -// This doesn't work because the rate limiter doesn't know which strategy to choose for the current executor -//val result: Some[T] = rateLimiter(operation) - -val resultDrop: Some[T] = rateLimiter(operation)(using GenericRateLimiter.Strategy.Drop()) -val resultBlock: T = rateLimiter(operation)(using GenericRateLimiter.Strategy.Block()) -``` - -Note that customizing the strategies employed, it's possible to have different return types for the same rate limiter. - -## Executor - -A `GenericRateLimiter` is defined by its executor and by its algorithm: - -```scala -case class GenericRateLimiter[Returns[_[_]] <: Strategy[_]]( - executor: Executor[Returns], - algorithm: RateLimiterAlgorithm -): - def apply[T, Result[_]](operation: => T)(using Returns[Result]): Result[T] -``` - -The `Executor` and `Strategy` API are as follows: - -```scala -sealed trait Strategy[F[*]]: - def run[T](operation: => T): F[T] - -trait Executor[Returns[_[_]] <: Strategy[_]]: - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: Returns[Result]): Result[T] - def run[T, Result[_]](operation: => T)(using cfg: Returns[Result]): Result[T] // calls Strategy.run by default - end Executor -``` - -To create a custom executor, you need to define a custom `Returns` higher-kinded trait extending `Strategy` that will codify the possible behaviours of the executor or use one of the predefined. Then, the operations in `Executor` will use the leaf classes extending your custom `Returns` to implement the behaviour. When calling your custom rate limiter, you will need to make available through implicits your desired execution strategy, which can be changed per operation. - -Note that your custom executor should not handle the updating of the `RateLimiterAlgorithm`. This is done automatically by the `GenericRateLimiter`. - -### Predefined strategies -`Strategy` is extended by three traits: -- Trait `Strategy.Blocking` gives the same return type as the operation to be executed. -- Trait `Strategy.Dropping` gives an `Option[T]` for an operation of type `=> T`. -- Trait `Strategy.BlockOrDrop` allows both types of return depending on the subclass employed. - -The traits are implemented by `Strategy.Drop()` and `Strategy.Block()`. - -### Custom strategies -Custom strategies allow not only for the specification of the type, but also to add other information to the `Executor`. A possible use could be to add a timeout. 
The strategy could be defined like this: -```scala -type Id[A] = A -sealed trait CustomStrategy[F[*]] extends Strategy[F] -case class DropAfterTimeout(timeout: Long) extends CustomStrategy[Option]: - def run[T](operation: => T): Option[T] = Some(operation) -case class RunAfterTimeout(timeout: Long) extends CustomStrategy[Id]: - def run[T](operation: => T): T = operation -``` - -Another common example is to specify the "size" of an operation. This is useful when execution depends on the size of packages of data received. -```scala -type Id[A] = A -sealed trait CustomStrategy[F[*]] extends Strategy[F] -case class RunWithSize(size: Int) extends CustomStrategy[Id]: - def run[T](operation: => T): T = operation -``` - -After defining a custom strategy, you need to create a corresponding executor to use the `GenericRateLimiter` API. A possible implementation for the last example would be: -```scala -case class CustomExecutor() extends Executor[Strategy.CustomStrategy]: - def execute[T, Result[*]](algorithm: RateLimiterAlgorithm, operation: => T)(using cfg: CustomStrategy[Result]): Result[T] = - cfg match - case RunWithSize(permitsToAcquire) => - algorithm.acquire(permitsToAcquire) - run(operation) -``` - -## Rate limiter algorithm -The `RateLimiterAlgorithm` employed by `RateLimiter` and `GenericRateLimiter` can be extended to implement new algorithms or modify existing ones. Its interface is modelled like that of a `Semaphore` although the underlying implementation could be different. For best compatibility with existing executors, methods `acquire` and `tryAcquire` should offer the same garanties as Java `Semaphores`. - -Aditionally, there are two methods employed by the `GenericRateLimiter` for updating its internal state automatically: -- `def update: Unit`: Updates the internal state of the rate limiter to reflect its current situation. -- `def getNextUpdate: Long`: Returns the time in nanoseconds after which a new `update` needs to be called. diff --git a/doc/utils/rate-limiter.md b/doc/utils/rate-limiter.md index 14fa37da..727b4216 100644 --- a/doc/utils/rate-limiter.md +++ b/doc/utils/rate-limiter.md @@ -1,5 +1,5 @@ # Rate limiter -The rate limiter mechanism allows controlling the rate at which operations are executed. It ensures that a certain number of operations are performed within a specified time frame, preventing system overload and ensuring fair resource usage. Note that the implemented limiting mechanism within `Ox` only take into account the start of execution and not the whole execution of an operation. This could be tweaked customizing the rate limiter executor and algorithm employed. +The rate limiter mechanism allows controlling the rate at which operations are executed. It ensures that a certain number of operations are performed within a specified time frame, preventing system overload and ensuring fair resource usage. Note that the implemented limiting mechanism within `Ox` only take into account the start of execution and not the whole execution of an operation. This could be tweaked customizing the rate limiter algorithm employed or the interface of rate limiter. ## API @@ -20,8 +20,6 @@ val droppedOperation: Some[T] = rateLimiter.runOrDrop(operation) `blockedOperation` will block the operation until the algorithm allows it to be executed. Therefore, the return type is the same as the operation. 
On the other hand, if the algorithm doesn't allow execution of more operations, `runOrDrop` will drop the operation returning `None` and wrapping the result in `Some` when the operation is successfully executed. -The `RateLimiter` API uses the `GenericRateLimiter` API underneath. This API allows customizing the behaviour of rate limiters. See [custom rate limiters](custom-rate-limiter.md) for more details. - ## Operation definition The `operation` can be provided directly using a by-name parameter, i.e. `f: => T`. @@ -33,7 +31,6 @@ The configuration of a `RateLimiter` depends on an underlying algorithm that con - `RateLimiterAlgorithm.SlidingWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in the a windows of time of duration `dur`. - `RateLimiterAlgorithm.Bucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity of tokens availables in the token bucket algorithm and one token is added after `dur`. It can represent both the leaky bucket algorithm or the tocken bucket algorithm. -It's possible to define your own algorithm and executor. This allows to specify custom configuration per operation, for example, the "size" that an operation must occupy. See [custom rate limiters](custom-rate-limiter.md) for more details. ### API shorthands You can use one of the following shorthands to define a Rate Limiter with the corresponding algorithm: @@ -43,3 +40,10 @@ You can use one of the following shorthands to define a Rate Limiter with the co - `RateLimiter.bucket(maximum: Int, dur: FiniteDuration)`, See the tests in `ox.resilience.*` for more. + +## Custom rate limiter algorithms +The `RateLimiterAlgorithm` employed by `RateLimiter` and `GenericRateLimiter` can be extended to implement new algorithms or modify existing ones. Its interface is modelled like that of a `Semaphore` although the underlying implementation could be different. For best compatibility with the existing interface of `RateLimiter`, methods `acquire` and `tryAcquire` should offer the same garanties as Java `Semaphores`. + +Aditionally, there are two methods employed by the `GenericRateLimiter` for updating its internal state automatically: +- `def update: Unit`: Updates the internal state of the rate limiter to reflect its current situation. +- `def getNextUpdate: Long`: Returns the time in nanoseconds after which a new `update` needs to be called. 
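+
+For illustration only, the following is a rough sketch of what a custom algorithm could look like. It is not part of Ox: the name `FixedCapacity` is hypothetical and the replenishing semantics are deliberately simplified, assuming the permit-based `acquire`/`tryAcquire` signatures used by the built-in algorithms together with the `update` and `getNextUpdate` methods described above:
+
+```scala
+import java.util.concurrent.Semaphore
+import scala.concurrent.duration.FiniteDuration
+
+// Hypothetical sketch: restores the full capacity of `rate` permits on every
+// `update`, and asks to be updated again after `per`.
+case class FixedCapacity(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm:
+  private val semaphore = new Semaphore(rate)
+
+  def acquire(permits: Int): Unit = semaphore.acquire(permits)
+  def tryAcquire(permits: Int): Boolean = semaphore.tryAcquire(permits)
+
+  // bring the number of available permits back to `rate`; called from the
+  // rate limiter's background update loop
+  def update: Unit = semaphore.release(rate - semaphore.availablePermits())
+
+  // time in nanoseconds until the next `update` should run
+  def getNextUpdate: Long = per.toNanos
+```
+
+Such an algorithm could then be passed to a `RateLimiter` in the same way as the built-in ones.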
\ No newline at end of file From 13eadcb581f73229bc295d05378fe035d7e75ce1 Mon Sep 17 00:00:00 2001 From: pablf Date: Fri, 15 Nov 2024 09:23:01 +0100 Subject: [PATCH 15/23] remove unnecesary condition --- core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index 03f361cc..e919c52f 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -119,7 +119,7 @@ object RateLimiterAlgorithm: q.dequeueOption match case None => q case Some((head, tail)) => - if semaphore.availablePermits() < rate && head._1 + per.toNanos < now then + if head._1 + per.toNanos < now then val (_, permits) = head semaphore.release(permits) removeRecords(tail, now) From 26028cee936ddfa65393c829d7c22b249714bc1e Mon Sep 17 00:00:00 2001 From: adamw Date: Fri, 15 Nov 2024 09:39:44 +0100 Subject: [PATCH 16/23] Remove old docs file --- doc/index.md | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/index.md b/doc/index.md index 6267cc50..bbfdc96c 100644 --- a/doc/index.md +++ b/doc/index.md @@ -69,7 +69,6 @@ In addition to this documentation, ScalaDocs can be browsed at [https://javadoc. utils/oxapp utils/retries utils/rate-limiter - utils/custom-rate-limiter utils/repeat utils/scheduled utils/resources From 5cdf3b1c602219a7d870b2b4ef9cfdea76429d49 Mon Sep 17 00:00:00 2001 From: adamw Date: Fri, 15 Nov 2024 09:43:58 +0100 Subject: [PATCH 17/23] Format --- .../scala/ox/resilience/RateLimiter.scala | 22 +---- .../ox/resilience/RateLimiterAlgorithm.scala | 21 ++-- .../resilience/RateLimiterInterfaceTest.scala | 98 +++++++++---------- .../scala/ox/resilience/RateLimiterTest.scala | 61 +++++------- 4 files changed, 81 insertions(+), 121 deletions(-) diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index 9e40d6df..c32f15c8 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -3,11 +3,8 @@ package ox.resilience import scala.concurrent.duration.FiniteDuration import ox.* -/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. - */ -case class RateLimiter( - algorithm: RateLimiterAlgorithm -)(using Ox): +/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. 
*/ +case class RateLimiter(algorithm: RateLimiterAlgorithm)(using Ox): val _ = fork: @@ -45,10 +42,7 @@ object RateLimiter: * @param windowSize * Interval of time to pass before reset of the rate limiter */ - def fixedRate( - maxRequests: Int, - windowSize: FiniteDuration - )(using Ox): RateLimiter = + def fixedRate(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter = RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) end fixedRate @@ -59,10 +53,7 @@ object RateLimiter: * @param windowSize * Size of the window */ - def slidingWindow( - maxRequests: Int, - windowSize: FiniteDuration - )(using Ox): RateLimiter = + def slidingWindow(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter = RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) end slidingWindow @@ -73,10 +64,7 @@ object RateLimiter: * @param refillInterval * Interval of time after which a token is added */ - def bucket( - maxTokens: Int, - refillInterval: FiniteDuration - )(using Ox): RateLimiter = + def bucket(maxTokens: Int, refillInterval: FiniteDuration)(using Ox): RateLimiter = RateLimiter(RateLimiterAlgorithm.Bucket(maxTokens, refillInterval)) end bucket end RateLimiter diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala index e919c52f..9dcf305b 100644 --- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala +++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala @@ -19,28 +19,23 @@ trait RateLimiterAlgorithm: */ def acquire(permits: Int): Unit - /** Tries to acquire a permit to execute the operation. This method should not block. - */ + /** Tries to acquire a permit to execute the operation. This method should not block. */ final def tryAcquire: Boolean = tryAcquire(1) - /** Tries to acquire permits to execute the operation. This method should not block. - */ + /** Tries to acquire permits to execute the operation. This method should not block. */ def tryAcquire(permits: Int): Boolean - /** Updates the internal state of the rate limiter to check whether new operations can be accepted. - */ + /** Updates the internal state of the rate limiter to check whether new operations can be accepted. */ def update: Unit - /** Returns the time in nanoseconds that needs to elapse until the next update. It should not modify internal state. - */ + /** Returns the time in nanoseconds that needs to elapse until the next update. It should not modify internal state. */ def getNextUpdate: Long end RateLimiterAlgorithm object RateLimiterAlgorithm: - /** Fixed rate algorithm It allows starting at most `rate` operations in consecutively segments of duration `per`. - */ + /** Fixed rate algorithm It allows starting at most `rate` operations in consecutively segments of duration `per`. */ case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: private val lastUpdate = new AtomicLong(System.nanoTime()) private val semaphore = new Semaphore(rate) @@ -63,8 +58,7 @@ object RateLimiterAlgorithm: end FixedRate - /** Sliding window algorithm It allows to start at most `rate` operations in the lapse of `per` before current time. - */ + /** Sliding window algorithm It allows to start at most `rate` operations in the lapse of `per` before current time. 
*/ case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: // stores the timestamp and the number of permits acquired after calling acquire or tryAcquire succesfully private val log = new AtomicReference[Queue[(Long, Int)]](Queue[(Long, Int)]()) @@ -127,8 +121,7 @@ object RateLimiterAlgorithm: end SlidingWindow - /** Token/leaky bucket algorithm It adds a token to start an new operation each `per` with a maximum number of tokens of `rate`. - */ + /** Token/leaky bucket algorithm It adds a token to start an new operation each `per` with a maximum number of tokens of `rate`. */ case class Bucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm: private val refillInterval = per.toNanos private val lastRefillTime = new AtomicLong(System.nanoTime()) diff --git a/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala index dab903f3..2c435fa7 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala @@ -12,12 +12,11 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu it should "drop or block operation depending on method used for fixed rate algorithm" in { supervised: val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - + var executions = 0 - def operation = { - executions +=1 + def operation = + executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -38,12 +37,11 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu it should "drop or block operation depending on method used for sliding window algorithm" in { supervised: val rateLimiter = RateLimiter.slidingWindow(2, FiniteDuration(1, "second")) - + var executions = 0 - def operation = { - executions +=1 + def operation = + executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -64,12 +62,11 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu it should "drop or block operation depending on method used for bucket algorithm" in { supervised: val rateLimiter = RateLimiter.bucket(2, FiniteDuration(1, "second")) - + var executions = 0 - def operation = { - executions +=1 + def operation = + executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -88,43 +85,42 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu } it should "drop or block operation concurrently" in { + supervised: + val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) + + def operation = 0 + + var result1: Option[Int] = Some(-1) + var result2: Option[Int] = Some(-1) + var result3: Option[Int] = Some(-1) + var result4: Int = -1 + var result5: Int = -1 + var result6: Int = -1 + + // run two operations to block the rate limiter + rateLimiter.runOrDrop(operation) + rateLimiter.runOrDrop(operation) + + // operations with runOrDrop should be dropped while operations with runBlocking should wait supervised: - val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second")) - - def operation = 0 - - var result1: Option[Int] = Some(-1) - var result2: Option[Int] = Some(-1) - var result3: Option[Int] = Some(-1) - var result4: Int = -1 - var result5: Int = -1 - var result6: Int = -1 - - // run two operations to block the rate 
limiter - rateLimiter.runOrDrop(operation) - rateLimiter.runOrDrop(operation) - - // operations with runOrDrop should be dropped while operations with runBlocking should wait - supervised: - forkUser: - result1 = rateLimiter.runOrDrop(operation) - forkUser: - result2 = rateLimiter.runOrDrop(operation) - forkUser: - result3 = rateLimiter.runOrDrop(operation) - forkUser: - result4 = rateLimiter.runBlocking(operation) - forkUser: - result5 = rateLimiter.runBlocking(operation) - forkUser: - result6 = rateLimiter.runBlocking(operation) - - result1 shouldBe None - result2 shouldBe None - result3 shouldBe None - result4 shouldBe 0 - result5 shouldBe 0 - result6 shouldBe 0 - } - - \ No newline at end of file + forkUser: + result1 = rateLimiter.runOrDrop(operation) + forkUser: + result2 = rateLimiter.runOrDrop(operation) + forkUser: + result3 = rateLimiter.runOrDrop(operation) + forkUser: + result4 = rateLimiter.runBlocking(operation) + forkUser: + result5 = rateLimiter.runBlocking(operation) + forkUser: + result6 = rateLimiter.runBlocking(operation) + + result1 shouldBe None + result2 shouldBe None + result3 shouldBe None + result4 shouldBe 0 + result5 shouldBe 0 + result6 shouldBe 0 + } +end RateLimiterInterfaceTest diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala index 5530eb11..ee8c4ce6 100644 --- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala +++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala @@ -15,13 +15,12 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T supervised: val rateLimiter = RateLimiter( RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) + ) var executions = 0 - def operation = { - executions +=1 + def operation = + executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -33,17 +32,16 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T executions shouldBe 2 } - it should "restart rate limiter after given duration" in { + it should "restart rate limiter after given duration" in { supervised: val rateLimiter = RateLimiter( RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second")) - ) + ) var executions = 0 - def operation = { - executions +=1 + def operation = + executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -65,10 +63,9 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var executions = 0 - def operation = { + def operation = executions += 1 0 - } val before = System.currentTimeMillis() val result1 = rateLimiter.runBlocking(operation) @@ -91,12 +88,11 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var order = List.empty[Int] - def operationN(n: Int) = { + def operationN(n: Int) = rateLimiter.runBlocking { order = n :: order n } - } val time1 = System.currentTimeMillis() // 0 seconds val result1 = operationN(1) @@ -141,12 +137,11 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { + def operationN(n: Int) = rateLimiter.runBlocking { order.updateAndGet(ord => n :: ord) n } - } val before = System.currentTimeMillis() // 0 seconds supervised { @@ -184,8 +179,6 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T } } - - behavior of "sliding window 
RateLimiter" it should "drop operation when rate limit is exceeded" in { supervised { @@ -194,10 +187,9 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var executions = 0 - def operation = { + def operation = executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -217,10 +209,9 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var executions = 0 - def operation = { + def operation = executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -243,10 +234,9 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var executions = 0 - def operation = { + def operation = executions += 1 0 - } val ((result1, result2, result3), timeElapsed) = measure { val r1 = rateLimiter.runBlocking(operation) @@ -270,18 +260,17 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var order = List.empty[Int] - def operationN(n: Int) = { + def operationN(n: Int) = rateLimiter.runBlocking { order = n :: order n } - } val time1 = System.currentTimeMillis() // 0 seconds val result1 = operationN(1) ox.sleep(500.millis) val result2 = operationN(2) - val result3 = operationN(3) //blocks until 1 second elapsed + val result3 = operationN(3) // blocks until 1 second elapsed val time2 = System.currentTimeMillis() // 1 second val result4 = operationN(4) val time3 = System.currentTimeMillis() // blocks until 1.5 seconds elapsed @@ -305,12 +294,11 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { + def operationN(n: Int) = rateLimiter.runBlocking { order.updateAndGet(ord => n :: ord) n } - } val before = System.currentTimeMillis() // 0 seconds supervised { @@ -342,10 +330,9 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var executions = 0 - def operation = { + def operation = executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) val result2 = rateLimiter.runOrDrop(operation) @@ -363,10 +350,9 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var executions = 0 - def operation = { + def operation = executions += 1 0 - } val result1 = rateLimiter.runOrDrop(operation) ox.sleep(500.millis) @@ -388,10 +374,9 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var executions = 0 - def operation = { + def operation = executions += 1 0 - } val ((result1, result2, result3), timeElapsed) = measure { val r1 = rateLimiter.runBlocking(operation) @@ -414,12 +399,11 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) var order = List.empty[Int] - def operationN(n: Int) = { + def operationN(n: Int) = rateLimiter.runBlocking { order = n :: order n } - } val time1 = System.currentTimeMillis() // 0 seconds val result1 = operationN(1) @@ -454,12 +438,11 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T ) val order = new AtomicReference(List.empty[Int]) - def operationN(n: Int) = { + def operationN(n: Int) = rateLimiter.runBlocking { order.updateAndGet(ord => n :: ord) n } - } val before = System.currentTimeMillis() supervised { @@ -482,4 +465,4 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T } } -end RateLimiterTest \ No newline at end of file +end 
RateLimiterTest From 0420af6277994e9c70b87b0fc2b2e5d23a85e55d Mon Sep 17 00:00:00 2001 From: adamw Date: Fri, 15 Nov 2024 09:51:59 +0100 Subject: [PATCH 18/23] Narrow the scope where the Ox concurrency scope is captured --- .../scala/ox/resilience/RateLimiter.scala | 61 +++++++++++-------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index c32f15c8..de383119 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -3,29 +3,19 @@ package ox.resilience import scala.concurrent.duration.FiniteDuration import ox.* -/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. */ -case class RateLimiter(algorithm: RateLimiterAlgorithm)(using Ox): - - val _ = - fork: - update() +import scala.annotation.tailrec - private def update(): Unit = - val waitTime = algorithm.getNextUpdate - val millis = waitTime / 1000000 - val nanos = waitTime % 1000000 - Thread.sleep(millis, nanos.toInt) - algorithm.update - update() - end update - - /** Blocks the operation until the rate limiter allows it. - */ +/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. */ +case class RateLimiter(algorithm: RateLimiterAlgorithm): + /** Runs the operation, blocking if the rate limit is reached, until new permits are available. */ def runBlocking[T](operation: => T): T = algorithm.acquire operation - /** Drops the operation if not allowed by the rate limiter returning `None`. + /** Runs or drops the operation, if the rate limit is reached. + * + * @return + * `Some` if the operation has been allowed to run, `None` if the operation has been dropped. */ def runOrDrop[T](operation: => T): Option[T] = if algorithm.tryAcquire then Some(operation) @@ -34,8 +24,24 @@ case class RateLimiter(algorithm: RateLimiterAlgorithm)(using Ox): end RateLimiter object RateLimiter: + def create(algorithm: RateLimiterAlgorithm)(using Ox): RateLimiter = + @tailrec + def update(): Unit = + val waitTime = algorithm.getNextUpdate + val millis = waitTime / 1000000 + val nanos = waitTime % 1000000 + Thread.sleep(millis, nanos.toInt) + algorithm.update + update() + end update + + forkDiscard(update()) + RateLimiter(algorithm) + end create - /** Rate limiter with fixed rate algorithm with possibility to drop or block an operation if not allowed to run + /** Rate limiter with fixed rate algorithm with possibility to drop or block an operation if not allowed to run. + * + * Must be run within an [[Ox]] concurrency scope, as a background thread is created, to replenish the rate limiter. * * @param maxRequests * Maximum number of requests per consecutive window @@ -43,10 +49,11 @@ object RateLimiter: * Interval of time to pass before reset of the rate limiter */ def fixedRate(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter = - RateLimiter(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) - end fixedRate + create(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) - /** Rate limiter with sliding window algorithm with possibility to drop or block an operation if not allowed to run + /** Rate limiter with sliding window algorithm with possibility to drop or block an operation if not allowed to run. + * + * Must be run within an [[Ox]] concurrency scope, as a background thread is created, to replenish the rate limiter. 
* * @param maxRequests * Maximum number of requests in any window of time @@ -54,10 +61,11 @@ object RateLimiter: * Size of the window */ def slidingWindow(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter = - RateLimiter(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) - end slidingWindow + create(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize)) - /** Rate limiter with token/leaky bucket algorithm with possibility to drop or block an operation if not allowed to run + /** Rate limiter with token/leaky bucket algorithm with possibility to drop or block an operation if not allowed to run. + * + * Must be run within an [[Ox]] concurrency scope, as a background thread is created, to replenish the rate limiter. * * @param maxTokens * Max capacity of tokens in the algorithm @@ -65,6 +73,5 @@ object RateLimiter: * Interval of time after which a token is added */ def bucket(maxTokens: Int, refillInterval: FiniteDuration)(using Ox): RateLimiter = - RateLimiter(RateLimiterAlgorithm.Bucket(maxTokens, refillInterval)) - end bucket + create(RateLimiterAlgorithm.Bucket(maxTokens, refillInterval)) end RateLimiter From 7bccef570c9f07c70978954cacd92022ed025f5c Mon Sep 17 00:00:00 2001 From: adamw Date: Fri, 15 Nov 2024 09:59:35 +0100 Subject: [PATCH 19/23] Fix & toughen tests --- .../src/main/scala/ox/resilience/RateLimiter.scala | 14 +++++++------- .../test/scala/ox/resilience/RateLimiterTest.scala | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala index de383119..84981195 100644 --- a/core/src/main/scala/ox/resilience/RateLimiter.scala +++ b/core/src/main/scala/ox/resilience/RateLimiter.scala @@ -6,7 +6,7 @@ import ox.* import scala.annotation.tailrec /** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. */ -case class RateLimiter(algorithm: RateLimiterAlgorithm): +class RateLimiter private (algorithm: RateLimiterAlgorithm): /** Runs the operation, blocking if the rate limit is reached, until new permits are available. */ def runBlocking[T](operation: => T): T = algorithm.acquire @@ -24,7 +24,7 @@ case class RateLimiter(algorithm: RateLimiterAlgorithm): end RateLimiter object RateLimiter: - def create(algorithm: RateLimiterAlgorithm)(using Ox): RateLimiter = + def apply(algorithm: RateLimiterAlgorithm)(using Ox): RateLimiter = @tailrec def update(): Unit = val waitTime = algorithm.getNextUpdate @@ -36,8 +36,8 @@ object RateLimiter: end update forkDiscard(update()) - RateLimiter(algorithm) - end create + new RateLimiter(algorithm) + end apply /** Rate limiter with fixed rate algorithm with possibility to drop or block an operation if not allowed to run. * @@ -49,7 +49,7 @@ object RateLimiter: * Interval of time to pass before reset of the rate limiter */ def fixedRate(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter = - create(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) + apply(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize)) /** Rate limiter with sliding window algorithm with possibility to drop or block an operation if not allowed to run. 
    *
@@ -61,7 +61,7 @@ object RateLimiter:
    *   Size of the window
    */
  def slidingWindow(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter =
-    create(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize))
+    apply(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize))

  /** Rate limiter with token/leaky bucket algorithm with possibility to drop or block an operation if not allowed to run.
    *
@@ -73,5 +73,5 @@ object RateLimiter:
    *   Interval of time after which a token is added
    */
  def bucket(maxTokens: Int, refillInterval: FiniteDuration)(using Ox): RateLimiter =
-    create(RateLimiterAlgorithm.Bucket(maxTokens, refillInterval))
+    apply(RateLimiterAlgorithm.Bucket(maxTokens, refillInterval))
 end RateLimiter
diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
index ee8c4ce6..86378102 100644
--- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala
+++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
@@ -47,6 +47,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
       val result2 = rateLimiter.runOrDrop(operation)
       val result3 = rateLimiter.runOrDrop(operation)
       ox.sleep(1.second)
+      ox.sleep(100.milliseconds) // make sure the rate limiter is replenished
       val result4 = rateLimiter.runOrDrop(operation)

       result1 shouldBe Some(0)

From cd25d431ef00a71b3bce0c5bf96818cef5b0be79 Mon Sep 17 00:00:00 2001
From: adamw
Date: Fri, 15 Nov 2024 10:03:58 +0100
Subject: [PATCH 20/23] Fix docs

---
 .../scala/ox/resilience/RateLimiter.scala     |  2 +-
 .../ox/resilience/RateLimiterAlgorithm.scala  | 19 ++++++-------
 .../scala/ox/resilience/RateLimiterTest.scala | 10 +++----
 doc/utils/rate-limiter.md                     | 28 +++++++++++--------
 4 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala
index 84981195..e18f09a6 100644
--- a/core/src/main/scala/ox/resilience/RateLimiter.scala
+++ b/core/src/main/scala/ox/resilience/RateLimiter.scala
@@ -49,7 +49,7 @@ object RateLimiter:
    *   Interval of time to pass before reset of the rate limiter
    */
  def fixedRate(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter =
-    apply(RateLimiterAlgorithm.FixedRate(maxRequests, windowSize))
+    apply(RateLimiterAlgorithm.FixedWindow(maxRequests, windowSize))

  /** Rate limiter with sliding window algorithm with possibility to drop or block an operation if not allowed to run.
    *
diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
index 9dcf305b..a70b2e7a 100644
--- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
+++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
@@ -6,17 +6,14 @@ import java.util.concurrent.atomic.AtomicLong
 import java.util.concurrent.atomic.AtomicReference
 import java.util.concurrent.Semaphore

-/** Determines the algorithm to use for the rate limiter
- */
+/** Determines the algorithm to use for the rate limiter */
 trait RateLimiterAlgorithm:

-  /** Acquires a permit to execute the operation. This method should block until a permit is available.
-    */
+  /** Acquires a permit to execute the operation. This method should block until a permit is available. */
   final def acquire: Unit =
     acquire(1)

-  /** Acquires permits to execute the operation. This method should block until a permit is available.
-    */
+  /** Acquires permits to execute the operation. This method should block until a permit is available. */
   def acquire(permits: Int): Unit

   /** Tries to acquire a permit to execute the operation. This method should not block. */
@@ -35,8 +32,8 @@ trait RateLimiterAlgorithm:
 end RateLimiterAlgorithm

 object RateLimiterAlgorithm:
-  /** Fixed rate algorithm It allows starting at most `rate` operations in consecutively segments of duration `per`. */
-  case class FixedRate(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm:
+  /** Fixed window algorithm: allows starting at most `rate` operations in consecutive segments of duration `per`. */
+  case class FixedWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm:
     private val lastUpdate = new AtomicLong(System.nanoTime())
     private val semaphore = new Semaphore(rate)
@@ -56,11 +53,11 @@ object RateLimiterAlgorithm:
       semaphore.release(rate - semaphore.availablePermits())
     end update
-  end FixedRate
+  end FixedWindow

-  /** Sliding window algorithm It allows to start at most `rate` operations in the lapse of `per` before current time. */
+  /** Sliding window algorithm: allows starting at most `rate` operations in the span of `per` before the current time. */
   case class SlidingWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm:
-    // stores the timestamp and the number of permits acquired after calling acquire or tryAcquire succesfully
+    // stores the timestamp and the number of permits acquired after calling acquire or tryAcquire successfully
     private val log = new AtomicReference[Queue[(Long, Int)]](Queue[(Long, Int)]())
     private val semaphore = new Semaphore(rate)
diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
index 86378102..ef8de9d0 100644
--- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala
+++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
@@ -14,7 +14,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "drop operation when rate limit is exceeded" in {
     supervised:
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.FixedWindow(2, FiniteDuration(1, "second"))
       )

       var executions = 0
@@ -35,7 +35,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "restart rate limiter after given duration" in {
     supervised:
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.FixedWindow(2, FiniteDuration(1, "second"))
       )

       var executions = 0
@@ -60,7 +60,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "block operation when rate limit is exceeded" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.FixedWindow(2, FiniteDuration(1, "second"))
       )

       var executions = 0
@@ -85,7 +85,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "respect time constraints when blocking" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.FixedWindow(2, FiniteDuration(1, "second"))
       )

       var order = List.empty[Int]
@@ -134,7 +134,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "respect time constraints when blocking concurrently" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.FixedRate(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.FixedWindow(2, FiniteDuration(1, "second"))
       )

       val order = new AtomicReference(List.empty[Int])
diff --git a/doc/utils/rate-limiter.md b/doc/utils/rate-limiter.md
index 727b4216..815c965d 100644
--- a/doc/utils/rate-limiter.md
+++ b/doc/utils/rate-limiter.md
@@ -5,17 +5,21 @@ The rate limiter mechanism allows controlling the rate at which operations are e
 The basic syntax for rate limiters is:

-```scala
+```scala mdoc:compile-only
+import ox.supervised
 import ox.resilience.*
+import scala.concurrent.duration.*

-val algorithm = RateLimiterAlgorithm.FixedRate(2, FiniteDurationt(1, "seconds"))
-val rateLimiter = RateLimiter(algorithm)
+val algorithm = RateLimiterAlgorithm.FixedWindow(2, 1.second)

-type T
-def operation: T = ???
+supervised:
+  val rateLimiter = RateLimiter(algorithm)

-val blockedOperation: T = rateLimiter.runBlocking(operation)
-val droppedOperation: Some[T] = rateLimiter.runOrDrop(operation)
+  type T
+  def operation: T = ???
+
+  val blockedOperation: T = rateLimiter.runBlocking(operation)
+  val droppedOperation: Option[T] = rateLimiter.runOrDrop(operation)
 ```

 `blockedOperation` will block the operation until the algorithm allows it to be executed. Therefore, the return type is the same as the operation. On the other hand, if the algorithm doesn't allow execution of more operations, `runOrDrop` will drop the operation returning `None` and wrapping the result in `Some` when the operation is successfully executed.
@@ -27,7 +31,7 @@ The `operation` can be provided directly using a by-name parameter, i.e. `f: =>
 ## Configuration

 The configuration of a `RateLimiter` depends on an underlying algorithm that controls whether an operation can be executed or not. The following algorithms are available:
-- `RateLimiterAlgorithm.FixedRate(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in segments of `dur` duration after the execution of the first operation.
+- `RateLimiterAlgorithm.FixedWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in segments of `dur` duration after the execution of the first operation.
 - `RateLimiterAlgorithm.SlidingWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in the a windows of time of duration `dur`.
 - `RateLimiterAlgorithm.Bucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity of tokens availables in the token bucket algorithm and one token is added after `dur`. It can represent both the leaky bucket algorithm or the tocken bucket algorithm.
@@ -35,15 +39,15 @@ The configuration of a `RateLimiter` depends on an underlying algorithm that con
 You can use one of the following shorthands to define a Rate Limiter with the corresponding algorithm:

-- `RateLimiter.fixedRate(rate: Int, dur: FiniteDuration)`,
+- `RateLimiter.FixedWindow(rate: Int, dur: FiniteDuration)`,
 - `RateLimiter.slidingWindow(rate: Int, dur: FiniteDuration)`,
 - `RateLimiter.bucket(maximum: Int, dur: FiniteDuration)`,

 See the tests in `ox.resilience.*` for more.

 ## Custom rate limiter algorithms
-The `RateLimiterAlgorithm` employed by `RateLimiter` and `GenericRateLimiter` can be extended to implement new algorithms or modify existing ones. Its interface is modelled like that of a `Semaphore` although the underlying implementation could be different. For best compatibility with the existing interface of `RateLimiter`, methods `acquire` and `tryAcquire` should offer the same garanties as Java `Semaphores`.
+The `RateLimiterAlgorithm` employed by `RateLimiter` and `GenericRateLimiter` can be extended to implement new algorithms or modify existing ones. Its interface is modelled like that of a `Semaphore` although the underlying implementation could be different. For best compatibility with the existing interface of `RateLimiter`, methods `acquire` and `tryAcquire` should offer the same guarantees as Java `Semaphores`.

-Aditionally, there are two methods employed by the `GenericRateLimiter` for updating its internal state automatically:
+Additionally, there are two methods employed by the `GenericRateLimiter` for updating its internal state automatically:
 - `def update: Unit`: Updates the internal state of the rate limiter to reflect its current situation.
-- `def getNextUpdate: Long`: Returns the time in nanoseconds after which a new `update` needs to be called.
\ No newline at end of file
+- `def getNextUpdate: Long`: Returns the time in nanoseconds after which a new `update` needs to be called.

From 60121e0bfc717c8ffa0da7bb9e6247dc37b585bd Mon Sep 17 00:00:00 2001
From: adamw
Date: Fri, 15 Nov 2024 10:10:04 +0100
Subject: [PATCH 21/23] Fix warnings

---
 core/src/main/scala/ox/fork.scala             |  7 +++-
 .../scala/ox/resilience/RateLimiter.scala     |  6 ++--
 .../ox/resilience/RateLimiterAlgorithm.scala  | 35 +++++++++----------
 .../scala/ox/resilience/RateLimiterTest.scala | 35 ++++++++++---------
 4 files changed, 44 insertions(+), 39 deletions(-)

diff --git a/core/src/main/scala/ox/fork.scala b/core/src/main/scala/ox/fork.scala
index cd1681f0..6fc1337f 100644
--- a/core/src/main/scala/ox/fork.scala
+++ b/core/src/main/scala/ox/fork.scala
@@ -184,11 +184,16 @@ def forkCancellable[T](f: => T)(using OxUnsupervised): CancellableFork[T] =
   end new
 end forkCancellable

-/** Same as [[fork]], but discards the resulting [[Fork]], to avoid compiler warnings. That is, the fork is run only for its side-effects,
+/** Same as [[fork]], but discards the resulting [[Fork]], to avoid compiler warnings. That is, the fork is run only for its side effects,
   * it's not possible to join it.
   */
 inline def forkDiscard[T](inline f: T)(using Ox): Unit = fork(f).discard

+/** Same as [[forkUser]], but discards the resulting [[Fork]], to avoid compiler warnings. That is, the fork is run only for its side
+  * effects, it's not possible to join it.
+  */
+inline def forkUserDiscard[T](inline f: T)(using Ox): Unit = forkUser(f).discard
+
 private trait ForkUsingResult[T](result: CompletableFuture[T]) extends Fork[T]:
   override def join(): T = unwrapExecutionException(result.get())
   override private[ox] def wasInterruptedWith(ie: InterruptedException): Boolean =
diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala
index e18f09a6..b018f13e 100644
--- a/core/src/main/scala/ox/resilience/RateLimiter.scala
+++ b/core/src/main/scala/ox/resilience/RateLimiter.scala
@@ -9,7 +9,7 @@ import scala.annotation.tailrec
 class RateLimiter private (algorithm: RateLimiterAlgorithm):
   /** Runs the operation, blocking if the rate limit is reached, until new permits are available. */
   def runBlocking[T](operation: => T): T =
-    algorithm.acquire
+    algorithm.acquire()
     operation

   /** Runs or drops the operation, if the rate limit is reached.
@@ -18,7 +18,7 @@ class RateLimiter private (algorithm: RateLimiterAlgorithm):
     *   `Some` if the operation has been allowed to run, `None` if the operation has been dropped.
     */
   def runOrDrop[T](operation: => T): Option[T] =
-    if algorithm.tryAcquire then Some(operation)
+    if algorithm.tryAcquire() then Some(operation)
     else None
 end RateLimiter
@@ -31,7 +31,7 @@ object RateLimiter:
         val millis = waitTime / 1000000
         val nanos = waitTime % 1000000
         Thread.sleep(millis, nanos.toInt)
-        algorithm.update
+        algorithm.update()
         update()
     end update
diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
index a70b2e7a..6dd06f1a 100644
--- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
+++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
@@ -5,26 +5,27 @@ import scala.collection.immutable.Queue
 import java.util.concurrent.atomic.AtomicLong
 import java.util.concurrent.atomic.AtomicReference
 import java.util.concurrent.Semaphore
+import scala.annotation.tailrec

 /** Determines the algorithm to use for the rate limiter */
 trait RateLimiterAlgorithm:

   /** Acquires a permit to execute the operation. This method should block until a permit is available. */
-  final def acquire: Unit =
+  final def acquire(): Unit =
     acquire(1)

   /** Acquires permits to execute the operation. This method should block until a permit is available. */
   def acquire(permits: Int): Unit

   /** Tries to acquire a permit to execute the operation. This method should not block. */
-  final def tryAcquire: Boolean =
+  final def tryAcquire(): Boolean =
     tryAcquire(1)

   /** Tries to acquire permits to execute the operation. This method should not block. */
   def tryAcquire(permits: Int): Boolean

   /** Updates the internal state of the rate limiter to check whether new operations can be accepted. */
-  def update: Unit
+  def update(): Unit

   /** Returns the time in nanoseconds that needs to elapse until the next update. It should not modify internal state.
     */
   def getNextUpdate: Long
@@ -47,7 +48,7 @@ object RateLimiterAlgorithm:
       val waitTime = lastUpdate.get() + per.toNanos - System.nanoTime()
       if waitTime > 0 then waitTime else 0L

-    def update: Unit =
+    def update(): Unit =
       val now = System.nanoTime()
       lastUpdate.set(now)
       semaphore.release(rate - semaphore.availablePermits())
@@ -63,24 +64,21 @@ object RateLimiterAlgorithm:
     def acquire(permits: Int): Unit =
       semaphore.acquire(permits)
-      // adds timestamp to log
-      val now = System.nanoTime()
-      log.updateAndGet { q =>
-        q.enqueue((now, permits))
-      }
-      ()
-    end acquire
+      addTimestampToLog(permits)

     def tryAcquire(permits: Int): Boolean =
       if semaphore.tryAcquire(permits) then
-        // adds timestamp to log
-        val now = System.nanoTime()
-        log.updateAndGet { q =>
-          q.enqueue((now, permits))
-        }
+        addTimestampToLog(permits)
         true
       else false

+    private def addTimestampToLog(permits: Int): Unit =
+      val now = System.nanoTime()
+      log.updateAndGet { q =>
+        q.enqueue((now, permits))
+      }
+      ()
+
     def getNextUpdate: Long =
       log.get().headOption match
         case None =>
@@ -92,7 +90,7 @@ object RateLimiterAlgorithm:
         if waitTime > 0 then waitTime else 0L
     end getNextUpdate

-    def update: Unit =
+    def update(): Unit =
       val now = System.nanoTime()
       // retrieving current queue to append it later if some elements were added concurrently
       val q = log.getAndUpdate(_ => Queue[(Long, Int)]())
@@ -106,6 +104,7 @@ object RateLimiterAlgorithm:
       )
     end update

+    @tailrec
     private def removeRecords(q: Queue[(Long, Int)], now: Long): Queue[(Long, Int)] =
       q.dequeueOption match
         case None => q
@@ -134,7 +133,7 @@ object RateLimiterAlgorithm:
       val waitTime = lastRefillTime.get() + refillInterval - System.nanoTime()
       if waitTime > 0 then waitTime else 0L

-    def update: Unit =
+    def update(): Unit =
       val now = System.nanoTime()
       lastRefillTime.set(now)
       if semaphore.availablePermits() < rate then semaphore.release()
diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
index ef8de9d0..f127e912 100644
--- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala
+++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
@@ -146,30 +146,30 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
       val before = System.currentTimeMillis() // 0 seconds
       supervised {
-        forkUser:
+        forkUserDiscard:
           operationN(1)
-        forkUser:
+        forkUserDiscard:
           sleep(50.millis)
           operationN(2)
-        forkUser:
+        forkUserDiscard:
          sleep(100.millis)
           operationN(3)
-        forkUser:
+        forkUserDiscard:
           sleep(150.millis)
           operationN(4)
-        forkUser:
+        forkUserDiscard:
           sleep(200.millis)
           operationN(5)
-        forkUser:
+        forkUserDiscard:
           sleep(250.millis)
           operationN(6)
-        forkUser:
+        forkUserDiscard:
           sleep(300.millis)
           operationN(7)
-        forkUser:
+        forkUserDiscard:
           sleep(350.millis)
           operationN(8)
-        forkUser:
+        forkUserDiscard:
           sleep(400.millis)
           operationN(9)
       }
@@ -218,6 +218,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
       val result2 = rateLimiter.runOrDrop(operation)
       val result3 = rateLimiter.runOrDrop(operation)
       ox.sleep(1.second)
+      ox.sleep(100.milliseconds) // make sure the rate limiter is replenished
       val result4 = rateLimiter.runOrDrop(operation)

       result1 shouldBe Some(0)
@@ -303,15 +304,15 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
       val before = System.currentTimeMillis() // 0 seconds
       supervised {
-        forkUser:
+        forkUserDiscard:
           operationN(1)
-        forkUser:
+        forkUserDiscard:
           sleep(300.millis)
           operationN(2)
-        forkUser:
+        forkUserDiscard:
           sleep(400.millis)
           operationN(3)
-        forkUser:
+        forkUserDiscard:
           sleep(700.millis)
           operationN(4)
       }
@@ -447,15 +448,15 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
       val before = System.currentTimeMillis()
       supervised {
-        forkUser:
+        forkUserDiscard:
           operationN(1)
-        forkUser:
+        forkUserDiscard:
           sleep(50.millis)
           operationN(2)
-        forkUser:
+        forkUserDiscard:
           sleep(100.millis)
           operationN(3)
-        forkUser:
+        forkUserDiscard:
           sleep(150.millis)
           operationN(4)
       }

From 0cae62bc53f753f9c51da30e2eb0fc1644b1fe27 Mon Sep 17 00:00:00 2001
From: adamw
Date: Fri, 15 Nov 2024 10:10:34 +0100
Subject: [PATCH 22/23] Fix warnings

---
 .../ox/resilience/RateLimiterInterfaceTest.scala | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala
index 2c435fa7..dee8dd9c 100644
--- a/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala
+++ b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala
@@ -98,22 +98,22 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu
     var result6: Int = -1

     // run two operations to block the rate limiter
-    rateLimiter.runOrDrop(operation)
-    rateLimiter.runOrDrop(operation)
+    rateLimiter.runOrDrop(operation).discard
+    rateLimiter.runOrDrop(operation).discard

     // operations with runOrDrop should be dropped while operations with runBlocking should wait
     supervised:
-      forkUser:
+      forkUserDiscard:
        result1 = rateLimiter.runOrDrop(operation)
-      forkUser:
+      forkUserDiscard:
        result2 = rateLimiter.runOrDrop(operation)
-      forkUser:
+      forkUserDiscard:
        result3 = rateLimiter.runOrDrop(operation)
-      forkUser:
+      forkUserDiscard:
        result4 = rateLimiter.runBlocking(operation)
-      forkUser:
+      forkUserDiscard:
        result5 = rateLimiter.runBlocking(operation)
-      forkUser:
+      forkUserDiscard:
        result6 = rateLimiter.runBlocking(operation)

     result1 shouldBe None

From ccfef5192475691cf6f8dd85a74598b590abe8e3 Mon Sep 17 00:00:00 2001
From: adamw
Date: Fri, 15 Nov 2024 10:25:00 +0100
Subject: [PATCH 23/23] Docs

---
 .../scala/ox/resilience/RateLimiter.scala     | 49 ++++++++++---------
 .../ox/resilience/RateLimiterAlgorithm.scala  |  4 +-
 .../resilience/RateLimiterInterfaceTest.scala |  6 +--
 .../scala/ox/resilience/RateLimiterTest.scala | 10 ++--
 doc/utils/rate-limiter.md                     | 22 +++++----
 5 files changed, 48 insertions(+), 43 deletions(-)

diff --git a/core/src/main/scala/ox/resilience/RateLimiter.scala b/core/src/main/scala/ox/resilience/RateLimiter.scala
index b018f13e..cdae50a1 100644
--- a/core/src/main/scala/ox/resilience/RateLimiter.scala
+++ b/core/src/main/scala/ox/resilience/RateLimiter.scala
@@ -5,9 +5,9 @@ import ox.*

 import scala.annotation.tailrec

-/** Rate Limiter with customizable algorithm. It allows to choose between blocking or dropping an incoming operation. */
+/** Rate limiter with a customizable algorithm. Operations can be blocked or dropped, when the rate limit is reached. */
 class RateLimiter private (algorithm: RateLimiterAlgorithm):
-  /** Runs the operation, blocking if the rate limit is reached, until new permits are available. */
+  /** Runs the operation, blocking if the rate limit is reached, until the rate limiter is replenished. */
   def runBlocking[T](operation: => T): T =
     algorithm.acquire()
     operation
@@ -39,39 +39,40 @@ object RateLimiter:
     new RateLimiter(algorithm)
   end apply

-  /** Rate limiter with fixed rate algorithm with possibility to drop or block an operation if not allowed to run.
+  /** Creates a rate limiter using a fixed window algorithm.
     *
-    * Must be run within an [[Ox]] concurrency scope, as a background thread is created, to replenish the rate limiter.
+    * Must be run within an [[Ox]] concurrency scope, as a background fork is created, to replenish the rate limiter.
     *
-    * @param maxRequests
-    *   Maximum number of requests per consecutive window
-    * @param windowSize
-    *   Interval of time to pass before reset of the rate limiter
+    * @param maxOperations
+    *   Maximum number of operations that are allowed to **start** within a time [[window]].
+    * @param window
+    *   Interval of time between replenishing the rate limiter. The rate limiter is replenished to allow up to [[maxOperations]] in the next
+    *   time window.
     */
-  def fixedRate(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter =
-    apply(RateLimiterAlgorithm.FixedWindow(maxRequests, windowSize))
+  def fixedWindow(maxOperations: Int, window: FiniteDuration)(using Ox): RateLimiter =
+    apply(RateLimiterAlgorithm.FixedWindow(maxOperations, window))

-  /** Rate limiter with sliding window algorithm with possibility to drop or block an operation if not allowed to run.
+  /** Creates a rate limiter using a sliding window algorithm.
     *
-    * Must be run within an [[Ox]] concurrency scope, as a background thread is created, to replenish the rate limiter.
+    * Must be run within an [[Ox]] concurrency scope, as a background fork is created, to replenish the rate limiter.
     *
-    * @param maxRequests
-    *   Maximum number of requests in any window of time
-    * @param windowSize
-    *   Size of the window
+    * @param maxOperations
+    *   Maximum number of operations that are allowed to **start** within any [[window]] of time.
+    * @param window
+    *   Length of the window.
     */
-  def slidingWindow(maxRequests: Int, windowSize: FiniteDuration)(using Ox): RateLimiter =
-    apply(RateLimiterAlgorithm.SlidingWindow(maxRequests, windowSize))
+  def slidingWindow(maxOperations: Int, window: FiniteDuration)(using Ox): RateLimiter =
+    apply(RateLimiterAlgorithm.SlidingWindow(maxOperations, window))

-  /** Rate limiter with token/leaky bucket algorithm with possibility to drop or block an operation if not allowed to run.
+  /** Rate limiter with token/leaky bucket algorithm.
     *
-    * Must be run within an [[Ox]] concurrency scope, as a background thread is created, to replenish the rate limiter.
+    * Must be run within an [[Ox]] concurrency scope, as a background fork is created, to replenish the rate limiter.
     *
     * @param maxTokens
-    *   Max capacity of tokens in the algorithm
+    *   Max capacity of tokens in the algorithm, limiting the operations that are allowed to **start** concurrently.
     * @param refillInterval
-    *   Interval of time after which a token is added
+    *   Interval of time between adding a single token to the bucket.
     */
-  def bucket(maxTokens: Int, refillInterval: FiniteDuration)(using Ox): RateLimiter =
-    apply(RateLimiterAlgorithm.Bucket(maxTokens, refillInterval))
+  def leakyBucket(maxTokens: Int, refillInterval: FiniteDuration)(using Ox): RateLimiter =
+    apply(RateLimiterAlgorithm.LeakyBucket(maxTokens, refillInterval))
 end RateLimiter
diff --git a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
index 6dd06f1a..bb667cf5 100644
--- a/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
+++ b/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala
@@ -118,7 +118,7 @@ object RateLimiterAlgorithm:
   end SlidingWindow

   /** Token/leaky bucket algorithm It adds a token to start an new operation each `per` with a maximum number of tokens of `rate`. */
-  case class Bucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm:
+  case class LeakyBucket(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm:
     private val refillInterval = per.toNanos
     private val lastRefillTime = new AtomicLong(System.nanoTime())
     private val semaphore = new Semaphore(1)
@@ -138,5 +138,5 @@ object RateLimiterAlgorithm:
       lastRefillTime.set(now)
       if semaphore.availablePermits() < rate then semaphore.release()
-  end Bucket
+  end LeakyBucket
 end RateLimiterAlgorithm
diff --git a/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala
index dee8dd9c..047106df 100644
--- a/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala
+++ b/core/src/test/scala/ox/resilience/RateLimiterInterfaceTest.scala
@@ -11,7 +11,7 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu
   it should "drop or block operation depending on method used for fixed rate algorithm" in {
     supervised:
-      val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second"))
+      val rateLimiter = RateLimiter.fixedWindow(2, FiniteDuration(1, "second"))

       var executions = 0
       def operation =
@@ -61,7 +61,7 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu
   it should "drop or block operation depending on method used for bucket algorithm" in {
     supervised:
-      val rateLimiter = RateLimiter.bucket(2, FiniteDuration(1, "second"))
+      val rateLimiter = RateLimiter.leakyBucket(2, FiniteDuration(1, "second"))

       var executions = 0
       def operation =
@@ -86,7 +86,7 @@ class RateLimiterInterfaceTest extends AnyFlatSpec with Matchers with EitherValu
   it should "drop or block operation concurrently" in {
     supervised:
-      val rateLimiter = RateLimiter.fixedRate(2, FiniteDuration(1, "second"))
+      val rateLimiter = RateLimiter.fixedWindow(2, FiniteDuration(1, "second"))

       def operation = 0
diff --git a/core/src/test/scala/ox/resilience/RateLimiterTest.scala b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
index f127e912..17858148 100644
--- a/core/src/test/scala/ox/resilience/RateLimiterTest.scala
+++ b/core/src/test/scala/ox/resilience/RateLimiterTest.scala
@@ -328,7 +328,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "drop operation when rate limit is exceeded" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second"))
       )

       var executions = 0
@@ -348,7 +348,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "refill token after time elapsed from last refill and not before" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second"))
       )

       var executions = 0
@@ -372,7 +372,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "block operation when rate limit is exceeded" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second"))
      )

       var executions = 0
@@ -397,7 +397,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "respect time constraints when blocking" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second"))
       )

       var order = List.empty[Int]
@@ -436,7 +436,7 @@ class RateLimiterTest extends AnyFlatSpec with Matchers with EitherValues with T
   it should "respect time constraints when blocking concurrently" in {
     supervised {
       val rateLimiter = RateLimiter(
-        RateLimiterAlgorithm.Bucket(2, FiniteDuration(1, "second"))
+        RateLimiterAlgorithm.LeakyBucket(2, FiniteDuration(1, "second"))
       )

       val order = new AtomicReference(List.empty[Int])
diff --git a/doc/utils/rate-limiter.md b/doc/utils/rate-limiter.md
index 815c965d..5885c1fb 100644
--- a/doc/utils/rate-limiter.md
+++ b/doc/utils/rate-limiter.md
@@ -1,9 +1,10 @@
 # Rate limiter
-The rate limiter mechanism allows controlling the rate at which operations are executed. It ensures that a certain number of operations are performed within a specified time frame, preventing system overload and ensuring fair resource usage. Note that the implemented limiting mechanism within `Ox` only take into account the start of execution and not the whole execution of an operation. This could be tweaked customizing the rate limiter algorithm employed or the interface of rate limiter.
+
+The rate limiter mechanism allows controlling the rate at which operations are executed. It ensures that at most a certain number of operations are run concurrently within a specified time frame, preventing system overload and ensuring fair resource usage. Note that the implemented limiting mechanism only takes into account the start of execution and not the whole execution of an operation.

 ## API

-The basic syntax for rate limiters is:
+Basic rate limiter usage:

 ```scala mdoc:compile-only
 import ox.supervised
 import ox.resilience.*
 import scala.concurrent.duration.*

 val algorithm = RateLimiterAlgorithm.FixedWindow(2, 1.second)

 supervised:
   val rateLimiter = RateLimiter(algorithm)

   type T
   def operation: T = ???

   val blockedOperation: T = rateLimiter.runBlocking(operation)
   val droppedOperation: Option[T] = rateLimiter.runOrDrop(operation)
 ```

 `blockedOperation` will block the operation until the algorithm allows it to be executed. Therefore, the return type is the same as the operation. On the other hand, if the algorithm doesn't allow execution of more operations, `runOrDrop` will drop the operation returning `None` and wrapping the result in `Some` when the operation is successfully executed.

+A rate limiter must be created within an `Ox` [concurrency scope](../structured-concurrency/fork-join.md), as a background fork is created, to replenish the rate limiter. Once the scope ends, the rate limiter stops as well.
+
 ## Operation definition

 The `operation` can be provided directly using a by-name parameter, i.e. `f: => T`.

 ## Configuration

 The configuration of a `RateLimiter` depends on an underlying algorithm that controls whether an operation can be executed or not. The following algorithms are available:
-- `RateLimiterAlgorithm.FixedWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in segments of `dur` duration after the execution of the first operation.
-- `RateLimiterAlgorithm.SlidingWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in the a windows of time of duration `dur`.
-- `RateLimiterAlgorithm.Bucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity of tokens availables in the token bucket algorithm and one token is added after `dur`. It can represent both the leaky bucket algorithm or the tocken bucket algorithm.
+- `RateLimiterAlgorithm.FixedWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in fixed windows of `dur` duration.
+- `RateLimiterAlgorithm.SlidingWindow(rate: Int, dur: FiniteDuration)` - where `rate` is the maximum number of operations to be executed in any window of time of duration `dur`.
+- `RateLimiterAlgorithm.LeakyBucket(maximum: Int, dur: FiniteDuration)` - where `maximum` is the maximum capacity of tokens available in the token bucket algorithm and one token is added each `dur`. It can represent both the leaky bucket algorithm or the token bucket algorithm.

 ### API shorthands

 You can use one of the following shorthands to define a Rate Limiter with the corresponding algorithm:

 - `RateLimiter.fixedWindow(rate: Int, dur: FiniteDuration)`,
 - `RateLimiter.slidingWindow(rate: Int, dur: FiniteDuration)`,
-- `RateLimiter.bucket(maximum: Int, dur: FiniteDuration)`,
+- `RateLimiter.leakyBucket(maximum: Int, dur: FiniteDuration)`,

 See the tests in `ox.resilience.*` for more.

 ## Custom rate limiter algorithms
+
 The `RateLimiterAlgorithm` employed by `RateLimiter` can be extended to implement new algorithms or modify existing ones. Its interface is modelled like that of a `Semaphore` although the underlying implementation could be different. For best compatibility with the existing interface of `RateLimiter`, methods `acquire` and `tryAcquire` should offer the same guarantees as Java's `Semaphores`.

 Additionally, there are two methods employed by the `GenericRateLimiter` for updating its internal state automatically:
 - `def update(): Unit`: Updates the internal state of the rate limiter to reflect its current situation. Invoked in a background fork repeatedly, when a rate limiter is created.
 - `def getNextUpdate: Long`: Returns the time in nanoseconds after which a new `update` needs to be called.
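
As an illustration of the custom-algorithm extension point described in the documentation above (an editor's sketch, not part of the patches): a custom `RateLimiterAlgorithm` only needs to implement `acquire(permits)`, `tryAcquire(permits)`, `update()` and `getNextUpdate`. The sketch below mirrors the built-in `FixedWindow` implementation from the patch series, using a `Semaphore` that the background fork replenishes once per window; the class name `SimpleWindow` is hypothetical.

```scala
import java.util.concurrent.Semaphore
import java.util.concurrent.atomic.AtomicLong
import scala.concurrent.duration.FiniteDuration
import ox.resilience.RateLimiterAlgorithm

// Hypothetical custom algorithm, modelled on the built-in FixedWindow shown in the patches above.
case class SimpleWindow(rate: Int, per: FiniteDuration) extends RateLimiterAlgorithm:
  private val lastUpdate = new AtomicLong(System.nanoTime())
  private val semaphore = new Semaphore(rate)

  // blocks until the requested number of permits is available
  def acquire(permits: Int): Unit = semaphore.acquire(permits)

  // non-blocking variant: true if the permits could be acquired
  def tryAcquire(permits: Int): Boolean = semaphore.tryAcquire(permits)

  // nanoseconds until the current window ends and update() should be called again
  def getNextUpdate: Long =
    val waitTime = lastUpdate.get() + per.toNanos - System.nanoTime()
    if waitTime > 0 then waitTime else 0L

  // called by the background fork: start a new window and restore all permits
  def update(): Unit =
    lastUpdate.set(System.nanoTime())
    semaphore.release(rate - semaphore.availablePermits())
```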
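A corresponding usage sketch (again an editor's illustration based on the API as it stands after [PATCH 23/23], with a hypothetical `rateLimiterDemo` entry point): both the `RateLimiter.fixedWindow`/`slidingWindow`/`leakyBucket` shorthands and `RateLimiter(algorithm)` require an enclosing `Ox` scope, since a background fork repeatedly calls `update()` after `getNextUpdate` nanoseconds.

```scala
import ox.supervised
import ox.resilience.RateLimiter
import scala.concurrent.duration.*

@main def rateLimiterDemo(): Unit =
  supervised:
    // shorthand introduced in the last patch; slidingWindow and leakyBucket work the same way
    val limiter = RateLimiter.fixedWindow(2, 1.second)

    val first: Int = limiter.runBlocking(1 + 1)        // waits for a permit, then runs
    val second: Option[Int] = limiter.runOrDrop(2 + 2) // Some(4): still within the window's budget
    val third: Option[Int] = limiter.runOrDrop(3 + 3)  // likely None: only 2 starts allowed per window

    println((first, second, third))
```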