From aec712c1ac36305f2a1aebd7c5f257a9ce66a855 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 7 Dec 2022 15:44:08 +0000 Subject: [PATCH 01/11] TaskSeq --- src/Equinox.Core/Infrastructure.fs | 1 + src/Equinox.DynamoStore/DynamoStore.fs | 66 ++++++++++--------- .../Equinox.DynamoStore.fsproj | 2 +- 3 files changed, 36 insertions(+), 33 deletions(-) diff --git a/src/Equinox.Core/Infrastructure.fs b/src/Equinox.Core/Infrastructure.fs index ebee3820a..b6035fda2 100755 --- a/src/Equinox.Core/Infrastructure.fs +++ b/src/Equinox.Core/Infrastructure.fs @@ -54,6 +54,7 @@ type Async with module Async = let startAsTask ct computation = Async.StartAsTask(computation, cancellationToken = ct) + let startTask computation ct = Async.StartAsTask(computation, cancellationToken = ct) module ValueTuple = diff --git a/src/Equinox.DynamoStore/DynamoStore.fs b/src/Equinox.DynamoStore/DynamoStore.fs index f0ee0c1cb..a6299ae60 100644 --- a/src/Equinox.DynamoStore/DynamoStore.fs +++ b/src/Equinox.DynamoStore/DynamoStore.fs @@ -1,5 +1,6 @@ namespace Equinox.DynamoStore.Core +open System.Collections.Generic open Equinox.Core open FsCodec open FSharp.AWS.DynamoDB @@ -441,7 +442,7 @@ type Container(tableName, createContext : (RequestMetrics -> unit) -> TableConte let pk = Batch.tableKeyForStreamTip stream let! item = context.UpdateItemAsync(pk, updateExpr, ?precondition = precondition) return item |> Batch.ofSchema, rm.Consumed } - member _.QueryBatches(stream, consistentRead, minN, maxI, backwards, batchSize) : AsyncSeq = + member _.QueryBatches(stream, consistentRead, minN, maxI, backwards, batchSize, ct) : IAsyncEnumerable = let compile = (createContext ignore).Template.PrecomputeConditionalExpr let kc = match maxI with | Some maxI -> compile <@ fun (b : Batch.Schema) -> b.p = stream && b.i < maxI @> @@ -449,20 +450,21 @@ type Container(tableName, createContext : (RequestMetrics -> unit) -> TableConte let fc = match minN with | Some minN -> compile <@ fun (b : Batch.Schema) -> b.n > minN @> |> Some | None -> None - let rec aux (i, le) = asyncSeq { + let rec aux (i, le) = taskSeq { // TOCONSIDER could avoid projecting `p` let rm = Metrics() let context = createContext rm.Add let! t, res = context.QueryPaginatedAsync(kc, ?filterCondition = fc, limit = batchSize, ?exclusiveStartKey = le, - scanIndexForward = not backwards, consistentRead = consistentRead) |> Stopwatch.Time + scanIndexForward = not backwards, consistentRead = consistentRead) + |> Async.startTask |> Stopwatch.time ct yield i, t, Array.map Batch.ofSchema res.Records, rm.Consumed match res.LastEvaluatedKey with | None -> () | le -> yield! 
aux (i + 1, le) } aux (0, None) - member internal _.QueryIAndNOrderByNAscending(stream, maxItems) : AsyncSeq = - let rec aux (index, lastEvaluated) = asyncSeq { + member internal _.QueryIAndNOrderByNAscending(stream, maxItems) : IAsyncEnumerable = + let rec aux (index, lastEvaluated) = taskSeq { let rm = Metrics() let context = createContext rm.Add let keyCond = <@ fun (b : Batch.Schema) -> b.p = stream @> @@ -679,10 +681,10 @@ module internal Tip = module internal Query = - let private mkQuery (log : ILogger) (container : Container, stream : string) consistentRead maxItems (direction : Direction, minIndex, maxIndex) = + let private mkQuery (log : ILogger) (container : Container, stream : string) consistentRead maxItems (direction : Direction, minIndex, maxIndex) ct = let minN, maxI = minIndex, maxIndex log.Debug("EqxDynamo Query {stream}; n>{minIndex} i<{maxIndex}", stream, Option.toNullable minIndex, Option.toNullable maxIndex) - container.QueryBatches(stream, consistentRead, minN, maxI, (direction = Direction.Backward), maxItems) + container.QueryBatches(stream, consistentRead, minN, maxI, (direction = Direction.Backward), maxItems, ct) // Unrolls the Batches in a response // NOTE when reading backwards, the events are emitted in reverse Index order to suit the takeWhile consumption @@ -739,24 +741,23 @@ module internal Query = let f, e = xs |> Seq.tryFindBack isOrigin' |> Option.isSome, items.ToArray() { found = f; maybeTipPos = Some pos; minIndex = i; next = pos.index + 1L; events = e } - // Yields events in ascending Index order let scan<'event> (log : ILogger) (container, stream) consistentRead maxItems maxRequests direction (tryDecode : ITimelineEvent -> 'event voption, isOrigin : 'event -> bool) (minIndex, maxIndex) - : Async option> = async { + : Task option> = task { let mutable found = false let mutable responseCount = 0 - let mergeBatches (log : ILogger) (batchesBackward : AsyncSeq) = async { + let mergeBatches (log : ILogger) (batchesBackward : IAsyncEnumerable) = async { let mutable lastResponse, maybeTipPos, ru = None, None, 0. let! 
events = batchesBackward - |> AsyncSeq.map (fun (events, maybePos, rc) -> + |> TaskSeq.collectSeq (fun (events, maybePos, rc) -> if Option.isNone maybeTipPos then maybeTipPos <- maybePos lastResponse <- Some events; ru <- ru + rc.total responseCount <- responseCount + 1 seq { for x in events -> struct (x, x |> EncodedBody.ofInternal |> tryDecode) }) - |> AsyncSeq.concatSeq - |> AsyncSeq.takeWhileInclusive (function + + |> TaskSeq.takeWhileInclusive (function | struct (x, ValueSome e) when isOrigin e -> found <- true let log = log |> Log.prop "stream" stream @@ -842,10 +843,10 @@ module internal Query = /// 2) Querying Primary for predecessors of what's obtained from 1 /// 3) Querying Archive for predecessors of what's obtained from 2 let load (log : ILogger) (minIndex, maxIndex) (tip : ScanResult<'event> option) - (primary : int64 option * int64 option -> Async option>) + (primary : int64 option * int64 option -> Task option>) // Choice1Of2 -> indicates whether it's acceptable to ignore missing events; Choice2Of2 -> Fallback store - (fallback : Choice Async option>>) - : Async = async { + (fallback : Choice Task option>>) ct + : Task = task { let minI = defaultArg minIndex 0L match tip with | Some { found = true; maybeTipPos = Some p; events = e } -> return Some p, e @@ -1061,43 +1062,43 @@ type internal StoreClient(container : Container, fallback : Container option, qu let loadTip log stream consistentRead pos = Tip.tryLoad log (container, stream) consistentRead (pos, None) // Always yields events forward, regardless of direction - member _.Read(log, stream, consistentRead, direction, (tryDecode, isOrigin), ?minIndex, ?maxIndex, ?tip) : Async = async { + member _.Read(log, stream, consistentRead, direction, (tryDecode, isOrigin), ct, ?minIndex, ?maxIndex, ?tip) : Task = task { let tip = tip |> Option.map (Query.scanTip (tryDecode, isOrigin)) let maxIndex = match maxIndex with | Some _ as mi -> mi | None when Option.isSome tip -> Some Batch.tipMagicI | None -> None - let walk log container = Query.scan log (container, stream) consistentRead query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) + let walk log container = Query.scan log (container, stream) consistentRead query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) ct let walkFallback = match fallback with | None -> Choice1Of2 query.IgnoreMissingEvents | Some f -> Choice2Of2 (walk (log |> Log.prop "fallback" true) f) let log = log |> Log.prop "stream" stream - let! pos, events = Query.load log (minIndex, maxIndex) tip (walk log container) walkFallback + let! pos, events = Query.load log (minIndex, maxIndex) tip (walk log container) walkFallback ct return Token.create_ pos, events } member _.ReadLazy(log, batching : QueryOptions, stream, direction, (tryDecode, isOrigin), ?minIndex, ?maxIndex) : AsyncSeq<'event array> = Query.walkLazy log (container, stream) batching.MaxItems batching.MaxRequests (tryDecode, isOrigin) (direction, minIndex, maxIndex) - member store.Load(log, (stream, maybePos), consistentRead, (tryDecode, isOrigin), checkUnfolds : bool) : Async = - if not checkUnfolds then store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin)) - else async { + member store.Load(log, (stream, maybePos), consistentRead, (tryDecode, isOrigin), checkUnfolds : bool, ct) : Task = + if not checkUnfolds then store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), ct) + else task { match! 
loadTip log stream consistentRead maybePos with | Tip.Res.NotFound -> return Token.empty, Array.empty | Tip.Res.NotModified -> return invalidOp "Not applicable" - | Tip.Res.Found (pos, i, xs) -> return! store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), tip = (pos, i, xs)) } + | Tip.Res.Found (pos, i, xs) -> return! store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), ct, tip = (pos, i, xs)) } member _.GetPosition(log, stream, ?pos) : Async = async { match! loadTip log stream (*consistentRead*)false pos with | Tip.Res.NotFound -> return Token.empty | Tip.Res.NotModified -> return Token.create pos.Value | Tip.Res.Found (pos, _i, _unfoldsAndEvents) -> return Token.create pos } - member store.Reload(log, (stream, maybePos : Position option), consistentRead, (tryDecode, isOrigin), ?preview): Async> = - let read tipContent = async { - let! res = store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), minIndex = Position.toIndex maybePos, tip = tipContent) + member store.Reload(log, (stream, maybePos : Position option), consistentRead, (tryDecode, isOrigin), ct, ?preview): Task> = + let read tipContent = task { + let! res = store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), ct, minIndex = Position.toIndex maybePos, tip = tipContent) return LoadFromTokenResult.Found res } match preview with | Some (pos, i, xs) -> read (pos, i, xs) - | None -> async { + | None -> task { match! loadTip log stream consistentRead maybePos with | Tip.Res.NotFound -> return LoadFromTokenResult.Found (Token.empty, Array.empty) | Tip.Res.NotModified -> return LoadFromTokenResult.Unchanged @@ -1113,11 +1114,11 @@ type internal StoreClient(container : Container, fallback : Container option, qu Prune.until log (container, stream) query.MaxItems index type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IEventCodec<'event, EncodedBody, 'context>) = - member _.Load(log, stream, requireLeader, initial, checkUnfolds, fold, isOrigin) : Task = task { - let! token, events = store.Load(log, (stream, None), requireLeader, (codec.TryDecode, isOrigin), checkUnfolds) + member _.Load(log, stream, requireLeader, initial, checkUnfolds, fold, isOrigin, ct) : Task = task { + let! token, events = store.Load(log, (stream, None), requireLeader, (codec.TryDecode, isOrigin), checkUnfolds, ct) return struct (token, fold initial events) } member _.Reload(log, stream, requireLeader, (Token.Unpack pos as streamToken), state, fold, isOrigin, ct, ?preloaded) : Task = task { - match! store.Reload(log, (stream, pos), requireLeader, (codec.TryDecode, isOrigin), ?preview = preloaded) |> Async.startAsTask ct with + match! store.Reload(log, (stream, pos), requireLeader, (codec.TryDecode, isOrigin), ct, ?preview = preloaded) with | LoadFromTokenResult.Unchanged -> return struct (streamToken, state) | LoadFromTokenResult.Found (token', events) -> return token', fold state events } member cat.Sync(log, stream, (Token.Unpack pos as streamToken), state, events, mapUnfolds, fold, isOrigin, context): Async> = async { @@ -1163,7 +1164,7 @@ module internal Caching = interface ICategory<'event, 'state, 'context> with member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) : Task = task { match! tryReadCache streamName : Task> with - | ValueNone -> return! 
(fun () -> category.Load(log, streamName, requireLeader, initial, checkUnfolds, fold, isOrigin)) |> cache streamName + | ValueNone -> return! (fun () -> category.Load(log, streamName, requireLeader, initial, checkUnfolds, fold, isOrigin, ct)) |> cache streamName | ValueSome struct (token, state) when allowStale -> return struct (token, state) // read already updated TTL, no need to write | ValueSome struct (token, state) -> return! (fun () -> category.Reload(log, streamName, requireLeader, token, state, fold, isOrigin, ct)) |> cache streamName } member _.TrySync(log : ILogger, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, state, events, ct) : Task> = task { @@ -1435,7 +1436,8 @@ type EventsContext internal match maxCount with | Some limit -> maxCountPredicate limit | None -> fun _ -> false - let! token, events = store.Read(log, stream, (*consistentRead*)false, direction, (ValueSome, isOrigin), ?minIndex = minIndex, ?maxIndex = maxIndex) + let! ct = Async.CancellationToken + let! token, events = store.Read(log, stream, (*consistentRead*)false, direction, (ValueSome, isOrigin), ct, ?minIndex = minIndex, ?maxIndex = maxIndex) |> Async.AwaitTaskCorrect if direction = Direction.Backward then System.Array.Reverse events return token, events } diff --git a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj index 469464fee..834c1445b 100644 --- a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj +++ b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj @@ -20,7 +20,7 @@ - + From bd9fc6031670db05f06fedbd9e3be395d3d7e119 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 12:51:57 +0000 Subject: [PATCH 02/11] Port the rest --- CHANGELOG.md | 1 + README.md | 12 +- samples/Infrastructure/Infrastructure.fsproj | 2 +- samples/Store/Domain/Domain.fsproj | 2 +- samples/TodoBackend/TodoBackend.fsproj | 2 +- samples/Tutorial/AsAt.fsx | 2 +- samples/Tutorial/Cosmos.fsx | 2 +- samples/Tutorial/Todo.fsx | 2 +- src/Equinox.Core/Equinox.Core.fsproj | 2 +- src/Equinox.Core/Infrastructure.fs | 5 - src/Equinox.Core/Internal.fs | 32 ++ src/Equinox.Core/StopwatchInterval.fs | 2 +- .../Equinox.CosmosStore.Prometheus.fsproj | 2 +- src/Equinox.CosmosStore/CosmosStore.fs | 314 +++++++++--------- .../Equinox.CosmosStore.fsproj | 4 +- .../Equinox.DynamoStore.Prometheus.fsproj | 2 +- src/Equinox.DynamoStore/DynamoStore.fs | 276 +++++++-------- .../Equinox.DynamoStore.fsproj | 2 +- .../Equinox.EventStore.fsproj | 4 +- src/Equinox.EventStore/EventStore.fs | 109 +++--- .../Equinox.EventStoreDb.fsproj | 4 +- src/Equinox.EventStoreDb/EventStoreDb.fs | 126 ++++--- .../Equinox.MemoryStore.fsproj | 2 +- .../Equinox.MessageDb.fsproj | 2 +- src/Equinox.MessageDb/MessageDb.fs | 9 +- .../Equinox.SqlStreamStore.MsSql.fsproj | 2 +- .../Equinox.SqlStreamStore.MySql.fsproj | 2 +- .../Equinox.SqlStreamStore.Postgres.fsproj | 2 +- .../Equinox.SqlStreamStore.fsproj | 4 +- src/Equinox.SqlStreamStore/SqlStreamStore.fs | 150 ++++----- src/Equinox/Equinox.fsproj | 2 +- .../CosmosCoreIntegration.fs | 17 +- .../CosmosFixtures.fs | 3 +- .../DocumentStoreIntegration.fs | 11 +- .../Equinox.CosmosStore.Integration.fsproj | 1 + tools/Equinox.Tool/Equinox.Tool.fsproj | 2 +- tools/Equinox.Tool/Program.fs | 2 +- .../Equinox.Tools.TestHarness.fsproj | 2 +- 38 files changed, 571 insertions(+), 551 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87d34b08a..e3a62ec34 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ The `Unreleased` section 
name is replaced by the expected version of next release
- `EventStore`: Target `EventStore.Client` v `22.0.0-preview`; rename `Connector` -> `EventStoreConnector` [#317](https://github.com/jet/equinox/pull/317)
- `Equinox.Tool`/`samples/`: switched to use `Equinox.EventStoreDb` [#196](https://github.com/jet/equinox/pull/196)
- Update all non-Client dependencies except `FSharp.Core`, `FSharp.Control.AsyncSeq` [#310](https://github.com/jet/equinox/pull/310)
+- Replace `AsyncSeq` usage with `FSharp.Control.TaskSeq` [#361](https://github.com/jet/equinox/pull/361)
- `FSharp.Core` requirement to `6.0.0` [#337](https://github.com/jet/equinox/pull/337)
- Update all Stores to use `FsCodec` v `3.0.0`, with [`EventBody` types switching from `byte[]` to `ReadOnlyMemory<byte>` and/or `JsonElement` see FsCodec#75](https://github.com/jet/FsCodec/pull/75) [#323](https://github.com/jet/equinox/pull/323)
- `CosmosStore.Core.Initialization.initAux`: Replace hard-coded manual 400 RU with `mode` parameter [#328](https://github.com/jet/equinox/pull/328) :pray: [@brihadish](https://github.com/brihadish)
diff --git a/README.md b/README.md
index 343b06f44..36980934c 100644
--- a/README.md
+++ b/README.md
@@ -165,14 +165,14 @@ The components within this repository are delivered as multi-targeted Nuget pack
- `Equinox.Core` [![NuGet](https://img.shields.io/nuget/v/Equinox.Core.svg)](https://www.nuget.org/packages/Equinox.Core/): Interfaces and helpers used in the concrete Store implementations, together with the default [`System.Runtime.Caching.Cache`-based] `Cache` implementation. Hosts generic utility types frequently useful alongside Equinox: [`AsyncCacheCell`](https://github.com/jet/equinox/blob/master/src/Equinox.Core/AsyncCacheCell.fs#L36), [`AsyncBatchingGate`](https://github.com/jet/equinox/blob/master/src/Equinox.Core/AsyncBatchingGate.fs#L41). ([depends](https://www.fuget.org/packages/Equinox.Core) on `Equinox`, `System.Runtime.Caching`, `Serilog` (but not specific Serilog sinks, i.e. you configure to emit to `NLog` etc))
- `Equinox.MemoryStore` [![MemoryStore NuGet](https://img.shields.io/nuget/v/Equinox.MemoryStore.svg)](https://www.nuget.org/packages/Equinox.MemoryStore/): In-memory store for integration testing/performance base-lining/providing out-of-the-box zero dependency storage for examples. ([depends](https://www.fuget.org/packages/Equinox.MemoryStore) on `Equinox.Core`, `FsCodec`)
- `Equinox.CosmosStore` [![CosmosStore NuGet](https://img.shields.io/nuget/v/Equinox.CosmosStore.svg)](https://www.nuget.org/packages/Equinox.CosmosStore/): Azure CosmosDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RU costs, instrumented to meet Jet's production monitoring requirements. 
([depends](https://www.fuget.org/packages/Equinox.CosmosStore) on `Equinox.Core`, `Microsoft.Azure.Cosmos` >= `3.27`, `FsCodec`, `System.Text.Json`, `FSharp.Control.TaskSeq` >= `0.3.0`)
- `Equinox.CosmosStore.Prometheus` [![CosmosStore.Prometheus NuGet](https://img.shields.io/nuget/v/Equinox.CosmosStore.Prometheus.svg)](https://www.nuget.org/packages/Equinox.CosmosStore.Prometheus/): Integration package providing a `Serilog.Core.ILogEventSink` that extracts detailed metrics information attached to the `LogEvent`s and feeds them to the `prometheus-net`'s `Prometheus.Metrics` static instance. ([depends](https://www.fuget.org/packages/Equinox.CosmosStore.Prometheus) on `Equinox.CosmosStore`, `prometheus-net >= 3.6.0`)
- `Equinox.DynamoStore` [![DynamoStore NuGet](https://img.shields.io/nuget/v/Equinox.DynamoStore.svg)](https://www.nuget.org/packages/Equinox.DynamoStore/): Amazon DynamoDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RC costs, patterned after `Equinox.CosmosStore`. ([depends](https://www.fuget.org/packages/Equinox.DynamoStore) on `Equinox.Core`, `FSharp.AWS.DynamoDB` >= `0.11.2-beta`, `FsCodec`, `FSharp.Control.TaskSeq` >= `0.3.0`)
- `Equinox.DynamoStore.Prometheus` [![DynamoStore.Prometheus NuGet](https://img.shields.io/nuget/v/Equinox.DynamoStore.Prometheus.svg)](https://www.nuget.org/packages/Equinox.DynamoStore.Prometheus/): Integration package providing a `Serilog.Core.ILogEventSink` that extracts detailed metrics information attached to the `LogEvent`s and feeds them to the `prometheus-net`'s `Prometheus.Metrics` static instance. ([depends](https://www.fuget.org/packages/Equinox.DynamoStore.Prometheus) on `Equinox.DynamoStore`, `prometheus-net >= 3.6.0`)
- `Equinox.EventStore` [![EventStore NuGet](https://img.shields.io/nuget/v/Equinox.EventStore.svg)](https://www.nuget.org/packages/Equinox.EventStore/): [EventStoreDB](https://eventstore.org/) Adapter designed to meet Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.EventStore) on `Equinox.Core`, `EventStore.Client >= 22.0.0-preview`, `FSharp.Control.TaskSeq >= 0.3.0`, EventStore Server version `21.10` or later). **DO NOT use for new projects - the TCP interface to EventStoreDB has long been deprecated, this package is only provided to ease migration scenarios and will be removed in due course**
- `Equinox.EventStoreDb` [![EventStoreDb NuGet](https://img.shields.io/nuget/v/Equinox.EventStoreDb.svg)](https://www.nuget.org/packages/Equinox.EventStoreDb/): Production-strength [EventStoreDB](https://eventstore.org/) Adapter. 
([depends](https://www.fuget.org/packages/Equinox.EventStoreDb) on `Equinox.Core`, `EventStore.Client.Grpc.Streams` >= `22.0.0`, `FSharp.Control.TaskSeq` >= `0.3.0`, EventStore Server version `21.10` or later)
- `Equinox.MessageDb` [![MessageDb NuGet](https://img.shields.io/nuget/v/Equinox.MessageDb.svg)](https://www.nuget.org/packages/Equinox.MessageDb/): [MessageDb](http://docs.eventide-project.org/user-guide/message-db/) Adapter. ([depends](https://www.fuget.org/packages/Equinox.MessageDb) on `Equinox.Core`, `Npgsql` >= `6.0.0`, `FSharp.Control.TaskSeq` >= `0.3.0`)
- `Equinox.SqlStreamStore` [![SqlStreamStore NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore/): [SqlStreamStore](https://github.com/SQLStreamStore/SQLStreamStore) Adapter derived from `Equinox.EventStore` - provides core facilities (but does not connect to a specific database; see sibling `SqlStreamStore`.* packages). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore) on `Equinox.Core`, `FsCodec`, `SqlStreamStore >= 1.2.0-beta.8`, `FSharp.Control.TaskSeq`)
- `Equinox.SqlStreamStore.MsSql` [![MsSql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MsSql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MsSql/): [SqlStreamStore.MsSql](https://sqlstreamstore.readthedocs.io/en/latest/sqlserver) Sql Server `Connector` implementation for `Equinox.SqlStreamStore` package). 
([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MsSql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MsSql >= 1.2.0-beta.8`) - `Equinox.SqlStreamStore.MySql` [![MySql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MySql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MySql/): `SqlStreamStore.MySql` MySQL `Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MySql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MySql >= 1.2.0-beta.8`) - `Equinox.SqlStreamStore.Postgres` [![Postgres NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.Postgres.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.Postgres/): [SqlStreamStore.Postgres](https://sqlstreamstore.readthedocs.io/en/latest/postgres) PostgreSQL `Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.Postgres) on `Equinox.SqlStreamStore`, `SqlStreamStore.Postgres >= 1.2.0-beta.8`) diff --git a/samples/Infrastructure/Infrastructure.fsproj b/samples/Infrastructure/Infrastructure.fsproj index 7b062df62..39b347f91 100644 --- a/samples/Infrastructure/Infrastructure.fsproj +++ b/samples/Infrastructure/Infrastructure.fsproj @@ -23,7 +23,7 @@ - + diff --git a/samples/Store/Domain/Domain.fsproj b/samples/Store/Domain/Domain.fsproj index 8753fb450..1b8cae851 100644 --- a/samples/Store/Domain/Domain.fsproj +++ b/samples/Store/Domain/Domain.fsproj @@ -15,7 +15,7 @@ - + diff --git a/samples/TodoBackend/TodoBackend.fsproj b/samples/TodoBackend/TodoBackend.fsproj index 0b2d9f7d2..cc528a7db 100644 --- a/samples/TodoBackend/TodoBackend.fsproj +++ b/samples/TodoBackend/TodoBackend.fsproj @@ -9,7 +9,7 @@ - + diff --git a/samples/Tutorial/AsAt.fsx b/samples/Tutorial/AsAt.fsx index 90447343c..73c47df11 100644 --- a/samples/Tutorial/AsAt.fsx +++ b/samples/Tutorial/AsAt.fsx @@ -29,7 +29,7 @@ #r "Equinox.dll" #r "TypeShape.dll" #r "FsCodec.SystemTextJson.dll" -//#r "FSharp.Control.AsyncSeq.dll" +//#r "FSharp.Control.TaskSeq.dll" //#r "System.Net.Http" //#r "EventStore.Client.dll" //#r "EventStore.Client.Streams.dll" diff --git a/samples/Tutorial/Cosmos.fsx b/samples/Tutorial/Cosmos.fsx index 056eb6a76..e2f17cde4 100644 --- a/samples/Tutorial/Cosmos.fsx +++ b/samples/Tutorial/Cosmos.fsx @@ -14,7 +14,7 @@ #r "FSharp.UMX.dll" #r "FsCodec.dll" #r "FsCodec.SystemTextJson.dll" -#r "FSharp.Control.AsyncSeq.dll" +#r "FSharp.Control.TaskSeq.dll" #r "Microsoft.Azure.Cosmos.Client.dll" #r "System.Net.Http" #r "Serilog.Sinks.Seq.dll" diff --git a/samples/Tutorial/Todo.fsx b/samples/Tutorial/Todo.fsx index 91b133dac..2d2c2d88b 100644 --- a/samples/Tutorial/Todo.fsx +++ b/samples/Tutorial/Todo.fsx @@ -14,7 +14,7 @@ #r "FSharp.UMX.dll" #r "FsCodec.dll" #r "FsCodec.SystemTextJson.dll" -#r "FSharp.Control.AsyncSeq.dll" +#r "FSharp.Control.TaskSeq.dll" #r "Microsoft.Azure.Cosmos.Client.dll" #r "Equinox.CosmosStore.dll" #else diff --git a/src/Equinox.Core/Equinox.Core.fsproj b/src/Equinox.Core/Equinox.Core.fsproj index a041ce83b..68d4e075c 100644 --- a/src/Equinox.Core/Equinox.Core.fsproj +++ b/src/Equinox.Core/Equinox.Core.fsproj @@ -22,7 +22,7 @@ - + contentFiles diff --git a/src/Equinox.Core/Infrastructure.fs b/src/Equinox.Core/Infrastructure.fs index b6035fda2..a05cdb073 100755 --- a/src/Equinox.Core/Infrastructure.fs +++ b/src/Equinox.Core/Infrastructure.fs @@ -51,11 +51,6 @@ type Async with sc ()) |> ignore) -module Async = - - let startAsTask ct computation = 
Async.StartAsTask(computation, cancellationToken = ct) - let startTask computation ct = Async.StartAsTask(computation, cancellationToken = ct) - module ValueTuple = let inline fst struct (f, _s) = f diff --git a/src/Equinox.Core/Internal.fs b/src/Equinox.Core/Internal.fs index 38e82f155..40e08897e 100644 --- a/src/Equinox.Core/Internal.fs +++ b/src/Equinox.Core/Internal.fs @@ -11,3 +11,35 @@ module Log = let [] (|ScalarValue|_|) : Serilog.Events.LogEventPropertyValue -> obj voption = function | :? Serilog.Events.ScalarValue as x -> ValueSome x.Value | _ -> ValueNone + +module TaskSeq = + + open FSharp.Control + + let takeWhile predicate (source: taskSeq<_>) = taskSeq { + use e = source.GetAsyncEnumerator(System.Threading.CancellationToken()) + let! step = e.MoveNextAsync() + let mutable go = step + while go do + let value = e.Current + if predicate value then + yield value + let! more = e.MoveNextAsync() + go <- more + else + go <- false + } + + let takeWhileInclusive predicate (source: taskSeq<_>) = taskSeq { + use e = source.GetAsyncEnumerator(System.Threading.CancellationToken()) + let! step = e.MoveNextAsync() + let mutable go = step + while go do + let value = e.Current + yield value + if predicate value then + let! more = e.MoveNextAsync() + go <- more + else + go <- false + } diff --git a/src/Equinox.Core/StopwatchInterval.fs b/src/Equinox.Core/StopwatchInterval.fs index af52765ab..1c8921f79 100755 --- a/src/Equinox.Core/StopwatchInterval.fs +++ b/src/Equinox.Core/StopwatchInterval.fs @@ -34,7 +34,7 @@ and [] StopwatchInterval(startTicks : int64, e module Stopwatch = [] - let time (ct : CancellationToken) (f : CancellationToken -> Task<'T>) : Task = task { + let time (ct : 'a) (f : 'a -> Task<'T>) : Task = task { let startTicks = Stopwatch.GetTimestamp() let! 
result = f ct let endTicks = Stopwatch.GetTimestamp() diff --git a/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj b/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj index 18ab60fe9..427664f48 100644 --- a/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj +++ b/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj @@ -15,7 +15,7 @@ - + diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 3c8395797..9305b25e5 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -1,5 +1,6 @@ namespace Equinox.CosmosStore.Core +open System.Collections.Generic open Equinox.Core open FsCodec open FSharp.Control @@ -168,7 +169,7 @@ type internal Enum() = // where Index is equal, unfolds get delivered after the events so the fold semantics can be 'idempotent' |> Seq.sortBy (fun x -> x.Index, x.IsUnfold) -type IRetryPolicy = abstract member Execute : (int -> Async<'T>) -> Async<'T> +type IRetryPolicy = abstract member Execute : (int -> CancellationToken -> Task<'T>) -> Task<'T> module Log = @@ -227,9 +228,9 @@ module Log = let internal propStartPos (value : Position) log = prop "startPos" value.index log let internal propStartEtag (value : Position) log = prop "startEtag" value.etag log - let internal withLoggedRetries<'t> (retryPolicy : IRetryPolicy option) (contextLabel : string) (f : ILogger -> Async<'t>) log : Async<'t> = + let withLoggedRetries<'t> (retryPolicy : IRetryPolicy option) (contextLabel : string) (f : ILogger -> CancellationToken -> Task<'t>) log ct : Task<'t> = match retryPolicy with - | None -> f log + | None -> f log ct | Some retryPolicy -> let withLoggingContextWrapping count = let log = if count = 1 then log else log |> prop contextLabel count @@ -350,9 +351,8 @@ module private MicrosoftAzureCosmosWrappers = member private container.DeserializeResponseBody<'T>(rm : ResponseMessage) : 'T = rm.EnsureSuccessStatusCode().Content |> container.Database.Client.ClientOptions.Serializer.FromStream<'T> - member container.TryReadItem(partitionKey : PartitionKey, id : string, ?options : ItemRequestOptions) : Async> = async { - let! ct = Async.CancellationToken - use! rm = container.ReadItemStreamAsync(id, partitionKey, requestOptions = Option.toObj options, cancellationToken = ct) |> Async.AwaitTaskCorrect + member container.TryReadItem(partitionKey : PartitionKey, id : string, ct, ?options : ItemRequestOptions): Task> = task { + use! rm = container.ReadItemStreamAsync(id, partitionKey, requestOptions = Option.toObj options, cancellationToken = ct) return rm.Headers.RequestCharge, rm.StatusCode |> function | System.Net.HttpStatusCode.NotFound -> NotFound | System.Net.HttpStatusCode.NotModified -> NotModified @@ -455,17 +455,16 @@ module internal Sync = | Conflict of Position * events : ITimelineEvent[] | ConflictUnknown of Position - let private run (container : Container, stream : string) (maxEventsInTip, maxStringifyLen) (exp, req : Tip) - : Async = async { + let private run (container : Container, stream : string) (maxEventsInTip, maxStringifyLen) (exp, req : Tip, ct) + : Task = task { let ep = match exp with | SyncExp.Version ev -> Position.fromI ev | SyncExp.Etag et -> Position.fromEtag et | SyncExp.Any -> Position.fromAppendAtEnd - let! ct = Async.CancellationToken let args = [| box req; box ep.index; box (Option.toObj ep.etag); box maxEventsInTip; box maxStringifyLen |] let! 
(res : Scripts.StoredProcedureExecuteResponse) = - container.Scripts.ExecuteStoredProcedureAsync(SyncStoredProc.name, PartitionKey stream, args, cancellationToken = ct) |> Async.AwaitTaskCorrect + container.Scripts.ExecuteStoredProcedureAsync(SyncStoredProc.name, PartitionKey stream, args, cancellationToken = ct) let newPos = { index = res.Resource.n; etag = Option.ofObj res.Resource.etag } match res.Resource.conflicts with | null -> return res.RequestCharge, Result.Written newPos @@ -476,9 +475,8 @@ module internal Sync = let events = (Enum.Events(ep.index, res.Resource.e), Enum.Unfolds unfolds) ||> Seq.append |> Array.ofSeq return res.RequestCharge, Result.Conflict (newPos, events) } - let private logged (container, stream) (maxEventsInTip, maxStringifyLen) (exp : SyncExp, req : Tip) (log : ILogger) - : Async = async { - let! t, (ru, result) = run (container, stream) (maxEventsInTip, maxStringifyLen) (exp, req) |> Stopwatch.Time + let private logged (container, stream) (maxEventsInTip, maxStringifyLen) (exp : SyncExp, req : Tip) (log : ILogger) ct : Task = task { + let! t, (ru, result) = (fun ct -> run (container, stream) (maxEventsInTip, maxStringifyLen) (exp, req, ct)) |> Stopwatch.time ct let verbose = log.IsEnabled Serilog.Events.LogEventLevel.Debug let count, bytes = req.e.Length, if verbose then Enum.Events req |> Log.batchLen else 0 let log = @@ -502,9 +500,9 @@ module internal Sync = "Sync", stream, count, req.u.Length, t.ElapsedMilliseconds, ru, bytes, exp) return result } - let batch (log : ILogger) (retryPolicy, maxEventsInTip, maxStringifyLen) containerStream expBatch : Async = + let batch (log : ILogger) (retryPolicy, maxEventsInTip, maxStringifyLen) containerStream expBatch ct : Task = let call = logged containerStream (maxEventsInTip, maxStringifyLen) expBatch - Log.withLoggedRetries retryPolicy "writeAttempt" call log + Log.withLoggedRetries retryPolicy "writeAttempt" call log ct let private mkEvent (e : IEventData<_>) = { t = e.Timestamp @@ -550,8 +548,7 @@ module Initialization = let! _ = c.ReplaceThroughputAsync(throughput, cancellationToken = ct) |> Async.AwaitTaskCorrect return c } - let private createStoredProcIfNotExists (c : Container) (name, body) : Async = async { - let! ct = Async.CancellationToken + let private createStoredProcIfNotExists (c : Container) (name, body) ct : Task = task { try let! r = c.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(id = name, body = body), cancellationToken = ct) |> Async.AwaitTaskCorrect return r.RequestCharge with :? CosmosException as ce when ce.StatusCode = System.Net.HttpStatusCode.Conflict -> return ce.RequestCharge } @@ -563,16 +560,16 @@ module Initialization = cp.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors for k in Batch.IndexedFields do cp.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) - let createSyncStoredProcIfNotExists (log : ILogger option) container = async { - let! t, ru = createStoredProcIfNotExists container (SyncStoredProc.name, SyncStoredProc.body) |> Stopwatch.Time + let createSyncStoredProcIfNotExists (log : ILogger option) container ct = task { + let! 
t, ru = createStoredProcIfNotExists container (SyncStoredProc.name, SyncStoredProc.body) |> Stopwatch.time ct match log with | None -> () | Some log -> log.Information("Created stored procedure {procName} in {ms:f1}ms {ru}RU", SyncStoredProc.name, t.ElapsedMilliseconds, ru) } - let init log (client : CosmosClient) (dName, cName) mode skipStoredProc = async { + let init log (client : CosmosClient) (dName, cName) mode skipStoredProc ct = task { let! d = createOrProvisionDatabase client dName mode let! c = createOrProvisionContainer d (cName, sprintf "/%s" Batch.PartitionKeyField, applyBatchAndTipContainerProperties) mode // as per Cosmos team, Partition Key must be "/id" if not skipStoredProc then - do! createSyncStoredProcIfNotExists (Some log) c } + do! createSyncStoredProcIfNotExists (Some log) c ct } let private applyAuxContainerProperties (cp : ContainerProperties) = // TL;DR no indexing of any kind; see https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet/issues/142 @@ -593,12 +590,12 @@ module Initialization = module internal Tip = - let private get (container : Container, stream : string) (maybePos : Position option) = + let private get (container : Container, stream : string) (maybePos : Position option) ct = let ro = match maybePos with Some { etag = Some etag } -> ItemRequestOptions(IfNoneMatchEtag = etag) |> Some | _ -> None - container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId, ?options = ro) - let private loggedGet (get : Container * string -> Position option -> Async<_>) (container, stream) (maybePos : Position option) (log : ILogger) = async { + container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId, ct, ?options = ro) + let private loggedGet (get : Container * string -> Position option -> CancellationToken -> Task<_>) (container, stream) (maybePos : Position option) (log : ILogger) ct = task { let log = log |> Log.prop "stream" stream - let! t, (ru, res : ReadResult) = get (container, stream) maybePos |> Stopwatch.Time + let! t, (ru, res : ReadResult) = get (container, stream) maybePos |> Stopwatch.time ct let verbose = log.IsEnabled Events.LogEventLevel.Debug let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { database = container.Database.Id; container = container.Id; stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) match res with @@ -617,8 +614,8 @@ module internal Tip = return ru, res } type [] Result = NotModified | NotFound | Found of Position * i : int64 * ITimelineEvent[] /// `pos` being Some implies that the caller holds a cached value and hence is ready to deal with Result.NotModified - let tryLoad (log : ILogger) retryPolicy containerStream (maybePos : Position option, maxIndex) : Async = async { - let! _rc, res = Log.withLoggedRetries retryPolicy "readAttempt" (loggedGet get containerStream maybePos) log + let tryLoad (log : ILogger) retryPolicy containerStream (maybePos : Position option, maxIndex) ct : Task = task { + let! _rc, res = Log.withLoggedRetries retryPolicy "readAttempt" (loggedGet get containerStream maybePos) log ct match res with | ReadResult.NotModified -> return Result.NotModified | ReadResult.NotFound -> return Result.NotFound @@ -628,16 +625,13 @@ module internal Tip = module internal Query = - let feedIteratorMapTi (map : int -> StopwatchInterval -> FeedResponse<'t> -> 'u) (query : FeedIterator<'t>) : AsyncSeq<'u> = - let rec loop i : AsyncSeq<'u> = asyncSeq { - let! ct = Async.CancellationToken - let! 
t, (res : FeedResponse<'t>) = query.ReadNextAsync(ct) |> Async.AwaitTaskCorrect |> Stopwatch.Time - yield map i t res - if query.HasMoreResults then - yield! loop (i + 1) } + let feedIteratorMapTi (map : int -> StopwatchInterval -> FeedResponse<'t> -> 'u) (query : FeedIterator<'t>) ct : IAsyncEnumerable<'u> = taskSeq { // earlier versions, such as 3.9.0, do not implement IDisposable; see linked issue for detail on when SDK team added it use _ = query // see https://github.com/jet/equinox/issues/225 - in the Cosmos V4 SDK, all this is managed IAsyncEnumerable - if query.HasMoreResults then loop 0 else AsyncSeq.empty + let mutable i = 0 + while query.HasMoreResults do + let! t, (res : FeedResponse<'t>) = Stopwatch.time ct (fun ct -> query.ReadNextAsync ct) + yield map i t res } let private mkQuery (log : ILogger) (container : Container, stream : string) includeTip (maxItems : int) (direction : Direction, minIndex, maxIndex) : FeedIterator = let order = if direction = Direction.Forward then "ASC" else "DESC" let query = @@ -720,21 +714,20 @@ module internal Query = // Yields events in ascending Index order let scan<'event> (log : ILogger) (container, stream) includeTip (maxItems : int) maxRequests direction (tryDecode : ITimelineEvent -> 'event voption, isOrigin : 'event -> bool) - (minIndex, maxIndex) - : Async option> = async { + (minIndex, maxIndex, ct) + : Task option> = task { let mutable found = false let mutable responseCount = 0 - let mergeBatches (log : ILogger) (batchesBackward : AsyncSeq[] * Position option * float>) = async { + let mergeBatches (log : ILogger) (batchesBackward : IAsyncEnumerable[] * Position option * float>) = task { let mutable lastResponse, maybeTipPos, ru = None, None, 0. let! events = batchesBackward - |> AsyncSeq.map (fun (events, maybePos, r) -> + |> TaskSeq.collectSeq (fun (events, maybePos, r) -> if maybeTipPos = None then maybeTipPos <- maybePos lastResponse <- Some events; ru <- ru + r responseCount <- responseCount + 1 seq { for x in events -> struct (x, tryDecode x) }) - |> AsyncSeq.concatSeq - |> AsyncSeq.takeWhileInclusive (function + |> TaskSeq.takeWhileInclusive (function | struct (x, ValueSome e) when isOrigin e -> found <- true match lastResponse with @@ -745,16 +738,16 @@ module internal Query = stream, x.Index, x.EventType, used, residual) false | _ -> true) - |> AsyncSeq.toArrayAsync + |> TaskSeq.toArrayAsync return events, maybeTipPos, ru } let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readLog = log |> Log.prop "direction" direction - let batches : AsyncSeq[] * Position option * float> = - mkQuery readLog (container, stream) includeTip maxItems (direction, minIndex, maxIndex) - |> feedIteratorMapTi (mapPage direction (container, stream) (minIndex, maxIndex) maxRequests readLog) - let! t, (events, maybeTipPos, ru) = mergeBatches log batches |> Stopwatch.Time + let batches ct : IAsyncEnumerable[] * Position option * float> = + let query = mkQuery readLog (container, stream) includeTip maxItems (direction, minIndex, maxIndex) + feedIteratorMapTi (mapPage direction (container, stream) (minIndex, maxIndex) maxRequests readLog) query ct + let! 
t, (events, maybeTipPos, ru) = (fun ct -> batches ct |> mergeBatches log) |> Stopwatch.time ct let raws = Array.map ValueTuple.fst events - let decoded = if direction = Direction.Forward then Array.chooseV ValueTuple.snd events else Seq.chooseV ValueTuple.snd events |> Seq.rev |> Array.ofSeq + let decoded = if direction = Direction.Forward then Array.chooseV ValueTuple.snd events else let xs = Array.chooseV ValueTuple.snd events in Array.Reverse xs; xs let minMax = (None, raws) ||> Array.fold (fun acc x -> let i = x.Index in Some (match acc with None -> i, i | Some (n, x) -> min n i, max x i)) let version = match maybeTipPos, minMax with @@ -763,34 +756,35 @@ module internal Query = | None, None -> 0L log |> logQuery direction (container, stream) t (responseCount, raws) version ru match minMax, maybeTipPos with - | Some (i, m), _ -> return Some { found = found; minIndex = i; next = m + 1L; maybeTipPos = maybeTipPos; events = decoded } + | Some (i, m), _ -> return Some ({ found = found; minIndex = i; next = m + 1L; maybeTipPos = maybeTipPos; events = decoded } : ScanResult<_>) | None, Some { index = tipI } -> return Some { found = found; minIndex = tipI; next = tipI; maybeTipPos = maybeTipPos; events = [||] } | None, _ -> return None } let walkLazy<'event> (log : ILogger) (container, stream) maxItems maxRequests (tryDecode : ITimelineEvent -> 'event option, isOrigin : 'event -> bool) - (direction, minIndex, maxIndex) - : AsyncSeq<'event[]> = asyncSeq { + (direction, minIndex, maxIndex, ct) + : IAsyncEnumerable<'event[]> = taskSeq { let query = mkQuery log (container, stream) true maxItems (direction, minIndex, maxIndex) let readPage = mapPage direction (container, stream) (minIndex, maxIndex) maxRequests let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readLog = log |> Log.prop "direction" direction - let query = query |> feedIteratorMapTi (readPage readLog) + let query = feedIteratorMapTi (readPage readLog) query ct let startTicks = System.Diagnostics.Stopwatch.GetTimestamp() let allEvents = ResizeArray() let mutable i, ru = 0, 0. try let mutable ok = true - let e = query.GetEnumerator() + use e = query.GetAsyncEnumerator(ct) while ok do let batchLog = readLog |> Log.prop "batchIndex" i match maxRequests with | Some mr when i + 1 >= mr -> batchLog.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () - match! e.MoveNext() with - | None -> ok <- false // rest of block does not happen, while exits - | Some (events, _pos, rus) -> + let! 
more = e.MoveNextAsync() + if not more then ok <- false else // rest of block does not happen, while exits + + let events, _pos, rus = e.Current ru <- ru + rus allEvents.AddRange(events) @@ -818,10 +812,10 @@ module internal Query = /// 2) Querying Primary for predecessors of what's obtained from 1 /// 3) Querying Archive for predecessors of what's obtained from 2 let load (log : ILogger) (minIndex, maxIndex) (tip : ScanResult<'event> option) - (primary : int64 option * int64 option -> Async option>) + (primary : int64 option * int64 option * CancellationToken -> Task option>) // Choice1Of2 -> indicates whether it's acceptable to ignore missing events; Choice2Of2 -> Fallback store - (fallback : Choice Async option>>) - : Async = async { + (fallback : Choice Task option>>) ct + : Task = task { let minI = defaultArg minIndex 0L match tip with | Some { found = true; maybeTipPos = Some p; events = e } -> return p, e @@ -832,7 +826,7 @@ module internal Query = match tip with | Some { minIndex = i; maybeTipPos = p; events = e } -> Some i, e, p | None -> maxIndex, Array.empty, None - let! primary = primary (minIndex, i) + let! primary = primary (minIndex, i, ct) let events, pos = match primary with | None -> events, pos |> Option.defaultValue Position.fromKnownEmpty @@ -854,7 +848,7 @@ module internal Query = | _, Choice2Of2 fallback -> let maxIndex = match primary with Some p -> Some p.minIndex | None -> maxIndex // if no batches in primary, high water mark from tip is max - let! fallback = fallback (minIndex, maxIndex) + let! fallback = fallback (minIndex, maxIndex, ct) let events = match fallback with | Some s -> Array.append s.events events @@ -872,20 +866,18 @@ module Prune = type BatchIndices = { id : string; i : int64; n : int64 } - let until (log : ILogger) (container : Container, stream : string) (maxItems : int) indexInclusive : Async = async { + let until (log : ILogger) (container : Container, stream : string) (maxItems : int) indexInclusive ct : Task = task { let log = log |> Log.prop "stream" stream - let! ct = Async.CancellationToken - let deleteItem id count : Async = async { + let deleteItem id count : Task = task { let ro = ItemRequestOptions(EnableContentResponseOnWrite = false) // https://devblogs.microsoft.com/cosmosdb/enable-content-response-on-write/ - let! t, res = container.DeleteItemAsync(id, PartitionKey stream, ro, ct) |> Async.AwaitTaskCorrect |> Stopwatch.Time + let! t, res = (fun ct -> container.DeleteItemAsync(id, PartitionKey stream, ro, ct)) |> Stopwatch.time ct let rc, ms = res.RequestCharge, t.ElapsedMilliseconds let reqMetric : Log.Measurement = { database = container.Database.Id; container = container.Id; stream = stream; interval = t; bytes = -1; count = count; ru = rc } let log = let evt = Log.Metric.Delete reqMetric in log |> Log.event evt log.Information("EqxCosmos {action:l} {id} {ms:f1}ms {ru}RU", "Delete", id, ms, rc) - return rc - } - let trimTip expectedI count = async { - match! container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId) with + return rc } + let trimTip expectedI count = task { + match! 
container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId, ct) with | _, ReadResult.NotModified -> return failwith "unexpected NotModified; no etag supplied" | _, ReadResult.NotFound -> return failwith "unexpected NotFound" | _, ReadResult.Found tip when tip.i <> expectedI -> return failwithf "Concurrent write detected; Expected i=%d actual=%d" expectedI tip.i @@ -898,8 +890,7 @@ module Prune = let reqMetric : Log.Measurement = { database = container.Database.Id; container = container.Id; stream = stream; interval = t; bytes = -1; count = count; ru = rc } let log = let evt = Log.Metric.Trim reqMetric in log |> Log.event evt log.Information("EqxCosmos {action:l} {count} {ms:f1}ms {ru}RU", "Trim", count, ms, rc) - return rc - } + return rc } let log = log |> Log.prop "index" indexInclusive let query : FeedIterator = let qro = QueryRequestOptions(PartitionKey = PartitionKey stream, MaxItemCount = maxItems) @@ -915,7 +906,7 @@ module Prune = let! pt, outcomes = let isTip (x : BatchIndices) = x.id = Tip.WellKnownDocumentId let isRelevant x = x.i <= indexInclusive || isTip x - let handle (batches : BatchIndices[], rc) = async { + let handle (batches : BatchIndices[], rc) = task { let mutable delCharges, batchesDeleted, trimCharges, batchesTrimmed, eventsDeleted, eventsDeferred = 0., 0, 0., 0, 0, 0 let mutable lwm = None for x in batches |> Seq.takeWhile (fun x -> isRelevant x || lwm = None) do @@ -938,15 +929,13 @@ module Prune = eventsDeferred <- eventsDeferred + eligibleEvents if lwm = None then lwm <- Some x.i - return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) - } + return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) } let hasRelevantItems (batches, _rc) = batches |> Array.exists isRelevant - query - |> Query.feedIteratorMapTi mapPage - |> AsyncSeq.takeWhile hasRelevantItems - |> AsyncSeq.mapAsync handle - |> AsyncSeq.toArrayAsync - |> Stopwatch.Time + Query.feedIteratorMapTi mapPage query + >> TaskSeq.takeWhile hasRelevantItems + >> TaskSeq.mapAsync handle + >> TaskSeq.toArrayAsync + |> Stopwatch.time ct let mutable queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = 0., 0., 0., 0, 0, 0, 0 let mutable lwm = None for (qc, dc, tc), bLwm, (bCount, eDel, eDef) in outcomes do @@ -1030,7 +1019,7 @@ type StoreClient(container : Container, archive : Container option, query : Quer let ignoreMissing = tip.IgnoreMissingEvents // Always yields events forward, regardless of direction - member internal _.Read(log, stream, direction, (tryDecode, isOrigin), ?minIndex, ?maxIndex, ?tip) : Async = async { + member internal _.Read(log, stream, direction, (tryDecode, isOrigin), ?ct, ?minIndex, ?maxIndex, ?tip) : Task = task { let tip = tip |> Option.map (Query.scanTip (tryDecode, isOrigin)) let includeTip = Option.isNone tip let walk log container = Query.scan log (container, stream) includeTip query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) @@ -1040,55 +1029,54 @@ type StoreClient(container : Container, archive : Container option, query : Quer | Some f -> Choice2Of2 (walk (log |> Log.prop "fallback" true) f) let log = log |> Log.prop "stream" stream - let! pos, events = Query.load log (minIndex, maxIndex) tip (walk log container) walkFallback + let! 
pos, events = Query.load log (minIndex, maxIndex) tip (walk log container) walkFallback (defaultArg ct CancellationToken.None) return Token.create pos, events } - member _.ReadLazy(log, batching : QueryOptions, stream, direction, (tryDecode, isOrigin), ?minIndex, ?maxIndex) : AsyncSeq<'event[]> = - Query.walkLazy log (container, stream) batching.MaxItems batching.MaxRequests (tryDecode, isOrigin) (direction, minIndex, maxIndex) + member _.ReadLazy(log, batching : QueryOptions, stream, direction, (tryDecode, isOrigin), ?ct, ?minIndex, ?maxIndex) : IAsyncEnumerable<'event[]> = + Query.walkLazy log (container, stream) batching.MaxItems batching.MaxRequests (tryDecode, isOrigin) (direction, minIndex, maxIndex, defaultArg ct CancellationToken.None) - member store.Load(log, (stream, maybePos), (tryDecode, isOrigin), checkUnfolds : bool) : Async = - if not checkUnfolds then store.Read(log, stream, Direction.Backward, (tryDecode, isOrigin)) - else async { - match! loadTip log stream maybePos with + member store.Load(log, (stream, maybePos), (tryDecode, isOrigin), checkUnfolds : bool, ct) : Task = + if not checkUnfolds then store.Read(log, stream, Direction.Backward, (tryDecode, isOrigin), ct) + else task { + match! loadTip log stream maybePos ct with | Tip.Result.NotFound -> return Token.create Position.fromKnownEmpty, Array.empty | Tip.Result.NotModified -> return invalidOp "Not applicable" - | Tip.Result.Found (pos, i, xs) -> return! store.Read(log, stream, Direction.Backward, (tryDecode, isOrigin), tip = (pos, i, xs)) } - member _.GetPosition(log, stream, ?pos) : Async = async { - match! loadTip log stream pos with + | Tip.Result.Found (pos, i, xs) -> return! store.Read(log, stream, Direction.Backward, (tryDecode, isOrigin), ct, tip = (pos, i, xs)) } + member _.GetPosition(log, stream, ct, ?pos) : Task = task { + match! loadTip log stream pos ct with | Tip.Result.NotFound -> return Token.create Position.fromKnownEmpty | Tip.Result.NotModified -> return Token.create pos.Value | Tip.Result.Found (pos, _i, _unfoldsAndEvents) -> return Token.create pos } - member store.Reload(log, (stream, pos), (tryDecode, isOrigin), ?preview): Async> = - let read tipContent = async { - let! res = store.Read(log, stream, Direction.Backward, (tryDecode, isOrigin), minIndex = pos.index, tip = tipContent) + member store.Reload(log, (stream, pos), (tryDecode, isOrigin), ct, ?preview): Task> = + let read tipContent = task { + let! res = store.Read(log, stream, Direction.Backward, (tryDecode, isOrigin), ct, minIndex = pos.index, tip = tipContent) return LoadFromTokenResult.Found res } match preview with | Some (pos, i, xs) -> read (pos, i, xs) - | None -> async { - match! loadTip log stream (Some pos) with + | None -> task { + match! loadTip log stream (Some pos) ct with | Tip.Result.NotFound -> return LoadFromTokenResult.Found (Token.create Position.fromKnownEmpty, Array.empty) | Tip.Result.NotModified -> return LoadFromTokenResult.Unchanged | Tip.Result.Found (pos, i, xs) -> return! read (pos, i, xs) } - member internal _.Sync(log, stream, exp, batch : Tip) : Async = async { + member internal _.Sync(log, stream, exp, batch : Tip, ct) : Task = task { if Array.isEmpty batch.e && Array.isEmpty batch.u then invalidOp "Must write either events or unfolds." - match! Sync.batch log (tip.WriteRetryPolicy, tip.MaxEvents, tip.MaxJsonLength) (container, stream) (exp, batch) with + match! 
Sync.batch log (tip.WriteRetryPolicy, tip.MaxEvents, tip.MaxJsonLength) (container, stream) (exp, batch) ct with | Sync.Result.Conflict (pos', events) -> return InternalSyncResult.Conflict (pos', events) | Sync.Result.ConflictUnknown pos' -> return InternalSyncResult.ConflictUnknown (Token.create pos') | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create pos') } - member _.Prune(log, stream, index) = - Prune.until log (container, stream) query.MaxItems index + member _.Prune(log, stream, index, ct) = + Prune.until log (container, stream) query.MaxItems index ct type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IEventCodec<'event, EventBody, 'context>) = member _.Load(log, stream, initial, checkUnfolds, fold, isOrigin, ct) : Task = task { - let! token, events = store.Load(log, (stream, None), (codec.TryDecode, isOrigin), checkUnfolds) |> Async.startAsTask ct + let! token, events = store.Load(log, (stream, None), (codec.TryDecode, isOrigin), checkUnfolds, ct) return struct (token, fold initial events) } member _.Reload(log, streamName, (Token.Unpack pos as streamToken), state, fold, isOrigin, ct, ?preloaded) : Task = task { - match! store.Reload(log, (streamName, pos), (codec.TryDecode, isOrigin), ?preview = preloaded) |> Async.startAsTask ct with + match! store.Reload(log, (streamName, pos), (codec.TryDecode, isOrigin), ct, ?preview = preloaded) with | LoadFromTokenResult.Unchanged -> return struct (streamToken, state) | LoadFromTokenResult.Found (token', events) -> return token', fold state events } - - member cat.Sync(log, streamName, (Token.Unpack pos as streamToken), state, events, mapUnfolds, fold, isOrigin, context, compressUnfolds) : Async> = async { + member cat.Sync(log, streamName, (Token.Unpack pos as streamToken), state, events, mapUnfolds, fold, isOrigin, context, compressUnfolds, ct) : Task> = task { let state' = fold state (Seq.ofArray events) let encode e = codec.Encode(context, e) let exp, events, eventsEncoded, projectionsEncoded = @@ -1102,7 +1090,7 @@ type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IE let renderElement = if compressUnfolds then JsonElement.undefinedToNull >> JsonElement.deflate else JsonElement.undefinedToNull let projections = projectionsEncoded |> Seq.map (Sync.mkUnfold renderElement baseIndex) let batch = Sync.mkBatch streamName eventsEncoded projections - match! store.Sync(log, streamName, exp, batch) with + match! store.Sync(log, streamName, exp, batch, ct) with | InternalSyncResult.Conflict (pos', tipEvents) -> return SyncResult.Conflict (fun ct -> cat.Reload(log, streamName, streamToken, state, fold, isOrigin, ct, (pos', pos.index, tipEvents))) | InternalSyncResult.ConflictUnknown _token' -> return SyncResult.Conflict (fun ct -> cat.Reload(log, streamName, streamToken, state, fold, isOrigin, ct)) | InternalSyncResult.Written token' -> return SyncResult.Written (token', state') } @@ -1125,27 +1113,27 @@ module internal Caching = type CachingCategory<'event, 'state, 'context> ( category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, isOrigin : 'event -> bool, - tryReadCache, updateCache : _ -> struct (_*_) -> Task, + tryReadCache : string -> Task>, updateCache : string -> struct (StreamToken * 'state) -> Task, checkUnfolds, compressUnfolds, - mapUnfolds : Choice 'state -> 'event array), 'event array -> 'state -> 'event array * 'event array>) = - let cache streamName (inner : unit -> Task<_>) = task { - let! 
struct (token, state) = inner () - do! updateCache streamName (token, state) - return struct (token, state) } + mapUnfolds : Choice 'state -> 'event array, 'event array -> 'state -> 'event array * 'event array>) = + let cache streamName (inner : CancellationToken -> Task<_>) ct = task { + let! tokenAndState = inner ct + do! updateCache streamName tokenAndState + return tokenAndState } interface ICategory<'event, 'state, 'context> with - member _.Load(log, _categoryName, _streamId, streamName, allowStale, _requireLeader, ct) : Task = task { - match! tryReadCache streamName : Task with - | ValueNone -> return! (fun () -> category.Load(log, streamName, initial, checkUnfolds, fold, isOrigin, ct)) |> cache streamName - | ValueSome struct (token, state) when allowStale -> return struct (token, state) // read already updated TTL, no need to write - | ValueSome (token, state) -> return! (fun () -> category.Reload(log, streamName, token, state, fold, isOrigin, ct)) |> cache streamName } + member _.Load(log, _categoryName, _streamId, streamName, allowStale, _requireLeader, ct) = task { + match! tryReadCache streamName with + | ValueNone -> return! cache streamName (fun ct -> category.Load(log, streamName, initial, checkUnfolds, fold, isOrigin, ct)) ct + | ValueSome tokenAndState when allowStale -> return tokenAndState // read already updated TTL, no need to write + | ValueSome (token, state) -> return! cache streamName (fun ct -> category.Reload(log, streamName, token, state, fold, isOrigin, ct)) ct } member _.TrySync(log : ILogger, _categoryName, _streamId, streamName, context, maybeInit, streamToken, state, events, ct) : Task> = task { match maybeInit with ValueNone -> () | ValueSome i -> do! i ct - match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context, compressUnfolds) |> Async.startAsTask ct with + match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context, compressUnfolds, ct) with | SyncResult.Conflict resync -> - return SyncResult.Conflict (fun ct -> cache streamName (fun () -> resync ct)) - | SyncResult.Written (token', state') -> - do! updateCache streamName struct (token', state') - return SyncResult.Written (token', state') } + return SyncResult.Conflict (cache streamName resync) + | SyncResult.Written tokenAndState' -> + do! updateCache streamName tokenAndState' + return SyncResult.Written tokenAndState' } module ConnectionString = @@ -1224,9 +1212,9 @@ type CosmosClientFactory /// Creates and validates a Client [including loading metadata](https://devblogs.microsoft.com/cosmosdb/improve-net-sdk-initialization) for the specified containers member x.CreateAndInitialize(discovery : Discovery, containers) = async { let! ct = Async.CancellationToken - match discovery with // + match discovery with | Discovery.AccountUriAndKey (accountUri = uri; key = key) -> return! CosmosClient.CreateAndInitializeAsync(string uri, key, containers, x.Options, ct) |> Async.AwaitTaskCorrect - | Discovery.ConnectionString cs -> return! CosmosClient.CreateAndInitializeAsync(cs, containers, x.Options) |> Async.AwaitTaskCorrect } + | Discovery.ConnectionString cs -> return! CosmosClient.CreateAndInitializeAsync(cs, containers, x.Options, ct) |> Async.AwaitTaskCorrect } /// Manages establishing a CosmosClient, which is used by CosmosStoreClient to read from the underlying Cosmos DB Container. 
type CosmosStoreConnector @@ -1308,7 +1296,7 @@ type CosmosStoreClient let createContainerInitializerGuard (d, c) = let init = if Some true = disableInitialization then None - else Some (fun cosmosContainer ct -> Initialization.createSyncStoredProcIfNotExists None cosmosContainer |> Async.Ignore |> Async.startAsTask ct) + else Some (Initialization.createSyncStoredProcIfNotExists None) let archiveD, archiveC = primaryDatabaseAndContainerToArchive (d, c) let primaryContainer, fallbackContainer = createContainer (d, c), createFallbackContainer (archiveD, archiveC) Initialization.ContainerInitializerGuard(createGateway primaryContainer, Option.map createGateway fallbackContainer, ?initContainer = init) @@ -1449,6 +1437,8 @@ type CosmosStoreCategory<'event, 'state, 'context>(resolveInner, empty) = namespace Equinox.CosmosStore.Core +open System.Collections.Generic +open System.Threading.Tasks open Equinox.Core open FsCodec open FSharp.Control @@ -1473,7 +1463,7 @@ type EventsContext internal acc.Value <- acc.Value - 1 false - let yieldPositionAndData res = async { + let yieldPositionAndData res = task { let! Token.Unpack pos', data = res return pos', data } @@ -1492,12 +1482,12 @@ type EventsContext internal struct (streamName, init) member x.StreamId(streamName) : string = x.ResolveStream streamName |> ValueTuple.fst - member internal _.GetLazy(stream, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex) : AsyncSeq[]> = + member internal _.GetLazy(stream, ?ct, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex) : IAsyncEnumerable[]> = let direction = defaultArg direction Direction.Forward let batching = match queryMaxItems with Some qmi -> QueryOptions(qmi) | _ -> context.QueryOptions - store.ReadLazy(log, batching, stream, direction, (Some, fun _ -> false), ?minIndex = minIndex, ?maxIndex = maxIndex) + store.ReadLazy(log, batching, stream, direction, (Some, fun _ -> false), ?ct = ct, ?minIndex = minIndex, ?maxIndex = maxIndex) - member internal _.GetInternal((stream, startPos), ?maxCount, ?direction) = async { + member internal _.GetInternal((stream, startPos), ?ct, ?maxCount, ?direction) = task { let direction = defaultArg direction Direction.Forward if maxCount = Some 0 then // Search semantics include the first hit so we need to special case this anyway @@ -1508,37 +1498,37 @@ type EventsContext internal | Some limit -> maxCountPredicate limit | None -> fun _ -> false let minIndex, maxIndex = getRange direction startPos - let! token, events = store.Read(log, stream, direction, (ValueSome, isOrigin), ?minIndex = minIndex, ?maxIndex = maxIndex) + let! token, events = store.Read(log, stream, direction, (ValueSome, isOrigin), ?ct = ct, ?minIndex = minIndex, ?maxIndex = maxIndex) if direction = Direction.Backward then System.Array.Reverse events return token, events } /// Establishes the current position of the stream in as efficient a manner as possible /// (The ideal situation is that the preceding token is supplied as input in order to avail of 1RU low latency state checks) member _.Sync(stream, [] ?position : Position) : Async = async { - let! Token.Unpack pos' = store.GetPosition(log, stream, ?pos = position) + let! ct = Async.CancellationToken + let! Token.Unpack pos' = store.GetPosition(log, stream, ct, ?pos = position) |> Async.AwaitTaskCorrect return pos' } /// Query (with MaxItems set to `queryMaxItems`) from the specified `Position`, allowing the reader to efficiently walk away from a running query /// ... NB as long as they Dispose! 
- member x.Walk(stream, queryMaxItems, [] ?minIndex, [] ?maxIndex, [] ?direction) : AsyncSeq[]> =
- x.GetLazy(stream, queryMaxItems, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex)
+ member x.Walk(stream, queryMaxItems, [] ?ct, [] ?minIndex, [] ?maxIndex, [] ?direction) : IAsyncEnumerable[]> =
+ x.GetLazy(stream, ?ct = ct, queryMaxItems = queryMaxItems, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex)

 /// Reads all Events from a `Position` in a given `direction`
- member x.Read(stream, [] ?position, [] ?maxCount, [] ?direction) : Async[]> =
- x.GetInternal((stream, position), ?maxCount = maxCount, ?direction = direction) |> yieldPositionAndData
+ member x.Read(stream, [] ?ct, [] ?position, [] ?maxCount, [] ?direction) : Task[]> =
+ x.GetInternal((stream, position), ?ct = ct, ?maxCount = maxCount, ?direction = direction) |> yieldPositionAndData

 /// Appends the supplied batch of events, subject to a consistency check based on the `position`
 /// Callers should implement appropriate idempotent handling, or use Equinox.Decider for that purpose
- member x.Sync(stream, position, events : IEventData<_>[]) : Async> = async {
+ member x.Sync(stream, position, events : IEventData<_>[], ct) : Task> = task {
 // Writes go through the stored proc, which we need to provision per container
 // Having to do this here in this way is far from ideal, but work on caching and external snapshots is likely
 // to move this about before we reach a final destination in any case
 match x.ResolveStream stream |> ValueTuple.snd with
 | ValueNone -> ()
- | ValueSome init -> let! ct = Async.CancellationToken
- do! init ct |> Async.AwaitTaskCorrect
+ | ValueSome init -> do! init ct
 let batch = Sync.mkBatch stream events Seq.empty
- match! store.Sync(log, stream, SyncExp.Version position.index, batch) with
+ match! store.Sync(log, stream, SyncExp.Version position.index, batch, ct) with
 | InternalSyncResult.Written (Token.Unpack pos) -> return AppendResult.Ok pos
 | InternalSyncResult.Conflict (pos, events) -> return AppendResult.Conflict (pos, events)
 | InternalSyncResult.ConflictUnknown (Token.Unpack pos) -> return AppendResult.ConflictUnknown pos }
@@ -1546,12 +1536,13 @@ type EventsContext internal
 /// Low level, non-idempotent call appending events to a stream without a concurrency control mechanism in play
 /// NB Should be used sparingly; Equinox.Decider enables building equivalent idempotent handling with minimal code.
 member x.NonIdempotentAppend(stream, events : IEventData<_>[]) : Async = async {
- match! x.Sync(stream, Position.fromAppendAtEnd, events) with
+ let! ct = Async.CancellationToken
+ match! x.Sync(stream, Position.fromAppendAtEnd, events, ct) |> Async.AwaitTaskCorrect with
 | AppendResult.Ok token -> return token
 | x -> return x |> sprintf "Conflict despite it being disabled %A" |> invalidOp }

- member _.Prune(stream, index) : Async =
- store.Prune(log, stream, index)
+ member _.Prune(stream, index, ct) : Task =
+ store.Prune(log, stream, index, ct)
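 // (Editor's illustration, not part of the patch.) With the members above now Task-based,
 // a caller threads an explicit CancellationToken through a task workflow rather than relying
 // on Async's ambient token. A minimal sketch, assuming an EventsContext `ctx`, a pre-built
 // `events` batch, and a placeholder stream name:
 //
 //     let readThenAppend (ctx : EventsContext) (events : IEventData<_>[]) ct = task {
 //         // read up to 10 events forward from the start of the (hypothetical) stream
 //         let! pos, _batch = ctx.Read(ctx.StreamId "Favorites-alice", ct, maxCount = 10)
 //         // append, contingent on the Position just observed
 //         match! ctx.Sync(ctx.StreamId "Favorites-alice", pos, events, ct) with
 //         | AppendResult.Ok pos' -> return Some pos'
 //         | _conflict -> return None }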
 /// Provides mechanisms for building `EventData` records to be supplied to the `Events` API
 type EventData() =
@@ -1563,16 +1554,16 @@ type EventData() =
 module Events =
 let private (|PositionIndex|) (x : Position) = x.index
- let private stripSyncResult (f : Async>) : Async> = async {
- match! f with
+ let private stripSyncResult (f : Task>) : Async> = async {
+ match! f |> Async.AwaitTaskCorrect with
 | AppendResult.Ok (PositionIndex index)-> return AppendResult.Ok index
 | AppendResult.Conflict (PositionIndex index, events) -> return AppendResult.Conflict (index, events)
 | AppendResult.ConflictUnknown (PositionIndex index) -> return AppendResult.ConflictUnknown index }
 let private stripPosition (f : Async) : Async = async {
 let! (PositionIndex index) = f
 return index }
- let private dropPosition (f : Async[]>) : Async[]> = async {
- let! _, xs = f
+ let private dropPosition (f : Task[]>) : Async[]> = async {
+ let! _, xs = f |> Async.AwaitTaskCorrect
 return xs }
 let (|MinPosition|) = function
 | 0L -> None
@@ -1585,49 +1576,56 @@ module Events =
 /// reading in batches of the specified size.
 /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest
 /// sequence number in the stream.
- let getAll (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) : AsyncSeq[]> =
- ctx.Walk(ctx.StreamId streamName, batchSize, minIndex = index)
+ let getAll (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) : Async[]>> = async {
+ let! ct = Async.CancellationToken
+ return ctx.Walk(ctx.StreamId streamName, batchSize, ct, minIndex = index) }

 /// Returns an async array of events in the stream starting at the specified sequence number,
 /// number of events to read is specified by batchSize
 /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest
 /// sequence number in the stream.
- let get (ctx : EventsContext) (streamName : string) (MinPosition index : int64) (maxCount : int) : Async[]> =
- ctx.Read(ctx.StreamId streamName, ?position = index, maxCount = maxCount) |> dropPosition
+ let get (ctx : EventsContext) (streamName : string) (MinPosition index : int64) (maxCount : int) : Async[]> = async {
+ let! ct = Async.CancellationToken
+ return! ctx.Read(ctx.StreamId streamName, ct, ?position = index, maxCount = maxCount) |> dropPosition }

 /// Appends a batch of events to a stream at the specified expected sequence number.
 /// If the specified expected sequence number does not match the stream, the events are not appended
 /// and a failure is returned.
- let append (ctx : EventsContext) (streamName : string) (index : int64) (events : IEventData<_>[]) : Async> =
- ctx.Sync(ctx.StreamId streamName, Position.fromI index, events) |> stripSyncResult
+ let append (ctx : EventsContext) (streamName : string) (index : int64) (events : IEventData<_>[]) : Async> = async {
+ let! ct = Async.CancellationToken
+ return! ctx.Sync(ctx.StreamId streamName, Position.fromI index, events, ct) |> stripSyncResult }
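 // (Editor's illustration, not part of the patch.) The wrappers in this module all repeat one
 // Async -> Task bridging idiom: capture the ambient CancellationToken, invoke the Task-returning
 // core member, then await it. Expressed as a generic sketch (the `bridge` name is hypothetical):
 //
 //     let private bridge (f : CancellationToken -> Task<'T>) : Async<'T> = async {
 //         let! ct = Async.CancellationToken        // flow the async workflow's token
 //         return! f ct |> Async.AwaitTaskCorrect } // await the Task-based call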
 /// Appends a batch of events to a stream at the present Position without any conflict checks.
 /// NB typically, it is recommended to ensure idempotency of operations by using the `append` and related API as
 /// this facilitates ensuring consistency is maintained, and yields reduced latency and Request Charges impacts
 /// (See equivalent APIs on `Context` that yield `Position` values)
- let appendAtEnd (ctx : EventsContext) (streamName : string) (events : IEventData<_>[]) : Async =
- ctx.NonIdempotentAppend(ctx.StreamId streamName, events) |> stripPosition
+ let appendAtEnd (ctx : EventsContext) (streamName : string) (events : IEventData<_>[]) : Async = async {
+ let! ct = Async.CancellationToken
+ return! ctx.NonIdempotentAppend(ctx.StreamId streamName, events) |> stripPosition }

 /// Requests deletion of events up to and including the specified index.
 /// Due to the need to preserve ordering of data in the stream, only complete Batches will be removed.
 /// If the index is within the Tip, events are removed via an etag-checked update. Does not alter the unfolds held in the Tip, or remove the Tip itself.
 /// Returns count of events deleted this time, events that could not be deleted due to partial batches, and the stream's lowest remaining sequence number.
- let pruneUntil (ctx : EventsContext) (streamName : string) (index : int64) : Async =
- ctx.Prune(ctx.StreamId streamName, index)
+ let pruneUntil (ctx : EventsContext) (streamName : string) (index : int64) : Async = async {
+ let! ct = Async.CancellationToken
+ return! ctx.Prune(ctx.StreamId streamName, index, ct) |> Async.AwaitTaskCorrect }

 /// Returns an async sequence of events in the stream backwards starting from the specified sequence number,
 /// reading in batches of the specified size.
 /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest
 /// sequence number in the stream.
- let getAllBackwards (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) : AsyncSeq[]> =
- ctx.Walk(ctx.StreamId streamName, batchSize, maxIndex = index, direction = Direction.Backward)
+ let getAllBackwards (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) : Async[]>> = async {
+ let! ct = Async.CancellationToken
+ return ctx.Walk(ctx.StreamId streamName, batchSize, ct, maxIndex = index, direction = Direction.Backward) }

 /// Returns an async array of events in the stream backwards starting from the specified sequence number,
 /// number of events to read is specified by batchSize
 /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest
 /// sequence number in the stream.
- let getBackwards (ctx : EventsContext) (streamName : string) (MaxPosition index : int64) (maxCount : int) : Async[]> =
- ctx.Read(ctx.StreamId streamName, ?position = index, maxCount = maxCount, direction = Direction.Backward) |> dropPosition
+ let getBackwards (ctx : EventsContext) (streamName : string) (MaxPosition index : int64) (maxCount : int) : Async[]> = async {
+ let! ct = Async.CancellationToken
+ return!
ctx.Read(ctx.StreamId streamName, ct, ?position = index, maxCount = maxCount, direction = Direction.Backward) |> dropPosition } /// Obtains the `index` from the current write Position let getNextIndex (ctx : EventsContext) (streamName : string) : Async = diff --git a/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj index dd8154bde..46fb5f5db 100644 --- a/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj +++ b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj @@ -18,10 +18,10 @@ - + - + diff --git a/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj b/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj index 1c593436f..3901f975b 100644 --- a/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj +++ b/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj @@ -15,7 +15,7 @@ - + diff --git a/src/Equinox.DynamoStore/DynamoStore.fs b/src/Equinox.DynamoStore/DynamoStore.fs index a6299ae60..aae7c168f 100644 --- a/src/Equinox.DynamoStore/DynamoStore.fs +++ b/src/Equinox.DynamoStore/DynamoStore.fs @@ -1,13 +1,14 @@ namespace Equinox.DynamoStore.Core -open System.Collections.Generic open Equinox.Core open FsCodec open FSharp.AWS.DynamoDB open FSharp.Control open Serilog open System +open System.Collections.Generic open System.IO +open System.Threading open System.Threading.Tasks [] @@ -285,9 +286,9 @@ module Log = { mutable rux100 : int64; mutable count : int64; mutable ms : int64 } static member Create() = { rux100 = 0L; count = 0L; ms = 0L } member x.Ingest(ms, ru) = - System.Threading.Interlocked.Increment(&x.count) |> ignore - System.Threading.Interlocked.Add(&x.rux100, int64 (ru * 100.)) |> ignore - System.Threading.Interlocked.Add(&x.ms, ms) |> ignore + Interlocked.Increment(&x.count) |> ignore + Interlocked.Add(&x.rux100, int64 (ru * 100.)) |> ignore + Interlocked.Add(&x.ms, ms) |> ignore type internal Counters() = let tables = System.Collections.Concurrent.ConcurrentDictionary() let create (_name : string) = Counter.Create() @@ -313,7 +314,7 @@ module Log = static let mutable epoch = Epoch() static member Restart() = let fresh = Epoch() - let outgoing = System.Threading.Interlocked.Exchange(&epoch, fresh) + let outgoing = Interlocked.Exchange(&epoch, fresh) outgoing.Stop() outgoing interface Serilog.Core.ILogEventSink with @@ -419,6 +420,15 @@ type private Metrics() = t <- t + x.CapacityUnits member _.Consumed : RequestConsumption = { total = t } +module private Async = + + let startImmediateAsTask ct computation = Async.StartImmediateAsTask(computation, cancellationToken = ct) + +module private Stopwatch = + + let timeAsync (ct : CancellationToken) (f : Async<'T>) : Task = + (fun ct -> Async.startImmediateAsTask ct f) |> Stopwatch.time ct + type internal BatchIndices = { isTip : bool; index : int64; n : int64 } type Container(tableName, createContext : (RequestMetrics -> unit) -> TableContext) = @@ -430,17 +440,17 @@ type Container(tableName, createContext : (RequestMetrics -> unit) -> TableConte let createContext collector = TableContext(client, tableName, metricsCollector = collector) Container(tableName, createContext) - member x.TryGetTip(stream : string, consistentRead) : Async = async { + member x.TryGetTip(stream : string, consistentRead, ct) : Task = task { let rm = Metrics() let context = createContext rm.Add let pk = Batch.tableKeyForStreamTip stream - let! item = context.TryGetItemAsync(pk, consistentRead) + let! 
item = context.TryGetItemAsync(pk, consistentRead) |> Async.startImmediateAsTask ct return item |> Option.map Batch.ofSchema, rm.Consumed } - member x.TryUpdateTip(stream : string, updateExpr : Quotations.Expr Batch.Schema>, ?precondition) : Async = async { + member x.TryUpdateTip(stream : string, updateExpr : Quotations.Expr Batch.Schema>, ct, ?precondition) : Task = task { let rm = Metrics() let context = createContext rm.Add let pk = Batch.tableKeyForStreamTip stream - let! item = context.UpdateItemAsync(pk, updateExpr, ?precondition = precondition) + let! item = context.UpdateItemAsync(pk, updateExpr, ?precondition = precondition) |> Async.startImmediateAsTask ct return item |> Batch.ofSchema, rm.Consumed } member _.QueryBatches(stream, consistentRead, minN, maxI, backwards, batchSize, ct) : IAsyncEnumerable = let compile = (createContext ignore).Template.PrecomputeConditionalExpr @@ -456,31 +466,30 @@ type Container(tableName, createContext : (RequestMetrics -> unit) -> TableConte let context = createContext rm.Add let! t, res = context.QueryPaginatedAsync(kc, ?filterCondition = fc, limit = batchSize, ?exclusiveStartKey = le, scanIndexForward = not backwards, consistentRead = consistentRead) - |> Async.startTask |> Stopwatch.time ct + |> Stopwatch.timeAsync ct yield i, t, Array.map Batch.ofSchema res.Records, rm.Consumed match res.LastEvaluatedKey with | None -> () - | le -> yield! aux (i + 1, le) - } + | le -> yield! aux (i + 1, le) } aux (0, None) - member internal _.QueryIAndNOrderByNAscending(stream, maxItems) : IAsyncEnumerable = + member internal _.QueryIAndNOrderByNAscending(stream, maxItems, ct) : IAsyncEnumerable = let rec aux (index, lastEvaluated) = taskSeq { let rm = Metrics() let context = createContext rm.Add let keyCond = <@ fun (b : Batch.Schema) -> b.p = stream @> let proj = <@ fun (b : Batch.Schema) -> b.i, b.c, b.n @> // TOCONSIDER want len of c, but b.e.Length explodes in empty array case, so no choice but to return the full thing let! t, res = context.QueryProjectedPaginatedAsync(keyCond, proj, ?exclusiveStartKey = lastEvaluated, scanIndexForward = true, limit = maxItems) - |> Stopwatch.Time + |> Stopwatch.timeAsync ct yield index, t, [| for i, c, n in res -> { isTip = Batch.isTip i; index = n - int64 c.Length; n = n } |], rm.Consumed match res.LastEvaluatedKey with | None -> () | le -> yield! aux (index + 1, le) } aux (0, None) - member x.DeleteItem(stream : string, i) : Async = async { + member x.DeleteItem(stream : string, i, ct) : Task = task { let rm = Metrics() let context = createContext rm.Add let pk = TableKey.Combined(stream, i) - let! _item = context.DeleteItemAsync(pk) + let! 
_item = context.DeleteItemAsync(pk) |> Async.startImmediateAsTask ct return rm.Consumed } /// Represents the State of the Stream for the purposes of deciding how to map a Sync request to DynamoDB operations @@ -561,7 +570,7 @@ module internal Sync = type private Res = | Written of etag' : string | ConflictUnknown - let private transact (container : Container, stream : string) requestArgs : Async = async { + let private transact (container : Container, stream : string) requestArgs ct : Task = task { let etag' = let g = Guid.NewGuid() in g.ToString "N" let actions = generateRequests stream requestArgs etag' let rm = Metrics() @@ -569,14 +578,15 @@ module internal Sync = match actions with | [ TransactWrite.Put (item, Some cond) ] -> context.PutItemAsync(item, cond) |> Async.Ignore | [ TransactWrite.Update (key, Some cond, updateExpr) ] -> context.UpdateItemAsync(key, updateExpr, cond) |> Async.Ignore - | actions -> context.TransactWriteItems actions - return rm.Consumed, Res.Written etag' + | actions -> context.TransactWriteItems actions |> Async.Ignore + |> Async.startImmediateAsTask ct + return struct (rm.Consumed, Res.Written etag') with DynamoDbConflict -> return rm.Consumed, Res.ConflictUnknown } - let private transactLogged (container, stream) (baseBytes, baseEvents, req, unfolds, exp, b', n') (log : ILogger) - : Async = async { - let! t, ({ total = ru } as rc, result) = transact (container, stream) (req, unfolds, exp, b', n') |> Stopwatch.Time + let private transactLogged (container, stream) (baseBytes, baseEvents, req, unfolds, exp, b', n', ct) (log : ILogger) + : Task = task { + let! t, ({ total = ru } as rc, result) = transact (container, stream) (req, unfolds, exp, b', n') |> Stopwatch.time ct let calfBytes, calfCount, tipBytes, tipEvents, appended = req |> function | Req.Append (_tipWasEmpty, appends) -> 0, 0, baseBytes + Event.arrayBytes appends, baseEvents + appends.Length, appends | Req.Calve (calf, tip, appendedCount) -> Event.arrayBytes calf, calf.Length, baseBytes + Event.arrayBytes tip, tip.Length, @@ -608,7 +618,7 @@ module internal Sync = let private maxDynamoDbItemSize = 400 * 1024 let handle log (maxEvents, maxBytes) (container, stream) - (pos, exp, n', events : IEventData array, unfolds : IEventData array) = async { + (pos, exp, n', events : IEventData array, unfolds : IEventData array, ct) = task { let baseIndex = int n' - events.Length let events : Event array = events |> Array.mapi (fun i e -> { i = baseIndex + i; t = e.Timestamp; c = e.EventType; d = EncodedBody.toInternal e.Data; m = EncodedBody.toInternal e.Meta @@ -630,7 +640,7 @@ module internal Sync = let tipEvents = residualEvents.ToArray() Req.Calve (calfEvents, tipEvents, events.Length), cur.calvedBytes + Event.arrayBytes calfEvents, tipEvents else Req.Append (Array.isEmpty cur.events, events), cur.calvedBytes, Array.append cur.events events - match! transactLogged (container, stream) (cur.baseBytes, cur.events.Length, req, unfolds, exp pos, predecessorBytes', n') log with + match! transactLogged (container, stream) (cur.baseBytes, cur.events.Length, req, unfolds, exp pos, predecessorBytes', n', ct) log with | Res.ConflictUnknown -> return Result.ConflictUnknown | Res.Written etag' -> return Result.Written (etag', predecessorBytes', tipEvents', unfolds) } @@ -641,14 +651,14 @@ module internal Tip = | Found of 'T | NotFound | NotModified - let private get (container : Container, stream : string) consistentRead (maybePos : Position option) = async { - match! 
container.TryGetTip(stream, consistentRead) with + let private get (container : Container, stream : string) consistentRead (maybePos : Position option) ct = task { + match! container.TryGetTip(stream, consistentRead, ct) with | Some { etag = fe }, rc when fe = Position.toEtag maybePos -> return rc, Res.NotModified | Some t, rc -> return rc, Res.Found t | None, rc -> return rc, Res.NotFound } - let private loggedGet (get : Container * string -> bool -> Position option -> Async<_>) (container, stream) consistentRead (maybePos : Position option) (log : ILogger) = async { + let private loggedGet (get : Container * string -> bool -> Position option -> CancellationToken -> Task<_>) (container, stream) consistentRead (maybePos : Position option) (log : ILogger) ct = task { let log = log |> Log.prop "stream" stream - let! t, ({ total = ru } as rc, res : Res<_>) = get (container, stream) consistentRead maybePos |> Stopwatch.Time + let! t, ({ total = ru } as rc, res : Res<_>) = get (container, stream) consistentRead maybePos |> Stopwatch.time ct let logMetric bytes count (f : Log.Measurement -> _) = log |> Log.event (f (Log.metric container.TableName stream t bytes count rc)) match res with | Res.NotModified -> @@ -670,8 +680,8 @@ module internal Tip = |> Seq.sortBy (fun x -> x.Index, x.IsUnfold) |> Array.ofSeq /// `pos` being Some implies that the caller holds a cached value and hence is ready to deal with Result.NotModified - let tryLoad (log : ILogger) containerStream consistentRead (maybePos : Position option, maxIndex) : Async array>> = async { - let! _rc, res = loggedGet get containerStream consistentRead maybePos log + let tryLoad (log : ILogger) containerStream consistentRead (maybePos : Position option, maxIndex) ct : Task array>> = task { + let! _rc, res = loggedGet get containerStream consistentRead maybePos log ct match res with | Res.NotModified -> return Res.NotModified | Res.NotFound -> return Res.NotFound @@ -743,11 +753,11 @@ module internal Query = let scan<'event> (log : ILogger) (container, stream) consistentRead maxItems maxRequests direction (tryDecode : ITimelineEvent -> 'event voption, isOrigin : 'event -> bool) - (minIndex, maxIndex) + (minIndex, maxIndex, ct) : Task option> = task { let mutable found = false let mutable responseCount = 0 - let mergeBatches (log : ILogger) (batchesBackward : IAsyncEnumerable) = async { + let mergeBatches (log : ILogger) (batchesBackward : IAsyncEnumerable) = task { let mutable lastResponse, maybeTipPos, ru = None, None, 0. let! events = batchesBackward @@ -756,7 +766,6 @@ module internal Query = lastResponse <- Some events; ru <- ru + rc.total responseCount <- responseCount + 1 seq { for x in events -> struct (x, x |> EncodedBody.ofInternal |> tryDecode) }) - |> TaskSeq.takeWhileInclusive (function | struct (x, ValueSome e) when isOrigin e -> found <- true @@ -769,16 +778,16 @@ module internal Query = log.Write(logLevel, "EqxDynamo Stop @{index} {case} used {used}b residual {residual}b", x.i, x.c, used, residual) false | _ -> true) - |> AsyncSeq.toArrayAsync + |> TaskSeq.toArrayAsync return events, maybeTipPos, { total = ru } } let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readLog = log |> Log.prop "direction" direction - let batches : AsyncSeq = - mkQuery readLog (container, stream) consistentRead maxItems (direction, minIndex, maxIndex) - |> AsyncSeq.map (mapPage direction (container, stream) (minIndex, maxIndex, maxItems) maxRequests readLog) - let! 
t, (events, maybeTipPos, ru) = mergeBatches log batches |> Stopwatch.Time + let batches ct : IAsyncEnumerable = + mkQuery readLog (container, stream) consistentRead maxItems (direction, minIndex, maxIndex) ct + |> TaskSeq.map (mapPage direction (container, stream) (minIndex, maxIndex, maxItems) maxRequests readLog) + let! t, (events, maybeTipPos, ru) = batches >> mergeBatches log |> Stopwatch.time ct let raws = Array.map ValueTuple.fst events - let decoded = if direction = Direction.Forward then Array.chooseV ValueTuple.snd events else Seq.chooseV ValueTuple.snd events |> Seq.rev |> Array.ofSeq + let decoded = if direction = Direction.Forward then Array.chooseV ValueTuple.snd events else let xs = Array.chooseV ValueTuple.snd events in Array.Reverse xs; xs let minMax = (None, raws) ||> Array.fold (fun acc x -> let i = int64 x.i in Some (match acc with None -> i, i | Some (n, x) -> min n i, max x i)) let version = match maybeTipPos, minMax with @@ -787,34 +796,35 @@ module internal Query = | None, None -> 0L log |> logQuery (direction, minIndex, maxIndex) (container, stream) t (responseCount, raws) version ru match minMax, maybeTipPos with - | Some (i, m), _ -> return Some { found = found; minIndex = i; next = m + 1L; maybeTipPos = maybeTipPos; events = decoded } + | Some (i, m), _ -> return Some ({ found = found; minIndex = i; next = m + 1L; maybeTipPos = maybeTipPos; events = decoded } : ScanResult<_>) | None, Some { index = tipI } -> return Some { found = found; minIndex = tipI; next = tipI; maybeTipPos = maybeTipPos; events = [||] } | None, _ -> return None } let walkLazy<'event> (log : ILogger) (container, stream) maxItems maxRequests (tryDecode : ITimelineEvent -> 'event option, isOrigin : 'event -> bool) - (direction, minIndex, maxIndex) - : AsyncSeq<'event array> = asyncSeq { + (direction, minIndex, maxIndex) ct + : IAsyncEnumerable<'event array> = taskSeq { let query = mkQuery log (container, stream) (*consistentRead*)false maxItems (direction, minIndex, maxIndex) let readPage = mapPage direction (container, stream) (minIndex, maxIndex, maxItems) maxRequests let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readLog = log |> Log.prop "direction" direction - let query = query |> AsyncSeq.map (readPage readLog) let startTicks = System.Diagnostics.Stopwatch.GetTimestamp() + let query = query ct |> TaskSeq.map (readPage readLog) let allEvents = ResizeArray() let mutable i, ru = 0, 0. try let mutable ok = true - let e = query.GetEnumerator() + use e = query.GetAsyncEnumerator(ct) while ok do let batchLog = readLog |> Log.prop "batchIndex" i match maxRequests with | Some mr when i + 1 >= mr -> batchLog.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () - match! e.MoveNext() with - | None -> ok <- false // rest of block does not happen, while exits - | Some (events, _pos, rc) -> + let! 
more = e.MoveNextAsync() + if not more then ok <- false else // rest of block does not happen, while exits + + let events, _pos, rc = e.Current ru <- ru + rc.total allEvents.AddRange(events) @@ -843,9 +853,9 @@ module internal Query = /// 2) Querying Primary for predecessors of what's obtained from 1 /// 3) Querying Archive for predecessors of what's obtained from 2 let load (log : ILogger) (minIndex, maxIndex) (tip : ScanResult<'event> option) - (primary : int64 option * int64 option -> Task option>) + (primary : int64 option * int64 option * CancellationToken -> Task option>) // Choice1Of2 -> indicates whether it's acceptable to ignore missing events; Choice2Of2 -> Fallback store - (fallback : Choice Task option>>) ct + (fallback : Choice Task option>>) ct : Task = task { let minI = defaultArg minIndex 0L match tip with @@ -857,7 +867,7 @@ module internal Query = match tip with | Some { minIndex = i; maybeTipPos = p; events = e } -> Some i, e, p | None -> maxIndex, Array.empty, None - let! primary = primary (minIndex, i) + let! primary = primary (minIndex, i, ct) let events, pos = match primary with | None -> events, match pos with Some p -> Pos p | None -> Empty @@ -879,7 +889,7 @@ module internal Query = | _, Choice2Of2 fallback -> let maxIndex = match primary with Some p -> Some p.minIndex | None -> maxIndex // if no batches in primary, high water mark from tip is max - let! fallback = fallback (minIndex, maxIndex) + let! fallback = fallback (minIndex, maxIndex, ct) let events = match fallback with | Some s -> Array.append s.events events @@ -895,17 +905,16 @@ module internal Query = // NOTE: module is public so BatchIndices can be deserialized into module internal Prune = - let until (log : ILogger) (container : Container, stream : string) maxItems indexInclusive : Async = async { + let until (log : ILogger) (container : Container, stream : string) maxItems indexInclusive ct : Task = task { let log = log |> Log.prop "stream" stream - let deleteItem i count : Async = async { - let! t, rc = container.DeleteItem(stream, i) |> Stopwatch.Time + let deleteItem i count : Task = task { + let! t, rc = (fun ct -> container.DeleteItem(stream, i, ct)) |> Stopwatch.time ct let reqMetric = Log.metric container.TableName stream t -1 count rc let log = let evt = Log.Metric.Delete reqMetric in log |> Log.event evt log.Information("EqxDynamo {action:l} {i} {ms:f1}ms {ru}RU", "Delete", i, t.ElapsedMilliseconds, rc) - return rc - } - let trimTip expectedN count = async { - match! container.TryGetTip(stream, consistentRead = false) with + return rc } + let trimTip expectedN count = task { + match! container.TryGetTip(stream, (*consistentRead = *)false, ct) with | None, _rc -> return failwith "unexpected NotFound" | Some tip, _rc when tip.n <> expectedN -> return failwithf "Concurrent write detected; Expected n=%d actual=%d" expectedN tip.n | Some tip, tipRc -> @@ -915,16 +924,15 @@ module internal Prune = let updEtag = let g = Guid.NewGuid() in g.ToString "N" let condExpr : Quotations.Expr bool> = <@ fun t -> t.etag = Some tip.etag @> let updateExpr : Quotations.Expr _> = <@ fun t -> { t with etag = Some updEtag; c = tC'; e = tE' } @> - let! t, (_updated, updRc) = container.TryUpdateTip(stream, updateExpr, condExpr) |> Stopwatch.Time + let! 
t, (_updated, updRc) = (fun ct -> container.TryUpdateTip(stream, updateExpr, ct, condExpr)) |> Stopwatch.time ct let rc = { total = tipRc.total + updRc.total } let reqMetric = Log.metric container.TableName stream t -1 count rc let log = let evt = Log.Metric.Trim reqMetric in log |> Log.event evt log.Information("EqxDynamo {action:l} {count} {ms:f1}ms {ru}RU", "Trim", count, t.ElapsedMilliseconds, rc.total) - return rc - } + return rc } let log = log |> Log.prop "index" indexInclusive // need to sort by n to guarantee we don't ever leave an observable gap in the sequence - let query = container.QueryIAndNOrderByNAscending(stream, maxItems) + let query ct = container.QueryIAndNOrderByNAscending(stream, maxItems, ct) let mapPage (i, t : StopwatchInterval, batches : BatchIndices array, rc) = let next = Array.tryLast batches |> Option.map (fun x -> x.n) let reqMetric = Log.metric container.TableName stream t -1 batches.Length rc @@ -934,7 +942,7 @@ module internal Prune = batches, rc let! pt, outcomes = let isRelevant (x : BatchIndices) = x.index <= indexInclusive || x.isTip - let handle (batches : BatchIndices array, rc) = async { + let handle (batches : BatchIndices array, rc) = task { let mutable delCharges, batchesDeleted, trimCharges, batchesTrimmed, eventsDeleted, eventsDeferred = 0., 0, 0., 0, 0, 0 let mutable lwm = None for x in batches |> Seq.takeWhile (fun x -> isRelevant x || lwm = None) do @@ -957,15 +965,14 @@ module internal Prune = eventsDeferred <- eventsDeferred + eligibleEvents if lwm = None then lwm <- Some x.index - return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) - } + return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) } let hasRelevantItems (batches, _rc) = batches |> Array.exists isRelevant query - |> AsyncSeq.map mapPage - |> AsyncSeq.takeWhile hasRelevantItems - |> AsyncSeq.mapAsync handle - |> AsyncSeq.toArrayAsync - |> Stopwatch.Time + >> TaskSeq.map mapPage + >> TaskSeq.takeWhile hasRelevantItems + >> TaskSeq.mapAsync handle + >> TaskSeq.toArrayAsync + |> Stopwatch.time ct let mutable queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = 0., 0., 0., 0, 0, 0, 0 let mutable lwm = None for (qc, dc, tc), bLwm, (bCount, eDel, eDef) in outcomes do @@ -1068,7 +1075,7 @@ type internal StoreClient(container : Container, fallback : Container option, qu | Some _ as mi -> mi | None when Option.isSome tip -> Some Batch.tipMagicI | None -> None - let walk log container = Query.scan log (container, stream) consistentRead query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) ct + let walk log container = Query.scan log (container, stream) consistentRead query.MaxItems query.MaxRequests direction (tryDecode, isOrigin) let walkFallback = match fallback with | None -> Choice1Of2 query.IgnoreMissingEvents @@ -1077,18 +1084,18 @@ type internal StoreClient(container : Container, fallback : Container option, qu let log = log |> Log.prop "stream" stream let! 
pos, events = Query.load log (minIndex, maxIndex) tip (walk log container) walkFallback ct return Token.create_ pos, events } - member _.ReadLazy(log, batching : QueryOptions, stream, direction, (tryDecode, isOrigin), ?minIndex, ?maxIndex) : AsyncSeq<'event array> = - Query.walkLazy log (container, stream) batching.MaxItems batching.MaxRequests (tryDecode, isOrigin) (direction, minIndex, maxIndex) + member _.ReadLazy(log, batching : QueryOptions, stream, direction, (tryDecode, isOrigin), ct, ?minIndex, ?maxIndex) : IAsyncEnumerable<'event array> = + Query.walkLazy log (container, stream) batching.MaxItems batching.MaxRequests (tryDecode, isOrigin) (direction, minIndex, maxIndex) ct member store.Load(log, (stream, maybePos), consistentRead, (tryDecode, isOrigin), checkUnfolds : bool, ct) : Task = if not checkUnfolds then store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), ct) else task { - match! loadTip log stream consistentRead maybePos with + match! loadTip log stream consistentRead maybePos ct with | Tip.Res.NotFound -> return Token.empty, Array.empty | Tip.Res.NotModified -> return invalidOp "Not applicable" | Tip.Res.Found (pos, i, xs) -> return! store.Read(log, stream, consistentRead, Direction.Backward, (tryDecode, isOrigin), ct, tip = (pos, i, xs)) } - member _.GetPosition(log, stream, ?pos) : Async = async { - match! loadTip log stream (*consistentRead*)false pos with + member _.GetPosition(log, stream, ct, ?pos) : Task = task { + match! loadTip log stream (*consistentRead*)false pos ct with | Tip.Res.NotFound -> return Token.empty | Tip.Res.NotModified -> return Token.create pos.Value | Tip.Res.Found (pos, _i, _unfoldsAndEvents) -> return Token.create pos } @@ -1099,19 +1106,19 @@ type internal StoreClient(container : Container, fallback : Container option, qu match preview with | Some (pos, i, xs) -> read (pos, i, xs) | None -> task { - match! loadTip log stream consistentRead maybePos with + match! loadTip log stream consistentRead maybePos ct with | Tip.Res.NotFound -> return LoadFromTokenResult.Found (Token.empty, Array.empty) | Tip.Res.NotModified -> return LoadFromTokenResult.Unchanged | Tip.Res.Found (pos, i, xs) -> return! read (pos, i, xs) } - member _.Sync(log, stream, pos, exp, n' : int64, eventsEncoded, unfoldsEncoded) : Async = async { - match! Sync.handle log (tip.MaxEvents, tip.MaxBytes) (container, stream) (pos, exp, n', eventsEncoded, unfoldsEncoded) with + member _.Sync(log, stream, pos, exp, n' : int64, eventsEncoded, unfoldsEncoded, ct) : Task = task { + match! Sync.handle log (tip.MaxEvents, tip.MaxBytes) (container, stream) (pos, exp, n', eventsEncoded, unfoldsEncoded, ct) with | Sync.Result.ConflictUnknown -> return InternalSyncResult.ConflictUnknown | Sync.Result.Written (etag', b', events, unfolds) -> return InternalSyncResult.Written (Token.create (Position.fromElements (stream, b', n', events, unfolds, etag'))) } - member _.Prune(log, stream, index) = - Prune.until log (container, stream) query.MaxItems index + member _.Prune(log, stream, index, ct) = + Prune.until log (container, stream) query.MaxItems index ct type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IEventCodec<'event, EncodedBody, 'context>) = member _.Load(log, stream, requireLeader, initial, checkUnfolds, fold, isOrigin, ct) : Task = task { @@ -1121,7 +1128,7 @@ type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IE match! 
store.Reload(log, (stream, pos), requireLeader, (codec.TryDecode, isOrigin), ct, ?preview = preloaded) with | LoadFromTokenResult.Unchanged -> return struct (streamToken, state) | LoadFromTokenResult.Found (token', events) -> return token', fold state events } - member cat.Sync(log, stream, (Token.Unpack pos as streamToken), state, events, mapUnfolds, fold, isOrigin, context): Async> = async { + member cat.Sync(log, stream, (Token.Unpack pos as streamToken), state, events, mapUnfolds, fold, isOrigin, context, ct): Task> = task { let state' = fold state (Seq.ofArray events) let exp, events, eventsEncoded, unfoldsEncoded = let encode e = codec.Encode(context, e) @@ -1133,20 +1140,20 @@ type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IE let events', unfolds = transmute events state' Position.toEtag >> Sync.Exp.Etag, events', Array.map encode events', Array.map encode unfolds let baseVer = Position.toIndex pos + int64 (Array.length events) - match! store.Sync(log, stream, pos, exp, baseVer, eventsEncoded, unfoldsEncoded) with + match! store.Sync(log, stream, pos, exp, baseVer, eventsEncoded, unfoldsEncoded, ct) with | InternalSyncResult.ConflictUnknown -> return SyncResult.Conflict (fun ct -> cat.Reload(log, stream, true, streamToken, state, fold, isOrigin, ct)) | InternalSyncResult.Written token' -> return SyncResult.Written (token', state') } module internal Caching = let applyCacheUpdatesWithSlidingExpiration (cache : ICache, prefix : string) (slidingExpiration : TimeSpan) = - let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) + let mkCacheEntry struct (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) let options = CacheItemOptions.RelativeExpiration slidingExpiration fun streamName value -> cache.UpdateIfNewer(prefix + streamName, options, Token.supersedes, mkCacheEntry value) let applyCacheUpdatesWithFixedTimeSpan (cache : ICache, prefix : string) (period : TimeSpan) = - let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) + let mkCacheEntry struct (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) fun streamName value -> let expirationPoint = let creationDate = DateTimeOffset.UtcNow in creationDate.Add period let options = CacheItemOptions.AbsoluteExpiration expirationPoint @@ -1155,25 +1162,25 @@ module internal Caching = type CachingCategory<'event, 'state, 'context> ( category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, isOrigin : 'event -> bool, - tryReadCache, updateCache : string -> _ -> Task, + tryReadCache : string -> Task>, updateCache : string -> struct (_ * _) -> Task, checkUnfolds, mapUnfolds : Choice 'state -> 'event array, 'event array -> 'state -> 'event array * 'event array>) = - let cache streamName (inner : unit -> Task<_>) = task { - let! struct (token, state) = inner () - do! updateCache streamName (token, state) - return struct (token, state) } + let cache streamName (inner : CancellationToken -> Task<_>) ct = task { + let! tokenAndState = inner ct + do! updateCache streamName tokenAndState + return tokenAndState } interface ICategory<'event, 'state, 'context> with - member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) : Task = task { - match! tryReadCache streamName : Task> with - | ValueNone -> return! 
(fun () -> category.Load(log, streamName, requireLeader, initial, checkUnfolds, fold, isOrigin, ct)) |> cache streamName - | ValueSome struct (token, state) when allowStale -> return struct (token, state) // read already updated TTL, no need to write - | ValueSome struct (token, state) -> return! (fun () -> category.Reload(log, streamName, requireLeader, token, state, fold, isOrigin, ct)) |> cache streamName } - member _.TrySync(log : ILogger, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, state, events, ct) : Task> = task { - match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context) |> Async.startAsTask ct with + member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = task { + match! tryReadCache streamName with + | ValueNone -> return! cache streamName (fun ct -> category.Load(log, streamName, requireLeader, initial, checkUnfolds, fold, isOrigin, ct)) ct + | ValueSome tokenAndState when allowStale -> return tokenAndState // read already updated TTL, no need to write + | ValueSome (token, state) -> return! cache streamName (fun ct -> category.Reload(log, streamName, requireLeader, token, state, fold, isOrigin, ct)) ct } + member _.TrySync(log : ILogger, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, state, events, ct) = task { + match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context, ct) with | SyncResult.Conflict resync -> - return SyncResult.Conflict (fun ct -> (fun () -> resync ct) |> cache streamName) - | SyncResult.Written (token', state') -> - do! updateCache streamName (token', state') - return SyncResult.Written (token', state') } + return SyncResult.Conflict (cache streamName resync) + | SyncResult.Written tokenAndState' -> + do! updateCache streamName tokenAndState' + return SyncResult.Written tokenAndState' } namespace Equinox.DynamoStore @@ -1387,6 +1394,8 @@ module Exceptions = namespace Equinox.DynamoStore.Core +open System.Collections.Generic +open System.Threading.Tasks open Equinox.Core open FsCodec open FSharp.Control @@ -1410,7 +1419,7 @@ type EventsContext internal acc.Value <- acc.Value - 1 false - let yieldPositionAndData res = async { + let yieldPositionAndData res = task { let! 
Token.Unpack pos', data = res
 return Position.flatten pos', data }

@@ -1420,12 +1429,12 @@ type EventsContext internal
 member x.StreamId(streamName) : string = context.ResolveContainerClientAndStreamId(null, streamName) |> ValueTuple.snd

- member internal _.GetLazy(stream, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex) : AsyncSeq array> =
+ member internal _.GetLazy(stream, ct, ?queryMaxItems, ?direction, ?minIndex, ?maxIndex) : IAsyncEnumerable array> =
 let direction = defaultArg direction Direction.Forward
 let batching = match queryMaxItems with Some qmi -> QueryOptions(qmi) | _ -> context.QueryOptions
- store.ReadLazy(log, batching, stream, direction, (Some, fun _ -> false), ?minIndex = minIndex, ?maxIndex = maxIndex)
+ store.ReadLazy(log, batching, stream, direction, (Some, fun _ -> false), ct, ?minIndex = minIndex, ?maxIndex = maxIndex)

- member internal _.GetInternal(stream, ?minIndex, ?maxIndex, ?maxCount, ?direction) = async {
+ member internal _.GetInternal(stream, ct, ?minIndex, ?maxIndex, ?maxCount, ?direction) = task {
 let direction = defaultArg direction Direction.Forward
 if maxCount = Some 0 then
 // Search semantics include the first hit so we need to special case this anyway
@@ -1436,27 +1445,26 @@ type EventsContext internal
 match maxCount with
 | Some limit -> maxCountPredicate limit
 | None -> fun _ -> false
- let! ct = Async.CancellationToken
- let! token, events = store.Read(log, stream, (*consistentRead*)false, direction, (ValueSome, isOrigin), ct, ?minIndex = minIndex, ?maxIndex = maxIndex) |> Async.AwaitTaskCorrect
+ let! token, events = store.Read(log, stream, (*consistentRead*)false, direction, (ValueSome, isOrigin), ct, ?minIndex = minIndex, ?maxIndex = maxIndex)
 if direction = Direction.Backward then System.Array.Reverse events
 return token, events }

 /// Establishes the current position of the stream in as efficient a manner as possible
 /// (The ideal situation is that the preceding token is supplied as input in order to avail of 1RU low latency state checks)
- member _.Sync(stream, [] ?position : Position) : Async = async {
- let! Token.Unpack pos' = store.GetPosition(log, stream, ?pos = position)
+ member _.Sync(stream, ct, [] ?position : Position) : Task = task {
+ let! Token.Unpack pos' = store.GetPosition(log, stream, ct, ?pos = position)
 return Position.flatten pos' }
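 // (Editor's illustration, not part of the patch.) The Task-based Sync above doubles as a cheap
 // probe for the current write Position; hypothetical usage, assuming the flattened Position
 // exposes its index as below:
 //
 //     let nextIndex (ctx : EventsContext) stream ct = task {
 //         let! pos = ctx.Sync(stream, ct) // writes nothing; just reads the Tip
 //         return pos.index }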
 /// Query (with MaxItems set to `queryMaxItems`) from the specified `Position`, allowing the reader to efficiently walk away from a running query
 /// ... NB as long as they Dispose!
- member x.Walk(stream, queryMaxItems, [] ?minIndex, [] ?maxIndex, [] ?direction)
- : AsyncSeq array> =
- x.GetLazy(stream, queryMaxItems, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex)
+ member x.Walk(stream, queryMaxItems, ct, [] ?minIndex, [] ?maxIndex, [] ?direction)
+ : IAsyncEnumerable array> =
+ x.GetLazy(stream, ct, queryMaxItems = queryMaxItems, ?direction = direction, ?minIndex = minIndex, ?maxIndex = maxIndex)

 /// Reads all Events from a `Position` in a given `direction`
- member x.Read(stream, [] ?minIndex, [] ?maxIndex, [] ?maxCount, [] ?direction)
- : Async array> =
- x.GetInternal(stream, ?minIndex = minIndex, ?maxIndex = maxIndex, ?maxCount = maxCount, ?direction = direction) |> yieldPositionAndData
+ member x.Read(stream, ct, [] ?minIndex, [] ?maxIndex, [] ?maxCount, [] ?direction)
+ : Task array> =
+ x.GetInternal(stream, ct, ?minIndex = minIndex, ?maxIndex = maxIndex, ?maxCount = maxCount, ?direction = direction) |> yieldPositionAndData

#if APPEND_SUPPORT
 /// Appends the supplied batch of events, subject to a consistency check based on the `position`
 /// Callers should implement appropriate idempotent handling, or use Equinox.Decider for that purpose
@@ -1467,8 +1475,8 @@ type EventsContext internal
 | InternalSyncResult.ConflictUnknown -> return AppendResult.ConflictUnknown }
#endif

- member _.Prune(stream, index) : Async =
- store.Prune(log, stream, index)
+ member _.Prune(stream, index, ct) : Task =
+ store.Prune(log, stream, index, ct)

 /// Provides mechanisms for building `EventData` records to be supplied to the `Events` API
 type EventData() =
@@ -1484,26 +1492,27 @@ module Events =
 match! f with
 | AppendResult.Ok (PositionIndex index)-> return AppendResult.Ok index
 | AppendResult.ConflictUnknown -> return AppendResult.ConflictUnknown }
- let private stripPosition (f : Async) : Async = async {
- let! (PositionIndex index) = f
+ let private stripPosition (f : Task) : Async = async {
+ let! (PositionIndex index) = f |> Async.AwaitTaskCorrect
 return index }
- let private dropPosition (f : Async array>) : Async array> = async {
- let! _, xs = f
+ let private dropPosition (f : Task array>) : Async array> = async {
+ let! _, xs = f |> Async.AwaitTaskCorrect
 return xs }

 /// Returns an async sequence of events in the stream starting at the specified sequence number,
 /// reading in batches of the specified size.
 /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest
 /// sequence number in the stream.
- let getAll (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) : AsyncSeq array> =
- ctx.Walk(ctx.StreamId streamName, batchSize, minIndex = index)
+ let getAll (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) ct : IAsyncEnumerable array> =
+ ctx.Walk(ctx.StreamId streamName, batchSize, ct, minIndex = index)
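 // (Editor's illustration, not part of the patch.) getAll now hands back IAsyncEnumerable batches,
 // which a consumer can drain or fold with FSharp.Control.TaskSeq, e.g.:
 //
 //     let countEvents (ctx : EventsContext) streamName ct = task {
 //         let! batches = Events.getAll ctx streamName 0L 100 ct |> TaskSeq.toArrayAsync
 //         return batches |> Array.sumBy Array.length } // total events across all batches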
 /// Returns an async array of events in the stream starting at the specified sequence number,
 /// number of events to read is specified by batchSize
 /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest
 /// sequence number in the stream.
- let get (ctx : EventsContext) (streamName : string) (index : int64) (maxCount : int) : Async array> =
- ctx.Read(ctx.StreamId streamName, ?minIndex = (if index = 0 then None else Some index), maxCount = maxCount) |> dropPosition
+ let get (ctx : EventsContext) (streamName : string) (index : int64) (maxCount : int) : Async array> = async {
+ let! ct = Async.CancellationToken
+ return! ctx.Read(ctx.StreamId streamName, ct, ?minIndex = (if index = 0 then None else Some index), maxCount = maxCount) |> dropPosition }

#if APPEND_SUPPORT
 /// Appends a batch of events to a stream at the specified expected sequence number.
@@ -1517,23 +1526,26 @@ module Events =
 /// Due to the need to preserve ordering of data in the stream, only complete Batches will be removed.
 /// If the index is within the Tip, events are removed via an etag-checked update. Does not alter the unfolds held in the Tip, or remove the Tip itself.
 /// Returns count of events deleted this time, events that could not be deleted due to partial batches, and the stream's lowest remaining sequence number.
- let pruneUntil (ctx : EventsContext) (streamName : string) (index : int64) : Async =
- ctx.Prune(ctx.StreamId streamName, index)
+ let pruneUntil (ctx : EventsContext) (streamName : string) (index : int64) : Async = async {
+ let! ct = Async.CancellationToken
+ return! ctx.Prune(ctx.StreamId streamName, index, ct) |> Async.AwaitTaskCorrect }

 /// Returns an async sequence of events in the stream backwards starting from the specified sequence number,
 /// reading in batches of the specified size.
 /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest
 /// sequence number in the stream.
- let getAllBackwards (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) : AsyncSeq array> =
- ctx.Walk(ctx.StreamId streamName, batchSize, maxIndex = index, direction = Direction.Backward)
+ let getAllBackwards (ctx : EventsContext) (streamName : string) (index : int64) (batchSize : int) ct : IAsyncEnumerable array> =
+ ctx.Walk(ctx.StreamId streamName, batchSize, ct, maxIndex = index, direction = Direction.Backward)

 /// Returns an async array of events in the stream backwards starting from the specified sequence number,
 /// number of events to read is specified by batchSize
 /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest
 /// sequence number in the stream.
- let getBackwards (ctx : EventsContext) (streamName : string) (index : int64) (maxCount : int) : Async array> =
- ctx.Read(ctx.StreamId streamName, ?maxIndex = (match index with int64.MaxValue -> None | i -> Some (i + 1L)), maxCount = maxCount, direction = Direction.Backward) |> dropPosition
+ let getBackwards (ctx : EventsContext) (streamName : string) (index : int64) (maxCount : int) : Async array> = async {
+ let! ct = Async.CancellationToken
+ return! ctx.Read(ctx.StreamId streamName, ct, ?maxIndex = (match index with int64.MaxValue -> None | i -> Some (i + 1L)), maxCount = maxCount, direction = Direction.Backward) |> dropPosition }

 /// Obtains the `index` from the current write Position
- let getNextIndex (ctx : EventsContext) (streamName : string) : Async =
- ctx.Sync(ctx.StreamId streamName) |> stripPosition
+ let getNextIndex (ctx : EventsContext) (streamName : string) : Async = async {
+ let! ct = Async.CancellationToken
+ return!
ctx.Sync(ctx.StreamId streamName, ct) |> stripPosition } diff --git a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj index 834c1445b..c74a851f6 100644 --- a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj +++ b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj @@ -13,7 +13,7 @@ - + contentFiles diff --git a/src/Equinox.EventStore/Equinox.EventStore.fsproj b/src/Equinox.EventStore/Equinox.EventStore.fsproj index d68d17719..7103ab22a 100644 --- a/src/Equinox.EventStore/Equinox.EventStore.fsproj +++ b/src/Equinox.EventStore/Equinox.EventStore.fsproj @@ -18,11 +18,11 @@ - + - + diff --git a/src/Equinox.EventStore/EventStore.fs b/src/Equinox.EventStore/EventStore.fs index a4e5ae4d9..bad53fe8c 100755 --- a/src/Equinox.EventStore/EventStore.fs +++ b/src/Equinox.EventStore/EventStore.fs @@ -4,6 +4,9 @@ open Equinox.Core open EventStore.ClientAPI open Serilog // NB must shadow EventStore.ClientAPI.ILogger open System +open System.Collections.Generic +open System.Threading +open System.Threading.Tasks type EventBody = ReadOnlyMemory @@ -48,7 +51,7 @@ module Log = if e.IsJson then yield System.Collections.Generic.KeyValuePair<_, _>(e.EventType, System.Text.Encoding.UTF8.GetString e.Data) }) - let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> Async<'t>) log : Async<'t> = + let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> Task<'t>) log : Task<'t> = match retryPolicy with | None -> f log | Some retryPolicy -> @@ -130,8 +133,8 @@ type EsSyncResult = Written of (*EventStore.ClientAPI.*) WriteResult | Conflict module private Write = /// Yields `EsSyncResult.Written` or `EsSyncResult.Conflict` to signify WrongExpectedVersion let private writeEventsAsync (log : ILogger) (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) - : Async = async { - try let! wr = conn.AppendToStreamAsync(streamName, version, events) |> Async.AwaitTaskCorrect + : Task = task { + try let! wr = conn.AppendToStreamAsync(streamName, version, events) return EsSyncResult.Written wr with :? EventStore.ClientAPI.Exceptions.WrongExpectedVersionException as ex -> log.Information(ex, "Ges TrySync WrongExpectedVersionException writing {EventTypes}, actual {ActualVersion}", @@ -143,12 +146,12 @@ module private Write = events |> Array.sumBy eventDataLen let private writeEventsLogged (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) - : Async = async { + : Task = task { let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propEventData "Json" events let bytes, count = eventDataBytes events, events.Length let log = log |> Log.prop "bytes" bytes let writeLog = log |> Log.prop "stream" streamName |> Log.prop "expectedVersion" version |> Log.prop "count" count - let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.Time + let! 
t, result = (fun _ct -> writeEventsAsync writeLog conn streamName version events) |> Stopwatch.time CancellationToken.None
         let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count}
         let resultLog, evt =
             match result, reqMetric with
@@ -161,25 +164,23 @@ module private Write =
         return result }

     let writeEvents (log : ILogger) retryPolicy (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[])
-        : Async<EsSyncResult> =
-        let call = writeEventsLogged conn streamName version events
+        : Task<EsSyncResult> =
+        let call log = writeEventsLogged conn streamName version events log
         Log.withLoggedRetries retryPolicy "writeAttempt" call log

 module private Read =
     open FSharp.Control

     let private readSliceAsync (conn : IEventStoreConnection) (streamName : string) (direction : Direction) (batchSize : int) (startPos : int64)
-        : Async<StreamEventsSlice> = async {
-        let call =
-            match direction with
-            | Direction.Forward -> conn.ReadStreamEventsForwardAsync(streamName, startPos, batchSize, resolveLinkTos = false)
-            | Direction.Backward -> conn.ReadStreamEventsBackwardAsync(streamName, startPos, batchSize, resolveLinkTos = false)
-        return! call |> Async.AwaitTaskCorrect }
+        : Task<StreamEventsSlice> =
+        match direction with
+        | Direction.Forward -> conn.ReadStreamEventsForwardAsync(streamName, startPos, batchSize, resolveLinkTos = false)
+        | Direction.Backward -> conn.ReadStreamEventsBackwardAsync(streamName, startPos, batchSize, resolveLinkTos = false)

     let (|ResolvedEventLen|) (x : ResolvedEvent) = match x.Event.Data, x.Event.Metadata with Log.BlobLen bytes, Log.BlobLen metaBytes -> bytes + metaBytes

-    let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) : Async<StreamEventsSlice> = async {
-        let! t, slice = readSliceAsync conn streamName direction batchSize startPos |> Stopwatch.Time
+    let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) : Task<StreamEventsSlice> = task {
+        let! t, slice = (fun _ct -> readSliceAsync conn streamName direction batchSize startPos) |> Stopwatch.time CancellationToken.None
         let bytes, count = slice.Events |> Array.sumBy (|ResolvedEventLen|), slice.Events.Length
         let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count}
         let evt = Log.Slice (direction, reqMetric)
@@ -188,10 +189,10 @@ module private Read =
             "Read", count, slice.LastEventNumber)
         return slice }

-    let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> Async<StreamEventsSlice>)
+    let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> Task<StreamEventsSlice>)
                 (maxPermittedBatchReads : int option) (startPosition : int64)
-        : AsyncSeq<int64 option * ResolvedEvent[]> =
-        let rec loop batchCount pos : AsyncSeq<int64 option * ResolvedEvent[]> = asyncSeq {
+        : IAsyncEnumerable<int64 option * ResolvedEvent[]> =
+        let rec loop batchCount pos : IAsyncEnumerable<int64 option * ResolvedEvent[]> = taskSeq {
             match maxPermittedBatchReads with
             | Some mpbr when batchCount >= mpbr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded"
             | _ -> ()
@@ -221,24 +222,22 @@ module private Read =
             "Ges{action:l} stream={stream} count={count}/{batches} version={version}", action, streamName, count, batches, version)

-    let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition
-        : Async<int64 * ResolvedEvent[]> = async {
-        let mergeBatches (batches : AsyncSeq<int64 option * ResolvedEvent[]>) = async {
+    let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition : Task<int64 * ResolvedEvent[]> = task {
+        let mergeBatches (batches : IAsyncEnumerable<int64 option * ResolvedEvent[]>) = task {
             let mutable versionFromStream = None
             let! 
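// TaskSeq.collectSeq flattens each batch's event array into one logical stream and
// TaskSeq.toArrayAsync materializes it, replacing the previous AsyncSeq
// map/concatSeq/toArrayAsync three-step. Reduced to its essence (a sketch; the real
// code additionally harvests the reported version by mutation):
//     let flatten (batches : IAsyncEnumerable<int64 option * 'e[]>) : Task<'e[]> =
//         batches |> TaskSeq.collectSeq snd |> TaskSeq.toArrayAsync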
(events : ResolvedEvent[]) = batches - |> AsyncSeq.map (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) - |> AsyncSeq.concatSeq - |> AsyncSeq.toArrayAsync + |> TaskSeq.collectSeq (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) + |> TaskSeq.toArrayAsync let version = match versionFromStream with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, events } - let call pos = loggedReadSlice conn streamName Direction.Forward batchSize pos + let call pos log = loggedReadSlice conn streamName Direction.Forward batchSize pos log let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName - let batches : AsyncSeq = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition - let! t, (version, events) = mergeBatches batches |> Stopwatch.Time + let batches : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition + let! t, (version, events) = Stopwatch.time CancellationToken.None <| fun _ct -> mergeBatches batches log |> logBatchRead direction streamName t events batchSize version return version, events } @@ -247,19 +246,18 @@ module private Read = Array.fold acc (0, 0) let loadBackwardsUntilCompactionOrStart (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName (tryDecode, isOrigin) - : Async = async { - let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : AsyncSeq) - : Async = async { + : Task = task { + let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : IAsyncEnumerable) + : Task = task { let versionFromStream, lastBatch = ref None, ref None let! tempBackward = batchesBackward - |> AsyncSeq.map (fun batch -> + |> TaskSeq.collectSeq (fun batch -> match batch with | None, events -> lastBatch.Value <- Some events; events | Some _ as reportedVersion, events -> versionFromStream.Value <- reportedVersion; lastBatch.Value <- Some events; events |> Array.map (fun e -> struct (e, tryDecode e))) - |> AsyncSeq.concatSeq - |> AsyncSeq.takeWhileInclusive (function + |> TaskSeq.takeWhileInclusive (function | x, ValueSome e when isOrigin e -> match lastBatch.Value with | None -> log.Information("GesStop stream={stream} at={eventNumber}", streamName, x.Event.EventNumber) @@ -268,7 +266,7 @@ module private Read = log.Information("GesStop stream={stream} at={eventNumber} used={used} residual={residual}", streamName, x.Event.EventNumber, used, residual) false | _ -> true) // continue the search - |> AsyncSeq.toArrayAsync + |> TaskSeq.toArrayAsync let eventsForward = Array.Reverse(tempBackward); tempBackward // sic - relatively cheap, in-place reverse of something we own let version = match versionFromStream.Value with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, eventsForward } @@ -279,8 +277,8 @@ module private Read = let startPosition = int64 StreamPosition.End let direction = Direction.Backward let readlog = log |> Log.prop "direction" direction - let batchesBackward : AsyncSeq = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition - let! 
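// TaskSeq.takeWhileInclusive also yields the first element that fails the predicate -
// which is what lets the backwards scan below keep the origin/compaction event itself
// before terminating. E.g. (illustrative sketch):
//     taskSeq { 1; 2; 3; 4 } |> TaskSeq.takeWhileInclusive (fun x -> x < 3)
//     // enumerates as 1; 2; 3 - the first failing element is included, then it stops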
t, (version, events) = mergeFromCompactionPointOrStartFromBackwardsStream log batchesBackward |> Stopwatch.Time + let batchesBackward _ct : IAsyncEnumerable = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition + let! t, (version, events) = (batchesBackward >> mergeFromCompactionPointOrStartFromBackwardsStream log) |> Stopwatch.time CancellationToken.None log |> logBatchRead direction streamName t (Array.map ValueTuple.fst events) batchSize version return version, events } @@ -374,7 +372,7 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp member val BatchOptions = batchOptions member _.TokenEmpty = Token.ofUncompactedVersion batchOptions.BatchSize -1L - member _.LoadBatched streamName requireLeader log (tryDecode, isCompactionEventType) : Async = async { + member _.LoadBatched(streamName, requireLeader, log, (tryDecode, isCompactionEventType)) : Task = task { let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L match tryIsResolvedEventEventType isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events @@ -382,14 +380,14 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp match events |> Array.tryFindBack isCompactionEvent with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member _.LoadBackwardsStoppingAtCompactionEvent streamName requireLeader log (tryDecode, isOrigin) : Async = async { + member _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, (tryDecode, isOrigin)) : Task = task { let! version, events = Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) match Array.tryHead events |> Option.filter (function _, ValueSome e -> isOrigin e | _ -> false) with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events | Some (resolvedEvent, _) -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events } - member _.Reload requireLeader streamName log (Token.Unpack token as streamToken) (tryDecode, isCompactionEventType) - : Async = async { + member _.Reload(requireLeader, streamName, log, (Token.Unpack token as streamToken), (tryDecode, isCompactionEventType)) + : Task = task { let streamPosition = token.streamVersion + 1L let! 
version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition match isCompactionEventType with @@ -399,7 +397,7 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp | None -> return Token.ofPreviousTokenAndEventsLength streamToken events.Length batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member _.TrySync log streamName (Token.Unpack token as streamToken) (events, encodedEvents : EventData array) isCompactionEventType : Async = async { + member _.TrySync(log, streamName, (Token.Unpack token as streamToken), (events, encodedEvents : EventData array), isCompactionEventType) : Task = task { let streamVersion = token.streamVersion match! Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName streamVersion encodedEvents with | EsSyncResult.Conflict actualVersion -> @@ -416,7 +414,7 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp Token.ofPreviousStreamVersionAndCompactionEventDataIndex streamToken compactionEventIndex encodedEvents.Length batchOptions.BatchSize version' return GatewaySyncResult.Written token } // Used by Propulsion.EventStore.EventStoreSink - member _.Sync(log, streamName, streamVersion, events : FsCodec.IEventData[]) : Async = async { + member _.Sync(log, streamName, streamVersion, events : FsCodec.IEventData[]) : Task = task { let encodedEvents : EventData[] = events |> Array.map UnionEncoderAdapters.eventDataOfEncodedEvent match! Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName streamVersion encodedEvents with | EsSyncResult.Conflict actualVersion -> @@ -456,24 +454,22 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod | None | Some AccessStrategy.LatestKnownEvent -> fun _ -> true | Some (AccessStrategy.RollingSnapshots (isValid, _)) -> isValid let loadAlgorithm streamName requireLeader log = - let batched = context.LoadBatched streamName requireLeader log (tryDecode, None) - let compacted = context.LoadBackwardsStoppingAtCompactionEvent streamName requireLeader log (tryDecode, isOrigin) match access with - | None -> batched + | None -> context.LoadBatched(streamName, requireLeader, log, (tryDecode, None)) | Some AccessStrategy.LatestKnownEvent - | Some (AccessStrategy.RollingSnapshots _) -> compacted - let load (fold : 'state -> 'event seq -> 'state) initial f = async { + | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, (tryDecode, isOrigin)) + let load (fold : 'state -> 'event seq -> 'state) initial f = task { let! 
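// `load` pairs the token from whichever loader was selected with a fold over the
// decoded events to produce the state. With a toy counter aggregate (illustrative only):
//     type Event = Incremented | Decremented
//     let fold state es = (state, es) ||> Seq.fold (fun s -> function Incremented -> s + 1 | Decremented -> s - 1)
//     // load fold 0 (context.LoadBatched(...)) yields struct (token, net count)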
token, events = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger) : Async = + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger) : Task = load fold initial (loadAlgorithm streamName requireLeader log) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger) : Async = - load fold state (context.Reload requireLeader streamName log token (tryDecode, compactionPredicate)) + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger) : Task = + load fold state (context.Reload(requireLeader, streamName, log, token, (tryDecode, compactionPredicate))) member _.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context) : Async> = async { + streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context) : Task> = task { let encode e = codec.Encode(ctx, e) let events = match access with @@ -482,21 +478,20 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod let cc = CompactionContext(Array.length events, token.batchCapacityLimit.Value) if cc.IsCompactionDue then Array.append events (fold state events |> compact |> Array.singleton) else events let encodedEvents : EventData[] = events |> Array.map (encode >> UnionEncoderAdapters.eventDataOfEncodedEvent) - match! context.TrySync log streamName streamToken (events, encodedEvents) compactionPredicate with + match! context.TrySync(log, streamName, streamToken, (events, encodedEvents), compactionPredicate) with | GatewaySyncResult.ConflictUnknown _ -> - return SyncResult.Conflict (fun ct -> load fold state (context.Reload true streamName log streamToken (tryDecode, compactionPredicate)) |> Async.startAsTask ct) + return SyncResult.Conflict (fun ct -> load fold state (context.Reload(true, streamName, log, streamToken, (tryDecode, compactionPredicate)))) | GatewaySyncResult.Written token' -> return SyncResult.Written (token', fold state (Seq.ofArray events)) } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log streamName requireLeader = category.Load(fold, initial, streamName, requireLeader, log) interface ICategory<'event, 'state, 'context> with - member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = + member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, _ct) = match readCache with - | None -> Async.startAsTask ct (batched log streamName requireLeader) + | None -> category.Load(fold, initial, streamName, requireLeader, log) | Some (cache : ICache, prefix : string) -> task { match! cache.TryGet(prefix + streamName) with - | ValueNone -> return! batched log streamName requireLeader + | ValueNone -> return! category.Load(fold, initial, streamName, requireLeader, log) | ValueSome tokenAndState when allowStale -> return tokenAndState | ValueSome (token, state) -> return! 
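// Read-through cache decision just applied: no cache -> full load; cached entry while
// the caller tolerates staleness -> return it as-is; cached entry otherwise -> reload
// forward from the cached token so only the delta is fetched. Standalone shape (sketch;
// names mirror the surrounding code):
//     match! cache.TryGet(prefix + streamName) with
//     | ValueNone -> fresh ()
//     | ValueSome tokenAndState when allowStale -> return tokenAndState
//     | ValueSome (token, state) -> reloadFrom (token, state)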
category.Reload(fold, state, streamName, requireLeader, token, log) } member _.TrySync(log, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, initialState, events, _ct) = task { diff --git a/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj b/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj index 16e14e055..e15a1b25c 100644 --- a/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj +++ b/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj @@ -18,11 +18,11 @@ - + - + diff --git a/src/Equinox.EventStoreDb/EventStoreDb.fs b/src/Equinox.EventStoreDb/EventStoreDb.fs index a6ae58b1f..3a710c5d6 100755 --- a/src/Equinox.EventStoreDb/EventStoreDb.fs +++ b/src/Equinox.EventStoreDb/EventStoreDb.fs @@ -4,6 +4,8 @@ open Equinox.Core open EventStore.Client open Serilog open System +open System.Threading +open System.Threading.Tasks type EventBody = ReadOnlyMemory @@ -47,9 +49,9 @@ module Log = if e.ContentType = "application/json" then yield let d = e.Data in System.Collections.Generic.KeyValuePair<_,_>(e.EventType, System.Text.Encoding.UTF8.GetString d.Span) }) - let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> Async<'t>) log : Async<'t> = + let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> CancellationToken -> Task<'t>) log ct : Task<'t> = match retryPolicy with - | None -> f log + | None -> f log ct | Some retryPolicy -> let withLoggingContextWrapping count = let log = if count = 1 then log else log |> prop contextLabel count @@ -75,8 +77,8 @@ module Log = { mutable count : int64; mutable ms : int64 } static member Create() = { count = 0L; ms = 0L } member x.Ingest(ms) = - System.Threading.Interlocked.Increment(&x.count) |> ignore - System.Threading.Interlocked.Add(&x.ms, ms) |> ignore + Interlocked.Increment(&x.count) |> ignore + Interlocked.Add(&x.ms, ms) |> ignore type LogSink() = static let epoch = System.Diagnostics.Stopwatch.StartNew() @@ -126,10 +128,8 @@ type EsSyncResult = Written of ConditionalWriteResult | Conflict of actualVersio module private Write = - let private writeEventsAsync (log : ILogger) (conn : EventStoreClient) (streamName : string) version (events : EventData[]) - : Async = async { - let! ct = Async.CancellationToken - let! wr = conn.ConditionalAppendToStreamAsync(streamName, StreamRevision.FromInt64 version, events, cancellationToken = ct) |> Async.AwaitTaskCorrect + let private writeEventsAsync (log : ILogger) (conn : EventStoreClient) (streamName : string) version (events : EventData[]) ct : Task = task { + let! 
wr = conn.ConditionalAppendToStreamAsync(streamName, StreamRevision.FromInt64 version, events, cancellationToken = ct) if wr.Status = ConditionalWriteStatus.VersionMismatch then log.Information("Esdb TrySync VersionMismatch writing {EventTypes}, actual {ActualVersion}", [| for x in events -> x.Type |], wr.NextExpectedVersion) @@ -138,17 +138,17 @@ module private Write = elif wr.Status = ConditionalWriteStatus.Succeeded then return EsSyncResult.Written wr else return failwithf "Unexpected write response code %O" wr.Status } - let eventDataBytes events = + let private eventDataBytes events = let eventDataLen (x : EventData) = match x.Data, x.Metadata with Log.BlobLen bytes, Log.BlobLen metaBytes -> bytes + metaBytes events |> Array.sumBy eventDataLen - let private writeEventsLogged (conn : EventStoreClient) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) - : Async = async { + let private writeEventsLogged (conn : EventStoreClient) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) ct + : Task = task { let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propEventData "Json" events let bytes, count = eventDataBytes events, events.Length let log = log |> Log.prop "bytes" bytes |> Log.prop "expectedVersion" version let writeLog = log |> Log.prop "stream" streamName |> Log.prop "count" count - let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.Time + let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.time ct let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count} let resultLog, evt = match result, reqMetric with @@ -160,16 +160,16 @@ module private Write = "Write", events.Length, match evt with Log.WriteConflict _ -> true | _ -> false) return result } - let writeEvents (log : ILogger) retryPolicy (conn : EventStoreClient) (streamName : string) (version : int64) (events : EventData[]) - : Async = + let writeEvents (log : ILogger) retryPolicy (conn : EventStoreClient) (streamName : string) (version : int64) (events : EventData[]) ct + : Task = let call = writeEventsLogged conn streamName version events - Log.withLoggedRetries retryPolicy "writeAttempt" call log + Log.withLoggedRetries retryPolicy "writeAttempt" call log ct module private Read = open FSharp.Control let resolvedEventBytes (x : ResolvedEvent) = let Log.BlobLen bytes, Log.BlobLen metaBytes = x.Event.Data, x.Event.Metadata in bytes + metaBytes let resolvedEventsBytes events = events |> Array.sumBy resolvedEventBytes - let logBatchRead direction streamName t events batchSize version (log : ILogger) = + let private logBatchRead direction streamName t events batchSize version (log : ILogger) = let bytes, count = resolvedEventsBytes events, events.Length let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count} let batches = match batchSize with Some batchSize -> (events.Length - 1) / batchSize + 1 | None -> -1 @@ -179,44 +179,40 @@ module private Read = (log |> Log.prop "bytes" bytes |> Log.event evt).Information( "Esdb{action:l} stream={stream} count={count}/{batches} version={version}", action, streamName, count, batches, version) - let loadBackwardsUntilOrigin (log : ILogger) (conn : EventStoreClient) batchSize streamName (tryDecode, isOrigin) - : Async = async { - let! 
ct = Async.CancellationToken
+    let private loadBackwardsUntilOrigin (log : ILogger) (conn : EventStoreClient) batchSize streamName (tryDecode, isOrigin) ct
+        : Task<int64 * struct (ResolvedEvent * 'event voption)[]> = task {
         let res = conn.ReadStreamAsync(Direction.Backwards, streamName, StreamPosition.End, int64 batchSize, resolveLinkTos = false, cancellationToken = ct)
         try let! events =
-                AsyncSeq.ofAsyncEnum res
-                |> AsyncSeq.map (fun x -> struct (x, tryDecode x))
-                |> AsyncSeq.takeWhileInclusive (function
+                res
+                |> TaskSeq.map (fun x -> struct (x, tryDecode x))
+                |> TaskSeq.takeWhileInclusive (function
                     | x, ValueSome e when isOrigin e ->
                         log.Information("EsdbStop stream={stream} at={eventNumber}", streamName, let en = x.Event.EventNumber in en.ToInt64())
                         false
                     | _ -> true)
-                |> AsyncSeq.toArrayAsync
+                |> TaskSeq.toArrayAsync
             let v = match Seq.tryHead events with Some (r, _) -> let en = r.Event.EventNumber in en.ToInt64() | None -> -1
             Array.Reverse events
             return v, events
         with :? AggregateException as e when (e.InnerExceptions.Count = 1 && e.InnerExceptions[0] :? StreamNotFoundException) -> return -1L, [||] }
-    let loadBackwards (log : ILogger) (conn : EventStoreClient) batchSize streamName (tryDecode, isOrigin)
-        : Async<int64 * struct (ResolvedEvent * 'event voption)[]> = async {
-        let! t, (version, events) = loadBackwardsUntilOrigin log conn batchSize streamName (tryDecode, isOrigin) |> Stopwatch.Time
+    let loadBackwards (log : ILogger) (conn : EventStoreClient) batchSize streamName (tryDecode, isOrigin) ct
+        : Task<int64 * struct (ResolvedEvent * 'event voption)[]> = task {
+        let! t, (version, events) = loadBackwardsUntilOrigin log conn batchSize streamName (tryDecode, isOrigin) |> Stopwatch.time ct
         let log = log |> Log.prop "batchSize" batchSize |> Log.prop "stream" streamName
         log |> logBatchRead Direction.Backward streamName t (Array.map ValueTuple.fst events) (Some batchSize) version
         return version, events }
-    let loadForward (conn : EventStoreClient) streamName startPosition
-        : Async<int64 * ResolvedEvent[]> = async {
-        let! ct = Async.CancellationToken
+    let private loadForward (conn : EventStoreClient) streamName startPosition ct : Task<int64 * ResolvedEvent[]> = task {
         let res = conn.ReadStreamAsync(Direction.Forwards, streamName, startPosition, Int64.MaxValue, resolveLinkTos = false, cancellationToken = ct)
-        try let! events = AsyncSeq.ofAsyncEnum res |> AsyncSeq.toArrayAsync
+        try let! events = TaskSeq.toArrayAsync res
             let v = match Seq.tryLast events with Some r -> let en = r.Event.EventNumber in en.ToInt64() | None -> startPosition.ToInt64() - 1L
             return v, events
         with :? AggregateException as e when (e.InnerExceptions.Count = 1 && e.InnerExceptions[0] :? StreamNotFoundException) -> return -1L, [||] }
-    let loadForwards log conn streamName startPosition
-        : Async<int64 * ResolvedEvent[]> = async {
+    let loadForwards log conn streamName startPosition ct : Task<int64 * ResolvedEvent[]> = task {
         let direction = Direction.Forward
-        let! t, (version, events) = loadForward conn streamName startPosition |> Stopwatch.Time
+        let! t, (version, events) = loadForward conn streamName startPosition |> Stopwatch.time ct
         let log = log |> Log.prop "startPos" startPosition |> Log.prop "direction" direction |> Log.prop "stream" streamName
         log |> logBatchRead direction streamName t events None version
         return version, events }
@@ -312,33 +308,33 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp
     member val BatchOptions = batchOptions

     member internal _.TokenEmpty = Token.ofUncompactedVersion batchOptions.BatchSize -1L
-    member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType) : Async<StreamToken * 'event[]> = async {
-        let! 
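// Note the EventStore.Client read result above is itself an IAsyncEnumerable of
// ResolvedEvent, so the TaskSeq combinators consume it directly - the former
// AsyncSeq.ofAsyncEnum adaption step (and its extra Async hop) disappears.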
version, events = Read.loadForwards log (conn requireLeader) streamName StreamPosition.Start + member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType, ct) : Task = task { + let! version, events = Read.loadForwards log (conn requireLeader) streamName StreamPosition.Start ct match tryIsResolvedEventEventType isCompactionEventType with - | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events + | None -> return struct (Token.ofNonCompacting version, Array.chooseV tryDecode events) | Some isCompactionEvent -> match events |> Array.tryFindBack isCompactionEvent with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, limit, tryDecode, isOrigin) : Async = async { - let! version, events = Read.loadBackwards log (conn requireLeader) (defaultArg limit Int32.MaxValue) streamName (tryDecode, isOrigin) + member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, limit, tryDecode, isOrigin, ct) : Task = task { + let! version, events = Read.loadBackwards log (conn requireLeader) (defaultArg limit Int32.MaxValue) streamName (tryDecode, isOrigin) ct match Array.tryHead events |> Option.filter (function _, ValueSome e -> isOrigin e | _ -> false) with - | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events + | None -> return struct (Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events) | Some (resolvedEvent, _) -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events } - member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType) - : Async = async { + member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType, ct) + : Task = task { let streamPosition = StreamPosition.FromInt64(token.streamVersion + 1L) - let! version, events = Read.loadForwards log (conn requireLeader) streamName streamPosition + let! version, events = Read.loadForwards log (conn requireLeader) streamName streamPosition ct match isCompactionEventType with - | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events + | None -> return struct (Token.ofNonCompacting version, Array.chooseV tryDecode events) | Some isCompactionEvent -> match events |> Array.tryFindBack (fun re -> match tryDecode re with ValueSome e -> isCompactionEvent e | _ -> false) with | None -> return Token.ofPreviousTokenAndEventsLength streamToken events.Length batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.TrySync(log, streamName, streamToken, events, encodedEvents : EventData array, isCompactionEventType): Async = async { + member internal _.TrySync(log, streamName, streamToken, events, encodedEvents : EventData array, isCompactionEventType, ct): Task = task { let streamVersion = let (Token.Unpack token) = streamToken in token.streamVersion - let! 
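// Only the first `return` needs the explicit `struct`; the task's inferred result type
// then makes the remaining branches' tuple expressions struct tuples too. Toy shape:
//     let pick flag : Task<struct (int * string)> = task {
//         if flag then return struct (1, "one")
//         else return 2, "two" } // inferred as a struct tuple from the annotation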
wr = Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName streamVersion encodedEvents + let! wr = Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName streamVersion encodedEvents ct match wr with | EsSyncResult.Conflict actualVersion -> return GatewaySyncResult.ConflictUnknown (Token.ofNonCompacting actualVersion) @@ -354,9 +350,9 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp Token.ofPreviousStreamVersionAndCompactionEventDataIndex streamToken compactionEventIndex encodedEvents.Length batchOptions.BatchSize version' return GatewaySyncResult.Written token } // Used by Propulsion.EventStoreDb.EventStoreSink - member _.Sync(log, streamName, streamVersion, events : FsCodec.IEventData[]) : Async = async { + member _.Sync(log, streamName, streamVersion, events : FsCodec.IEventData[], ct) : Task = task { let encodedEvents : EventData[] = events |> Array.map ClientCodec.eventData - match! Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName streamVersion encodedEvents with + match! Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName streamVersion encodedEvents ct with | EsSyncResult.Conflict actualVersion -> return GatewaySyncResult.ConflictUnknown (Token.ofNonCompacting actualVersion) | EsSyncResult.Written wr -> @@ -393,24 +389,24 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod match access with | None | Some AccessStrategy.LatestKnownEvent -> fun _ -> true | Some (AccessStrategy.RollingSnapshots (isValid, _)) -> isValid - let loadAlgorithm streamName requireLeader log = - let compacted limit = context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, limit, tryDecode, isOrigin) + let loadAlgorithm streamName requireLeader log ct = + let compacted limit = context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, limit, tryDecode, isOrigin, ct) match access with - | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None) + | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None, ct) | Some AccessStrategy.LatestKnownEvent -> compacted (Some 1) | Some (AccessStrategy.RollingSnapshots _) -> compacted None - let load (fold : 'state -> 'event seq -> 'state) initial f : Async = async { + let load (fold : 'state -> 'event seq -> 'state) initial f : Task = task { let! 
struct (token, events) = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger) = - load fold initial (loadAlgorithm streamName requireLeader log) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger) = - load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate)) + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger, ct) = + load fold initial (loadAlgorithm streamName requireLeader log ct) + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger, ct) = + load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate, ct)) member x.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context) : Async> = async { + streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) : Task> = task { let encode e = codec.Encode(ctx, e) let events = match access with @@ -419,26 +415,24 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod let cc = CompactionContext(Array.length events, token.batchCapacityLimit.Value) if cc.IsCompactionDue then Array.append events (fold state events |> compact |> Array.singleton) else events let encodedEvents : EventData[] = events |> Array.map (encode >> ClientCodec.eventData) - match! context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate) with + match! context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate, ct) with | GatewaySyncResult.ConflictUnknown _ -> - return SyncResult.Conflict (fun ct -> x.Reload(fold, state, streamName, (*requireLeader*)true, streamToken, log) |> Async.startAsTask ct) + return SyncResult.Conflict (fun ct -> x.Reload(fold, state, streamName, (*requireLeader*)true, streamToken, log, ct)) | GatewaySyncResult.Written token' -> return SyncResult.Written (token', fold state (Seq.ofArray events)) } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log streamName requireLeader = category.Load(fold, initial, streamName, requireLeader, log) interface ICategory<'event, 'state, 'context> with - member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, _ct) = task { + member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = task { match readCache with - | None -> return! batched log streamName requireLeader + | None -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) | Some (cache : ICache, prefix : string) -> match! cache.TryGet(prefix + streamName) with - | ValueNone -> return! batched log streamName requireLeader + | ValueNone -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) | ValueSome tokenAndState when allowStale -> return tokenAndState - | ValueSome (token, state) -> return! 
category.Reload(fold, state, streamName, requireLeader, token, log) } - - member _.TrySync(log, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, initialState, events, _ct) = task { - match! category.TrySync(log, fold, streamName, streamToken, initialState, events, context) with + | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log, ct) } + member _.TrySync(log, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, initialState, events, ct) = task { + match! category.TrySync(log, fold, streamName, streamToken, initialState, events, context, ct) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token', state') -> return SyncResult.Written (token', state') } diff --git a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj index ac4c64656..1a9afb350 100644 --- a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj +++ b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj @@ -16,7 +16,7 @@ - + diff --git a/src/Equinox.MessageDb/Equinox.MessageDb.fsproj b/src/Equinox.MessageDb/Equinox.MessageDb.fsproj index e50d57c34..0e950d41e 100644 --- a/src/Equinox.MessageDb/Equinox.MessageDb.fsproj +++ b/src/Equinox.MessageDb/Equinox.MessageDb.fsproj @@ -26,7 +26,7 @@ - + diff --git a/src/Equinox.MessageDb/MessageDb.fs b/src/Equinox.MessageDb/MessageDb.fs index 02e38f4e0..8ad1712b8 100644 --- a/src/Equinox.MessageDb/MessageDb.fs +++ b/src/Equinox.MessageDb/MessageDb.fs @@ -421,21 +421,18 @@ type private Category<'event, 'state, 'context>(context : MessageDbContext, code return SyncResult.Written (token', state') } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log categoryName streamId streamName requireLeader ct = category.Load(fold, initial, categoryName, streamId, streamName, requireLeader, log, ct) - interface ICategory<'event, 'state, 'context> with member _.Load(log, categoryName, streamId, streamName, allowStale, requireLeader, ct) = task { - let act = Activity.Current match readCache with - | None -> return! batched log categoryName streamId streamName requireLeader ct + | None -> return! category.Load(fold, initial, categoryName, streamId, streamName, requireLeader, log, ct) | Some (cache : ICache, prefix : string) -> let! cacheItem = cache.TryGet(prefix + streamName) + let act = Activity.Current if act <> null then act.AddCacheHit(match cacheItem with ValueNone -> false | _ -> true) |> ignore match cacheItem with - | ValueNone -> return! batched log categoryName streamId streamName requireLeader ct + | ValueNone -> return! category.Load(fold, initial, categoryName, streamId, streamName, requireLeader, log, ct) | ValueSome tokenAndState when allowStale -> return tokenAndState | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log, ct) } - member _.TrySync(log, categoryName, streamId, streamName, context, _init, token, originState, events, ct) = task { match! 
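// The cache-hit flag is recorded on the ambient Activity whether or not the cached
// value is used. AddCacheHit is presumed to be a small internal tracing extension,
// roughly equivalent to (an assumption, not the actual helper):
//     act.AddTag("eqx.cache_hit", hit)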
category.TrySync(log, fold, categoryName, streamId, streamName, token, originState, events, context, ct) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync diff --git a/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj b/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj index b49cacee1..cfb5c6fbd 100644 --- a/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj +++ b/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj @@ -12,7 +12,7 @@ - + diff --git a/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj b/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj index b26627763..a826bc03e 100644 --- a/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj +++ b/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj @@ -12,7 +12,7 @@ - + diff --git a/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj b/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj index 367281fcb..817c82e52 100644 --- a/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj +++ b/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj @@ -12,7 +12,7 @@ - + diff --git a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj index 7a5ef5b30..80b6f9ed5 100644 --- a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj +++ b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj @@ -18,10 +18,10 @@ - + - + diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index fa4f5c939..d14c3673e 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -2,9 +2,12 @@ open Equinox.Core open Serilog -open System open SqlStreamStore open SqlStreamStore.Streams +open System +open System.Collections.Generic +open System.Threading +open System.Threading.Tasks type EventBody = ReadOnlyMemory type EventData = NewStreamMessage @@ -47,12 +50,12 @@ module Log = let propResolvedEvents name (events : ResolvedEvent[]) (log : ILogger) = log |> propEvents name (seq { for x in events do - let data = x.GetJsonData() |> Async.AwaitTaskCorrect |> Async.RunSynchronously + let data = x.GetJsonData().Result yield System.Collections.Generic.KeyValuePair<_, _>(x.Type, data) }) - let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> Async<'t>) log : Async<'t> = + let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> CancellationToken -> Task<'t>) log ct : Task<'t> = match retryPolicy with - | None -> f log + | None -> f log ct | Some retryPolicy -> let withLoggingContextWrapping count = let log = if count = 1 then log else log |> prop contextLabel count @@ -78,8 +81,8 @@ module Log = { mutable count : int64; mutable ms : int64 } static member Create() = { count = 0L; ms = 0L } member x.Ingest(ms) = - System.Threading.Interlocked.Increment(&x.count) |> ignore - System.Threading.Interlocked.Add(&x.ms, ms) |> ignore + Interlocked.Increment(&x.count) |> ignore + Interlocked.Add(&x.ms, ms) |> ignore type LogSink() = static let epoch = System.Diagnostics.Stopwatch.StartNew() static member val Read = Counter.Create() with get, set @@ -129,9 +132,9 @@ type EsSyncResult = Written of AppendResult | ConflictUnknown module private Write = /// Yields `EsSyncResult.Written` or `EsSyncResult.Conflict` to signify 
WrongExpectedVersion - let private writeEventsAsync (log : ILogger) (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) - : Async = async { - try let! wr = conn.AppendToStream(StreamId streamName, (if version = -1L then ExpectedVersion.NoStream else int version), events) |> Async.AwaitTaskCorrect + let private writeEventsAsync (log : ILogger) (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) ct + : Task = task { + try let! wr = conn.AppendToStream(StreamId streamName, (if version = -1L then ExpectedVersion.NoStream else int version), events, ct) return EsSyncResult.Written wr with :? WrongExpectedVersionException as ex -> log.Information(ex, "SqlEs TrySync WrongExpectedVersionException writing {EventTypes}, expected {ExpectedVersion}", @@ -140,13 +143,13 @@ module private Write = let eventDataBytes events = let eventDataLen (x : NewStreamMessage) = match x.JsonData |> System.Text.Encoding.UTF8.GetBytes, x.JsonMetadata |> System.Text.Encoding.UTF8.GetBytes with Log.BlobLen bytes, Log.BlobLen metaBytes -> bytes + metaBytes events |> Array.sumBy eventDataLen - let private writeEventsLogged (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) - : Async = async { + let private writeEventsLogged (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) ct + : Task = task { let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propEventData "Json" events let bytes, count = eventDataBytes events, events.Length let log = log |> Log.prop "bytes" bytes let writeLog = log |> Log.prop "stream" streamName |> Log.prop "expectedVersion" version |> Log.prop "count" count - let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.Time + let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.time ct let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count} let resultLog, evt = match result, reqMetric with @@ -157,42 +160,40 @@ module private Write = (resultLog |> Log.event evt).Information("SqlEs{action:l} count={count} conflict={conflict}", "Write", events.Length, match evt with Log.WriteConflict _ -> true | _ -> false) return result } - let writeEvents (log : ILogger) retryPolicy (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) - : Async = + let writeEvents (log : ILogger) retryPolicy (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) ct + : Task = let call = writeEventsLogged conn streamName version events - Log.withLoggedRetries retryPolicy "writeAttempt" call log + Log.withLoggedRetries retryPolicy "writeAttempt" call log ct module private Read = open FSharp.Control - let private readSliceAsync (conn : IEventStoreConnection) (streamName : string) (direction : Direction) (batchSize : int) (startPos : int64) - : Async = async { - let call = - match direction with - | Direction.Forward -> conn.ReadStreamForwards(streamName, int startPos, batchSize) - | Direction.Backward -> conn.ReadStreamBackwards(streamName, int startPos, batchSize) - return! 
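// Sentinel mapping used in the append above: Equinox's -1L ("no stream yet") becomes
// SqlStreamStore's ExpectedVersion.NoStream; other versions pass through, narrowed to
// int since the SqlStreamStore API is int-based:
//     let toExpectedVersion (version : int64) = if version = -1L then ExpectedVersion.NoStream else int version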
call |> Async.AwaitTaskCorrect }
+    let private readSliceAsync (conn : IEventStoreConnection) (streamName : string) (direction : Direction) (batchSize : int) (startPos : int64) ct
+        : Task<ReadStreamPage> =
+        match direction with
+        | Direction.Forward -> conn.ReadStreamForwards(streamName, int startPos, batchSize, ct)
+        | Direction.Backward -> conn.ReadStreamBackwards(streamName, int startPos, batchSize, ct)

     let (|ResolvedEventLen|) (x : StreamMessage) =
         let data = x.GetJsonData() |> Async.AwaitTaskCorrect |> Async.RunSynchronously
         match data, x.JsonMetadata with Log.StrLen bytes, Log.StrLen metaBytes -> bytes + metaBytes

-    let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) : Async<ReadStreamPage> = async {
-        let! t, slice = readSliceAsync conn streamName direction batchSize startPos |> Stopwatch.Time
+    let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) ct : Task<ReadStreamPage> = task {
+        let! t, slice = readSliceAsync conn streamName direction batchSize startPos |> Stopwatch.time ct
         let bytes, count = slice.Messages |> Array.sumBy (|ResolvedEventLen|), slice.Messages.Length
-        let reqMetric : Log.Measurement ={ stream = streamName; interval = t; bytes = bytes; count = count}
+        let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count }
         let evt = Log.Slice (direction, reqMetric)
         let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propResolvedEvents "Json" slice.Messages
         (log |> Log.prop "startPos" startPos |> Log.prop "bytes" bytes |> Log.event evt).Information("SqlEs{action:l} count={count} version={version}",
             "Read", count, slice.LastStreamVersion)
         return slice }

-    let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> Async<ReadStreamPage>)
-                (maxPermittedBatchReads : int option) (startPosition : int64)
-        : AsyncSeq<int64 option * ResolvedEvent[]> =
-        let rec loop batchCount pos : AsyncSeq<int64 option * ResolvedEvent[]> = asyncSeq {
+    let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> CancellationToken -> Task<ReadStreamPage>)
+                (maxPermittedBatchReads : int option) (startPosition : int64) ct
+        : IAsyncEnumerable<int64 option * ResolvedEvent[]> =
+        let rec loop batchCount pos : IAsyncEnumerable<int64 option * ResolvedEvent[]> = taskSeq {
             match maxPermittedBatchReads with
             | Some mpbr when batchCount >= mpbr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded"
             | _ -> ()
             let batchLog = log |> Log.prop "batchIndex" batchCount
-            let! slice = readSlice pos batchLog
+            let! slice = readSlice pos batchLog ct
             match slice.Status with
             | PageReadStatus.StreamNotFound -> yield Some (int64 ExpectedVersion.EmptyStream), Array.empty // NB NoStream in ES version= -1
             | PageReadStatus.Success ->
@@ -212,42 +213,40 @@ module private Read =
             (log |> Log.prop "bytes" bytes |> Log.event evt).Information(
                 "SqlEs{action:l} stream={stream} count={count}/{batches} version={version}", action, streamName, count, batches, version)

-    let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition
-        : Async<int64 * ResolvedEvent[]> = async {
-        let mergeBatches (batches : AsyncSeq<int64 option * ResolvedEvent[]>) = async {
+    let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition ct
+        : Task<int64 * ResolvedEvent[]> = task {
+        let mergeBatches (batches : IAsyncEnumerable<int64 option * ResolvedEvent[]>) = task {
             let mutable versionFromStream = None
             let! 
(events : ResolvedEvent[]) = batches - |> AsyncSeq.map (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) - |> AsyncSeq.concatSeq - |> AsyncSeq.toArrayAsync + |> TaskSeq.collectSeq (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) + |> TaskSeq.toArrayAsync let version = match versionFromStream with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, events } let call pos = loggedReadSlice conn streamName Direction.Forward batchSize pos - let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) + let retryingLoggingReadSlice pos ct = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) ct let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName - let batches : AsyncSeq = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition - let! t, (version, events) = mergeBatches batches |> Stopwatch.Time + let batches ct : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition ct + let! t, (version, events) = (fun ct -> batches ct |> mergeBatches) |> Stopwatch.time ct log |> logBatchRead direction streamName t events batchSize version return version, events } let partitionPayloadFrom firstUsedEventNumber : ResolvedEvent[] -> int * int = let acc (tu, tr) (ResolvedEventLen bytes as y) = if y.Position < firstUsedEventNumber then tu, tr + bytes else tu + bytes, tr Array.fold acc (0, 0) - let loadBackwardsUntilCompactionOrStart (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName (tryDecode, isOrigin) - : Async = async { - let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : AsyncSeq) - : Async = async { + let loadBackwardsUntilCompactionOrStart (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName (tryDecode, isOrigin) ct + : Task = task { + let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : IAsyncEnumerable) + : Task = task { let versionFromStream, lastBatch = ref None, ref None let! 
tempBackward = batchesBackward - |> AsyncSeq.map (fun batch -> + |> TaskSeq.collectSeq (fun batch -> match batch with | None, events -> lastBatch.Value <- Some events; events | Some _ as reportedVersion, events -> versionFromStream.Value <- reportedVersion; lastBatch.Value <- Some events; events |> Array.map (fun e -> struct (e, tryDecode e))) - |> AsyncSeq.concatSeq - |> AsyncSeq.takeWhileInclusive (function + |> TaskSeq.takeWhileInclusive (function | x, ValueSome e when isOrigin e -> match lastBatch.Value with | None -> log.Information("SqlEsStop stream={stream} at={eventNumber}", streamName, x.Position) @@ -256,7 +255,7 @@ module private Read = log.Information("SqlEsStop stream={stream} at={eventNumber} used={used} residual={residual}", streamName, x.Position, used, residual) false | _ -> true) // continue the search - |> AsyncSeq.toArrayAsync + |> TaskSeq.toArrayAsync let eventsForward = Array.Reverse(tempBackward); tempBackward // sic - relatively cheap, in-place reverse of something we own let version = match versionFromStream.Value with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, eventsForward } @@ -266,8 +265,8 @@ module private Read = let startPosition = int64 Position.End let direction = Direction.Backward let readlog = log |> Log.prop "direction" direction - let batchesBackward : AsyncSeq = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition - let! t, (version, events) = mergeFromCompactionPointOrStartFromBackwardsStream log batchesBackward |> Stopwatch.Time + let batchesBackward ct : IAsyncEnumerable = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition ct + let! t, (version, events) = (fun ct -> batchesBackward ct |> mergeFromCompactionPointOrStartFromBackwardsStream log) |> Stopwatch.time ct log |> logBatchRead direction streamName t (Array.map ValueTuple.fst events) batchSize version return version, events } @@ -361,32 +360,32 @@ type SqlStreamStoreContext(connection : SqlStreamStoreConnection, batchOptions : member val BatchOptions = batchOptions member internal _.TokenEmpty = Token.ofUncompactedVersion batchOptions.BatchSize -1L - member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType) : Async = async { - let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L + member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType, ct) : Task = task { + let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L ct match tryIsResolvedEventEventType isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events | Some isCompactionEvent -> match events |> Array.tryFindBack isCompactionEvent with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin) : Async = async { + member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin, ct) : Task = task { let! 
version, events = - Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) + Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) ct match Array.tryHead events |> Option.filter (function _, ValueSome e -> isOrigin e | _ -> false) with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events | Some (resolvedEvent, _) -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events } - member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType) - : Async = async { + member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType, ct) + : Task = task { let streamPosition = token.streamVersion + 1L - let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition + let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition ct match isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events | Some isCompactionEvent -> match events |> Array.tryFindBack (fun re -> match tryDecode re with ValueSome e -> isCompactionEvent e | _ -> false) with | None -> return Token.ofPreviousTokenAndEventsLength streamToken events.Length batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.TrySync(log, streamName, (Token.Unpack pos as streamToken), events, encodedEvents : EventData array, isCompactionEventType) : Async = async { - match! Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName pos.streamVersion encodedEvents with + member internal _.TrySync(log, streamName, (Token.Unpack pos as streamToken), events, encodedEvents : EventData array, isCompactionEventType, ct) : Task = task { + match! 
Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName pos.streamVersion encodedEvents ct with | EsSyncResult.ConflictUnknown -> return GatewaySyncResult.ConflictUnknown | EsSyncResult.Written wr -> @@ -430,23 +429,25 @@ type private Category<'event, 'state, 'context>(context : SqlStreamStoreContext, match access with | None | Some AccessStrategy.LatestKnownEvent -> fun _ -> true | Some (AccessStrategy.RollingSnapshots (isValid, _)) -> isValid - let loadAlgorithm streamName requireLeader log = + let loadAlgorithm streamName requireLeader log ct = match access with - | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None) + | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None, ct) | Some AccessStrategy.LatestKnownEvent - | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin) - let load (fold : 'state -> 'event seq -> 'state) initial f = async { + | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin, ct) + let load (fold : 'state -> 'event seq -> 'state) initial f = task { let! token, events = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger) : Async = - load fold initial (loadAlgorithm streamName requireLeader log) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger) : Async = - load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate)) + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger, ct) + : Task = + load fold initial (loadAlgorithm streamName requireLeader log ct) + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger, ct) + : Task = + load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate, ct)) member _.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context) : Async> = async { + streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) : Task> = task { let encode e = codec.Encode(ctx, e) let events = match access with @@ -455,25 +456,24 @@ type private Category<'event, 'state, 'context>(context : SqlStreamStoreContext, let cc = CompactionContext(Array.length events, token.batchCapacityLimit.Value) if cc.IsCompactionDue then Array.append events (fold state events |> compact |> Array.singleton) else events let encodedEvents : EventData[] = events |> Array.map (encode >> UnionEncoderAdapters.eventDataOfEncodedEvent) - match! context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate) with + match! 
context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate, ct) with | GatewaySyncResult.ConflictUnknown -> - return SyncResult.Conflict (fun ct -> load fold state (context.Reload(streamName, (*requireLeader*)true, log, streamToken, tryDecode, compactionPredicate)) |> Async.startAsTask ct) + return SyncResult.Conflict (fun ct -> load fold state (context.Reload(streamName, (*requireLeader*)true, log, streamToken, tryDecode, compactionPredicate, ct))) | GatewaySyncResult.Written token' -> return SyncResult.Written (token', fold state (Seq.ofArray events)) } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log streamName requireLeader ct = category.Load(fold, initial, streamName, requireLeader, log) |> Async.startAsTask ct interface ICategory<'event, 'state, 'context> with member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = task { match readCache with - | None -> return! batched log streamName requireLeader ct + | None -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) | Some (cache : ICache, prefix : string) -> match! cache.TryGet(prefix + streamName) with - | ValueNone -> return! batched log streamName requireLeader ct + | ValueNone -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) | ValueSome tokenAndState when allowStale -> return tokenAndState - | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log) } - member _.TrySync(log, _categoryName, _streamId, streamName, context, _init, token, originState, events, _ct) = task { - match! category.TrySync(log, fold, streamName, token, originState, events, context) with + | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log, ct) } + member _.TrySync(log, _categoryName, _streamId, streamName, context, _init, token, originState, events, ct) = task { + match! category.TrySync(log, fold, streamName, token, originState, events, context, ct) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token', state') -> return SyncResult.Written (token', state') } diff --git a/src/Equinox/Equinox.fsproj b/src/Equinox/Equinox.fsproj index c4bdb211d..136d94412 100644 --- a/src/Equinox/Equinox.fsproj +++ b/src/Equinox/Equinox.fsproj @@ -14,7 +14,7 @@ - + contentFiles diff --git a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs index c92e18cef..bcafacd8d 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs @@ -1,5 +1,6 @@ module Equinox.CosmosStore.Integration.CosmosCoreIntegration +open Equinox.Core // TaskSeq extensions open Equinox.CosmosStore.Core open FsCodec open FSharp.Control @@ -151,12 +152,12 @@ type Tests(testOutputHelper) = else verifyRequestChargesMax 448 // 447.5 // 463.01 observed capture.Clear() - let! pos = ctx.Sync(stream,?position=None) + let! pos = ctx.Sync(stream, ?position = None) test <@ [EqxAct.Tip] = capture.ExternalCalls @> verifyRequestChargesMax 5 // 41 observed // for a 200, you'll pay a lot (we omitted to include the position that NonIdempotentAppend yielded) capture.Clear() - let! _pos = ctx.Sync(stream,pos) + let! 
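// --- Illustrative aside, not part of the patch -----------------------------------
// Folder.Load above is a three-way read policy. Generic sketch (signatures assumed):
// miss -> full load; hit while staleness is tolerated -> serve the cached pair as-is
// (the lookup already refreshed the TTL); hit otherwise -> revalidate from the token.
module private CacheReadSketch =
    open System.Threading.Tasks
    let read (tryCache : unit -> Task<'v voption>) (loadFull : unit -> Task<'v>)
             (revalidate : 'v -> Task<'v>) (allowStale : bool) : Task<'v> = task {
        match! tryCache () with
        | ValueNone -> return! loadFull ()
        | ValueSome v when allowStale -> return v
        | ValueSome v -> return! revalidate v }
// ----------------------------------------------------------------------------------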
_pos = ctx.Sync(stream, pos) test <@ [EqxAct.TipNotModified] = capture.ExternalCalls @> verifyRequestChargesMax 1 // for a 304 by definition - when an etag IfNotMatch is honored, you only pay one RU } @@ -238,7 +239,8 @@ type Tests(testOutputHelper) = let! expected = add6EventsIn2BatchesEx ctx streamName 4 - let! res = Events.getAll ctx streamName 0L 1 |> AsyncSeq.concatSeq |> AsyncSeq.takeWhileInclusive (fun _ -> false) |> AsyncSeq.toArrayAsync + let! seq = Events.getAll ctx streamName 0L 1 + let! res = seq |> TaskSeq.takeWhileInclusive (fun _ -> false) |> TaskSeq.collectSeq id |> TaskSeq.toArrayAsync |> Async.AwaitTask let expected = expected |> Array.take 1 verifyCorrectEvents 0L expected res @@ -294,11 +296,12 @@ type Tests(testOutputHelper) = let! expected = add6EventsIn2BatchesEx ctx streamName 4 + let! res = Events.getAllBackwards ctx streamName 10L 1 let! res = - Events.getAllBackwards ctx streamName 10L 1 - |> AsyncSeq.concatSeq - |> AsyncSeq.takeWhileInclusive (fun x -> x.Index <> 4L) - |> AsyncSeq.toArrayAsync + res + |> TaskSeq.collectSeq id + |> TaskSeq.takeWhileInclusive (fun x -> x.Index <> 4L) + |> TaskSeq.toArrayAsync |> Async.AwaitTask let expected = expected |> Array.skip 4 // omit index 0, 1 as we vote to finish at 2L verifyCorrectEventsBackward 5L expected res diff --git a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs index 91493ce14..298712a02 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs @@ -13,7 +13,7 @@ let private archiveTableName = tryRead "EQUINOX_DYNAMO_TABLE_ARCHIVE" |> Option. let discoverConnection () = match tryRead "EQUINOX_DYNAMO_CONNECTION" with - | None -> "dynamodb-local", "http://localhost:8000" + | None -> "dynamodb-local", "http://localhost:8000" // OR: change to "https://dynamodb.eu-west-1.amazonaws.com" to hit prod instance | Some connectionString -> "EQUINOX_DYNAMO_CONNECTION", connectionString let createClient (log : Serilog.ILogger) name serviceUrl = @@ -21,6 +21,7 @@ let createClient (log : Serilog.ILogger) name serviceUrl = let clientConfig = AmazonDynamoDBConfig(ServiceURL = serviceUrl) log.Information("DynamoStore {name} {endpoint}", name, serviceUrl) // Credentials are not validated if connecting to local instance so anything will do (this avoids it looking for profiles to be configured) + // OR: don't pass credentials to ctor to use keychain configured access let credentials = Amazon.Runtime.BasicAWSCredentials("A", "A") new AmazonDynamoDBClient(credentials, clientConfig) :> IAmazonDynamoDB diff --git a/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs index f67f00aa7..11fab2972 100644 --- a/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/DocumentStoreIntegration.fs @@ -98,12 +98,7 @@ type Tests(testOutputHelper) = let service = Cart.createServiceWithoutOptimization log context let expectedResponses n = let tipItem = 1 -#if STORE_DYNAMO // For Cosmos, we supply a full query and it notices it is at the end - for Dynamo, another query is required - let finalEmptyPage = 1 -#else - let finalEmptyPage = 0 -#endif - let expectedItems = tipItem + (if eventsInTip then n / 2 else n) + finalEmptyPage + let expectedItems = tipItem + (if eventsInTip then n / 2 else n) max 1 (int (ceil (float expectedItems / float queryMaxItems))) let cartId = % 
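// --- Illustrative aside, not part of the patch -----------------------------------
// Shape of the AsyncSeq -> TaskSeq conversion applied in the tests above, using only
// combinators the patch itself relies on (FSharp.Control.TaskSeq assumed referenced):
// flatten pages into events, stop inclusively at a sentinel, then materialize.
module private TaskSeqSketch =
    open FSharp.Control
    open System.Collections.Generic
    let eventsUpTo (sentinel : int) (pages : IAsyncEnumerable<int[]>) =
        pages
        |> TaskSeq.collectSeq id                               // page -> individual events
        |> TaskSeq.takeWhileInclusive (fun x -> x <> sentinel) // keep the terminating element
        |> TaskSeq.toArrayAsync
// ----------------------------------------------------------------------------------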
Guid.NewGuid() @@ -257,11 +252,7 @@ type Tests(testOutputHelper) = | Choice2Of2 e -> e.Message.StartsWith "Origin event not found; no Archive Container supplied" || e.Message.StartsWith "Origin event not found; no Archive Table supplied" | x -> failwithf "Unexpected %A" x @> -#if STORE_DYNAMO // Extra null query - test <@ [EqxAct.ResponseForward; EqxAct.ResponseForward; EqxAct.QueryForward] = capture.ExternalCalls @> -#else test <@ [EqxAct.ResponseForward; EqxAct.QueryForward] = capture.ExternalCalls @> -#endif verifyRequestChargesMax 3 // 2.99 // But not forgotten diff --git a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj index 6d293bcda..37dc7b9a2 100644 --- a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj +++ b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj @@ -7,6 +7,7 @@ + diff --git a/tools/Equinox.Tool/Equinox.Tool.fsproj b/tools/Equinox.Tool/Equinox.Tool.fsproj index 785fdc8ed..6d77238eb 100644 --- a/tools/Equinox.Tool/Equinox.Tool.fsproj +++ b/tools/Equinox.Tool/Equinox.Tool.fsproj @@ -32,7 +32,7 @@ - + diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index e708495c2..c826e1783 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -550,7 +550,7 @@ let main argv = Log.Logger <- createDomainLog verbose verboseConsole maybeSeq try let log = Log.Logger try match p.GetSubCommand() with - | Init a -> CosmosInit.containerAndOrDb log a |> Async.RunSynchronously + | Init a -> (CosmosInit.containerAndOrDb log a CancellationToken.None).Wait() | InitAws a -> DynamoInit.table log a |> Async.RunSynchronously | Config a -> SqlInit.databaseOrSchema log a |> Async.RunSynchronously | Dump a -> Dump.run (log, verboseConsole, maybeSeq) a diff --git a/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj b/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj index e2f52454e..ee66e8fe0 100644 --- a/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj +++ b/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj @@ -16,7 +16,7 @@ - + From 26059cfe7aba7547250142918bcd5d3fdde10881 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 13:15:58 +0000 Subject: [PATCH 03/11] Remove resumable state machines warning --- src/Equinox.CosmosStore/CosmosStore.fs | 22 +++++++++++----------- src/Equinox.DynamoStore/DynamoStore.fs | 24 ++++++++++++------------ 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 9305b25e5..444b852a8 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -931,14 +931,14 @@ module Prune = lwm <- Some x.i return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) } let hasRelevantItems (batches, _rc) = batches |> Array.exists isRelevant - Query.feedIteratorMapTi mapPage query - >> TaskSeq.takeWhile hasRelevantItems - >> TaskSeq.mapAsync handle - >> TaskSeq.toArrayAsync - |> Stopwatch.time ct - let mutable queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = 0., 0., 0., 0, 0, 0, 0 - let mutable lwm = None - for (qc, dc, tc), bLwm, (bCount, eDel, eDef) in outcomes do + let loadOutcomes ct = + Query.feedIteratorMapTi mapPage query ct + |> TaskSeq.takeWhile hasRelevantItems + |> 
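// --- Illustrative aside, not part of the patch -----------------------------------
// The expectedResponses arithmetic in the test above is ceiling division with a
// floor of one (the Tip read always costs at least one response):
module private PagingMathSketch =
    let expectedResponses totalItems pageSize =
        max 1 (int (ceil (float totalItems / float pageSize)))
    // expectedResponses 7 4 = 2; expectedResponses 0 4 = 1
// ----------------------------------------------------------------------------------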
TaskSeq.mapAsync handle
+            |> TaskSeq.toArrayAsync
+        loadOutcomes |> Stopwatch.time ct
+        let mutable lwm, queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = None, 0., 0., 0., 0, 0, 0, 0
+        let accumulate ((qc, dc, tc), bLwm, (bCount, eDel, eDef)) =
             lwm <- max lwm bLwm
             queryCharges <- queryCharges + qc
             delCharges <- delCharges + dc
@@ -947,13 +947,13 @@ module Prune =
             batches <- batches + bCount
             eventsDeleted <- eventsDeleted + eDel
             eventsDeferred <- eventsDeferred + eDef
+        outcomes |> Array.iter accumulate
         let reqMetric : Log.Measurement = { database = container.Database.Id; container = container.Id; stream = stream; interval = pt; bytes = eventsDeleted; count = batches; ru = queryCharges }
         let log = let evt = Log.Metric.Prune (responses, reqMetric) in log |> Log.event evt
         let lwm = lwm |> Option.defaultValue 0L // If we've seen no batches at all, then the write position is 0L
         log.Information("EqxCosmos {action:l} {events}/{batches} lwm={lwm} {ms:f1}ms queryRu={queryRu} deleteRu={deleteRu} trimRu={trimRu}",
-            "Prune", eventsDeleted, batches, lwm, pt.ElapsedMilliseconds, queryCharges, delCharges, trimCharges)
-        return eventsDeleted, eventsDeferred, lwm
-    }
+            "Prune", eventsDeleted, batches, lwm, pt.ElapsedMilliseconds, queryCharges, delCharges, trimCharges)
+        return eventsDeleted, eventsDeferred, lwm }
 
 type [] Token = { pos : Position }
 module Token =
diff --git a/src/Equinox.DynamoStore/DynamoStore.fs b/src/Equinox.DynamoStore/DynamoStore.fs
index aae7c168f..5cc731e6f 100644
--- a/src/Equinox.DynamoStore/DynamoStore.fs
+++ b/src/Equinox.DynamoStore/DynamoStore.fs
@@ -906,7 +906,7 @@ module internal Query =
 
 module internal Prune =
 
     let until (log : ILogger) (container : Container, stream : string) maxItems indexInclusive ct : Task = task {
-        let log = log |> Log.prop "stream" stream
         let deleteItem i count : Task = task {
            let! 
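// --- Illustrative aside, not part of the patch -----------------------------------
// `accumulate` above threads the counters through mutables to stay close to the
// original loop; the same reduction can equally be phrased as a fold (two of the
// eight counters shown, the rest follow the same pattern):
module private AccumulateSketch =
    let totals (outcomes : (float * int)[]) =
        ((0., 0), outcomes) ||> Array.fold (fun (ru, deleted) (qc, eDel) -> ru + qc, deleted + eDel)
// ----------------------------------------------------------------------------------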
t, rc = (fun ct -> container.DeleteItem(stream, i, ct)) |> Stopwatch.time ct let reqMetric = Log.metric container.TableName stream t -1 count rc @@ -967,15 +967,15 @@ module internal Prune = lwm <- Some x.index return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) } let hasRelevantItems (batches, _rc) = batches |> Array.exists isRelevant - query - >> TaskSeq.map mapPage - >> TaskSeq.takeWhile hasRelevantItems - >> TaskSeq.mapAsync handle - >> TaskSeq.toArrayAsync - |> Stopwatch.time ct - let mutable queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = 0., 0., 0., 0, 0, 0, 0 - let mutable lwm = None - for (qc, dc, tc), bLwm, (bCount, eDel, eDef) in outcomes do + let load ct = + query ct + |> TaskSeq.map mapPage + |> TaskSeq.takeWhile hasRelevantItems + |> TaskSeq.mapAsync handle + |> TaskSeq.toArrayAsync + load |> Stopwatch.time ct + let mutable lwm, queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = None, 0., 0., 0., 0, 0, 0, 0 + let accumulate ((qc, dc, tc), bLwm, (bCount, eDel, eDef)) = lwm <- max lwm bLwm queryCharges <- queryCharges + qc.total delCharges <- delCharges + dc @@ -984,13 +984,13 @@ module internal Prune = batches <- batches + bCount eventsDeleted <- eventsDeleted + eDel eventsDeferred <- eventsDeferred + eDef + outcomes |> Array.iter accumulate let reqMetric = Log.metric container.TableName stream pt eventsDeleted batches { total = queryCharges } let log = let evt = Log.Metric.Prune (responses, reqMetric) in log |> Log.event evt let lwm = lwm |> Option.defaultValue 0L // If we've seen no batches at all, then the write position is 0L log.Information("EqxDynamo {action:l} {events}/{batches} lwm={lwm} {ms:f1}ms queryRu={queryRu} deleteRu={deleteRu} trimRu={trimRu}", "Prune", eventsDeleted, batches, lwm, pt.ElapsedMilliseconds, queryCharges, delCharges, trimCharges) - return eventsDeleted, eventsDeferred, lwm - } + return eventsDeleted, eventsDeferred, lwm } type [] Token = { pos : Position option } module Token = From ae044554f16317afbad71c32b7cfe2d835d8cb6f Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 13:21:02 +0000 Subject: [PATCH 04/11] Remove usage of .Result --- src/Equinox.SqlStreamStore/SqlStreamStore.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index d14c3673e..193651c24 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -50,7 +50,7 @@ module Log = let propResolvedEvents name (events : ResolvedEvent[]) (log : ILogger) = log |> propEvents name (seq { for x in events do - let data = x.GetJsonData().Result + let data = x.GetJsonData() |> Async.AwaitTaskCorrect |> Async.RunSynchronously yield System.Collections.Generic.KeyValuePair<_, _>(x.Type, data) }) let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> CancellationToken -> Task<'t>) log ct : Task<'t> = From 485a9a3cfa212adf1cf0523982c216de2b4b217d Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 14:08:48 +0000 Subject: [PATCH 05/11] Prepare for TaskSeq Formatting/inlining Target FSharp.Core 6.0.7 --- samples/Infrastructure/Infrastructure.fsproj | 2 +- samples/Store/Domain/Domain.fsproj | 2 +- samples/TodoBackend/TodoBackend.fsproj | 2 +- src/Equinox.Core/Equinox.Core.fsproj | 5 +- .../Equinox.CosmosStore.Prometheus.fsproj | 2 +- 
src/Equinox.CosmosStore/CosmosStore.fs | 67 ++++---- .../Equinox.CosmosStore.fsproj | 2 +- .../Equinox.DynamoStore.Prometheus.fsproj | 2 +- src/Equinox.DynamoStore/DynamoStore.fs | 81 +++++----- .../Equinox.DynamoStore.fsproj | 5 +- .../Equinox.EventStore.fsproj | 2 +- src/Equinox.EventStore/EventStore.fs | 25 ++- .../Equinox.EventStoreDb.fsproj | 2 +- src/Equinox.EventStoreDb/EventStoreDb.fs | 29 ++-- .../Equinox.MemoryStore.fsproj | 2 +- .../Equinox.MessageDb.fsproj | 2 +- src/Equinox.MessageDb/MessageDb.fs | 9 +- .../Equinox.SqlStreamStore.MsSql.fsproj | 2 +- .../Equinox.SqlStreamStore.MySql.fsproj | 2 +- .../Equinox.SqlStreamStore.Postgres.fsproj | 2 +- .../Equinox.SqlStreamStore.fsproj | 4 +- src/Equinox.SqlStreamStore/SqlStreamStore.fs | 148 +++++++++--------- src/Equinox/Equinox.fsproj | 5 +- .../CosmosCoreIntegration.fs | 4 +- .../CosmosFixtures.fs | 3 +- .../Equinox.CosmosStore.Integration.fsproj | 1 + tools/Equinox.Tool/Equinox.Tool.fsproj | 2 +- tools/Equinox.Tool/Program.fs | 2 +- .../Equinox.Tools.TestHarness.fsproj | 2 +- 29 files changed, 198 insertions(+), 220 deletions(-) diff --git a/samples/Infrastructure/Infrastructure.fsproj b/samples/Infrastructure/Infrastructure.fsproj index 7b062df62..fa758318f 100644 --- a/samples/Infrastructure/Infrastructure.fsproj +++ b/samples/Infrastructure/Infrastructure.fsproj @@ -23,7 +23,7 @@ - + diff --git a/samples/Store/Domain/Domain.fsproj b/samples/Store/Domain/Domain.fsproj index 8753fb450..2f5b31d20 100644 --- a/samples/Store/Domain/Domain.fsproj +++ b/samples/Store/Domain/Domain.fsproj @@ -15,7 +15,7 @@ - + diff --git a/samples/TodoBackend/TodoBackend.fsproj b/samples/TodoBackend/TodoBackend.fsproj index 0b2d9f7d2..cb9056ba3 100644 --- a/samples/TodoBackend/TodoBackend.fsproj +++ b/samples/TodoBackend/TodoBackend.fsproj @@ -9,7 +9,7 @@ - + diff --git a/src/Equinox.Core/Equinox.Core.fsproj b/src/Equinox.Core/Equinox.Core.fsproj index a041ce83b..48a5b68c0 100644 --- a/src/Equinox.Core/Equinox.Core.fsproj +++ b/src/Equinox.Core/Equinox.Core.fsproj @@ -22,10 +22,7 @@ - - - contentFiles - + diff --git a/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj b/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj index 18ab60fe9..50a7b1464 100644 --- a/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj +++ b/src/Equinox.CosmosStore.Prometheus/Equinox.CosmosStore.Prometheus.fsproj @@ -15,7 +15,7 @@ - + diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 3c8395797..f84ba7f3e 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -882,8 +882,7 @@ module Prune = let reqMetric : Log.Measurement = { database = container.Database.Id; container = container.Id; stream = stream; interval = t; bytes = -1; count = count; ru = rc } let log = let evt = Log.Metric.Delete reqMetric in log |> Log.event evt log.Information("EqxCosmos {action:l} {id} {ms:f1}ms {ru}RU", "Delete", id, ms, rc) - return rc - } + return rc } let trimTip expectedI count = async { match! 
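// --- Illustrative aside, not part of the patch -----------------------------------
// trimTip (entered above) is an etag-guarded rewrite: re-read the Tip, replace it
// conditioned on the etag still matching, and treat a mismatch as a concurrent
// writer. Store-agnostic sketch with assumed types; real stores do this server-side:
module private EtagCasSketch =
    type CasResult<'doc> =
        | Updated of doc : 'doc * newEtag : string
        | PreconditionFailed
    let replaceIfUnchanged (currentEtag : string) (expectedEtag : string) (updated : 'doc) =
        if currentEtag = expectedEtag then Updated (updated, System.Guid.NewGuid().ToString "N")
        else PreconditionFailed
// ----------------------------------------------------------------------------------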
container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId) with | _, ReadResult.NotModified -> return failwith "unexpected NotModified; no etag supplied" @@ -898,8 +897,7 @@ module Prune = let reqMetric : Log.Measurement = { database = container.Database.Id; container = container.Id; stream = stream; interval = t; bytes = -1; count = count; ru = rc } let log = let evt = Log.Metric.Trim reqMetric in log |> Log.event evt log.Information("EqxCosmos {action:l} {count} {ms:f1}ms {ru}RU", "Trim", count, ms, rc) - return rc - } + return rc } let log = log |> Log.prop "index" indexInclusive let query : FeedIterator = let qro = QueryRequestOptions(PartitionKey = PartitionKey stream, MaxItemCount = maxItems) @@ -938,18 +936,16 @@ module Prune = eventsDeferred <- eventsDeferred + eligibleEvents if lwm = None then lwm <- Some x.i - return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) - } + return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) } let hasRelevantItems (batches, _rc) = batches |> Array.exists isRelevant - query - |> Query.feedIteratorMapTi mapPage - |> AsyncSeq.takeWhile hasRelevantItems - |> AsyncSeq.mapAsync handle - |> AsyncSeq.toArrayAsync - |> Stopwatch.Time - let mutable queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = 0., 0., 0., 0, 0, 0, 0 - let mutable lwm = None - for (qc, dc, tc), bLwm, (bCount, eDel, eDef) in outcomes do + let loadOutcomes query = + Query.feedIteratorMapTi mapPage query + |> AsyncSeq.takeWhile hasRelevantItems + |> AsyncSeq.mapAsync handle + |> AsyncSeq.toArrayAsync + loadOutcomes query |> Stopwatch.Time + let mutable lwm, queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = None, 0., 0., 0., 0, 0, 0, 0 + let accumulate ((qc, dc, tc), bLwm, (bCount, eDel, eDef)) = lwm <- max lwm bLwm queryCharges <- queryCharges + qc delCharges <- delCharges + dc @@ -958,13 +954,13 @@ module Prune = batches <- batches + bCount eventsDeleted <- eventsDeleted + eDel eventsDeferred <- eventsDeferred + eDef + outcomes |> Array.iter accumulate let reqMetric : Log.Measurement = { database = container.Database.Id; container = container.Id; stream = stream; interval = pt; bytes = eventsDeleted; count = batches; ru = queryCharges } let log = let evt = Log.Metric.Prune (responses, reqMetric) in log |> Log.event evt let lwm = lwm |> Option.defaultValue 0L // If we've seen no batches at all, then the write position is 0L log.Information("EqxCosmos {action:l} {events}/{batches} lwm={lwm} {ms:f1}ms queryRu={queryRu} deleteRu={deleteRu} trimRu={trimRu}", - "Prune", eventsDeleted, batches, lwm, pt.ElapsedMilliseconds, queryCharges, delCharges, trimCharges) - return eventsDeleted, eventsDeferred, lwm - } + "Prune", eventsDeleted, batches, lwm, pt.ElapsedMilliseconds, queryCharges, delCharges, trimCharges) + return eventsDeleted, eventsDeferred, lwm } type [] Token = { pos : Position } module Token = @@ -1087,7 +1083,6 @@ type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IE match! 
store.Reload(log, (streamName, pos), (codec.TryDecode, isOrigin), ?preview = preloaded) |> Async.startAsTask ct with | LoadFromTokenResult.Unchanged -> return struct (streamToken, state) | LoadFromTokenResult.Found (token', events) -> return token', fold state events } - member cat.Sync(log, streamName, (Token.Unpack pos as streamToken), state, events, mapUnfolds, fold, isOrigin, context, compressUnfolds) : Async> = async { let state' = fold state (Seq.ofArray events) let encode e = codec.Encode(context, e) @@ -1125,27 +1120,27 @@ module internal Caching = type CachingCategory<'event, 'state, 'context> ( category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, isOrigin : 'event -> bool, - tryReadCache, updateCache : _ -> struct (_*_) -> Task, + tryReadCache : string -> Task>, updateCache : string -> struct (StreamToken * 'state) -> Task, checkUnfolds, compressUnfolds, - mapUnfolds : Choice 'state -> 'event array), 'event array -> 'state -> 'event array * 'event array>) = - let cache streamName (inner : unit -> Task<_>) = task { - let! struct (token, state) = inner () - do! updateCache streamName (token, state) - return struct (token, state) } + mapUnfolds : Choice 'state -> 'event array, 'event array -> 'state -> 'event array * 'event array>) = + let cache streamName (inner : CancellationToken -> Task<_>) ct = task { + let! tokenAndState = inner ct + do! updateCache streamName tokenAndState + return tokenAndState } interface ICategory<'event, 'state, 'context> with - member _.Load(log, _categoryName, _streamId, streamName, allowStale, _requireLeader, ct) : Task = task { - match! tryReadCache streamName : Task with - | ValueNone -> return! (fun () -> category.Load(log, streamName, initial, checkUnfolds, fold, isOrigin, ct)) |> cache streamName - | ValueSome struct (token, state) when allowStale -> return struct (token, state) // read already updated TTL, no need to write - | ValueSome (token, state) -> return! (fun () -> category.Reload(log, streamName, token, state, fold, isOrigin, ct)) |> cache streamName } + member _.Load(log, _categoryName, _streamId, streamName, allowStale, _requireLeader, ct) = task { + match! tryReadCache streamName with + | ValueNone -> return! cache streamName (fun ct -> category.Load(log, streamName, initial, checkUnfolds, fold, isOrigin, ct)) ct + | ValueSome tokenAndState when allowStale -> return tokenAndState // read already updated TTL, no need to write + | ValueSome (token, state) -> return! cache streamName (fun ct -> category.Reload(log, streamName, token, state, fold, isOrigin, ct)) ct } member _.TrySync(log : ILogger, _categoryName, _streamId, streamName, context, maybeInit, streamToken, state, events, ct) : Task> = task { match maybeInit with ValueNone -> () | ValueSome i -> do! i ct - match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context, compressUnfolds) |> Async.startAsTask ct with + match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context, compressUnfolds) with | SyncResult.Conflict resync -> - return SyncResult.Conflict (fun ct -> cache streamName (fun () -> resync ct)) - | SyncResult.Written (token', state') -> - do! updateCache streamName struct (token', state') - return SyncResult.Written (token', state') } + return SyncResult.Conflict (cache streamName resync) + | SyncResult.Written tokenAndState' -> + do! 
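// --- Illustrative aside, not part of the patch -----------------------------------
// The `cache` helper above is a plain decorator: run the inner load, publish the
// outcome to the cache, hand it back unchanged. Generic sketch (signatures assumed):
module private CacheThroughSketch =
    open System.Threading
    open System.Threading.Tasks
    let cached (publish : 'r -> Task) (inner : CancellationToken -> Task<'r>) ct : Task<'r> = task {
        let! r = inner ct
        do! publish r
        return r }
// ----------------------------------------------------------------------------------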
updateCache streamName tokenAndState' + return SyncResult.Written tokenAndState' } module ConnectionString = @@ -1224,7 +1219,7 @@ type CosmosClientFactory /// Creates and validates a Client [including loading metadata](https://devblogs.microsoft.com/cosmosdb/improve-net-sdk-initialization) for the specified containers member x.CreateAndInitialize(discovery : Discovery, containers) = async { let! ct = Async.CancellationToken - match discovery with // + match discovery with | Discovery.AccountUriAndKey (accountUri = uri; key = key) -> return! CosmosClient.CreateAndInitializeAsync(string uri, key, containers, x.Options, ct) |> Async.AwaitTaskCorrect | Discovery.ConnectionString cs -> return! CosmosClient.CreateAndInitializeAsync(cs, containers, x.Options) |> Async.AwaitTaskCorrect } diff --git a/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj index dd8154bde..69c9f171e 100644 --- a/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj +++ b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj @@ -18,7 +18,7 @@ - + diff --git a/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj b/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj index 1c593436f..9e8a97160 100644 --- a/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj +++ b/src/Equinox.DynamoStore.Prometheus/Equinox.DynamoStore.Prometheus.fsproj @@ -15,7 +15,7 @@ - + diff --git a/src/Equinox.DynamoStore/DynamoStore.fs b/src/Equinox.DynamoStore/DynamoStore.fs index f0ee0c1cb..7be4e89e3 100644 --- a/src/Equinox.DynamoStore/DynamoStore.fs +++ b/src/Equinox.DynamoStore/DynamoStore.fs @@ -7,6 +7,7 @@ open FSharp.Control open Serilog open System open System.IO +open System.Threading open System.Threading.Tasks [] @@ -284,9 +285,9 @@ module Log = { mutable rux100 : int64; mutable count : int64; mutable ms : int64 } static member Create() = { rux100 = 0L; count = 0L; ms = 0L } member x.Ingest(ms, ru) = - System.Threading.Interlocked.Increment(&x.count) |> ignore - System.Threading.Interlocked.Add(&x.rux100, int64 (ru * 100.)) |> ignore - System.Threading.Interlocked.Add(&x.ms, ms) |> ignore + Interlocked.Increment(&x.count) |> ignore + Interlocked.Add(&x.rux100, int64 (ru * 100.)) |> ignore + Interlocked.Add(&x.ms, ms) |> ignore type internal Counters() = let tables = System.Collections.Concurrent.ConcurrentDictionary() let create (_name : string) = Counter.Create() @@ -312,7 +313,7 @@ module Log = static let mutable epoch = Epoch() static member Restart() = let fresh = Epoch() - let outgoing = System.Threading.Interlocked.Exchange(&epoch, fresh) + let outgoing = Interlocked.Exchange(&epoch, fresh) outgoing.Stop() outgoing interface Serilog.Core.ILogEventSink with @@ -458,8 +459,7 @@ type Container(tableName, createContext : (RequestMetrics -> unit) -> TableConte yield i, t, Array.map Batch.ofSchema res.Records, rm.Consumed match res.LastEvaluatedKey with | None -> () - | le -> yield! aux (i + 1, le) - } + | le -> yield! aux (i + 1, le) } aux (0, None) member internal _.QueryIAndNOrderByNAscending(stream, maxItems) : AsyncSeq = let rec aux (index, lastEvaluated) = asyncSeq { @@ -777,7 +777,7 @@ module internal Query = |> AsyncSeq.map (mapPage direction (container, stream) (minIndex, maxIndex, maxItems) maxRequests readLog) let! 
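// --- Illustrative aside, not part of the patch -----------------------------------
// QueryBatches above pages via DynamoDB's LastEvaluatedKey: emit each page, recurse
// only while the store hands a continuation key back. Condensed sketch in the
// taskSeq style the patch migrates to (FSharp.Control.TaskSeq assumed; string key):
module private ContinuationPagingSketch =
    open FSharp.Control
    open System.Threading.Tasks
    let rec pages (query : string option -> Task<int[] * string option>) continuation = taskSeq {
        let! page, nextKey = query continuation
        yield page
        match nextKey with
        | None -> ()
        | Some _ -> yield! pages query nextKey }
// ----------------------------------------------------------------------------------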
t, (events, maybeTipPos, ru) = mergeBatches log batches |> Stopwatch.Time let raws = Array.map ValueTuple.fst events - let decoded = if direction = Direction.Forward then Array.chooseV ValueTuple.snd events else Seq.chooseV ValueTuple.snd events |> Seq.rev |> Array.ofSeq + let decoded = if direction = Direction.Forward then Array.chooseV ValueTuple.snd events else let xs = Array.chooseV ValueTuple.snd events in Array.Reverse xs; xs let minMax = (None, raws) ||> Array.fold (fun acc x -> let i = int64 x.i in Some (match acc with None -> i, i | Some (n, x) -> min n i, max x i)) let version = match maybeTipPos, minMax with @@ -786,7 +786,7 @@ module internal Query = | None, None -> 0L log |> logQuery (direction, minIndex, maxIndex) (container, stream) t (responseCount, raws) version ru match minMax, maybeTipPos with - | Some (i, m), _ -> return Some { found = found; minIndex = i; next = m + 1L; maybeTipPos = maybeTipPos; events = decoded } + | Some (i, m), _ -> return Some ({ found = found; minIndex = i; next = m + 1L; maybeTipPos = maybeTipPos; events = decoded } : ScanResult<_>) | None, Some { index = tipI } -> return Some { found = found; minIndex = tipI; next = tipI; maybeTipPos = maybeTipPos; events = [||] } | None, _ -> return None } @@ -901,8 +901,7 @@ module internal Prune = let reqMetric = Log.metric container.TableName stream t -1 count rc let log = let evt = Log.Metric.Delete reqMetric in log |> Log.event evt log.Information("EqxDynamo {action:l} {i} {ms:f1}ms {ru}RU", "Delete", i, t.ElapsedMilliseconds, rc) - return rc - } + return rc } let trimTip expectedN count = async { match! container.TryGetTip(stream, consistentRead = false) with | None, _rc -> return failwith "unexpected NotFound" @@ -919,8 +918,7 @@ module internal Prune = let reqMetric = Log.metric container.TableName stream t -1 count rc let log = let evt = Log.Metric.Trim reqMetric in log |> Log.event evt log.Information("EqxDynamo {action:l} {count} {ms:f1}ms {ru}RU", "Trim", count, t.ElapsedMilliseconds, rc.total) - return rc - } + return rc } let log = log |> Log.prop "index" indexInclusive // need to sort by n to guarantee we don't ever leave an observable gap in the sequence let query = container.QueryIAndNOrderByNAscending(stream, maxItems) @@ -956,18 +954,17 @@ module internal Prune = eventsDeferred <- eventsDeferred + eligibleEvents if lwm = None then lwm <- Some x.index - return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) - } + return (rc, delCharges, trimCharges), lwm, (batchesDeleted + batchesTrimmed, eventsDeleted, eventsDeferred) } let hasRelevantItems (batches, _rc) = batches |> Array.exists isRelevant - query - |> AsyncSeq.map mapPage - |> AsyncSeq.takeWhile hasRelevantItems - |> AsyncSeq.mapAsync handle - |> AsyncSeq.toArrayAsync - |> Stopwatch.Time - let mutable queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = 0., 0., 0., 0, 0, 0, 0 - let mutable lwm = None - for (qc, dc, tc), bLwm, (bCount, eDel, eDef) in outcomes do + let load query = + query + |> AsyncSeq.map mapPage + |> AsyncSeq.takeWhile hasRelevantItems + |> AsyncSeq.mapAsync handle + |> AsyncSeq.toArrayAsync + load query |> Stopwatch.Time + let mutable lwm, queryCharges, delCharges, trimCharges, responses, batches, eventsDeleted, eventsDeferred = None, 0., 0., 0., 0, 0, 0, 0 + let accumulate ((qc, dc, tc), bLwm, (bCount, eDel, eDef)) = lwm <- max lwm bLwm queryCharges <- queryCharges + qc.total delCharges <- delCharges + dc @@ -976,13 +973,13 @@ 
module internal Prune = batches <- batches + bCount eventsDeleted <- eventsDeleted + eDel eventsDeferred <- eventsDeferred + eDef + outcomes |> Array.iter accumulate let reqMetric = Log.metric container.TableName stream pt eventsDeleted batches { total = queryCharges } let log = let evt = Log.Metric.Prune (responses, reqMetric) in log |> Log.event evt let lwm = lwm |> Option.defaultValue 0L // If we've seen no batches at all, then the write position is 0L log.Information("EqxDynamo {action:l} {events}/{batches} lwm={lwm} {ms:f1}ms queryRu={queryRu} deleteRu={deleteRu} trimRu={trimRu}", "Prune", eventsDeleted, batches, lwm, pt.ElapsedMilliseconds, queryCharges, delCharges, trimCharges) - return eventsDeleted, eventsDeferred, lwm - } + return eventsDeleted, eventsDeferred, lwm } type [] Token = { pos : Position option } module Token = @@ -1139,13 +1136,13 @@ type internal Category<'event, 'state, 'context>(store : StoreClient, codec : IE module internal Caching = let applyCacheUpdatesWithSlidingExpiration (cache : ICache, prefix : string) (slidingExpiration : TimeSpan) = - let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) + let mkCacheEntry struct (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) let options = CacheItemOptions.RelativeExpiration slidingExpiration fun streamName value -> cache.UpdateIfNewer(prefix + streamName, options, Token.supersedes, mkCacheEntry value) let applyCacheUpdatesWithFixedTimeSpan (cache : ICache, prefix : string) (period : TimeSpan) = - let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) + let mkCacheEntry struct (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState) fun streamName value -> let expirationPoint = let creationDate = DateTimeOffset.UtcNow in creationDate.Add period let options = CacheItemOptions.AbsoluteExpiration expirationPoint @@ -1154,25 +1151,25 @@ module internal Caching = type CachingCategory<'event, 'state, 'context> ( category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, isOrigin : 'event -> bool, - tryReadCache, updateCache : string -> _ -> Task, + tryReadCache : string -> Task>, updateCache : string -> struct (_ * _) -> Task, checkUnfolds, mapUnfolds : Choice 'state -> 'event array, 'event array -> 'state -> 'event array * 'event array>) = let cache streamName (inner : unit -> Task<_>) = task { - let! struct (token, state) = inner () - do! updateCache streamName (token, state) - return struct (token, state) } + let! tokenAndState = inner () + do! updateCache streamName tokenAndState + return tokenAndState } interface ICategory<'event, 'state, 'context> with - member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) : Task = task { - match! tryReadCache streamName : Task> with - | ValueNone -> return! (fun () -> category.Load(log, streamName, requireLeader, initial, checkUnfolds, fold, isOrigin)) |> cache streamName - | ValueSome struct (token, state) when allowStale -> return struct (token, state) // read already updated TTL, no need to write - | ValueSome struct (token, state) -> return! 
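// --- Illustrative aside, not part of the patch -----------------------------------
// The two cache policies above differ only in how the deadline is derived: sliding
// expiration renews relative to each access, while the fixed-period variant pins an
// absolute deadline when the entry is written:
module private ExpirySketch =
    open System
    let slidingDeadline (accessedAt : DateTimeOffset) (window : TimeSpan) = accessedAt + window
    let absoluteDeadline (writtenAt : DateTimeOffset) (period : TimeSpan) = writtenAt.Add period
// ----------------------------------------------------------------------------------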
(fun () -> category.Reload(log, streamName, requireLeader, token, state, fold, isOrigin, ct)) |> cache streamName } - member _.TrySync(log : ILogger, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, state, events, ct) : Task> = task { - match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context) |> Async.startAsTask ct with + member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = task { + match! tryReadCache streamName with + | ValueNone -> return! cache streamName (fun () -> category.Load(log, streamName, requireLeader, initial, checkUnfolds, fold, isOrigin)) + | ValueSome tokenAndState when allowStale -> return tokenAndState // read already updated TTL, no need to write + | ValueSome (token, state) -> return! cache streamName (fun () -> category.Reload(log, streamName, requireLeader, token, state, fold, isOrigin, ct)) } + member _.TrySync(log : ILogger, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, state, events, ct) = task { + match! category.Sync(log, streamName, streamToken, state, events, mapUnfolds, fold, isOrigin, context) with | SyncResult.Conflict resync -> - return SyncResult.Conflict (fun ct -> (fun () -> resync ct) |> cache streamName) - | SyncResult.Written (token', state') -> - do! updateCache streamName (token', state') - return SyncResult.Written (token', state') } + return SyncResult.Conflict (fun ct -> cache streamName (fun () -> resync ct)) + | SyncResult.Written tokenAndState' -> + do! updateCache streamName tokenAndState' + return SyncResult.Written tokenAndState' } namespace Equinox.DynamoStore diff --git a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj index 469464fee..b2057983e 100644 --- a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj +++ b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj @@ -13,10 +13,7 @@ - - - contentFiles - + diff --git a/src/Equinox.EventStore/Equinox.EventStore.fsproj b/src/Equinox.EventStore/Equinox.EventStore.fsproj index d68d17719..6daa4ff30 100644 --- a/src/Equinox.EventStore/Equinox.EventStore.fsproj +++ b/src/Equinox.EventStore/Equinox.EventStore.fsproj @@ -18,7 +18,7 @@ - + diff --git a/src/Equinox.EventStore/EventStore.fs b/src/Equinox.EventStore/EventStore.fs index a4e5ae4d9..27c0708ed 100755 --- a/src/Equinox.EventStore/EventStore.fs +++ b/src/Equinox.EventStore/EventStore.fs @@ -374,7 +374,7 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp member val BatchOptions = batchOptions member _.TokenEmpty = Token.ofUncompactedVersion batchOptions.BatchSize -1L - member _.LoadBatched streamName requireLeader log (tryDecode, isCompactionEventType) : Async = async { + member _.LoadBatched(streamName, requireLeader, log, (tryDecode, isCompactionEventType)) : Async = async { let! 
version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L match tryIsResolvedEventEventType isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events @@ -382,13 +382,13 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp match events |> Array.tryFindBack isCompactionEvent with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member _.LoadBackwardsStoppingAtCompactionEvent streamName requireLeader log (tryDecode, isOrigin) : Async = async { + member _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, (tryDecode, isOrigin)) : Async = async { let! version, events = Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) match Array.tryHead events |> Option.filter (function _, ValueSome e -> isOrigin e | _ -> false) with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events | Some (resolvedEvent, _) -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events } - member _.Reload requireLeader streamName log (Token.Unpack token as streamToken) (tryDecode, isCompactionEventType) + member _.Reload(requireLeader, streamName, log, (Token.Unpack token as streamToken), (tryDecode, isCompactionEventType)) : Async = async { let streamPosition = token.streamVersion + 1L let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition @@ -399,7 +399,7 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp | None -> return Token.ofPreviousTokenAndEventsLength streamToken events.Length batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member _.TrySync log streamName (Token.Unpack token as streamToken) (events, encodedEvents : EventData array) isCompactionEventType : Async = async { + member _.TrySync(log, streamName, (Token.Unpack token as streamToken), (events, encodedEvents : EventData array), isCompactionEventType) : Async = async { let streamVersion = token.streamVersion match! 
Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName streamVersion encodedEvents with | EsSyncResult.Conflict actualVersion -> @@ -456,12 +456,10 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod | None | Some AccessStrategy.LatestKnownEvent -> fun _ -> true | Some (AccessStrategy.RollingSnapshots (isValid, _)) -> isValid let loadAlgorithm streamName requireLeader log = - let batched = context.LoadBatched streamName requireLeader log (tryDecode, None) - let compacted = context.LoadBackwardsStoppingAtCompactionEvent streamName requireLeader log (tryDecode, isOrigin) match access with - | None -> batched + | None -> context.LoadBatched(streamName, requireLeader, log, (tryDecode, None)) | Some AccessStrategy.LatestKnownEvent - | Some (AccessStrategy.RollingSnapshots _) -> compacted + | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, (tryDecode, isOrigin)) let load (fold : 'state -> 'event seq -> 'state) initial f = async { let! token, events = f return struct (token, fold initial events) } @@ -469,7 +467,7 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger) : Async = load fold initial (loadAlgorithm streamName requireLeader log) member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger) : Async = - load fold state (context.Reload requireLeader streamName log token (tryDecode, compactionPredicate)) + load fold state (context.Reload(requireLeader, streamName, log, token, (tryDecode, compactionPredicate))) member _.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, @@ -482,21 +480,20 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod let cc = CompactionContext(Array.length events, token.batchCapacityLimit.Value) if cc.IsCompactionDue then Array.append events (fold state events |> compact |> Array.singleton) else events let encodedEvents : EventData[] = events |> Array.map (encode >> UnionEncoderAdapters.eventDataOfEncodedEvent) - match! context.TrySync log streamName streamToken (events, encodedEvents) compactionPredicate with + match! 
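// --- Illustrative aside, not part of the patch -----------------------------------
// The CompactionContext check above decides whether to piggyback a fresh snapshot
// onto the write once the remaining batch capacity would otherwise be exhausted.
// Modelled here as a bare count comparison (the real type encapsulates the rule):
module private CompactionSketch =
    let withSnapshotIfDue (capacityRemaining : int) (snapshot : unit -> 'e) (events : 'e[]) =
        if events.Length > capacityRemaining then Array.append events [| snapshot () |]
        else events
// ----------------------------------------------------------------------------------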
context.TrySync(log, streamName, streamToken, (events, encodedEvents), compactionPredicate) with | GatewaySyncResult.ConflictUnknown _ -> - return SyncResult.Conflict (fun ct -> load fold state (context.Reload true streamName log streamToken (tryDecode, compactionPredicate)) |> Async.startAsTask ct) + return SyncResult.Conflict (fun ct -> load fold state (context.Reload(true, streamName, log, streamToken, (tryDecode, compactionPredicate))) |> Async.startAsTask ct) | GatewaySyncResult.Written token' -> return SyncResult.Written (token', fold state (Seq.ofArray events)) } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log streamName requireLeader = category.Load(fold, initial, streamName, requireLeader, log) interface ICategory<'event, 'state, 'context> with member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = match readCache with - | None -> Async.startAsTask ct (batched log streamName requireLeader) + | None -> category.Load(fold, initial, streamName, requireLeader, log) |> Async.startAsTask ct | Some (cache : ICache, prefix : string) -> task { match! cache.TryGet(prefix + streamName) with - | ValueNone -> return! batched log streamName requireLeader + | ValueNone -> return! category.Load(fold, initial, streamName, requireLeader, log) |> Async.startAsTask ct | ValueSome tokenAndState when allowStale -> return tokenAndState | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log) } member _.TrySync(log, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, initialState, events, _ct) = task { diff --git a/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj b/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj index 16e14e055..2b8cdcd05 100644 --- a/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj +++ b/src/Equinox.EventStoreDb/Equinox.EventStoreDb.fsproj @@ -18,7 +18,7 @@ - + diff --git a/src/Equinox.EventStoreDb/EventStoreDb.fs b/src/Equinox.EventStoreDb/EventStoreDb.fs index a6ae58b1f..adbf6dd9d 100755 --- a/src/Equinox.EventStoreDb/EventStoreDb.fs +++ b/src/Equinox.EventStoreDb/EventStoreDb.fs @@ -1,5 +1,6 @@ namespace Equinox.EventStoreDb +open System.Threading open Equinox.Core open EventStore.Client open Serilog @@ -75,8 +76,8 @@ module Log = { mutable count : int64; mutable ms : int64 } static member Create() = { count = 0L; ms = 0L } member x.Ingest(ms) = - System.Threading.Interlocked.Increment(&x.count) |> ignore - System.Threading.Interlocked.Add(&x.ms, ms) |> ignore + Interlocked.Increment(&x.count) |> ignore + Interlocked.Add(&x.ms, ms) |> ignore type LogSink() = static let epoch = System.Diagnostics.Stopwatch.StartNew() @@ -138,7 +139,7 @@ module private Write = elif wr.Status = ConditionalWriteStatus.Succeeded then return EsSyncResult.Written wr else return failwithf "Unexpected write response code %O" wr.Status } - let eventDataBytes events = + let private eventDataBytes events = let eventDataLen (x : EventData) = match x.Data, x.Metadata with Log.BlobLen bytes, Log.BlobLen metaBytes -> bytes + metaBytes events |> Array.sumBy eventDataLen @@ -169,7 +170,7 @@ module private Read = open FSharp.Control let resolvedEventBytes (x : ResolvedEvent) = let Log.BlobLen bytes, Log.BlobLen metaBytes = x.Event.Data, x.Event.Metadata in bytes + metaBytes let resolvedEventsBytes events = events |> Array.sumBy resolvedEventBytes - let logBatchRead 
direction streamName t events batchSize version (log : ILogger) = + let private logBatchRead direction streamName t events batchSize version (log : ILogger) = let bytes, count = resolvedEventsBytes events, events.Length let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count} let batches = match batchSize with Some batchSize -> (events.Length - 1) / batchSize + 1 | None -> -1 @@ -179,7 +180,7 @@ module private Read = (log |> Log.prop "bytes" bytes |> Log.event evt).Information( "Esdb{action:l} stream={stream} count={count}/{batches} version={version}", action, streamName, count, batches, version) - let loadBackwardsUntilOrigin (log : ILogger) (conn : EventStoreClient) batchSize streamName (tryDecode, isOrigin) + let private loadBackwardsUntilOrigin (log : ILogger) (conn : EventStoreClient) batchSize streamName (tryDecode, isOrigin) : Async = async { let! ct = Async.CancellationToken let res = conn.ReadStreamAsync(Direction.Backwards, streamName, StreamPosition.End, int64 batchSize, resolveLinkTos = false, cancellationToken = ct) @@ -204,7 +205,7 @@ module private Read = log |> logBatchRead Direction.Backward streamName t (Array.map ValueTuple.fst events) (Some batchSize) version return version, events } - let loadForward (conn : EventStoreClient) streamName startPosition + let private loadForward (conn : EventStoreClient) streamName startPosition : Async = async { let! ct = Async.CancellationToken let res = conn.ReadStreamAsync(Direction.Forwards, streamName, startPosition, Int64.MaxValue, resolveLinkTos = false, cancellationToken = ct) @@ -315,7 +316,7 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType) : Async = async { let! version, events = Read.loadForwards log (conn requireLeader) streamName StreamPosition.Start match tryIsResolvedEventEventType isCompactionEventType with - | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events + | None -> return struct (Token.ofNonCompacting version, Array.chooseV tryDecode events) | Some isCompactionEvent -> match events |> Array.tryFindBack isCompactionEvent with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV tryDecode events @@ -323,14 +324,14 @@ type EventStoreContext(connection : EventStoreConnection, batchOptions : BatchOp member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, limit, tryDecode, isOrigin) : Async = async { let! version, events = Read.loadBackwards log (conn requireLeader) (defaultArg limit Int32.MaxValue) streamName (tryDecode, isOrigin) match Array.tryHead events |> Option.filter (function _, ValueSome e -> isOrigin e | _ -> false) with - | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events + | None -> return struct (Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events) | Some (resolvedEvent, _) -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events } member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType) : Async = async { let streamPosition = StreamPosition.FromInt64(token.streamVersion + 1L) let! 
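// --- Illustrative aside, not part of the patch -----------------------------------
// loadBackwardsUntilOrigin above reads newest-first and stops, inclusively, at the
// first origin/snapshot event; the slice is then flipped back into forward order.
// A pure sketch of that traversal:
module private BackwardsReadSketch =
    let untilOrigin (isOrigin : 'e -> bool) (newestFirst : 'e seq) : 'e[] =
        let acc = ResizeArray()
        let mutable found = false
        for e in newestFirst do
            if not found then
                acc.Add e
                if isOrigin e then found <- true
        acc.Reverse()
        acc.ToArray()
// ----------------------------------------------------------------------------------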
version, events = Read.loadForwards log (conn requireLeader) streamName streamPosition match isCompactionEventType with - | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events + | None -> return struct (Token.ofNonCompacting version, Array.chooseV tryDecode events) | Some isCompactionEvent -> match events |> Array.tryFindBack (fun re -> match tryDecode re with ValueSome e -> isCompactionEvent e | _ -> false) with | None -> return Token.ofPreviousTokenAndEventsLength streamToken events.Length batchOptions.BatchSize version, Array.chooseV tryDecode events @@ -426,18 +427,16 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod return SyncResult.Written (token', fold state (Seq.ofArray events)) } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log streamName requireLeader = category.Load(fold, initial, streamName, requireLeader, log) interface ICategory<'event, 'state, 'context> with - member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, _ct) = task { + member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = task { match readCache with - | None -> return! batched log streamName requireLeader + | None -> return! category.Load(fold, initial, streamName, requireLeader, log) | Some (cache : ICache, prefix : string) -> match! cache.TryGet(prefix + streamName) with - | ValueNone -> return! batched log streamName requireLeader + | ValueNone -> return! category.Load(fold, initial, streamName, requireLeader, log) | ValueSome tokenAndState when allowStale -> return tokenAndState | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log) } - - member _.TrySync(log, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, initialState, events, _ct) = task { + member _.TrySync(log, _categoryName, _streamId, streamName, context, _maybeInit, streamToken, initialState, events, ct) = task { match! 
category.TrySync(log, fold, streamName, streamToken, initialState, events, context) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token', state') -> return SyncResult.Written (token', state') } diff --git a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj index ac4c64656..3301595f8 100644 --- a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj +++ b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj @@ -16,7 +16,7 @@ - + diff --git a/src/Equinox.MessageDb/Equinox.MessageDb.fsproj b/src/Equinox.MessageDb/Equinox.MessageDb.fsproj index e50d57c34..688aac168 100644 --- a/src/Equinox.MessageDb/Equinox.MessageDb.fsproj +++ b/src/Equinox.MessageDb/Equinox.MessageDb.fsproj @@ -26,7 +26,7 @@ - + diff --git a/src/Equinox.MessageDb/MessageDb.fs b/src/Equinox.MessageDb/MessageDb.fs index 02e38f4e0..8ad1712b8 100644 --- a/src/Equinox.MessageDb/MessageDb.fs +++ b/src/Equinox.MessageDb/MessageDb.fs @@ -421,21 +421,18 @@ type private Category<'event, 'state, 'context>(context : MessageDbContext, code return SyncResult.Written (token', state') } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log categoryName streamId streamName requireLeader ct = category.Load(fold, initial, categoryName, streamId, streamName, requireLeader, log, ct) - interface ICategory<'event, 'state, 'context> with member _.Load(log, categoryName, streamId, streamName, allowStale, requireLeader, ct) = task { - let act = Activity.Current match readCache with - | None -> return! batched log categoryName streamId streamName requireLeader ct + | None -> return! category.Load(fold, initial, categoryName, streamId, streamName, requireLeader, log, ct) | Some (cache : ICache, prefix : string) -> let! cacheItem = cache.TryGet(prefix + streamName) + let act = Activity.Current if act <> null then act.AddCacheHit(match cacheItem with ValueNone -> false | _ -> true) |> ignore match cacheItem with - | ValueNone -> return! batched log categoryName streamId streamName requireLeader ct + | ValueNone -> return! category.Load(fold, initial, categoryName, streamId, streamName, requireLeader, log, ct) | ValueSome tokenAndState when allowStale -> return tokenAndState | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log, ct) } - member _.TrySync(log, categoryName, streamId, streamName, context, _init, token, originState, events, ct) = task { match! 
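// --- Illustrative aside, not part of the patch -----------------------------------
// AddCacheHit above appears to be one of the repo's own Activity extensions; the
// underlying mechanism is plain System.Diagnostics tagging, guarded against there
// being no current span:
module private TracingSketch =
    open System.Diagnostics
    let tagCacheHit (hit : bool) =
        let act = Activity.Current
        if act <> null then act.SetTag("cache.hit", hit) |> ignore
// ----------------------------------------------------------------------------------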
category.TrySync(log, fold, categoryName, streamId, streamName, token, originState, events, context, ct) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync diff --git a/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj b/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj index b49cacee1..32edc1ab0 100644 --- a/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj +++ b/src/Equinox.SqlStreamStore.MsSql/Equinox.SqlStreamStore.MsSql.fsproj @@ -12,7 +12,7 @@ - + diff --git a/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj b/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj index b26627763..a90cc73c8 100644 --- a/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj +++ b/src/Equinox.SqlStreamStore.MySql/Equinox.SqlStreamStore.MySql.fsproj @@ -12,7 +12,7 @@ - + diff --git a/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj b/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj index 367281fcb..e04a02702 100644 --- a/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj +++ b/src/Equinox.SqlStreamStore.Postgres/Equinox.SqlStreamStore.Postgres.fsproj @@ -12,7 +12,7 @@ - + diff --git a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj index 7a5ef5b30..2e8cb2c68 100644 --- a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj +++ b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj @@ -18,10 +18,10 @@ - + - + diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index fa4f5c939..193651c24 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -2,9 +2,12 @@ open Equinox.Core open Serilog -open System open SqlStreamStore open SqlStreamStore.Streams +open System +open System.Collections.Generic +open System.Threading +open System.Threading.Tasks type EventBody = ReadOnlyMemory type EventData = NewStreamMessage @@ -50,9 +53,9 @@ module Log = let data = x.GetJsonData() |> Async.AwaitTaskCorrect |> Async.RunSynchronously yield System.Collections.Generic.KeyValuePair<_, _>(x.Type, data) }) - let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> Async<'t>) log : Async<'t> = + let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> CancellationToken -> Task<'t>) log ct : Task<'t> = match retryPolicy with - | None -> f log + | None -> f log ct | Some retryPolicy -> let withLoggingContextWrapping count = let log = if count = 1 then log else log |> prop contextLabel count @@ -78,8 +81,8 @@ module Log = { mutable count : int64; mutable ms : int64 } static member Create() = { count = 0L; ms = 0L } member x.Ingest(ms) = - System.Threading.Interlocked.Increment(&x.count) |> ignore - System.Threading.Interlocked.Add(&x.ms, ms) |> ignore + Interlocked.Increment(&x.count) |> ignore + Interlocked.Add(&x.ms, ms) |> ignore type LogSink() = static let epoch = System.Diagnostics.Stopwatch.StartNew() static member val Read = Counter.Create() with get, set @@ -129,9 +132,9 @@ type EsSyncResult = Written of AppendResult | ConflictUnknown module private Write = /// Yields `EsSyncResult.Written` or `EsSyncResult.Conflict` to signify WrongExpectedVersion - let private writeEventsAsync (log : ILogger) (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) - : Async = async 
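// --- Illustrative aside, not part of the patch -----------------------------------
// withLoggedRetries above defers to a configurable policy; a naive stand-in showing
// the retry-with-attempt-number shape (the real policy type and logging are assumed):
module private RetrySketch =
    open System.Threading.Tasks
    let withRetries (maxAttempts : int) (call : int -> Task<'t>) : Task<'t> = task {
        let mutable attempt = 1
        let mutable result = ValueNone
        while ValueOption.isNone result do
            try
                let! r = call attempt // the real code rebinds an attempt-scoped logger here
                result <- ValueSome r
            with _ when attempt < maxAttempts ->
                attempt <- attempt + 1
        return result.Value }
// ----------------------------------------------------------------------------------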
{ - try let! wr = conn.AppendToStream(StreamId streamName, (if version = -1L then ExpectedVersion.NoStream else int version), events) |> Async.AwaitTaskCorrect + let private writeEventsAsync (log : ILogger) (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) ct + : Task = task { + try let! wr = conn.AppendToStream(StreamId streamName, (if version = -1L then ExpectedVersion.NoStream else int version), events, ct) return EsSyncResult.Written wr with :? WrongExpectedVersionException as ex -> log.Information(ex, "SqlEs TrySync WrongExpectedVersionException writing {EventTypes}, expected {ExpectedVersion}", @@ -140,13 +143,13 @@ module private Write = let eventDataBytes events = let eventDataLen (x : NewStreamMessage) = match x.JsonData |> System.Text.Encoding.UTF8.GetBytes, x.JsonMetadata |> System.Text.Encoding.UTF8.GetBytes with Log.BlobLen bytes, Log.BlobLen metaBytes -> bytes + metaBytes events |> Array.sumBy eventDataLen - let private writeEventsLogged (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) - : Async = async { + let private writeEventsLogged (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) ct + : Task = task { let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propEventData "Json" events let bytes, count = eventDataBytes events, events.Length let log = log |> Log.prop "bytes" bytes let writeLog = log |> Log.prop "stream" streamName |> Log.prop "expectedVersion" version |> Log.prop "count" count - let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.Time + let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.time ct let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count} let resultLog, evt = match result, reqMetric with @@ -157,42 +160,40 @@ module private Write = (resultLog |> Log.event evt).Information("SqlEs{action:l} count={count} conflict={conflict}", "Write", events.Length, match evt with Log.WriteConflict _ -> true | _ -> false) return result } - let writeEvents (log : ILogger) retryPolicy (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) - : Async = + let writeEvents (log : ILogger) retryPolicy (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) ct + : Task = let call = writeEventsLogged conn streamName version events - Log.withLoggedRetries retryPolicy "writeAttempt" call log + Log.withLoggedRetries retryPolicy "writeAttempt" call log ct module private Read = open FSharp.Control - let private readSliceAsync (conn : IEventStoreConnection) (streamName : string) (direction : Direction) (batchSize : int) (startPos : int64) - : Async = async { - let call = - match direction with - | Direction.Forward -> conn.ReadStreamForwards(streamName, int startPos, batchSize) - | Direction.Backward -> conn.ReadStreamBackwards(streamName, int startPos, batchSize) - return! 
call |> Async.AwaitTaskCorrect } + let private readSliceAsync (conn : IEventStoreConnection) (streamName : string) (direction : Direction) (batchSize : int) (startPos : int64) ct + : Task = + match direction with + | Direction.Forward -> conn.ReadStreamForwards(streamName, int startPos, batchSize, ct) + | Direction.Backward -> conn.ReadStreamBackwards(streamName, int startPos, batchSize, ct) let (|ResolvedEventLen|) (x : StreamMessage) = let data = x.GetJsonData() |> Async.AwaitTaskCorrect |> Async.RunSynchronously match data, x.JsonMetadata with Log.StrLen bytes, Log.StrLen metaBytes -> bytes + metaBytes - let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) : Async = async { - let! t, slice = readSliceAsync conn streamName direction batchSize startPos |> Stopwatch.Time + let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) ct : Task = task { + let! t, slice = readSliceAsync conn streamName direction batchSize startPos |> Stopwatch.time ct let bytes, count = slice.Messages |> Array.sumBy (|ResolvedEventLen|), slice.Messages.Length - let reqMetric : Log.Measurement ={ stream = streamName; interval = t; bytes = bytes; count = count} + let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count } let evt = Log.Slice (direction, reqMetric) let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propResolvedEvents "Json" slice.Messages (log |> Log.prop "startPos" startPos |> Log.prop "bytes" bytes |> Log.event evt).Information("SqlEs{action:l} count={count} version={version}", "Read", count, slice.LastStreamVersion) return slice } - let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> Async) - (maxPermittedBatchReads : int option) (startPosition : int64) - : AsyncSeq = - let rec loop batchCount pos : AsyncSeq = asyncSeq { + let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> CancellationToken -> Task) + (maxPermittedBatchReads : int option) (startPosition : int64) ct + : IAsyncEnumerable = + let rec loop batchCount pos : IAsyncEnumerable = taskSeq { match maxPermittedBatchReads with | Some mpbr when batchCount >= mpbr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () let batchLog = log |> Log.prop "batchIndex" batchCount - let! slice = readSlice pos batchLog + let! slice = readSlice pos batchLog ct match slice.Status with | PageReadStatus.StreamNotFound -> yield Some (int64 ExpectedVersion.EmptyStream), Array.empty // NB NoStream in ES version= -1 | PageReadStatus.Success -> @@ -212,42 +213,40 @@ module private Read = (log |> Log.prop "bytes" bytes |> Log.event evt).Information( "SqlEs{action:l} stream={stream} count={count}/{batches} version={version}", action, streamName, count, batches, version) - let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition - : Async = async { - let mergeBatches (batches : AsyncSeq) = async { + let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition ct + : Task = task { + let mergeBatches (batches : IAsyncEnumerable) = task { let mutable versionFromStream = None let! 
(events : ResolvedEvent[]) = batches - |> AsyncSeq.map (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) - |> AsyncSeq.concatSeq - |> AsyncSeq.toArrayAsync + |> TaskSeq.collectSeq (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) + |> TaskSeq.toArrayAsync let version = match versionFromStream with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, events } let call pos = loggedReadSlice conn streamName Direction.Forward batchSize pos - let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) + let retryingLoggingReadSlice pos ct = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) ct let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName - let batches : AsyncSeq = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition - let! t, (version, events) = mergeBatches batches |> Stopwatch.Time + let batches ct : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition ct + let! t, (version, events) = (fun ct -> batches ct |> mergeBatches) |> Stopwatch.time ct log |> logBatchRead direction streamName t events batchSize version return version, events } let partitionPayloadFrom firstUsedEventNumber : ResolvedEvent[] -> int * int = let acc (tu, tr) (ResolvedEventLen bytes as y) = if y.Position < firstUsedEventNumber then tu, tr + bytes else tu + bytes, tr Array.fold acc (0, 0) - let loadBackwardsUntilCompactionOrStart (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName (tryDecode, isOrigin) - : Async = async { - let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : AsyncSeq) - : Async = async { + let loadBackwardsUntilCompactionOrStart (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName (tryDecode, isOrigin) ct + : Task = task { + let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : IAsyncEnumerable) + : Task = task { let versionFromStream, lastBatch = ref None, ref None let! 
tempBackward = batchesBackward - |> AsyncSeq.map (fun batch -> + |> TaskSeq.collectSeq (fun batch -> match batch with | None, events -> lastBatch.Value <- Some events; events | Some _ as reportedVersion, events -> versionFromStream.Value <- reportedVersion; lastBatch.Value <- Some events; events |> Array.map (fun e -> struct (e, tryDecode e))) - |> AsyncSeq.concatSeq - |> AsyncSeq.takeWhileInclusive (function + |> TaskSeq.takeWhileInclusive (function | x, ValueSome e when isOrigin e -> match lastBatch.Value with | None -> log.Information("SqlEsStop stream={stream} at={eventNumber}", streamName, x.Position) @@ -256,7 +255,7 @@ module private Read = log.Information("SqlEsStop stream={stream} at={eventNumber} used={used} residual={residual}", streamName, x.Position, used, residual) false | _ -> true) // continue the search - |> AsyncSeq.toArrayAsync + |> TaskSeq.toArrayAsync let eventsForward = Array.Reverse(tempBackward); tempBackward // sic - relatively cheap, in-place reverse of something we own let version = match versionFromStream.Value with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, eventsForward } @@ -266,8 +265,8 @@ module private Read = let startPosition = int64 Position.End let direction = Direction.Backward let readlog = log |> Log.prop "direction" direction - let batchesBackward : AsyncSeq = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition - let! t, (version, events) = mergeFromCompactionPointOrStartFromBackwardsStream log batchesBackward |> Stopwatch.Time + let batchesBackward ct : IAsyncEnumerable = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition ct + let! t, (version, events) = (fun ct -> batchesBackward ct |> mergeFromCompactionPointOrStartFromBackwardsStream log) |> Stopwatch.time ct log |> logBatchRead direction streamName t (Array.map ValueTuple.fst events) batchSize version return version, events } @@ -361,32 +360,32 @@ type SqlStreamStoreContext(connection : SqlStreamStoreConnection, batchOptions : member val BatchOptions = batchOptions member internal _.TokenEmpty = Token.ofUncompactedVersion batchOptions.BatchSize -1L - member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType) : Async = async { - let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L + member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType, ct) : Task = task { + let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L ct match tryIsResolvedEventEventType isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events | Some isCompactionEvent -> match events |> Array.tryFindBack isCompactionEvent with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin) : Async = async { + member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin, ct) : Task = task { let! 
version, events = - Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) + Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) ct match Array.tryHead events |> Option.filter (function _, ValueSome e -> isOrigin e | _ -> false) with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events | Some (resolvedEvent, _) -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events } - member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType) - : Async = async { + member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType, ct) + : Task = task { let streamPosition = token.streamVersion + 1L - let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition + let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition ct match isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events | Some isCompactionEvent -> match events |> Array.tryFindBack (fun re -> match tryDecode re with ValueSome e -> isCompactionEvent e | _ -> false) with | None -> return Token.ofPreviousTokenAndEventsLength streamToken events.Length batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.TrySync(log, streamName, (Token.Unpack pos as streamToken), events, encodedEvents : EventData array, isCompactionEventType) : Async = async { - match! Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName pos.streamVersion encodedEvents with + member internal _.TrySync(log, streamName, (Token.Unpack pos as streamToken), events, encodedEvents : EventData array, isCompactionEventType, ct) : Task = task { + match! 
Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName pos.streamVersion encodedEvents ct with | EsSyncResult.ConflictUnknown -> return GatewaySyncResult.ConflictUnknown | EsSyncResult.Written wr -> @@ -430,23 +429,25 @@ type private Category<'event, 'state, 'context>(context : SqlStreamStoreContext, match access with | None | Some AccessStrategy.LatestKnownEvent -> fun _ -> true | Some (AccessStrategy.RollingSnapshots (isValid, _)) -> isValid - let loadAlgorithm streamName requireLeader log = + let loadAlgorithm streamName requireLeader log ct = match access with - | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None) + | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None, ct) | Some AccessStrategy.LatestKnownEvent - | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin) - let load (fold : 'state -> 'event seq -> 'state) initial f = async { + | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin, ct) + let load (fold : 'state -> 'event seq -> 'state) initial f = task { let! token, events = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger) : Async = - load fold initial (loadAlgorithm streamName requireLeader log) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger) : Async = - load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate)) + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger, ct) + : Task = + load fold initial (loadAlgorithm streamName requireLeader log ct) + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger, ct) + : Task = + load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate, ct)) member _.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context) : Async> = async { + streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) : Task> = task { let encode e = codec.Encode(ctx, e) let events = match access with @@ -455,25 +456,24 @@ type private Category<'event, 'state, 'context>(context : SqlStreamStoreContext, let cc = CompactionContext(Array.length events, token.batchCapacityLimit.Value) if cc.IsCompactionDue then Array.append events (fold state events |> compact |> Array.singleton) else events let encodedEvents : EventData[] = events |> Array.map (encode >> UnionEncoderAdapters.eventDataOfEncodedEvent) - match! context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate) with + match! 
context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate, ct) with | GatewaySyncResult.ConflictUnknown -> - return SyncResult.Conflict (fun ct -> load fold state (context.Reload(streamName, (*requireLeader*)true, log, streamToken, tryDecode, compactionPredicate)) |> Async.startAsTask ct) + return SyncResult.Conflict (fun ct -> load fold state (context.Reload(streamName, (*requireLeader*)true, log, streamToken, tryDecode, compactionPredicate, ct))) | GatewaySyncResult.Written token' -> return SyncResult.Written (token', fold state (Seq.ofArray events)) } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = - let batched log streamName requireLeader ct = category.Load(fold, initial, streamName, requireLeader, log) |> Async.startAsTask ct interface ICategory<'event, 'state, 'context> with member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = task { match readCache with - | None -> return! batched log streamName requireLeader ct + | None -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) | Some (cache : ICache, prefix : string) -> match! cache.TryGet(prefix + streamName) with - | ValueNone -> return! batched log streamName requireLeader ct + | ValueNone -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) | ValueSome tokenAndState when allowStale -> return tokenAndState - | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log) } - member _.TrySync(log, _categoryName, _streamId, streamName, context, _init, token, originState, events, _ct) = task { - match! category.TrySync(log, fold, streamName, token, originState, events, context) with + | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log, ct) } + member _.TrySync(log, _categoryName, _streamId, streamName, context, _init, token, originState, events, ct) = task { + match! category.TrySync(log, fold, streamName, token, originState, events, context, ct) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token', state') -> return SyncResult.Written (token', state') } diff --git a/src/Equinox/Equinox.fsproj b/src/Equinox/Equinox.fsproj index c4bdb211d..d57abc927 100644 --- a/src/Equinox/Equinox.fsproj +++ b/src/Equinox/Equinox.fsproj @@ -14,10 +14,7 @@ - - - contentFiles - + diff --git a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs index c92e18cef..dccad5972 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs @@ -151,12 +151,12 @@ type Tests(testOutputHelper) = else verifyRequestChargesMax 448 // 447.5 // 463.01 observed capture.Clear() - let! pos = ctx.Sync(stream,?position=None) + let! pos = ctx.Sync(stream, ?position = None) test <@ [EqxAct.Tip] = capture.ExternalCalls @> verifyRequestChargesMax 5 // 41 observed // for a 200, you'll pay a lot (we omitted to include the position that NonIdempotentAppend yielded) capture.Clear() - let! _pos = ctx.Sync(stream,pos) + let! 
_pos = ctx.Sync(stream, pos) test <@ [EqxAct.TipNotModified] = capture.ExternalCalls @> verifyRequestChargesMax 1 // for a 304 by definition - when an etag IfNotMatch is honored, you only pay one RU } diff --git a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs index 91493ce14..298712a02 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs @@ -13,7 +13,7 @@ let private archiveTableName = tryRead "EQUINOX_DYNAMO_TABLE_ARCHIVE" |> Option. let discoverConnection () = match tryRead "EQUINOX_DYNAMO_CONNECTION" with - | None -> "dynamodb-local", "http://localhost:8000" + | None -> "dynamodb-local", "http://localhost:8000" // OR: change to "https://dynamodb.eu-west-1.amazonaws.com" to hit prod instance | Some connectionString -> "EQUINOX_DYNAMO_CONNECTION", connectionString let createClient (log : Serilog.ILogger) name serviceUrl = @@ -21,6 +21,7 @@ let createClient (log : Serilog.ILogger) name serviceUrl = let clientConfig = AmazonDynamoDBConfig(ServiceURL = serviceUrl) log.Information("DynamoStore {name} {endpoint}", name, serviceUrl) // Credentials are not validated if connecting to local instance so anything will do (this avoids it looking for profiles to be configured) + // OR: don't pass credentials to ctor to use keychain configured access let credentials = Amazon.Runtime.BasicAWSCredentials("A", "A") new AmazonDynamoDBClient(credentials, clientConfig) :> IAmazonDynamoDB diff --git a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj index 6d293bcda..37dc7b9a2 100644 --- a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj +++ b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj @@ -7,6 +7,7 @@ + diff --git a/tools/Equinox.Tool/Equinox.Tool.fsproj b/tools/Equinox.Tool/Equinox.Tool.fsproj index 785fdc8ed..31ecd39c2 100644 --- a/tools/Equinox.Tool/Equinox.Tool.fsproj +++ b/tools/Equinox.Tool/Equinox.Tool.fsproj @@ -32,7 +32,7 @@ - + diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index e708495c2..c826e1783 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -550,7 +550,7 @@ let main argv = Log.Logger <- createDomainLog verbose verboseConsole maybeSeq try let log = Log.Logger try match p.GetSubCommand() with - | Init a -> CosmosInit.containerAndOrDb log a |> Async.RunSynchronously + | Init a -> (CosmosInit.containerAndOrDb log a CancellationToken.None).Wait() | InitAws a -> DynamoInit.table log a |> Async.RunSynchronously | Config a -> SqlInit.databaseOrSchema log a |> Async.RunSynchronously | Dump a -> Dump.run (log, verboseConsole, maybeSeq) a diff --git a/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj b/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj index e2f52454e..4ed530611 100644 --- a/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj +++ b/tools/Equinox.Tools.TestHarness/Equinox.Tools.TestHarness.fsproj @@ -16,7 +16,7 @@ - + From d3b2c2ed742507234f1bdf6cbff8a3e38ae53ab5 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 14:35:15 +0000 Subject: [PATCH 06/11] Reverts etc --- samples/Store/Integration/Integration.fsproj | 1 - .../Equinox.SqlStreamStore.fsproj | 2 +- src/Equinox.SqlStreamStore/SqlStreamStore.fs | 141 +++++++++--------- 
.../Equinox.CosmosStore.Integration.fsproj | 1 - .../Equinox.DynamoStore.Integration.fsproj | 1 - .../Equinox.EventStore.Integration.fsproj | 1 - .../Equinox.EventStoreDb.Integration.fsproj | 1 - .../Equinox.MemoryStore.Integration.fsproj | 1 - .../Equinox.MessageDb.Integration.fsproj | 1 - ...ox.SqlStreamStore.MsSql.Integration.fsproj | 1 - ...ox.SqlStreamStore.MySql.Integration.fsproj | 1 - ...SqlStreamStore.Postgres.Integration.fsproj | 1 - tools/Equinox.Tool/Program.fs | 2 +- 13 files changed, 73 insertions(+), 82 deletions(-) diff --git a/samples/Store/Integration/Integration.fsproj b/samples/Store/Integration/Integration.fsproj index 795d3c432..0b99f0aa5 100644 --- a/samples/Store/Integration/Integration.fsproj +++ b/samples/Store/Integration/Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false diff --git a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj index 2e8cb2c68..e82d86295 100644 --- a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj +++ b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj @@ -21,7 +21,7 @@ - + diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index 193651c24..f259fca80 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -5,9 +5,7 @@ open Serilog open SqlStreamStore open SqlStreamStore.Streams open System -open System.Collections.Generic open System.Threading -open System.Threading.Tasks type EventBody = ReadOnlyMemory type EventData = NewStreamMessage @@ -53,9 +51,9 @@ module Log = let data = x.GetJsonData() |> Async.AwaitTaskCorrect |> Async.RunSynchronously yield System.Collections.Generic.KeyValuePair<_, _>(x.Type, data) }) - let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> CancellationToken -> Task<'t>) log ct : Task<'t> = + let withLoggedRetries<'t> retryPolicy (contextLabel : string) (f : ILogger -> Async<'t>) log : Async<'t> = match retryPolicy with - | None -> f log ct + | None -> f log | Some retryPolicy -> let withLoggingContextWrapping count = let log = if count = 1 then log else log |> prop contextLabel count @@ -132,9 +130,9 @@ type EsSyncResult = Written of AppendResult | ConflictUnknown module private Write = /// Yields `EsSyncResult.Written` or `EsSyncResult.Conflict` to signify WrongExpectedVersion - let private writeEventsAsync (log : ILogger) (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) ct - : Task = task { - try let! wr = conn.AppendToStream(StreamId streamName, (if version = -1L then ExpectedVersion.NoStream else int version), events, ct) + let private writeEventsAsync (log : ILogger) (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) + : Async = async { + try let! wr = conn.AppendToStream(StreamId streamName, (if version = -1L then ExpectedVersion.NoStream else int version), events) |> Async.AwaitTaskCorrect return EsSyncResult.Written wr with :? 
WrongExpectedVersionException as ex -> log.Information(ex, "SqlEs TrySync WrongExpectedVersionException writing {EventTypes}, expected {ExpectedVersion}", @@ -143,13 +141,13 @@ module private Write = let eventDataBytes events = let eventDataLen (x : NewStreamMessage) = match x.JsonData |> System.Text.Encoding.UTF8.GetBytes, x.JsonMetadata |> System.Text.Encoding.UTF8.GetBytes with Log.BlobLen bytes, Log.BlobLen metaBytes -> bytes + metaBytes events |> Array.sumBy eventDataLen - let private writeEventsLogged (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) ct - : Task = task { + let private writeEventsLogged (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) (log : ILogger) + : Async = async { let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propEventData "Json" events let bytes, count = eventDataBytes events, events.Length let log = log |> Log.prop "bytes" bytes let writeLog = log |> Log.prop "stream" streamName |> Log.prop "expectedVersion" version |> Log.prop "count" count - let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.time ct + let! t, result = writeEventsAsync writeLog conn streamName version events |> Stopwatch.Time let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count} let resultLog, evt = match result, reqMetric with @@ -160,40 +158,42 @@ module private Write = (resultLog |> Log.event evt).Information("SqlEs{action:l} count={count} conflict={conflict}", "Write", events.Length, match evt with Log.WriteConflict _ -> true | _ -> false) return result } - let writeEvents (log : ILogger) retryPolicy (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) ct - : Task = + let writeEvents (log : ILogger) retryPolicy (conn : IEventStoreConnection) (streamName : string) (version : int64) (events : EventData[]) + : Async = let call = writeEventsLogged conn streamName version events - Log.withLoggedRetries retryPolicy "writeAttempt" call log ct + Log.withLoggedRetries retryPolicy "writeAttempt" call log module private Read = open FSharp.Control - let private readSliceAsync (conn : IEventStoreConnection) (streamName : string) (direction : Direction) (batchSize : int) (startPos : int64) ct - : Task = - match direction with - | Direction.Forward -> conn.ReadStreamForwards(streamName, int startPos, batchSize, ct) - | Direction.Backward -> conn.ReadStreamBackwards(streamName, int startPos, batchSize, ct) + let private readSliceAsync (conn : IEventStoreConnection) (streamName : string) (direction : Direction) (batchSize : int) (startPos : int64) + : Async = async { + let call = + match direction with + | Direction.Forward -> conn.ReadStreamForwards(streamName, int startPos, batchSize) + | Direction.Backward -> conn.ReadStreamBackwards(streamName, int startPos, batchSize) + return! call |> Async.AwaitTaskCorrect } let (|ResolvedEventLen|) (x : StreamMessage) = let data = x.GetJsonData() |> Async.AwaitTaskCorrect |> Async.RunSynchronously match data, x.JsonMetadata with Log.StrLen bytes, Log.StrLen metaBytes -> bytes + metaBytes - let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) ct : Task = task { - let! 
t, slice = readSliceAsync conn streamName direction batchSize startPos |> Stopwatch.time ct + let private loggedReadSlice conn streamName direction batchSize startPos (log : ILogger) : Async = async { + let! t, slice = readSliceAsync conn streamName direction batchSize startPos |> Stopwatch.Time let bytes, count = slice.Messages |> Array.sumBy (|ResolvedEventLen|), slice.Messages.Length - let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count } + let reqMetric : Log.Measurement ={ stream = streamName; interval = t; bytes = bytes; count = count} let evt = Log.Slice (direction, reqMetric) let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propResolvedEvents "Json" slice.Messages (log |> Log.prop "startPos" startPos |> Log.prop "bytes" bytes |> Log.event evt).Information("SqlEs{action:l} count={count} version={version}", "Read", count, slice.LastStreamVersion) return slice } - let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> CancellationToken -> Task) - (maxPermittedBatchReads : int option) (startPosition : int64) ct - : IAsyncEnumerable = - let rec loop batchCount pos : IAsyncEnumerable = taskSeq { + let private readBatches (log : ILogger) (readSlice : int64 -> ILogger -> Async) + (maxPermittedBatchReads : int option) (startPosition : int64) + : AsyncSeq = + let rec loop batchCount pos : AsyncSeq = asyncSeq { match maxPermittedBatchReads with | Some mpbr when batchCount >= mpbr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () let batchLog = log |> Log.prop "batchIndex" batchCount - let! slice = readSlice pos batchLog ct + let! slice = readSlice pos batchLog match slice.Status with | PageReadStatus.StreamNotFound -> yield Some (int64 ExpectedVersion.EmptyStream), Array.empty // NB NoStream in ES version= -1 | PageReadStatus.Success -> @@ -213,40 +213,42 @@ module private Read = (log |> Log.prop "bytes" bytes |> Log.event evt).Information( "SqlEs{action:l} stream={stream} count={count}/{batches} version={version}", action, streamName, count, batches, version) - let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition ct - : Task = task { - let mergeBatches (batches : IAsyncEnumerable) = task { + let loadForwardsFrom (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName startPosition + : Async = async { + let mergeBatches (batches : AsyncSeq) = async { let mutable versionFromStream = None let! 
(events : ResolvedEvent[]) = batches - |> TaskSeq.collectSeq (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) - |> TaskSeq.toArrayAsync + |> AsyncSeq.map (function None, events -> events | Some _ as reportedVersion, events -> versionFromStream <- reportedVersion; events) + |> AsyncSeq.concatSeq + |> AsyncSeq.toArrayAsync let version = match versionFromStream with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, events } let call pos = loggedReadSlice conn streamName Direction.Forward batchSize pos - let retryingLoggingReadSlice pos ct = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) ct + let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName - let batches ct : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition ct - let! t, (version, events) = (fun ct -> batches ct |> mergeBatches) |> Stopwatch.time ct + let batches : AsyncSeq = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition + let! t, (version, events) = mergeBatches batches |> Stopwatch.Time log |> logBatchRead direction streamName t events batchSize version return version, events } let partitionPayloadFrom firstUsedEventNumber : ResolvedEvent[] -> int * int = let acc (tu, tr) (ResolvedEventLen bytes as y) = if y.Position < firstUsedEventNumber then tu, tr + bytes else tu + bytes, tr Array.fold acc (0, 0) - let loadBackwardsUntilCompactionOrStart (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName (tryDecode, isOrigin) ct - : Task = task { - let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : IAsyncEnumerable) - : Task = task { + let loadBackwardsUntilCompactionOrStart (log : ILogger) retryPolicy conn batchSize maxPermittedBatchReads streamName (tryDecode, isOrigin) + : Async = async { + let mergeFromCompactionPointOrStartFromBackwardsStream (log : ILogger) (batchesBackward : AsyncSeq) + : Async = async { let versionFromStream, lastBatch = ref None, ref None let! 
tempBackward = batchesBackward - |> TaskSeq.collectSeq (fun batch -> + |> AsyncSeq.map (fun batch -> match batch with | None, events -> lastBatch.Value <- Some events; events | Some _ as reportedVersion, events -> versionFromStream.Value <- reportedVersion; lastBatch.Value <- Some events; events |> Array.map (fun e -> struct (e, tryDecode e))) - |> TaskSeq.takeWhileInclusive (function + |> AsyncSeq.concatSeq + |> AsyncSeq.takeWhileInclusive (function | x, ValueSome e when isOrigin e -> match lastBatch.Value with | None -> log.Information("SqlEsStop stream={stream} at={eventNumber}", streamName, x.Position) @@ -255,7 +257,7 @@ module private Read = log.Information("SqlEsStop stream={stream} at={eventNumber} used={used} residual={residual}", streamName, x.Position, used, residual) false | _ -> true) // continue the search - |> TaskSeq.toArrayAsync + |> AsyncSeq.toArrayAsync let eventsForward = Array.Reverse(tempBackward); tempBackward // sic - relatively cheap, in-place reverse of something we own let version = match versionFromStream.Value with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, eventsForward } @@ -265,8 +267,8 @@ module private Read = let startPosition = int64 Position.End let direction = Direction.Backward let readlog = log |> Log.prop "direction" direction - let batchesBackward ct : IAsyncEnumerable = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition ct - let! t, (version, events) = (fun ct -> batchesBackward ct |> mergeFromCompactionPointOrStartFromBackwardsStream log) |> Stopwatch.time ct + let batchesBackward : AsyncSeq = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition + let! t, (version, events) = mergeFromCompactionPointOrStartFromBackwardsStream log batchesBackward |> Stopwatch.Time log |> logBatchRead direction streamName t (Array.map ValueTuple.fst events) batchSize version return version, events } @@ -360,32 +362,32 @@ type SqlStreamStoreContext(connection : SqlStreamStoreConnection, batchOptions : member val BatchOptions = batchOptions member internal _.TokenEmpty = Token.ofUncompactedVersion batchOptions.BatchSize -1L - member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType, ct) : Task = task { - let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L ct + member internal _.LoadBatched(streamName, requireLeader, log, tryDecode, isCompactionEventType) : Async = async { + let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName 0L match tryIsResolvedEventEventType isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events | Some isCompactionEvent -> match events |> Array.tryFindBack isCompactionEvent with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin, ct) : Task = task { + member internal _.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin) : Async = async { let! 
version, events = - Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) ct + Read.loadBackwardsUntilCompactionOrStart log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName (tryDecode, isOrigin) match Array.tryHead events |> Option.filter (function _, ValueSome e -> isOrigin e | _ -> false) with | None -> return Token.ofUncompactedVersion batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events | Some (resolvedEvent, _) -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV ValueTuple.snd events } - member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType, ct) - : Task = task { + member internal _.Reload(streamName, requireLeader, log, (Token.Unpack token as streamToken), tryDecode, isCompactionEventType) + : Async = async { let streamPosition = token.streamVersion + 1L - let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition ct + let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy (conn requireLeader) batchOptions.BatchSize batchOptions.MaxBatches streamName streamPosition match isCompactionEventType with | None -> return Token.ofNonCompacting version, Array.chooseV tryDecode events | Some isCompactionEvent -> match events |> Array.tryFindBack (fun re -> match tryDecode re with ValueSome e -> isCompactionEvent e | _ -> false) with | None -> return Token.ofPreviousTokenAndEventsLength streamToken events.Length batchOptions.BatchSize version, Array.chooseV tryDecode events | Some resolvedEvent -> return Token.ofCompactionResolvedEventAndVersion resolvedEvent batchOptions.BatchSize version, Array.chooseV tryDecode events } - member internal _.TrySync(log, streamName, (Token.Unpack pos as streamToken), events, encodedEvents : EventData array, isCompactionEventType, ct) : Task = task { - match! Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName pos.streamVersion encodedEvents ct with + member internal _.TrySync(log, streamName, (Token.Unpack pos as streamToken), events, encodedEvents : EventData array, isCompactionEventType) : Async = async { + match! 
Write.writeEvents log connection.WriteRetryPolicy connection.WriteConnection streamName pos.streamVersion encodedEvents with | EsSyncResult.ConflictUnknown -> return GatewaySyncResult.ConflictUnknown | EsSyncResult.Written wr -> @@ -429,25 +431,23 @@ type private Category<'event, 'state, 'context>(context : SqlStreamStoreContext, match access with | None | Some AccessStrategy.LatestKnownEvent -> fun _ -> true | Some (AccessStrategy.RollingSnapshots (isValid, _)) -> isValid - let loadAlgorithm streamName requireLeader log ct = + let loadAlgorithm streamName requireLeader log = match access with - | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None, ct) + | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None) | Some AccessStrategy.LatestKnownEvent - | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin, ct) - let load (fold : 'state -> 'event seq -> 'state) initial f = task { + | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin) + let load (fold : 'state -> 'event seq -> 'state) initial f = async { let! token, events = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger, ct) - : Task = - load fold initial (loadAlgorithm streamName requireLeader log ct) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger, ct) - : Task = - load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate, ct)) + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger) : Async = + load fold initial (loadAlgorithm streamName requireLeader log) + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger) : Async = + load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate)) member _.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) : Task> = task { + streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context) : Async> = async { let encode e = codec.Encode(ctx, e) let events = match access with @@ -456,24 +456,25 @@ type private Category<'event, 'state, 'context>(context : SqlStreamStoreContext, let cc = CompactionContext(Array.length events, token.batchCapacityLimit.Value) if cc.IsCompactionDue then Array.append events (fold state events |> compact |> Array.singleton) else events let encodedEvents : EventData[] = events |> Array.map (encode >> UnionEncoderAdapters.eventDataOfEncodedEvent) - match! context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate, ct) with + match! 
context.TrySync(log, streamName, streamToken, events, encodedEvents, compactionPredicate) with | GatewaySyncResult.ConflictUnknown -> - return SyncResult.Conflict (fun ct -> load fold state (context.Reload(streamName, (*requireLeader*)true, log, streamToken, tryDecode, compactionPredicate, ct))) + return SyncResult.Conflict (fun ct -> load fold state (context.Reload(streamName, (*requireLeader*)true, log, streamToken, tryDecode, compactionPredicate)) |> Async.startAsTask ct) | GatewaySyncResult.Written token' -> return SyncResult.Written (token', fold state (Seq.ofArray events)) } type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = + let batched log streamName requireLeader ct = category.Load(fold, initial, streamName, requireLeader, log) |> Async.startAsTask ct interface ICategory<'event, 'state, 'context> with member _.Load(log, _categoryName, _streamId, streamName, allowStale, requireLeader, ct) = task { match readCache with - | None -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) + | None -> return! batched log streamName requireLeader ct | Some (cache : ICache, prefix : string) -> match! cache.TryGet(prefix + streamName) with - | ValueNone -> return! category.Load(fold, initial, streamName, requireLeader, log, ct) + | ValueNone -> return! batched log streamName requireLeader ct | ValueSome tokenAndState when allowStale -> return tokenAndState - | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log, ct) } - member _.TrySync(log, _categoryName, _streamId, streamName, context, _init, token, originState, events, ct) = task { - match! category.TrySync(log, fold, streamName, token, originState, events, context, ct) with + | ValueSome (token, state) -> return! category.Reload(fold, state, streamName, requireLeader, token, log) } + member _.TrySync(log, _categoryName, _streamId, streamName, context, _init, token, originState, events, _ct) = task { + match! 
category.TrySync(log, fold, streamName, token, originState, events, context) with | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token', state') -> return SyncResult.Written (token', state') } diff --git a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj index 37dc7b9a2..87ef41aed 100644 --- a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj +++ b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false diff --git a/tests/Equinox.DynamoStore.Integration/Equinox.DynamoStore.Integration.fsproj b/tests/Equinox.DynamoStore.Integration/Equinox.DynamoStore.Integration.fsproj index cf9076b95..260ac7388 100644 --- a/tests/Equinox.DynamoStore.Integration/Equinox.DynamoStore.Integration.fsproj +++ b/tests/Equinox.DynamoStore.Integration/Equinox.DynamoStore.Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false $(DefineConstants);STORE_DYNAMO diff --git a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj index 68c22cf6a..27487a367 100644 --- a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj +++ b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false $(DefineConstants);STORE_EVENTSTORE_LEGACY diff --git a/tests/Equinox.EventStoreDb.Integration/Equinox.EventStoreDb.Integration.fsproj b/tests/Equinox.EventStoreDb.Integration/Equinox.EventStoreDb.Integration.fsproj index 7c53fd2ce..fbf3a6adc 100644 --- a/tests/Equinox.EventStoreDb.Integration/Equinox.EventStoreDb.Integration.fsproj +++ b/tests/Equinox.EventStoreDb.Integration/Equinox.EventStoreDb.Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false $(DefineConstants);STORE_EVENTSTOREDB diff --git a/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj b/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj index b1190cff4..487716f0d 100644 --- a/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj +++ b/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false diff --git a/tests/Equinox.MessageDb.Integration/Equinox.MessageDb.Integration.fsproj b/tests/Equinox.MessageDb.Integration/Equinox.MessageDb.Integration.fsproj index fed2b8429..8a6fc0c74 100644 --- a/tests/Equinox.MessageDb.Integration/Equinox.MessageDb.Integration.fsproj +++ b/tests/Equinox.MessageDb.Integration/Equinox.MessageDb.Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false $(DefineConstants);STORE_MESSAGEDB diff --git a/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj b/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj index ddebf01a5..736c8129f 100644 --- a/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj +++ b/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj @@ -2,7 +2,6 @@ net6.0 - false $(DefineConstants);STORE_MSSQL diff --git a/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj b/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj index 8865b6669..0f58ca5a6 100644 --- 
a/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj
+++ b/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj
@@ -2,7 +2,6 @@

     net6.0
-    false
     $(DefineConstants);STORE_MYSQL


diff --git a/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj b/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj
index b955598e0..3bca394af 100644
--- a/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj
+++ b/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj
@@ -2,7 +2,6 @@

     net6.0
-    false
     $(DefineConstants);STORE_POSTGRES


diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs
index c826e1783..e708495c2 100644
--- a/tools/Equinox.Tool/Program.fs
+++ b/tools/Equinox.Tool/Program.fs
@@ -550,7 +550,7 @@ let main argv =
     Log.Logger <- createDomainLog verbose verboseConsole maybeSeq
     try let log = Log.Logger
         try match p.GetSubCommand() with
-            | Init a -> (CosmosInit.containerAndOrDb log a CancellationToken.None).Wait()
+            | Init a -> CosmosInit.containerAndOrDb log a |> Async.RunSynchronously
             | InitAws a -> DynamoInit.table log a |> Async.RunSynchronously
             | Config a -> SqlInit.databaseOrSchema log a |> Async.RunSynchronously
             | Dump a -> Dump.run (log, verboseConsole, maybeSeq) a

From d71f256b12c584d67160378c773f3480d1f01452 Mon Sep 17 00:00:00 2001
From: Ruben Bartelink
Date: Fri, 9 Dec 2022 14:59:17 +0000
Subject: [PATCH 07/11] Update CL

---
 CHANGELOG.md | 6 +++---
 README.md    | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e3a62ec34..b855152f5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,10 +38,10 @@ The `Unreleased` section name is replaced by the expected version of next releas
 - `CosmosStore.AccessStrategy.MultiSnapshot`,`Custom`: Change `list` and `seq` types to `array` [#338](https://github.com/jet/equinox/pull/338)
 - `EventStore`: Target `EventStore.Client` v `22.0.0-preview`; rename `Connector` -> `EventStoreConnector` [#317](https://github.com/jet/equinox/pull/317)
 - `Equinox.Tool`/`samples/`: switched to use `Equinox.EventStoreDb` [#196](https://github.com/jet/equinox/pull/196)
-- Update all non-Client dependencies except `FSharp.Core`, `FSharp.Control.AsyncSeq` [#310](https://github.com/jet/equinox/pull/310)
-- Replace `AsyncSeq` usage with `FSharp.Control.TaskSeq` [#361](https://github.com/jet/equinox/pull/361)
-- `FSharp.Core` requirement to `6.0.0` [#337](https://github.com/jet/equinox/pull/337)
+- Replace `AsyncSeq` usage with `FSharp.Control.TaskSeq` v `0.3.0` [#361](https://github.com/jet/equinox/pull/361)
+- Raise `FSharp.Core` requirement to `6.0.7` [#337](https://github.com/jet/equinox/pull/337) [#362](https://github.com/jet/equinox/pull/362)
 - Update all Stores to use `FsCodec` v `3.0.0`, with [`EventBody` types switching from `byte[]` to `ReadOnlyMemory<byte>` and/or `JsonElement` see FsCodec#75](https://github.com/jet/FsCodec/pull/75) [#323](https://github.com/jet/equinox/pull/323)
+- Update all non-Client dependencies except `FSharp.Core`, `FSharp.Control.AsyncSeq` [#310](https://github.com/jet/equinox/pull/310)
 - `CosmosStore.Core.Initialization.initAux`: Replace hard-coded manual 400 RU with `mode` parameter [#328](https://github.com/jet/equinox/pull/328) :pray: [@brihadish](https://github.com/brihadish)

 ### Removed
diff --git 
a/README.md b/README.md index 36980934c..54fb94f34 100644 --- a/README.md +++ b/README.md @@ -165,13 +165,13 @@ The components within this repository are delivered as multi-targeted Nuget pack - `Equinox.Core` [![NuGet](https://img.shields.io/nuget/v/Equinox.Core.svg)](https://www.nuget.org/packages/Equinox.Core/): Interfaces and helpers used in the concrete Store implementations, together with the default [`System.Runtime.Caching.Cache`-based] `Cache` implementation. Hosts generic utility types frequently useful alongside Equinox: [`AsyncCacheCell`](https://github.com/jet/equinox/blob/master/src/Equinox.Core/AsyncCacheCell.fs#L36), [`AsyncBatchingGate`](https://github.com/jet/equinox/blob/master/src/Equinox.Core/AsyncBatchingGate.fs#L41). ([depends](https://www.fuget.org/packages/Equinox.Core) on `Equinox`, `System.Runtime.Caching`, `Serilog` (but not specific Serilog sinks, i.e. you configure to emit to `NLog` etc)) - `Equinox.MemoryStore` [![MemoryStore NuGet](https://img.shields.io/nuget/v/Equinox.MemoryStore.svg)](https://www.nuget.org/packages/Equinox.MemoryStore/): In-memory store for integration testing/performance base-lining/providing out-of-the-box zero dependency storage for examples. ([depends](https://www.fuget.org/packages/Equinox.MemoryStore) on `Equinox.Core`, `FsCodec`) -- `Equinox.CosmosStore` [![CosmosStore NuGet](https://img.shields.io/nuget/v/Equinox.CosmosStore.svg)](https://www.nuget.org/packages/Equinox.CosmosStore/): Azure CosmosDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RU costs, instrumented to meet Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.CosmosStore) on `Equinox.Core`, `Microsoft.Azure.Cosmos` >= `3.27`, `FsCodec`, `System.Text.Json`, `FSharp.Control.TaskSeq` >= `0.3.0`) +- `Equinox.CosmosStore` [![CosmosStore NuGet](https://img.shields.io/nuget/v/Equinox.CosmosStore.svg)](https://www.nuget.org/packages/Equinox.CosmosStore/): Azure CosmosDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RU costs, instrumented to meet Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.CosmosStore) on `Equinox.Core`, `Microsoft.Azure.Cosmos` >= `3.27`, `FsCodec`, `System.Text.Json`, `FSharp.Control.TaskSeq`) - `Equinox.CosmosStore.Prometheus` [![CosmosStore.Prometheus NuGet](https://img.shields.io/nuget/v/Equinox.CosmosStore.Prometheus.svg)](https://www.nuget.org/packages/Equinox.CosmosStore.Prometheus/): Integration package providing a `Serilog.Core.ILogEventSink` that extracts detailed metrics information attached to the `LogEvent`s and feeds them to the `prometheus-net`'s `Prometheus.Metrics` static instance. ([depends](https://www.fuget.org/packages/Equinox.CosmosStore.Prometheus) on `Equinox.CosmosStore`, `prometheus-net >= 3.6.0`) -- `Equinox.DynamoStore` [![DynamoStore NuGet](https://img.shields.io/nuget/v/Equinox.DynamoStore.svg)](https://www.nuget.org/packages/Equinox.DynamoStore/): Amazon DynamoDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RC costs, patterned after `Equinox.CosmosStore`. 
([depends](https://www.fuget.org/packages/Equinox.DynamoStore) on `Equinox.Core`, `FSharp.AWS.DynamoDB` >= `0.11.2-beta`, `FsCodec`, `FSharp.Control.TaskSeq` >= `0.3.0`) +- `Equinox.DynamoStore` [![DynamoStore NuGet](https://img.shields.io/nuget/v/Equinox.DynamoStore.svg)](https://www.nuget.org/packages/Equinox.DynamoStore/): Amazon DynamoDB Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RC costs, patterned after `Equinox.CosmosStore`. ([depends](https://www.fuget.org/packages/Equinox.DynamoStore) on `Equinox.Core`, `FSharp.AWS.DynamoDB` >= `0.11.2-beta`, `FsCodec`, `FSharp.Control.TaskSeq`) - `Equinox.DynamoStore.Prometheus` [![DynamoStore.Prometheus NuGet](https://img.shields.io/nuget/v/Equinox.DynamoStore.Prometheus.svg)](https://www.nuget.org/packages/Equinox.DynamoStore.Prometheus/): Integration package providing a `Serilog.Core.ILogEventSink` that extracts detailed metrics information attached to the `LogEvent`s and feeds them to the `prometheus-net`'s `Prometheus.Metrics` static instance. ([depends](https://www.fuget.org/packages/Equinox.CosmosStore.Prometheus) on `Equinox.DynamoStore`, `prometheus-net >= 3.6.0`) -- `Equinox.EventStore` [![EventStore NuGet](https://img.shields.io/nuget/v/Equinox.EventStore.svg)](https://www.nuget.org/packages/Equinox.EventStore/): [EventStoreDB](https://eventstore.org/) Adapter designed to meet Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.EventStore) on `Equinox.Core`, `EventStore.Client >= 22.0.0-preview`, `FSharp.Control.TaskSeq >= 0.3.0`), EventStore Server version `21.10` or later). **NO NOT use for new projects - the TCP interface to EventStoreDB has long been deprecated, this package is only provided to ease migration scenarios and will be removed in due course** -- `Equinox.EventStoreDb` [![EventStoreDb NuGet](https://img.shields.io/nuget/v/Equinox.EventStoreDb.svg)](https://www.nuget.org/packages/Equinox.EventStoreDb/): Production-strength [EventStoreDB](https://eventstore.org/) Adapter. ([depends](https://www.fuget.org/packages/Equinox.EventStoreDb) on `Equinox.Core`, `EventStore.Client.Grpc.Streams` >= `22.0.0`, `FSharp.Control.TaskSeq` >= `0.3.0`, EventStore Server version `21.10` or later) -- `Equinox.MessageDb` [![MessageDb NuGet](https://img.shields.io/nuget/v/Equinox.MessageDb.svg)](https://www.nuget.org/packages/Equinox.MessageDb/): [MessageDb](http://docs.eventide-project.org/user-guide/message-db/) Adapter. ([depends](https://www.fuget.org/packages/Equinox.MessageDb) on `Equinox.Core`, `Npgsql` >= `6.0.0`, `FSharp.Control.TaskSeq` >= `0.3.0`)) +- `Equinox.EventStore` [![EventStore NuGet](https://img.shields.io/nuget/v/Equinox.EventStore.svg)](https://www.nuget.org/packages/Equinox.EventStore/): [EventStoreDB](https://eventstore.org/) Adapter designed to meet Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.EventStore) on `Equinox.Core`, `EventStore.Client >= 22.0.0-preview`, `FSharp.Control.TaskSeq`), EventStore Server version `21.10` or later). **DO NOT use for new projects - the TCP interface to EventStoreDB has long been deprecated, this package is only provided to ease migration scenarios and will be removed in due course** +- `Equinox.EventStoreDb` [![EventStoreDb NuGet](https://img.shields.io/nuget/v/Equinox.EventStoreDb.svg)](https://www.nuget.org/packages/Equinox.EventStoreDb/): Production-strength [EventStoreDB](https://eventstore.org/) Adapter. 
([depends](https://www.fuget.org/packages/Equinox.EventStoreDb) on `Equinox.Core`, `EventStore.Client.Grpc.Streams` >= `22.0.0`, `FSharp.Control.TaskSeq`, EventStore Server version `21.10` or later) +- `Equinox.MessageDb` [![MessageDb NuGet](https://img.shields.io/nuget/v/Equinox.MessageDb.svg)](https://www.nuget.org/packages/Equinox.MessageDb/): [MessageDb](http://docs.eventide-project.org/user-guide/message-db/) Adapter. ([depends](https://www.fuget.org/packages/Equinox.MessageDb) on `Equinox.Core`, `Npgsql` >= `6.0.0`, `FSharp.Control.TaskSeq`) - `Equinox.SqlStreamStore` [![SqlStreamStore NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore/): [SqlStreamStore](https://github.com/SQLStreamStore/SQLStreamStore) Adapter derived from `Equinox.EventStore` - provides core facilities (but does not connect to a specific database; see sibling `SqlStreamStore`.* packages). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore) on `Equinox.Core`, `FsCodec`, `SqlStreamStore >= 1.2.0-beta.8`, `FSharp.Control.TaskSeq`) - `Equinox.SqlStreamStore.MsSql` [![MsSql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MsSql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MsSql/): [SqlStreamStore.MsSql](https://sqlstreamstore.readthedocs.io/en/latest/sqlserver) Sql Server `Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MsSql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MsSql >= 1.2.0-beta.8`) - `Equinox.SqlStreamStore.MySql` [![MySql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MySql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MySql/): `SqlStreamStore.MySql` MySQL `Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MySql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MySql >= 1.2.0-beta.8`) From afaad3ea77c812bb9215108b3dc74dd15aee70f6 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 15:45:41 +0000 Subject: [PATCH 08/11] Final cleanup --- src/Equinox.CosmosStore/CosmosStore.fs | 2 +- src/Equinox.DynamoStore/DynamoStore.fs | 4 +- .../Equinox.DynamoStore.fsproj | 5 +- src/Equinox.EventStore/EventStore.fs | 4 +- src/Equinox.EventStoreDb/Caching.fs | 8 ++-- src/Equinox.EventStoreDb/EventStoreDb.fs | 7 +-- src/Equinox.MessageDb/MessageDb.fs | 47 +++++++++---------- src/Equinox.SqlStreamStore/SqlStreamStore.fs | 18 +++---- 8 files changed, 46 insertions(+), 49 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 42ed9b095..f7c564e3d 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -1116,7 +1116,7 @@ module internal Caching = tryReadCache : string -> Task>, updateCache : string -> struct (StreamToken * 'state) -> Task, checkUnfolds, compressUnfolds, mapUnfolds : Choice 'state -> 'event array, 'event array -> 'state -> 'event array * 'event array>) = - let cache streamName (inner : CancellationToken -> Task<_>) ct = task { + let cache streamName (inner : CancellationToken -> Task) ct = task { let! tokenAndState = inner ct do! 
updateCache streamName tokenAndState return tokenAndState } diff --git a/src/Equinox.DynamoStore/DynamoStore.fs b/src/Equinox.DynamoStore/DynamoStore.fs index 5cc731e6f..5ec2a79f8 100644 --- a/src/Equinox.DynamoStore/DynamoStore.fs +++ b/src/Equinox.DynamoStore/DynamoStore.fs @@ -1162,9 +1162,9 @@ module internal Caching = type CachingCategory<'event, 'state, 'context> ( category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, isOrigin : 'event -> bool, - tryReadCache : string -> Task>, updateCache : string -> struct (_ * _) -> Task, + tryReadCache : string -> Task>, updateCache : string -> struct (StreamToken * 'state) -> Task, checkUnfolds, mapUnfolds : Choice 'state -> 'event array, 'event array -> 'state -> 'event array * 'event array>) = - let cache streamName (inner : CancellationToken -> Task<_>) ct = task { + let cache streamName (inner : CancellationToken -> Task) ct = task { let! tokenAndState = inner ct do! updateCache streamName tokenAndState return tokenAndState } diff --git a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj index c74a851f6..95eb27c7a 100644 --- a/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj +++ b/src/Equinox.DynamoStore/Equinox.DynamoStore.fsproj @@ -13,10 +13,7 @@ - - - contentFiles - + diff --git a/src/Equinox.EventStore/EventStore.fs b/src/Equinox.EventStore/EventStore.fs index bad53fe8c..555124ef7 100755 --- a/src/Equinox.EventStore/EventStore.fs +++ b/src/Equinox.EventStore/EventStore.fs @@ -232,7 +232,7 @@ module private Read = let version = match versionFromStream with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, events } - let call pos log = loggedReadSlice conn streamName Direction.Forward batchSize pos log + let call = loggedReadSlice conn streamName Direction.Forward batchSize let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName @@ -271,7 +271,7 @@ module private Read = let version = match versionFromStream.Value with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, eventsForward } - let call pos = loggedReadSlice conn streamName Direction.Backward batchSize pos + let call = loggedReadSlice conn streamName Direction.Backward batchSize let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) let log = log |> Log.prop "batchSize" batchSize |> Log.prop "stream" streamName let startPosition = int64 StreamPosition.End diff --git a/src/Equinox.EventStoreDb/Caching.fs b/src/Equinox.EventStoreDb/Caching.fs index f72073efb..311db2bb3 100644 --- a/src/Equinox.EventStoreDb/Caching.fs +++ b/src/Equinox.EventStoreDb/Caching.fs @@ -2,10 +2,11 @@ module Equinox.Core.Caching open System.Threading.Tasks -type internal Decorator<'event, 'state, 'context>( - inner : ICategory<'event, 'state, 'context>, updateCache : string -> struct (StreamToken * 'state) -> Task) = +type internal Decorator<'event, 'state, 'context> + ( inner : ICategory<'event, 'state, 'context>, + updateCache : string -> struct (StreamToken * 'state) -> Task) = - let cache streamName (inner : Task<_>) = task { + let cache streamName (inner : Task) = task { let! tokenAndState = inner do! 
updateCache streamName tokenAndState return tokenAndState } @@ -13,7 +14,6 @@ type internal Decorator<'event, 'state, 'context>( interface ICategory<'event, 'state, 'context> with member _.Load(log, categoryName, streamId, streamName, allowStale, requireLeader, ct) = inner.Load(log, categoryName, streamId, streamName, allowStale, requireLeader, ct) |> cache streamName - member _.TrySync(log, categoryName, streamId, streamName, context, maybeInit, streamToken, state, events, ct) = task { match! inner.TrySync((log, categoryName, streamId, streamName, context, maybeInit, streamToken, state, events, ct)) with | SyncResult.Conflict resync -> return SyncResult.Conflict (fun ct -> resync ct |> cache streamName) diff --git a/src/Equinox.EventStoreDb/EventStoreDb.fs b/src/Equinox.EventStoreDb/EventStoreDb.fs index cbc955614..a179d5c8e 100755 --- a/src/Equinox.EventStoreDb/EventStoreDb.fs +++ b/src/Equinox.EventStoreDb/EventStoreDb.fs @@ -400,14 +400,15 @@ type private Category<'event, 'state, 'context>(context : EventStoreContext, cod let! struct (token, events) = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger, ct) = + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log, ct) = load fold initial (loadAlgorithm streamName requireLeader log ct) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger, ct) = + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log, ct) = load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate, ct)) member x.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) : Task> = task { + streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) + : Task> = task { let encode e = codec.Encode(ctx, e) let events = match access with diff --git a/src/Equinox.MessageDb/MessageDb.fs b/src/Equinox.MessageDb/MessageDb.fs index 8ad1712b8..3ff2de78e 100644 --- a/src/Equinox.MessageDb/MessageDb.fs +++ b/src/Equinox.MessageDb/MessageDb.fs @@ -4,7 +4,6 @@ open Equinox.Core open Equinox.Core.Tracing open Equinox.MessageDb.Core open FsCodec -open FsCodec.Core open Serilog open System open System.Diagnostics @@ -184,7 +183,7 @@ module Read = let inline len (bytes : EventBody) = bytes.Length let private resolvedEventLen (x : ITimelineEvent) = len x.Data + len x.Meta let private resolvedEventBytes events = events |> Array.sumBy resolvedEventLen - let private loggedReadSlice reader streamName batchSize batchIndex startPos requiresLeader (log : ILogger) ct : Task<_> = task { + let private loggedReadSlice reader streamName batchSize requiresLeader startPos batchIndex (log : ILogger) ct : Task<_> = task { let parentAct = Activity.Current use act = source.StartActivity("ReadSlice", ActivityKind.Client) if act <> null then act.AddStreamFromParent(parentAct).AddBatch(batchSize, batchIndex).AddStartPosition(startPos).AddLeader(requiresLeader) |> ignore @@ -261,8 +260,8 @@ module Read = return version, events } let act = Activity.Current if act <> null then act.AddBatchSize(batchSize).AddStartPosition(startPosition).AddLoadMethod("BatchForward") |> ignore - let call pos 
batchIndex = loggedReadSlice reader streamName batchSize batchIndex pos requiresLeader - let retryingLoggingReadSlice pos batchIndex log = Log.withLoggedRetries retryPolicy "readAttempt" (call pos batchIndex) log + let call = loggedReadSlice reader streamName batchSize requiresLeader + let retryingLoggingReadSlice pos batchIndex = Log.withLoggedRetries retryPolicy "readAttempt" (call pos batchIndex) let log = log |> Log.prop "batchSize" batchSize |> Log.prop "stream" streamName let batches ct : Task array> = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition ct @@ -272,6 +271,7 @@ module Read = return version, events } module private Token = + // NOTE MessageDB's streamVersion is -1 based, similar to ESDB and SSS let create streamVersion : StreamToken = { value = box streamVersion @@ -288,8 +288,8 @@ module private Token = let supersedes struct (current, x) = x.version > current.version - module private Snapshot = + let inline snapshotCategory original = original + ":snapshot" let inline streamName category (streamId : string) = Equinox.Core.StreamName.render (snapshotCategory category) streamId type Meta = {| streamVersion : int64 |} // STJ doesn't want to serialize it unless its anonymous @@ -315,11 +315,11 @@ type BatchOptions(getBatchSize : Func, []?batchCountLimit) = member val MaxBatches = batchCountLimit [] -type GatewaySyncResult = Written of StreamToken | ConflictUnknown +type internal GatewaySyncResult = Written of StreamToken | ConflictUnknown type MessageDbContext(connection : MessageDbConnection, batchOptions : BatchOptions) = new ( connection : MessageDbConnection, - // Max number of Events to retrieve in a single batch. Also affects frequency of RollingSnapshots. Default: 500. + // Max number of Events to retrieve in a single batch. Also affects frequency of Snapshots. Default: 500. [] ?batchSize) = MessageDbContext(connection, BatchOptions(batchSize = defaultArg batchSize 500)) member val BatchOptions = batchOptions @@ -336,18 +336,13 @@ type MessageDbContext(connection : MessageDbConnection, batchOptions : BatchOpti let! _, events = Read.loadLastEvent log connection.ReadRetryPolicy connection.Reader requireLeader snapshotStream (Some eventType) ct return Snapshot.decode tryDecode events } - member _.StoreSnapshot(category, streamId, log, event, ct) = task { - let snapshotStream = Snapshot.streamName category streamId - let category = Snapshot.snapshotCategory category - do! Write.writeEvents log None connection.Writer (category, streamId, snapshotStream) Any [| event |] ct :> Task } - member _.Reload(streamName, requireLeader, log, token, tryDecode, ct) : Task = task { let streamVersion = Token.streamVersion token let startPos = streamVersion + 1L // Reading a stream uses {inclusive} positions, but the streamVersion is `-1`-based let! version, events = Read.loadForwardsFrom log connection.ReadRetryPolicy connection.Reader batchOptions.BatchSize batchOptions.MaxBatches streamName startPos requireLeader ct return Token.create (max streamVersion version), Array.chooseV tryDecode events } - member _.TrySync(log, category, streamId, streamName, token, encodedEvents : IEventData array, ct): Task = task { + member internal _.TrySync(log, category, streamId, streamName, token, encodedEvents : IEventData array, ct): Task = task { let streamVersion = Token.streamVersion token match! 
Write.writeEvents log connection.WriteRetryPolicy connection.Writer (category, streamId, streamName) (StreamVersion streamVersion) encodedEvents ct with | MdbSyncResult.ConflictUnknown -> @@ -356,6 +351,11 @@ type MessageDbContext(connection : MessageDbConnection, batchOptions : BatchOpti let token = Token.create version' return GatewaySyncResult.Written token } + member _.StoreSnapshot(category, streamId, log, event, ct) = task { + let snapshotStream = Snapshot.streamName category streamId + let category = Snapshot.snapshotCategory category + do! Write.writeEvents log None connection.Writer (category, streamId, snapshotStream) Any [| event |] ct :> Task } + [] type AccessStrategy<'event, 'state> = /// Load only the single most recent event defined in in a stream and trust that it'll be decoded and @@ -390,22 +390,15 @@ type private Category<'event, 'state, 'context>(context : MessageDbContext, code let! token, events = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, category, streamId, streamName, requireLeader, log : ILogger, ct) = + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, category, streamId, streamName, requireLeader, log, ct) = load fold initial (loadAlgorithm category streamId streamName requireLeader log ct) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName, requireLeader, token, log : ILogger, ct) = + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName, requireLeader, token, log, ct) = load fold state (context.Reload(streamName, requireLeader, log, token, codec.TryDecode, ct)) - member _.StoreSnapshot(category, streamId, log, ctx, token, snap, ct) = - let encoded = codec.Encode(ctx, snap) - let encoded = EventData.Create( - encoded.EventType, - encoded.Data, - meta = Snapshot.meta token) - context.StoreSnapshot(category, streamId, log, encoded, ct) - member x.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - categoryName, streamId, streamName, token, state : 'state, events : 'event array, ctx : 'context, ct) : Task> = task { + categoryName, streamId, streamName, token, state : 'state, events : 'event array, ctx : 'context, ct) + : Task> = task { let encode e = codec.Encode(ctx, e) let encodedEvents : IEventData array = events |> Array.map encode match! context.TrySync(log, categoryName, streamId, streamName, token, encodedEvents, ct) with @@ -420,6 +413,12 @@ type private Category<'event, 'state, 'context>(context : MessageDbContext, code do! 
x.StoreSnapshot(categoryName, streamId, log, ctx, token', toSnap state', ct) return SyncResult.Written (token', state') } + member _.StoreSnapshot(category, streamId, log, ctx, token, snapshotEvent, ct) = + let encodedWithMeta = + let rawEvent = codec.Encode(ctx, snapshotEvent) + FsCodec.Core.EventData.Create(rawEvent.EventType, rawEvent.Data, meta = Snapshot.meta token) + context.StoreSnapshot(category, streamId, log, encodedWithMeta, ct) + type private Folder<'event, 'state, 'context>(category : Category<'event, 'state, 'context>, fold : 'state -> 'event seq -> 'state, initial : 'state, ?readCache) = interface ICategory<'event, 'state, 'context> with member _.Load(log, categoryName, streamId, streamName, allowStale, requireLeader, ct) = task { diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index 193651c24..9446b2619 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -223,8 +223,8 @@ module private Read = |> TaskSeq.toArrayAsync let version = match versionFromStream with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, events } - let call pos = loggedReadSlice conn streamName Direction.Forward batchSize pos - let retryingLoggingReadSlice pos ct = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) ct + let call = loggedReadSlice conn streamName Direction.Forward batchSize + let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName let batches ct : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition ct @@ -259,7 +259,7 @@ module private Read = let eventsForward = Array.Reverse(tempBackward); tempBackward // sic - relatively cheap, in-place reverse of something we own let version = match versionFromStream.Value with Some version -> version | None -> invalidOp "no version encountered in event batch stream" return version, eventsForward } - let call pos = loggedReadSlice conn streamName Direction.Backward batchSize pos + let call = loggedReadSlice conn streamName Direction.Backward batchSize let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) let log = log |> Log.prop "batchSize" batchSize |> Log.prop "stream" streamName let startPosition = int64 Position.End @@ -434,20 +434,20 @@ type private Category<'event, 'state, 'context>(context : SqlStreamStoreContext, | None -> context.LoadBatched(streamName, requireLeader, log, tryDecode, None, ct) | Some AccessStrategy.LatestKnownEvent | Some (AccessStrategy.RollingSnapshots _) -> context.LoadBackwardsStoppingAtCompactionEvent(streamName, requireLeader, log, tryDecode, isOrigin, ct) - let load (fold : 'state -> 'event seq -> 'state) initial f = task { + + let load (fold : 'state -> 'event seq -> 'state) initial f : Task = task { let! 
token, events = f return struct (token, fold initial events) } - member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log : ILogger, ct) - : Task = + member _.Load(fold : 'state -> 'event seq -> 'state, initial : 'state, streamName : string, requireLeader, log, ct) = load fold initial (loadAlgorithm streamName requireLeader log ct) - member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log : ILogger, ct) - : Task = + member _.Reload(fold : 'state -> 'event seq -> 'state, state : 'state, streamName : string, requireLeader, token, log, ct) = load fold state (context.Reload(streamName, requireLeader, log, token, tryDecode, compactionPredicate, ct)) member _.TrySync<'context> ( log : ILogger, fold : 'state -> 'event seq -> 'state, - streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) : Task> = task { + streamName, (Token.Unpack token as streamToken), state : 'state, events : 'event array, ctx : 'context, ct) + : Task> = task { let encode e = codec.Encode(ctx, e) let events = match access with From 5634057960920298e82dcb24b92eb364a7e9ffd9 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 16:20:32 +0000 Subject: [PATCH 09/11] Tidy Stopwatch.time --- src/Equinox.Core/StopwatchInterval.fs | 12 ++---------- src/Equinox.CosmosStore/CosmosStore.fs | 11 +++++------ src/Equinox.EventStore/EventStore.fs | 4 ++-- src/Equinox.SqlStreamStore/SqlStreamStore.fs | 4 ++-- 4 files changed, 11 insertions(+), 20 deletions(-) diff --git a/src/Equinox.Core/StopwatchInterval.fs b/src/Equinox.Core/StopwatchInterval.fs index 1c8921f79..ab4dbcba9 100755 --- a/src/Equinox.Core/StopwatchInterval.fs +++ b/src/Equinox.Core/StopwatchInterval.fs @@ -8,15 +8,6 @@ open System.Threading.Tasks type Stopwatch = - /// Times an async computation, returning the result with a time range measurement. - /// Function to execute / time. - [] - static member Time(f : Async<'T>) : Async = async { - let startTicks = Stopwatch.GetTimestamp() - let! result = f - let endTicks = Stopwatch.GetTimestamp() - return StopwatchInterval(startTicks, endTicks), result } - /// Converts a tick count (derived from two Stopwatch.GetTimeStamp() Tick Counters) into a number of seconds [] static member TicksToSeconds(ticks : int64) : double = @@ -33,8 +24,9 @@ and [] StopwatchInterval(startTicks : int64, e override x.ToString () = sprintf "%g ms" x.ElapsedMilliseconds module Stopwatch = + [] - let time (ct : 'a) (f : 'a -> Task<'T>) : Task = task { + let time (ct : CancellationToken) (f : CancellationToken -> Task<'T>) : Task = task { let startTicks = Stopwatch.GetTimestamp() let! result = f ct let endTicks = Stopwatch.GetTimestamp() diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index f7c564e3d..683406374 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -630,7 +630,7 @@ module internal Query = use _ = query // see https://github.com/jet/equinox/issues/225 - in the Cosmos V4 SDK, all this is managed IAsyncEnumerable let mutable i = 0 while query.HasMoreResults do - let! t, (res : FeedResponse<'t>) = Stopwatch.time ct (fun ct -> query.ReadNextAsync ct) + let! 
t, (res : FeedResponse<'t>) = (fun ct -> query.ReadNextAsync(ct)) |> Stopwatch.time ct yield map i t res } let private mkQuery (log : ILogger) (container : Container, stream : string) includeTip (maxItems : int) (direction : Direction, minIndex, maxIndex) : FeedIterator = let order = if direction = Direction.Forward then "ASC" else "DESC" @@ -762,7 +762,7 @@ module internal Query = let walkLazy<'event> (log : ILogger) (container, stream) maxItems maxRequests (tryDecode : ITimelineEvent -> 'event option, isOrigin : 'event -> bool) - (direction, minIndex, maxIndex, ct) + (direction, minIndex, maxIndex, ct : CancellationToken) : IAsyncEnumerable<'event[]> = taskSeq { let query = mkQuery log (container, stream) true maxItems (direction, minIndex, maxIndex) @@ -885,7 +885,7 @@ module Prune = let tip = { tip with i = tip.i + int64 count; e = Array.skip count tip.e } let ro = ItemRequestOptions(EnableContentResponseOnWrite = false, IfMatchEtag = tip._etag) - let! t, updateRes = container.ReplaceItemAsync(tip, tip.id, PartitionKey stream, ro, ct) |> Async.AwaitTaskCorrect |> Stopwatch.Time + let! t, updateRes = (fun ct -> container.ReplaceItemAsync(tip, tip.id, PartitionKey stream, ro, ct)) |> Stopwatch.time ct let rc, ms = tipRu + updateRes.RequestCharge, t.ElapsedMilliseconds let reqMetric : Log.Measurement = { database = container.Database.Id; container = container.Id; stream = stream; interval = t; bytes = -1; count = count; ru = rc } let log = let evt = Log.Metric.Trim reqMetric in log |> Log.event evt @@ -1599,9 +1599,8 @@ module Events = /// NB typically, it is recommended to ensure idempotency of operations by using the `append` and related API as /// this facilitates ensuring consistency is maintained, and yields reduced latency and Request Charges impacts /// (See equivalent APIs on `Context` that yield `Position` values) - let appendAtEnd (ctx : EventsContext) (streamName : string) (events : IEventData<_>[]) : Async = async { - let! ct = Async.CancellationToken - return! ctx.NonIdempotentAppend(ctx.StreamId streamName, events) |> stripPosition } + let appendAtEnd (ctx : EventsContext) (streamName : string) (events : IEventData<_>[]) : Async = + ctx.NonIdempotentAppend(ctx.StreamId streamName, events) |> stripPosition /// Requests deletion of events up and including the specified index. /// Due to the need to preserve ordering of data in the stream, only complete Batches will be removed. diff --git a/src/Equinox.EventStore/EventStore.fs b/src/Equinox.EventStore/EventStore.fs index 555124ef7..869f59a27 100755 --- a/src/Equinox.EventStore/EventStore.fs +++ b/src/Equinox.EventStore/EventStore.fs @@ -236,8 +236,8 @@ module private Read = let retryingLoggingReadSlice pos = Log.withLoggedRetries retryPolicy "readAttempt" (call pos) let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName - let batches : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition - let! t, (version, events) = Stopwatch.time CancellationToken.None <| fun _ct -> mergeBatches batches + let batches _ct : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition + let! 
t, (version, events) = (batches >> mergeBatches) |> Stopwatch.time CancellationToken.None log |> logBatchRead direction streamName t events batchSize version return version, events } diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index 9446b2619..d3e22e923 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -228,7 +228,7 @@ module private Read = let direction = Direction.Forward let log = log |> Log.prop "batchSize" batchSize |> Log.prop "direction" direction |> Log.prop "stream" streamName let batches ct : IAsyncEnumerable = readBatches log retryingLoggingReadSlice maxPermittedBatchReads startPosition ct - let! t, (version, events) = (fun ct -> batches ct |> mergeBatches) |> Stopwatch.time ct + let! t, (version, events) = (batches >> mergeBatches) |> Stopwatch.time ct log |> logBatchRead direction streamName t events batchSize version return version, events } let partitionPayloadFrom firstUsedEventNumber : ResolvedEvent[] -> int * int = @@ -266,7 +266,7 @@ module private Read = let direction = Direction.Backward let readlog = log |> Log.prop "direction" direction let batchesBackward ct : IAsyncEnumerable = readBatches readlog retryingLoggingReadSlice maxPermittedBatchReads startPosition ct - let! t, (version, events) = (fun ct -> batchesBackward ct |> mergeFromCompactionPointOrStartFromBackwardsStream log) |> Stopwatch.time ct + let! t, (version, events) = (batchesBackward >> mergeFromCompactionPointOrStartFromBackwardsStream log) |> Stopwatch.time ct log |> logBatchRead direction streamName t (Array.map ValueTuple.fst events) batchSize version return version, events } From 2b324de06b7d8da8f3bc743857d156c7f83d6274 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 16:29:21 +0000 Subject: [PATCH 10/11] Remove superfluous ATCs --- src/Equinox.CosmosStore/CosmosStore.fs | 2 +- src/Equinox.DynamoStore/DynamoStore.fs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 683406374..f8459476a 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -549,7 +549,7 @@ module Initialization = return c } let private createStoredProcIfNotExists (c : Container) (name, body) ct : Task = task { - try let! r = c.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(id = name, body = body), cancellationToken = ct) |> Async.AwaitTaskCorrect + try let! r = c.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(id = name, body = body), cancellationToken = ct) return r.RequestCharge with :? CosmosException as ce when ce.StatusCode = System.Net.HttpStatusCode.Conflict -> return ce.RequestCharge } let private applyBatchAndTipContainerProperties (cp : ContainerProperties) = diff --git a/src/Equinox.DynamoStore/DynamoStore.fs b/src/Equinox.DynamoStore/DynamoStore.fs index 5ec2a79f8..ba8ff15f4 100644 --- a/src/Equinox.DynamoStore/DynamoStore.fs +++ b/src/Equinox.DynamoStore/DynamoStore.fs @@ -1445,7 +1445,7 @@ type EventsContext internal match maxCount with | Some limit -> maxCountPredicate limit | None -> fun _ -> false - let! token, events = store.Read(log, stream, (*consistentRead*)false, direction, (ValueSome, isOrigin), ct, ?minIndex = minIndex, ?maxIndex = maxIndex) |> Async.AwaitTaskCorrect + let! 
token, events = store.Read(log, stream, (*consistentRead*)false, direction, (ValueSome, isOrigin), ct, ?minIndex = minIndex, ?maxIndex = maxIndex) if direction = Direction.Backward then System.Array.Reverse events return token, events } From 993e7c336f53ae6154d917da261de8b6ae17adf3 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Dec 2022 16:32:13 +0000 Subject: [PATCH 11/11] Remove straggler AwaitTask calls --- .../Equinox.CosmosStore.Integration/CacheCellTests.fs | 10 +++++----- .../CosmosCoreIntegration.fs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs b/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs index 28f855afc..92d98c5f4 100644 --- a/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs +++ b/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs @@ -13,7 +13,7 @@ let ``AsyncLazy correctness`` () = async { let mutable count = 0 let cell = AsyncLazy(fun () -> task { return Interlocked.Increment &count }) false =! cell.IsValid(ValueNone) - let! accessResult = [|1 .. 100|] |> Array.map (fun _ -> cell.Await() |> Async.AwaitTask) |> Async.Parallel + let! accessResult = [|1 .. 100|] |> Array.map (fun _ -> cell.Await() |> Async.AwaitTaskCorrect) |> Async.Parallel true =! cell.IsValid(ValueNone) test <@ accessResult |> Array.forall ((=) 1) @> } @@ -27,13 +27,13 @@ let ``AsyncCacheCell correctness`` () = async { false =! cell.IsValid() - let! accessResult = [|1 .. 100|] |> Array.map (fun _i -> cell.Await CancellationToken.None |> Async.AwaitTask) |> Async.Parallel + let! accessResult = [|1 .. 100|] |> Array.map (fun _i -> cell.Await CancellationToken.None |> Async.AwaitTaskCorrect) |> Async.Parallel test <@ accessResult |> Array.forall ((=) 1) @> true =! cell.IsValid() expectedValue <- expectedValue + 1 - let! accessResult = [|1 .. 100|] |> Array.map (fun _i -> cell.Await CancellationToken.None |> Async.AwaitTask) |> Async.Parallel + let! accessResult = [|1 .. 100|] |> Array.map (fun _i -> cell.Await CancellationToken.None |> Async.AwaitTaskCorrect) |> Async.Parallel test <@ accessResult |> Array.forall ((=) 2) @> true =! cell.IsValid() } @@ -63,7 +63,7 @@ let ``AsyncCacheCell correctness with throwing`` initiallyThrowing = async { throwing <- false false =! cell.IsValid() else - let! r = cell.Await CancellationToken.None |> Async.AwaitTask + let! r = cell.Await CancellationToken.None |> Async.AwaitTaskCorrect true =! cell.IsValid() test <@ 1 = r @> @@ -87,7 +87,7 @@ let ``AsyncCacheCell correctness with throwing`` initiallyThrowing = async { expectedValue <- expectedValue + 1 - let! accessResult = [|1 .. 10|] |> Array.map (fun _ -> cell.Await CancellationToken.None |> Async.AwaitTask) |> Async.Parallel + let! accessResult = [|1 .. 10|] |> Array.map (fun _ -> cell.Await CancellationToken.None |> Async.AwaitTaskCorrect) |> Async.Parallel test <@ accessResult |> Array.forall ((=) 4) @> true =! cell.IsValid() } diff --git a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs index bcafacd8d..e78a0daf1 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs @@ -240,7 +240,7 @@ type Tests(testOutputHelper) = let! expected = add6EventsIn2BatchesEx ctx streamName 4 let! seq = Events.getAll ctx streamName 0L 1 - let! 
res = seq |> TaskSeq.takeWhileInclusive (fun _ -> false) |> TaskSeq.collectSeq id |> TaskSeq.toArrayAsync |> Async.AwaitTask + let! res = seq |> TaskSeq.takeWhileInclusive (fun _ -> false) |> TaskSeq.collectSeq id |> TaskSeq.toArrayAsync |> Async.AwaitTaskCorrect let expected = expected |> Array.take 1 verifyCorrectEvents 0L expected res @@ -301,7 +301,7 @@ type Tests(testOutputHelper) = res |> TaskSeq.collectSeq id |> TaskSeq.takeWhileInclusive (fun x -> x.Index <> 4L) - |> TaskSeq.toArrayAsync |> Async.AwaitTask + |> TaskSeq.toArrayAsync |> Async.AwaitTaskCorrect let expected = expected |> Array.skip 4 // omit index 0, 1 as we vote to finish at 2L verifyCorrectEventsBackward 5L expected res
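
For orientation on the AsyncSeq -> TaskSeq move that runs through this series, a minimal sketch of the consumption shape the stores converge on (illustrative only, not code from any patch above; `readPage` is a hypothetical stand-in for a store's paged read, while the `taskSeq` builder and the `TaskSeq.collectSeq`/`TaskSeq.toArrayAsync` combinators are the FSharp.Control.TaskSeq v0.3.0 surface the patches bind against):

    open System.Threading
    open System.Threading.Tasks
    open FSharp.Control // FSharp.Control.TaskSeq v0.3.0

    // `readPage` is a hypothetical paged read: page number -> items; an empty
    // array signals the end of the stream
    let rec pages (readPage : int -> CancellationToken -> Task<int[]>) page ct = taskSeq {
        let! (items : int[]) = readPage page ct
        if items.Length <> 0 then
            yield items
            yield! pages readPage (page + 1) ct }

    // Flattening and materializing mirrors the TaskSeq.collectSeq/toArrayAsync
    // pipelines that replace AsyncSeq.concatSeq/toArrayAsync in the stores
    let readAll readPage ct : Task<int[]> =
        pages readPage 0 ct |> TaskSeq.collectSeq id |> TaskSeq.toArrayAsync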
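For the Stopwatch.time reshaping in PATCH 09, an illustrative usage sketch (assumed caller code, not from the diffs; it presumes the Equinox.Core Stopwatch module is in scope): any CancellationToken-accepting, Task-returning function pipes into Stopwatch.time to yield the StopwatchInterval alongside the result, which is the shape the CosmosStore/EventStore/SqlStreamStore call sites above adopt.

    open System.Threading
    open System.Threading.Tasks
    open Equinox.Core

    // Time a Task-based call; `fetch` is a hypothetical read operation
    let fetchTimed (fetch : CancellationToken -> Task<string>) (ct : CancellationToken) = task {
        let! t, result = fetch |> Stopwatch.time ct
        // StopwatchInterval carries the elapsed time of the wrapped call
        printfn $"read took %.0f{t.ElapsedMilliseconds} ms"
        return result }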
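On the MessageDb snapshot handling reordered in PATCH 08: snapshots are written to a companion stream whose category is derived by suffixing the source category, per the Snapshot module in the diff. A small sketch of the naming scheme (illustrative; it assumes FsCodec's standard {category}-{streamId} stream name rendering):

    // Mirrors Snapshot.snapshotCategory/streamName in Equinox.MessageDb
    let snapshotCategory (category : string) = category + ":snapshot"
    let snapshotStreamName category streamId = sprintf "%s-%s" (snapshotCategory category) streamId

    // e.g. snapshots for stream "Cart-123" land in:
    //   snapshotStreamName "Cart" "123" => "Cart:snapshot-123"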