diff --git a/pkg/kv/kvserver/batcheval/cmd_add_sstable.go b/pkg/kv/kvserver/batcheval/cmd_add_sstable.go
index fa0dac2b40d3..1bd10eed0b7d 100644
--- a/pkg/kv/kvserver/batcheval/cmd_add_sstable.go
+++ b/pkg/kv/kvserver/batcheval/cmd_add_sstable.go
@@ -49,9 +49,11 @@ func declareKeysAddSSTable(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
+) error {
 	args := req.(*kvpb.AddSSTableRequest)
-	DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+	if err := DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset); err != nil {
+		return err
+	}

 	// We look up the range descriptor key to return its span.
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeDescriptorKey(rs.GetStartKey())})
@@ -72,6 +74,7 @@ func declareKeysAddSSTable(
 			Key: keys.MVCCRangeKeyGCKey(rs.GetRangeID()),
 		})
 	}
+	return nil
 }

 // AddSSTableRewriteConcurrency sets the concurrency of a single SST rewrite.
diff --git a/pkg/kv/kvserver/batcheval/cmd_barrier.go b/pkg/kv/kvserver/batcheval/cmd_barrier.go
index 40bacd038fac..a03f992292d5 100644
--- a/pkg/kv/kvserver/batcheval/cmd_barrier.go
+++ b/pkg/kv/kvserver/batcheval/cmd_barrier.go
@@ -32,7 +32,7 @@ func declareKeysBarrier(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// Barrier is special-cased in the concurrency manager to *not* actually
 	// grab these latches. Instead, any conflicting latches with these are waited
 	// on, but new latches aren't inserted.
@@ -44,6 +44,7 @@ func declareKeysBarrier(
 	// follower. We don't currently need any guarantees regarding concurrent
 	// reads, so this is acceptable.
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, req.Header().Span())
+	return nil
 }

 // Barrier evaluation is a no-op, as all the latch waiting happens in
diff --git a/pkg/kv/kvserver/batcheval/cmd_clear_range.go b/pkg/kv/kvserver/batcheval/cmd_clear_range.go
index f3c3d0c742c2..304b18d9314f 100644
--- a/pkg/kv/kvserver/batcheval/cmd_clear_range.go
+++ b/pkg/kv/kvserver/batcheval/cmd_clear_range.go
@@ -46,8 +46,11 @@ func declareKeysClearRange(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
-	DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+) error {
+	err := DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+	if err != nil {
+		return err
+	}
 	// We look up the range descriptor key to check whether the span
 	// is equal to the entire range for fast stats updating.
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeDescriptorKey(rs.GetStartKey())})
@@ -71,6 +74,7 @@ func declareKeysClearRange(
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{
 		Key: keys.MVCCRangeKeyGCKey(rs.GetRangeID()),
 	})
+	return nil
 }

 // ClearRange wipes all MVCC versions of keys covered by the specified
diff --git a/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go b/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go
index a6d6c955b29f..944db3f639d9 100644
--- a/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go
+++ b/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go
@@ -189,7 +189,9 @@ func TestCmdClearRange(t *testing.T) {
 			// should not cross the range bounds.
 			var latchSpans spanset.SpanSet
 			var lockSpans lockspanset.LockSpanSet
-			declareKeysClearRange(&desc, &cArgs.Header, cArgs.Args, &latchSpans, &lockSpans, 0)
+			require.NoError(t,
+				declareKeysClearRange(&desc, &cArgs.Header, cArgs.Args, &latchSpans, &lockSpans, 0),
+			)
 			batch := &wrappedBatch{Batch: spanset.NewBatchAt(eng.NewBatch(), &latchSpans, cArgs.Header.Timestamp)}
 			defer batch.Close()

diff --git a/pkg/kv/kvserver/batcheval/cmd_compute_checksum.go b/pkg/kv/kvserver/batcheval/cmd_compute_checksum.go
index d0dba475d072..2ea8b89c99f0 100644
--- a/pkg/kv/kvserver/batcheval/cmd_compute_checksum.go
+++ b/pkg/kv/kvserver/batcheval/cmd_compute_checksum.go
@@ -36,7 +36,7 @@ func declareKeysComputeChecksum(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// The correctness of range merges depends on the lease applied index of a
 	// range not being bumped while the RHS is subsumed. ComputeChecksum bumps a
 	// range's LAI and thus needs to be serialized with Subsume requests, in order
@@ -47,6 +47,7 @@ func declareKeysComputeChecksum(
 	// declare access over at least one key. We choose to declare read-only access
 	// over the range descriptor key.
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeDescriptorKey(rs.GetStartKey())})
+	return nil
 }

 // ReplicaChecksumVersion versions the checksum computation. Requests silently no-op
diff --git a/pkg/kv/kvserver/batcheval/cmd_conditional_put.go b/pkg/kv/kvserver/batcheval/cmd_conditional_put.go
index d4798cfcb3fd..b9221177a987 100644
--- a/pkg/kv/kvserver/batcheval/cmd_conditional_put.go
+++ b/pkg/kv/kvserver/batcheval/cmd_conditional_put.go
@@ -33,12 +33,12 @@ func declareKeysConditionalPut(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
+) error {
 	args := req.(*kvpb.ConditionalPutRequest)
 	if args.Inline {
-		DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+		return DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
 	} else {
-		DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+		return DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
 	}
 }

diff --git a/pkg/kv/kvserver/batcheval/cmd_delete_range.go b/pkg/kv/kvserver/batcheval/cmd_delete_range.go
index 58c76328b6f9..a51484d4dab0 100644
--- a/pkg/kv/kvserver/batcheval/cmd_delete_range.go
+++ b/pkg/kv/kvserver/batcheval/cmd_delete_range.go
@@ -38,12 +38,17 @@ func declareKeysDeleteRange(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
+) error {
 	args := req.(*kvpb.DeleteRangeRequest)
 	if args.Inline {
-		DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+		if err := DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset); err != nil {
+			return err
+		}
 	} else {
-		DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+		err := DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+		if err != nil {
+			return err
+		}
 	}

 	// When writing range tombstones, we must look for adjacent range tombstones
@@ -77,6 +82,7 @@ func declareKeysDeleteRange(
 			})
 		}
 	}
+	return nil
 }

 const maxDeleteRangeBatchBytes = 32 << 20
diff --git a/pkg/kv/kvserver/batcheval/cmd_delete_range_test.go b/pkg/kv/kvserver/batcheval/cmd_delete_range_test.go
index 672f1778f924..9603874d0c8c 100644
--- a/pkg/kv/kvserver/batcheval/cmd_delete_range_test.go
+++ b/pkg/kv/kvserver/batcheval/cmd_delete_range_test.go
@@ -254,7 +254,9 @@ func TestDeleteRangeTombstone(t *testing.T) {
 			// bounds.
 			var latchSpans spanset.SpanSet
 			var lockSpans lockspanset.LockSpanSet
-			declareKeysDeleteRange(evalCtx.Desc, &h, req, &latchSpans, &lockSpans, 0)
+			require.NoError(t,
+				declareKeysDeleteRange(evalCtx.Desc, &h, req, &latchSpans, &lockSpans, 0),
+			)
 			batch := spanset.NewBatchAt(engine.NewBatch(), &latchSpans, h.Timestamp)
 			defer batch.Close()

diff --git a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go
index 241058185746..2b2f68e1272c 100644
--- a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go
+++ b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go
@@ -48,13 +48,14 @@ func init() {
 // declareKeys{End,Heartbeat}Transaction.
 func declareKeysWriteTransaction(
 	_ ImmutableRangeState, header *kvpb.Header, req kvpb.Request, latchSpans *spanset.SpanSet,
-) {
+) error {
 	if header.Txn != nil {
 		header.Txn.AssertInitialized(context.TODO())
 		latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{
 			Key: keys.TransactionKey(req.Header().Key, header.Txn.ID),
 		})
 	}
+	return nil
 }

 func declareKeysEndTxn(
@@ -64,9 +65,11 @@ func declareKeysEndTxn(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	et := req.(*kvpb.EndTxnRequest)
-	declareKeysWriteTransaction(rs, header, req, latchSpans)
+	if err := declareKeysWriteTransaction(rs, header, req, latchSpans); err != nil {
+		return err
+	}
 	var minTxnTS hlc.Timestamp
 	if header.Txn != nil {
 		header.Txn.AssertInitialized(context.TODO())
@@ -208,6 +211,7 @@ func declareKeysEndTxn(
 			}
 		}
 	}
+	return nil
 }

 // EndTxn either commits or aborts (rolls back) an extant transaction according
diff --git a/pkg/kv/kvserver/batcheval/cmd_export.go b/pkg/kv/kvserver/batcheval/cmd_export.go
index 3145dfb1468d..b2e1b26291c8 100644
--- a/pkg/kv/kvserver/batcheval/cmd_export.go
+++ b/pkg/kv/kvserver/batcheval/cmd_export.go
@@ -76,14 +76,18 @@ func declareKeysExport(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
-	DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+) error {
+	err := DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+	if err != nil {
+		return err
+	}
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeGCThresholdKey(header.RangeID)})
 	// Export requests will usually not hold latches during their evaluation.
 	//
 	// See call to `AssertAllowed()` in GetGCThreshold() to understand why we need
 	// to disable these assertions for export requests.
 	latchSpans.DisableUndeclaredAccessAssertions()
+	return nil
 }

 // evalExport dumps the requested keys into files of non-overlapping key ranges
diff --git a/pkg/kv/kvserver/batcheval/cmd_gc.go b/pkg/kv/kvserver/batcheval/cmd_gc.go
index 1e48463bb6c9..37dc2f48ea48 100644
--- a/pkg/kv/kvserver/batcheval/cmd_gc.go
+++ b/pkg/kv/kvserver/batcheval/cmd_gc.go
@@ -38,7 +38,7 @@ func declareKeysGC(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	gcr := req.(*kvpb.GCRequest)
 	if gcr.RangeKeys != nil {
 		// When GC-ing MVCC range key tombstones, we need to serialize with
@@ -112,6 +112,7 @@ func declareKeysGC(
 	// Needed for updating optional GC hint.
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.RangeGCHintKey(rs.GetRangeID())})
 	latchSpans.DisableUndeclaredAccessAssertions()
+	return nil
 }

 // Create latches and merge adjacent.
diff --git a/pkg/kv/kvserver/batcheval/cmd_heartbeat_txn.go b/pkg/kv/kvserver/batcheval/cmd_heartbeat_txn.go
index 24d7fc2e1c37..7535b3fc31bf 100644
--- a/pkg/kv/kvserver/batcheval/cmd_heartbeat_txn.go
+++ b/pkg/kv/kvserver/batcheval/cmd_heartbeat_txn.go
@@ -36,8 +36,8 @@ func declareKeysHeartbeatTransaction(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
-	declareKeysWriteTransaction(rs, header, req, latchSpans)
+) error {
+	return declareKeysWriteTransaction(rs, header, req, latchSpans)
 }

 // HeartbeatTxn updates the transaction status and heartbeat
diff --git a/pkg/kv/kvserver/batcheval/cmd_lease_info.go b/pkg/kv/kvserver/batcheval/cmd_lease_info.go
index 3cca944d3339..30c78a6468b0 100644
--- a/pkg/kv/kvserver/batcheval/cmd_lease_info.go
+++ b/pkg/kv/kvserver/batcheval/cmd_lease_info.go
@@ -34,8 +34,9 @@ func declareKeysLeaseInfo(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeLeaseKey(rs.GetRangeID())})
+	return nil
 }

 // LeaseInfo returns information about the lease holder for the range.
diff --git a/pkg/kv/kvserver/batcheval/cmd_lease_request.go b/pkg/kv/kvserver/batcheval/cmd_lease_request.go
index 3e0abd0fc158..13040c882ddd 100644
--- a/pkg/kv/kvserver/batcheval/cmd_lease_request.go
+++ b/pkg/kv/kvserver/batcheval/cmd_lease_request.go
@@ -35,12 +35,13 @@ func declareKeysRequestLease(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// NOTE: RequestLease is run on replicas that do not hold the lease, so
 	// acquiring latches would not help synchronize with other requests. As
 	// such, the request does not declare latches. See also
 	// concurrency.shouldIgnoreLatches().
 	latchSpans.DisableUndeclaredAccessAssertions()
+	return nil
 }

 // RequestLease sets the range lease for this range. The command fails
diff --git a/pkg/kv/kvserver/batcheval/cmd_lease_transfer.go b/pkg/kv/kvserver/batcheval/cmd_lease_transfer.go
index 7d053d41c9c6..9a646775f7b0 100644
--- a/pkg/kv/kvserver/batcheval/cmd_lease_transfer.go
+++ b/pkg/kv/kvserver/batcheval/cmd_lease_transfer.go
@@ -35,7 +35,7 @@ func declareKeysTransferLease(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// TransferLease must not run concurrently with any other request so it uses
 	// latches to synchronize with all other reads and writes on the outgoing
 	// leaseholder. Additionally, it observes the state of the timestamp cache
@@ -56,6 +56,7 @@ func declareKeysTransferLease(
 	// reads. We'd need to be careful here, so we should only pull on this if we
 	// decide that doing so is important.
 	declareAllKeys(latchSpans)
+	return nil
 }

 // TransferLease sets the lease holder for the range.
diff --git a/pkg/kv/kvserver/batcheval/cmd_migrate.go b/pkg/kv/kvserver/batcheval/cmd_migrate.go
index a0fc7d03ab59..372244af7cb0 100644
--- a/pkg/kv/kvserver/batcheval/cmd_migrate.go
+++ b/pkg/kv/kvserver/batcheval/cmd_migrate.go
@@ -36,7 +36,7 @@ func declareKeysMigrate(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// TODO(irfansharif): This will eventually grow to capture the super set of
 	// all keys accessed by all migrations defined here. That could get
 	// cumbersome. We could spruce up the migration type and allow authors to
@@ -45,6 +45,7 @@ func declareKeysMigrate(

 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.RangeVersionKey(rs.GetRangeID())})
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeDescriptorKey(rs.GetStartKey())})
+	return nil
 }

 // migrationRegistry is a global registry of all KV-level migrations. See
diff --git a/pkg/kv/kvserver/batcheval/cmd_probe.go b/pkg/kv/kvserver/batcheval/cmd_probe.go
index f5fca4b8559e..b68c9f2fac85 100644
--- a/pkg/kv/kvserver/batcheval/cmd_probe.go
+++ b/pkg/kv/kvserver/batcheval/cmd_probe.go
@@ -29,13 +29,14 @@ func declareKeysProbe(
 	_ *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// Declare no keys. This means that we're not even serializing with splits
 	// (i.e. a probe could be directed at a key that will become the right-hand
 	// side of the split, and the split races ahead of the probe though the probe
 	// will still execute on the left-hand side). This is acceptable; we want the
 	// probe to bypass as much of the above-raft machinery as possible so that it
 	// gives us a signal on the replication layer alone.
+	return nil
 }

 func init() {
diff --git a/pkg/kv/kvserver/batcheval/cmd_push_txn.go b/pkg/kv/kvserver/batcheval/cmd_push_txn.go
index 3b183d1d1716..2cc110788516 100644
--- a/pkg/kv/kvserver/batcheval/cmd_push_txn.go
+++ b/pkg/kv/kvserver/batcheval/cmd_push_txn.go
@@ -42,10 +42,11 @@ func declareKeysPushTransaction(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	pr := req.(*kvpb.PushTxnRequest)
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.TransactionKey(pr.PusheeTxn.Key, pr.PusheeTxn.ID)})
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.AbortSpanKey(rs.GetRangeID(), pr.PusheeTxn.ID)})
+	return nil
 }

 // PushTxn resolves conflicts between concurrent txns (or between
diff --git a/pkg/kv/kvserver/batcheval/cmd_put.go b/pkg/kv/kvserver/batcheval/cmd_put.go
index 2523f3fdfe40..4e5974618863 100644
--- a/pkg/kv/kvserver/batcheval/cmd_put.go
+++ b/pkg/kv/kvserver/batcheval/cmd_put.go
@@ -33,12 +33,12 @@ func declareKeysPut(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
+) error {
 	args := req.(*kvpb.PutRequest)
 	if args.Inline {
-		DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+		return DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
 	} else {
-		DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+		return DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
 	}
 }

diff --git a/pkg/kv/kvserver/batcheval/cmd_query_intent.go b/pkg/kv/kvserver/batcheval/cmd_query_intent.go
index b32c41ec098e..3ff89042d2f0 100644
--- a/pkg/kv/kvserver/batcheval/cmd_query_intent.go
+++ b/pkg/kv/kvserver/batcheval/cmd_query_intent.go
@@ -34,11 +34,12 @@ func declareKeysQueryIntent(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// QueryIntent requests read the specified keys at the maximum timestamp in
 	// order to read any intent present, if one exists, regardless of the
 	// timestamp it was written at.
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, req.Header().Span())
+	return nil
 }

 // QueryIntent checks if an intent exists for the specified transaction at the
diff --git a/pkg/kv/kvserver/batcheval/cmd_query_locks.go b/pkg/kv/kvserver/batcheval/cmd_query_locks.go
index 45ef8b6d2472..84333a67663d 100644
--- a/pkg/kv/kvserver/batcheval/cmd_query_locks.go
+++ b/pkg/kv/kvserver/batcheval/cmd_query_locks.go
@@ -35,9 +35,10 @@ func declareKeysQueryLocks(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// Latch on the range descriptor during evaluation of query locks.
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeDescriptorKey(rs.GetStartKey())})
+	return nil
 }

 // QueryLocks uses the concurrency manager to query the state of locks
diff --git a/pkg/kv/kvserver/batcheval/cmd_query_txn.go b/pkg/kv/kvserver/batcheval/cmd_query_txn.go
index 778e57b19a19..f58d6dab1e3e 100644
--- a/pkg/kv/kvserver/batcheval/cmd_query_txn.go
+++ b/pkg/kv/kvserver/batcheval/cmd_query_txn.go
@@ -36,9 +36,10 @@ func declareKeysQueryTransaction(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	qr := req.(*kvpb.QueryTxnRequest)
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.TransactionKey(qr.Txn.Key, qr.Txn.ID)})
+	return nil
 }

 // QueryTxn fetches the current state of a transaction.
diff --git a/pkg/kv/kvserver/batcheval/cmd_range_stats.go b/pkg/kv/kvserver/batcheval/cmd_range_stats.go
index 5d91904dd230..a76d0c0d0cb5 100644
--- a/pkg/kv/kvserver/batcheval/cmd_range_stats.go
+++ b/pkg/kv/kvserver/batcheval/cmd_range_stats.go
@@ -34,11 +34,15 @@ func declareKeysRangeStats(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
-	DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+) error {
+	err := DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+	if err != nil {
+		return err
+	}
 	// The request will return the descriptor and lease.
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeDescriptorKey(rs.GetStartKey())})
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeLeaseKey(rs.GetRangeID())})
+	return nil
 }

 // RangeStats returns the MVCC statistics for a range.
diff --git a/pkg/kv/kvserver/batcheval/cmd_recompute_stats.go b/pkg/kv/kvserver/batcheval/cmd_recompute_stats.go
index 6688118d88ce..ec29072d669f 100644
--- a/pkg/kv/kvserver/batcheval/cmd_recompute_stats.go
+++ b/pkg/kv/kvserver/batcheval/cmd_recompute_stats.go
@@ -38,7 +38,7 @@ func declareKeysRecomputeStats(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// We don't declare any user key in the range. This is OK since all we're doing is computing a
 	// stats delta, and applying this delta commutes with other operations on the same key space.
 	//
@@ -58,6 +58,7 @@ func declareKeysRecomputeStats(
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.TransactionKey(rdKey, uuid.Nil)})
 	// Disable the assertions which check that all reads were previously declared.
 	latchSpans.DisableUndeclaredAccessAssertions()
+	return nil
 }

 // RecomputeStats recomputes the MVCCStats stored for this range and adjust them accordingly,
diff --git a/pkg/kv/kvserver/batcheval/cmd_recover_txn.go b/pkg/kv/kvserver/batcheval/cmd_recover_txn.go
index 45c021a00fe7..43eeff94ec84 100644
--- a/pkg/kv/kvserver/batcheval/cmd_recover_txn.go
+++ b/pkg/kv/kvserver/batcheval/cmd_recover_txn.go
@@ -36,10 +36,11 @@ func declareKeysRecoverTransaction(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	rr := req.(*kvpb.RecoverTxnRequest)
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.TransactionKey(rr.Txn.Key, rr.Txn.ID)})
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.AbortSpanKey(rs.GetRangeID(), rr.Txn.ID)})
+	return nil
 }

 // RecoverTxn attempts to recover the specified transaction from an
diff --git a/pkg/kv/kvserver/batcheval/cmd_resolve_intent.go b/pkg/kv/kvserver/batcheval/cmd_resolve_intent.go
index aff655cfd18b..21950130f52e 100644
--- a/pkg/kv/kvserver/batcheval/cmd_resolve_intent.go
+++ b/pkg/kv/kvserver/batcheval/cmd_resolve_intent.go
@@ -31,7 +31,7 @@ func init() {

 func declareKeysResolveIntentCombined(
 	rs ImmutableRangeState, req kvpb.Request, latchSpans *spanset.SpanSet,
-) {
+) error {
 	var status roachpb.TransactionStatus
 	var txnID uuid.UUID
 	var minTxnTS hlc.Timestamp
@@ -51,6 +51,7 @@ func declareKeysResolveIntentCombined(
 		// intent, but we can't tell whether we will or not ahead of time.
 		latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.AbortSpanKey(rs.GetRangeID(), txnID)})
 	}
+	return nil
 }

 func declareKeysResolveIntent(
@@ -60,8 +61,8 @@ func declareKeysResolveIntent(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
-	declareKeysResolveIntentCombined(rs, req, latchSpans)
+) error {
+	return declareKeysResolveIntentCombined(rs, req, latchSpans)
 }

 func resolveToMetricType(status roachpb.TransactionStatus, poison bool) *result.Metrics {
diff --git a/pkg/kv/kvserver/batcheval/cmd_resolve_intent_range.go b/pkg/kv/kvserver/batcheval/cmd_resolve_intent_range.go
index 715ccd3d8073..c9db13b0643d 100644
--- a/pkg/kv/kvserver/batcheval/cmd_resolve_intent_range.go
+++ b/pkg/kv/kvserver/batcheval/cmd_resolve_intent_range.go
@@ -33,8 +33,8 @@ func declareKeysResolveIntentRange(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
-	declareKeysResolveIntentCombined(rs, req, latchSpans)
+) error {
+	return declareKeysResolveIntentCombined(rs, req, latchSpans)
 }

 // ResolveIntentRange resolves write intents in the specified
diff --git a/pkg/kv/kvserver/batcheval/cmd_resolve_intent_test.go b/pkg/kv/kvserver/batcheval/cmd_resolve_intent_test.go
index e1961f3d255d..07c741f35283 100644
--- a/pkg/kv/kvserver/batcheval/cmd_resolve_intent_test.go
+++ b/pkg/kv/kvserver/batcheval/cmd_resolve_intent_test.go
@@ -111,7 +111,7 @@ func TestDeclareKeysResolveIntent(t *testing.T) {

 			if !ranged {
 				cArgs.Args = &ri
-				declareKeysResolveIntent(&desc, &h, &ri, &latchSpans, &lockSpans, 0)
+				require.NoError(t, declareKeysResolveIntent(&desc, &h, &ri, &latchSpans, &lockSpans, 0))
 				batch := spanset.NewBatch(engine.NewBatch(), &latchSpans)
 				defer batch.Close()
 				if _, err := ResolveIntent(ctx, batch, cArgs, &kvpb.ResolveIntentResponse{}); err != nil {
@@ -119,7 +119,9 @@ func TestDeclareKeysResolveIntent(t *testing.T) {
 				}
 			} else {
 				cArgs.Args = &rir
-				declareKeysResolveIntentRange(&desc, &h, &rir, &latchSpans, &lockSpans, 0)
+				require.NoError(
+					t,
+					declareKeysResolveIntentRange(&desc, &h, &rir, &latchSpans, &lockSpans, 0),
+				)
 				batch := spanset.NewBatch(engine.NewBatch(), &latchSpans)
 				defer batch.Close()
 				if _, err := ResolveIntentRange(ctx, batch, cArgs, &kvpb.ResolveIntentRangeResponse{}); err != nil {
@@ -205,7 +207,7 @@ func TestResolveIntentAfterPartialRollback(t *testing.T) {
 	}
 	ri.Key = k

-	declareKeysResolveIntent(&desc, &h, &ri, &spans, nil, 0)
+	require.NoError(t, declareKeysResolveIntent(&desc, &h, &ri, &spans, nil, 0))
 	rbatch = spanset.NewBatch(db.NewBatch(), &spans)
 	defer rbatch.Close()

@@ -229,7 +231,7 @@ func TestResolveIntentAfterPartialRollback(t *testing.T) {
 	rir.Key = k
 	rir.EndKey = endKey

-	declareKeysResolveIntentRange(&desc, &h, &rir, &spans, nil, 0)
+	require.NoError(t, declareKeysResolveIntentRange(&desc, &h, &rir, &spans, nil, 0))
 	rbatch = spanset.NewBatch(db.NewBatch(), &spans)
 	defer rbatch.Close()

diff --git a/pkg/kv/kvserver/batcheval/cmd_revert_range.go b/pkg/kv/kvserver/batcheval/cmd_revert_range.go
index 22a087e8db7d..ad5e7fd1ea21 100644
--- a/pkg/kv/kvserver/batcheval/cmd_revert_range.go
+++ b/pkg/kv/kvserver/batcheval/cmd_revert_range.go
@@ -43,9 +43,12 @@ func declareKeysRevertRange(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
+) error {
 	args := req.(*kvpb.RevertRangeRequest)
-	DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+	err := DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans, maxOffset)
+	if err != nil {
+		return err
+	}
 	// We look up the range descriptor key to check whether the span
 	// is equal to the entire range for fast stats updating.
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{Key: keys.RangeDescriptorKey(rs.GetStartKey())})
@@ -68,6 +71,7 @@ func declareKeysRevertRange(
 	latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{
 		Key: keys.MVCCRangeKeyGCKey(rs.GetRangeID()),
 	})
+	return nil
 }

 // isEmptyKeyTimeRange checks if the span has no writes in (since,until].
diff --git a/pkg/kv/kvserver/batcheval/cmd_subsume.go b/pkg/kv/kvserver/batcheval/cmd_subsume.go
index 6926f66a469c..38b634d163bf 100644
--- a/pkg/kv/kvserver/batcheval/cmd_subsume.go
+++ b/pkg/kv/kvserver/batcheval/cmd_subsume.go
@@ -37,13 +37,14 @@ func declareKeysSubsume(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	// Subsume must not run concurrently with any other command. It declares a
 	// non-MVCC write over every addressable key in the range; this guarantees
 	// that it conflicts with any other command because every command must
 	// declare at least one addressable key. It does not, in fact, write any
 	// keys.
 	declareAllKeys(latchSpans)
+	return nil
 }

 // Subsume freezes a range for merging with its left-hand neighbor. When called
diff --git a/pkg/kv/kvserver/batcheval/cmd_truncate_log.go b/pkg/kv/kvserver/batcheval/cmd_truncate_log.go
index 3c982aa6c5f2..7b07122203ba 100644
--- a/pkg/kv/kvserver/batcheval/cmd_truncate_log.go
+++ b/pkg/kv/kvserver/batcheval/cmd_truncate_log.go
@@ -37,9 +37,10 @@ func declareKeysTruncateLog(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	prefix := keys.RaftLogPrefix(rs.GetRangeID())
 	latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: prefix, EndKey: prefix.PrefixEnd()})
+	return nil
 }

 // TruncateLog discards a prefix of the raft log. Truncating part of a log that
diff --git a/pkg/kv/kvserver/batcheval/command.go b/pkg/kv/kvserver/batcheval/command.go
index 0a05cb93a579..7881dd4ed27e 100644
--- a/pkg/kv/kvserver/batcheval/command.go
+++ b/pkg/kv/kvserver/batcheval/command.go
@@ -33,7 +33,7 @@ type DeclareKeysFunc func(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-)
+) error

 // ImmutableRangeState exposes the properties of a Range that cannot change
 // across a Range's lifetime. The interface is used to manage the visibility
diff --git a/pkg/kv/kvserver/batcheval/declare.go b/pkg/kv/kvserver/batcheval/declare.go
index 35a5b3a5c292..793f9461607c 100644
--- a/pkg/kv/kvserver/batcheval/declare.go
+++ b/pkg/kv/kvserver/batcheval/declare.go
@@ -36,12 +36,13 @@ func DefaultDeclareKeys(
 	latchSpans *spanset.SpanSet,
 	_ *lockspanset.LockSpanSet,
 	_ time.Duration,
-) {
+) error {
 	access := spanset.SpanReadWrite
 	if kvpb.IsReadOnly(req) && !kvpb.IsLocking(req) {
 		access = spanset.SpanReadOnly
 	}
 	latchSpans.AddMVCC(access, req.Header().Span(), header.Timestamp)
+	return nil
 }

 // DefaultDeclareIsolatedKeys is similar to DefaultDeclareKeys, but it declares
@@ -56,7 +57,7 @@ func DefaultDeclareIsolatedKeys(
 	latchSpans *spanset.SpanSet,
 	lockSpans *lockspanset.LockSpanSet,
 	maxOffset time.Duration,
-) {
+) error {
 	var access spanset.SpanAccess
 	var str lock.Strength
 	timestamp := header.Timestamp
@@ -96,7 +97,16 @@ func DefaultDeclareIsolatedKeys(
 		str, dur = readOnlyReq.KeyLocking()
 		switch str {
 		case lock.None:
-			panic(errors.AssertionFailedf("unexpected non-locking read handling"))
+			// One reason we can be in this branch is if someone has asked for a
+			// replicated non-locking read. Detect this nonsensical case to better
+			// word the error message.
+			if dur == lock.Replicated {
+				return errors.AssertionFailedf(
+					"incompatible key locking strength %s and durability %s", str.String(), dur.String(),
+				)
+			} else {
+				return errors.AssertionFailedf("unexpected non-locking read handling")
+			}
 		case lock.Shared:
 			access = spanset.SpanReadOnly
 			// Unlike non-locking reads, shared-locking reads are isolated from
@@ -130,12 +140,13 @@ func DefaultDeclareIsolatedKeys(
 			// write latches at hlc.MaxTimestamp.
 			access = spanset.SpanReadWrite
 		default:
-			panic(errors.AssertionFailedf("unexpected lock strength %s", str))
+			return errors.AssertionFailedf("unexpected lock strength %s", str)
 		}
 	}
 	}
 	latchSpans.AddMVCC(access, req.Header().Span(), timestamp)
 	lockSpans.Add(str, req.Header().Span())
+	return nil
 }

 // DeclareKeysForRefresh determines whether a Refresh request should declare
@@ -151,24 +162,27 @@ func DeclareKeysForRefresh(
 	latchSpans *spanset.SpanSet,
 	lss *lockspanset.LockSpanSet,
 	dur time.Duration,
-) {
+) error {
 	if header.WaitPolicy == lock.WaitPolicy_Error {
-		DefaultDeclareIsolatedKeys(irs, header, req, latchSpans, lss, dur)
+		return DefaultDeclareIsolatedKeys(irs, header, req, latchSpans, lss, dur)
 	} else {
-		DefaultDeclareKeys(irs, header, req, latchSpans, lss, dur)
+		return DefaultDeclareKeys(irs, header, req, latchSpans, lss, dur)
 	}
 }

 // DeclareKeysForBatch adds all keys that the batch with the provided header
 // touches to the given SpanSet. This does not include keys touched during the
 // processing of the batch's individual commands.
-func DeclareKeysForBatch(rs ImmutableRangeState, header *kvpb.Header, latchSpans *spanset.SpanSet) {
+func DeclareKeysForBatch(
+	rs ImmutableRangeState, header *kvpb.Header, latchSpans *spanset.SpanSet,
+) error {
 	if header.Txn != nil {
 		header.Txn.AssertInitialized(context.TODO())
 		latchSpans.AddNonMVCC(spanset.SpanReadOnly, roachpb.Span{
 			Key: keys.AbortSpanKey(rs.GetRangeID(), header.Txn.ID),
 		})
 	}
+	return nil
 }

 // declareAllKeys declares a non-MVCC write over every addressable key. This
diff --git a/pkg/kv/kvserver/batcheval/declare_test.go b/pkg/kv/kvserver/batcheval/declare_test.go
index 171f2d71003d..38125a2a66b6 100644
--- a/pkg/kv/kvserver/batcheval/declare_test.go
+++ b/pkg/kv/kvserver/batcheval/declare_test.go
@@ -82,7 +82,10 @@ func TestRequestsSerializeWithAllKeys(t *testing.T) {
 				Sequence: 0,
 			})

-			command.DeclareKeys(desc, &header, otherRequest, &otherLatchSpans, &otherLockSpans, 0)
+			err := command.DeclareKeys(desc, &header, otherRequest, &otherLatchSpans, &otherLockSpans, 0)
+			if err != nil {
+				t.Error(err)
+			}
 			if !allLatchSpans.Intersects(&otherLatchSpans) {
 				t.Errorf("%s does not serialize with declareAllKeys", method)
 			}
diff --git a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go
index 07591fff9eff..79b6f72a4aef 100644
--- a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go
+++ b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go
@@ -1048,7 +1048,10 @@ func (c *cluster) collectSpans(
 	h := kvpb.Header{Txn: txn, Timestamp: ts, WaitPolicy: wp}
 	for _, req := range reqs {
 		if cmd, ok := batcheval.LookupCommand(req.Method()); ok {
-			cmd.DeclareKeys(c.rangeDesc, &h, req, latchSpans, lockSpans, 0)
+			err := cmd.DeclareKeys(c.rangeDesc, &h, req, latchSpans, lockSpans, 0)
+			if err != nil {
+				t.Fatal(err)
+			}
 		} else {
 			t.Fatalf("unrecognized command %s", req.Method())
 		}
diff --git a/pkg/kv/kvserver/replica_send.go b/pkg/kv/kvserver/replica_send.go
index 45d5d3c2791a..859474813b80 100644
--- a/pkg/kv/kvserver/replica_send.go
+++ b/pkg/kv/kvserver/replica_send.go
@@ -1185,11 +1185,17 @@ func (r *Replica) collectSpans(
 	// than the request timestamp, and may have to retry at a higher timestamp.
 	// This is still safe as we're only ever writing at timestamps higher than the
 	// timestamp any write latch would be declared at.
-	batcheval.DeclareKeysForBatch(desc, &ba.Header, latchSpans)
+	err := batcheval.DeclareKeysForBatch(desc, &ba.Header, latchSpans)
+	if err != nil {
+		return nil, nil, concurrency.PessimisticEval, err
+	}
 	for _, union := range ba.Requests {
 		inner := union.GetInner()
 		if cmd, ok := batcheval.LookupCommand(inner.Method()); ok {
-			cmd.DeclareKeys(desc, &ba.Header, inner, latchSpans, lockSpans, r.Clock().MaxOffset())
+			err := cmd.DeclareKeys(desc, &ba.Header, inner, latchSpans, lockSpans, r.Clock().MaxOffset())
+			if err != nil {
+				return nil, nil, concurrency.PessimisticEval, err
+			}
 			if considerOptEvalForLimit {
 				switch inner.(type) {
 				case *kvpb.ScanRequest, *kvpb.ReverseScanRequest:
diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go
index 0feb1c8b8ac6..d71d5216ac7f 100644
--- a/pkg/kv/kvserver/replica_test.go
+++ b/pkg/kv/kvserver/replica_test.go
@@ -2736,7 +2736,7 @@ func TestReplicaLatchingSplitDeclaresWrites(t *testing.T) {

 	var spans spanset.SpanSet
 	cmd, _ := batcheval.LookupCommand(kvpb.EndTxn)
-	cmd.DeclareKeys(
+	err := cmd.DeclareKeys(
 		&roachpb.RangeDescriptor{StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("e")},
 		&kvpb.Header{},
 		&kvpb.EndTxnRequest{
@@ -2757,6 +2757,7 @@ func TestReplicaLatchingSplitDeclaresWrites(t *testing.T) {
 		nil,
 		0,
 	)
+	require.NoError(t, err)
 	for _, tc := range []struct {
 		access spanset.SpanAccess
 		key    roachpb.Key
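For context on the shape of this change: every DeclareKeysFunc now returns an error, shared helpers such as DefaultDeclareIsolatedKeys report declare-time assertion failures as errors instead of panicking, and callers such as Replica.collectSpans propagate the error. The sketch below models that pattern with hypothetical, self-contained stand-ins (declareKeysFunc, defaultDeclareIsolatedKeys, declareKeysExample) rather than the real batcheval, spanset, or lockspanset APIs; it is illustrative only and not part of the patch.

// Illustrative sketch of the error-propagation pattern this patch adopts.
// All identifiers here are stand-ins, not the real CockroachDB APIs.
package main

import "fmt"

// declareKeysFunc mirrors the shape of the updated DeclareKeysFunc: key
// declaration can now fail, so it returns an error.
type declareKeysFunc func(strength, durability string) error

// defaultDeclareIsolatedKeys stands in for the shared helper: it rejects a
// nonsensical combination (a replicated non-locking read) by returning an
// assertion error rather than panicking.
func defaultDeclareIsolatedKeys(strength, durability string) error {
	if strength == "None" && durability == "Replicated" {
		return fmt.Errorf("incompatible key locking strength %s and durability %s", strength, durability)
	}
	return nil
}

// declareKeysExample shows the pattern used throughout the patch: call the
// shared helper, return early on error, add any request-specific
// declarations, then finish with `return nil`.
func declareKeysExample(strength, durability string) error {
	if err := defaultDeclareIsolatedKeys(strength, durability); err != nil {
		return err
	}
	// Request-specific latch/lock declarations would go here.
	return nil
}

func main() {
	var declare declareKeysFunc = declareKeysExample
	// The caller (collectSpans in the patch) surfaces the error instead of
	// recovering from a panic.
	if err := declare("None", "Replicated"); err != nil {
		fmt.Println("declaration rejected:", err)
	}
}

The practical effect, as the replica_send.go hunk shows, is that an invalid key declaration now fails the individual batch with a returned error rather than crashing the node with an assertion panic.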