diff --git a/c-deps/libroach/include/libroach.h b/c-deps/libroach/include/libroach.h
index 29ee60a5f66c..a5ead7ba02c0 100644
--- a/c-deps/libroach/include/libroach.h
+++ b/c-deps/libroach/include/libroach.h
@@ -350,6 +350,8 @@ typedef struct {
   int32_t len;
   // count is the number of key/value pairs in bufs.
   int32_t count;
+  // bytes is the number of bytes (as measured by TargetSize) in bufs.
+  int64_t bytes;
 } DBChunkedBuffer;
 
 // DBScanResults contains the key/value pairs and intents encoded
diff --git a/c-deps/libroach/mvcc.h b/c-deps/libroach/mvcc.h
index f9d5535ee951..633304031dfe 100644
--- a/c-deps/libroach/mvcc.h
+++ b/c-deps/libroach/mvcc.h
@@ -156,6 +156,7 @@ template <bool reverse> class mvccScanner {
     if (kvs_->Count() > 0) {
       kvs_->GetChunks(&results_.data.bufs, &results_.data.len);
       results_.data.count = kvs_->Count();
+      results_.data.bytes = kvs_->NumBytes();
     }
     if (intents_->Count() > 0) {
       results_.intents = ToDBSlice(intents_->Data());
diff --git a/pkg/kv/split_test.go b/pkg/kv/split_test.go
index 7dae0940b3d0..ce863ffcb2ea 100644
--- a/pkg/kv/split_test.go
+++ b/pkg/kv/split_test.go
@@ -116,7 +116,7 @@ func TestRangeSplitMeta(t *testing.T) {
 	}
 
 	testutils.SucceedsSoon(t, func() error {
-		if _, _, _, err := engine.MVCCScan(ctx, s.Eng, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, engine.MVCCScanOptions{}); err != nil {
+		if _, err := engine.MVCCScan(ctx, s.Eng, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, engine.MVCCScanOptions{}); err != nil {
 			return errors.Errorf("failed to verify no dangling intents: %s", err)
 		}
 		return nil
@@ -226,7 +226,7 @@ func TestRangeSplitsWithWritePressure(t *testing.T) {
 	// for timing of finishing the test writer and a possibly-ongoing
 	// asynchronous split.
 	testutils.SucceedsSoon(t, func() error {
-		if _, _, _, err := engine.MVCCScan(ctx, s.Eng, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, engine.MVCCScanOptions{}); err != nil {
+		if _, err := engine.MVCCScan(ctx, s.Eng, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, engine.MVCCScanOptions{}); err != nil {
 			return errors.Errorf("failed to verify no dangling intents: %s", err)
 		}
 		return nil
diff --git a/pkg/server/node_test.go b/pkg/server/node_test.go
index dc660ff8d6f1..64732271d1dc 100644
--- a/pkg/server/node_test.go
+++ b/pkg/server/node_test.go
@@ -216,12 +216,12 @@ func TestBootstrapCluster(t *testing.T) {
 	}
 
 	// Scan the complete contents of the local database directly from the engine.
-	rows, _, _, err := engine.MVCCScan(ctx, e, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, engine.MVCCScanOptions{})
+	res, err := engine.MVCCScan(ctx, e, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, engine.MVCCScanOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
 	var foundKeys keySlice
-	for _, kv := range rows {
+	for _, kv := range res.KVs {
 		foundKeys = append(foundKeys, kv.Key)
 	}
 	var expectedKeys = keySlice{
diff --git a/pkg/storage/addressing_test.go b/pkg/storage/addressing_test.go
index 48148765d3d2..5b111883e71c 100644
--- a/pkg/storage/addressing_test.go
+++ b/pkg/storage/addressing_test.go
@@ -166,8 +166,7 @@ func TestUpdateRangeAddressing(t *testing.T) {
 	// to RocksDB will be asynchronous.
var kvs []roachpb.KeyValue testutils.SucceedsSoon(t, func() error { - var err error - kvs, _, _, err = engine.MVCCScan(ctx, store.Engine(), keys.MetaMin, keys.MetaMax, + res, err := engine.MVCCScan(ctx, store.Engine(), keys.MetaMin, keys.MetaMax, math.MaxInt64, hlc.MaxTimestamp, engine.MVCCScanOptions{}) if err != nil { // Wait for the intent to be resolved. @@ -176,6 +175,7 @@ func TestUpdateRangeAddressing(t *testing.T) { } t.Fatal(err) } + kvs = res.KVs return nil }) metas := metaSlice{} diff --git a/pkg/storage/batcheval/cmd_reverse_scan.go b/pkg/storage/batcheval/cmd_reverse_scan.go index 42760688179e..3a78f94e9f67 100644 --- a/pkg/storage/batcheval/cmd_reverse_scan.go +++ b/pkg/storage/batcheval/cmd_reverse_scan.go @@ -34,15 +34,12 @@ func ReverseScan( h := cArgs.Header reply := resp.(*roachpb.ReverseScanResponse) + var res engine.MVCCScanResult var err error - var intents []roachpb.Intent - var resumeSpan *roachpb.Span switch args.ScanFormat { case roachpb.BATCH_RESPONSE: - var kvData [][]byte - var numKvs int64 - kvData, numKvs, resumeSpan, intents, err = engine.MVCCScanToBytes( + res, err = engine.MVCCScanToBytes( ctx, reader, args.Key, args.EndKey, cArgs.MaxKeys, h.Timestamp, engine.MVCCScanOptions{ Inconsistent: h.ReadConsistency != roachpb.CONSISTENT, @@ -52,11 +49,9 @@ func ReverseScan( if err != nil { return result.Result{}, err } - reply.NumKeys = numKvs - reply.BatchResponses = kvData + reply.BatchResponses = res.KVData case roachpb.KEY_VALUES: - var rows []roachpb.KeyValue - rows, resumeSpan, intents, err = engine.MVCCScan( + res, err = engine.MVCCScan( ctx, reader, args.Key, args.EndKey, cArgs.MaxKeys, h.Timestamp, engine.MVCCScanOptions{ Inconsistent: h.ReadConsistency != roachpb.CONSISTENT, Txn: h.Txn, @@ -65,19 +60,19 @@ func ReverseScan( if err != nil { return result.Result{}, err } - reply.NumKeys = int64(len(rows)) - reply.Rows = rows + reply.Rows = res.KVs default: panic(fmt.Sprintf("Unknown scanFormat %d", args.ScanFormat)) } - if resumeSpan != nil { - reply.ResumeSpan = resumeSpan + reply.NumKeys = res.NumKeys + if res.ResumeSpan != nil { + reply.ResumeSpan = res.ResumeSpan reply.ResumeReason = roachpb.RESUME_KEY_LIMIT } if h.ReadConsistency == roachpb.READ_UNCOMMITTED { - reply.IntentRows, err = CollectIntentRows(ctx, reader, cArgs, intents) + reply.IntentRows, err = CollectIntentRows(ctx, reader, cArgs, res.Intents) } - return result.FromEncounteredIntents(intents), err + return result.FromEncounteredIntents(res.Intents), err } diff --git a/pkg/storage/batcheval/cmd_scan.go b/pkg/storage/batcheval/cmd_scan.go index 3beb1b018b0c..6f504b61cd53 100644 --- a/pkg/storage/batcheval/cmd_scan.go +++ b/pkg/storage/batcheval/cmd_scan.go @@ -34,15 +34,12 @@ func Scan( h := cArgs.Header reply := resp.(*roachpb.ScanResponse) + var res engine.MVCCScanResult var err error - var intents []roachpb.Intent - var resumeSpan *roachpb.Span switch args.ScanFormat { case roachpb.BATCH_RESPONSE: - var kvData [][]byte - var numKvs int64 - kvData, numKvs, resumeSpan, intents, err = engine.MVCCScanToBytes( + res, err = engine.MVCCScanToBytes( ctx, reader, args.Key, args.EndKey, cArgs.MaxKeys, h.Timestamp, engine.MVCCScanOptions{ Inconsistent: h.ReadConsistency != roachpb.CONSISTENT, @@ -51,11 +48,9 @@ func Scan( if err != nil { return result.Result{}, err } - reply.NumKeys = numKvs - reply.BatchResponses = kvData + reply.BatchResponses = res.KVData case roachpb.KEY_VALUES: - var rows []roachpb.KeyValue - rows, resumeSpan, intents, err = engine.MVCCScan( + res, err = engine.MVCCScan( ctx, 
reader, args.Key, args.EndKey, cArgs.MaxKeys, h.Timestamp, engine.MVCCScanOptions{ Inconsistent: h.ReadConsistency != roachpb.CONSISTENT, Txn: h.Txn, @@ -63,19 +58,19 @@ func Scan( if err != nil { return result.Result{}, err } - reply.NumKeys = int64(len(rows)) - reply.Rows = rows + reply.Rows = res.KVs default: panic(fmt.Sprintf("Unknown scanFormat %d", args.ScanFormat)) } + reply.NumKeys = res.NumKeys - if resumeSpan != nil { - reply.ResumeSpan = resumeSpan + if res.ResumeSpan != nil { + reply.ResumeSpan = res.ResumeSpan reply.ResumeReason = roachpb.RESUME_KEY_LIMIT } if h.ReadConsistency == roachpb.READ_UNCOMMITTED { - reply.IntentRows, err = CollectIntentRows(ctx, reader, cArgs, intents) + reply.IntentRows, err = CollectIntentRows(ctx, reader, cArgs, res.Intents) } - return result.FromEncounteredIntents(intents), err + return result.FromEncounteredIntents(res.Intents), err } diff --git a/pkg/storage/engine/bench_test.go b/pkg/storage/engine/bench_test.go index 10d27da70e8e..1527fe3ac25f 100644 --- a/pkg/storage/engine/bench_test.go +++ b/pkg/storage/engine/bench_test.go @@ -240,14 +240,14 @@ func runMVCCScan(ctx context.Context, b *testing.B, emk engineMaker, opts benchS endKey = endKey.Next() walltime := int64(5 * (rand.Int31n(int32(opts.numVersions)) + 1)) ts := hlc.Timestamp{WallTime: walltime} - kvs, _, _, err := MVCCScan(ctx, eng, startKey, endKey, int64(opts.numRows), ts, MVCCScanOptions{ + res, err := MVCCScan(ctx, eng, startKey, endKey, int64(opts.numRows), ts, MVCCScanOptions{ Reverse: opts.reverse, }) if err != nil { b.Fatalf("failed scan: %+v", err) } - if len(kvs) != opts.numRows { - b.Fatalf("failed to scan: %d != %d", len(kvs), opts.numRows) + if len(res.KVs) != opts.numRows { + b.Fatalf("failed to scan: %d != %d", len(res.KVs), opts.numRows) } } diff --git a/pkg/storage/engine/engine.go b/pkg/storage/engine/engine.go index eabc2152c865..5a73cf6e446d 100644 --- a/pkg/storage/engine/engine.go +++ b/pkg/storage/engine/engine.go @@ -145,7 +145,7 @@ type MVCCIterator interface { // in the buffer. MVCCScan( start, end roachpb.Key, max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, - ) (kvData [][]byte, numKVs int64, resumeSpan *roachpb.Span, intents []roachpb.Intent, err error) + ) (MVCCScanResult, error) } // IterOptions contains options used to create an Iterator. 
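Note on the signature change above: MVCCIterator.MVCCScan, and the package-level MVCCScan/MVCCScanToBytes further down, now return a single MVCCScanResult instead of the old (kvData, numKVs, resumeSpan, intents, err) tuple. The sketch below shows the new calling convention from a consumer's point of view; scanKeys and its parameters are hypothetical and only illustrate how the struct is unpacked, assuming the pkg/storage/engine layout used throughout this diff.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/storage/engine"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
)

// scanKeys is a hypothetical helper illustrating the post-change calling
// convention of engine.MVCCScan: one MVCCScanResult instead of four values.
func scanKeys(
	ctx context.Context, reader engine.Reader, start, end roachpb.Key, ts hlc.Timestamp,
) ([]roachpb.Key, *roachpb.Span, error) {
	res, err := engine.MVCCScan(ctx, reader, start, end, 1000 /* max */, ts, engine.MVCCScanOptions{})
	if err != nil {
		return nil, nil, err
	}
	keys := make([]roachpb.Key, 0, len(res.KVs))
	for _, kv := range res.KVs {
		keys = append(keys, kv.Key)
	}
	// res.ResumeSpan is non-nil when the scan stopped early (e.g. the key limit
	// was hit); callers feed it back into the next scan to continue.
	return keys, res.ResumeSpan, nil
}

Folding the results into one struct is what lets this change add NumBytes (and the C-level bytes field) without reshaping every call site again.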
diff --git a/pkg/storage/engine/mvcc.go b/pkg/storage/engine/mvcc.go index 595b51ad3e15..2e8ccc6e2114 100644 --- a/pkg/storage/engine/mvcc.go +++ b/pkg/storage/engine/mvcc.go @@ -2264,7 +2264,7 @@ func MVCCDeleteRange( prevSeqTxn.Sequence-- scanTxn = prevSeqTxn } - kvs, resumeSpan, _, err := MVCCScan( + res, err := MVCCScan( ctx, rw, key, endKey, max, scanTs, MVCCScanOptions{Txn: scanTxn}) if err != nil { return nil, nil, 0, err @@ -2273,9 +2273,9 @@ func MVCCDeleteRange( buf := newPutBuffer() iter := rw.NewIterator(IterOptions{Prefix: true}) - for i := range kvs { + for i := range res.KVs { err = mvccPutInternal( - ctx, rw, iter, ms, kvs[i].Key, timestamp, nil, txn, buf, nil) + ctx, rw, iter, ms, res.KVs[i].Key, timestamp, nil, txn, buf, nil) if err != nil { break } @@ -2285,14 +2285,14 @@ func MVCCDeleteRange( buf.release() var keys []roachpb.Key - if returnKeys && err == nil && len(kvs) > 0 { - keys = make([]roachpb.Key, len(kvs)) - for i := range kvs { - keys[i] = kvs[i].Key + if returnKeys && err == nil && len(res.KVs) > 0 { + keys = make([]roachpb.Key, len(res.KVs)) + for i := range res.KVs { + keys[i] = res.KVs[i].Key } } - return keys, resumeSpan, int64(len(kvs)), err + return keys, res.ResumeSpan, res.NumKeys, err } func mvccScanToBytes( @@ -2302,16 +2302,16 @@ func mvccScanToBytes( max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, -) (kvData [][]byte, numKVs int64, resumeSpan *roachpb.Span, intents []roachpb.Intent, err error) { +) (MVCCScanResult, error) { if len(endKey) == 0 { - return nil, 0, nil, nil, emptyKeyError() + return MVCCScanResult{}, emptyKeyError() } if err := opts.validate(); err != nil { - return nil, 0, nil, nil, err + return MVCCScanResult{}, err } if max == 0 { - resumeSpan = &roachpb.Span{Key: key, EndKey: endKey} - return nil, 0, resumeSpan, nil, nil + resumeSpan := &roachpb.Span{Key: key, EndKey: endKey} + return MVCCScanResult{ResumeSpan: resumeSpan}, nil } // If the iterator has a specialized implementation, defer to that. @@ -2336,24 +2336,30 @@ func mvccScanToBytes( } mvccScanner.init(opts.Txn) - resumeSpan, err = mvccScanner.scan() + + var res MVCCScanResult + var err error + res.ResumeSpan, err = mvccScanner.scan() if err != nil { - return nil, 0, nil, nil, err + return MVCCScanResult{}, err } - kvData = mvccScanner.results.finish() - numKVs = mvccScanner.results.count + res.KVData = mvccScanner.results.finish() + res.NumKeys = mvccScanner.results.count + res.NumBytes = mvccScanner.results.bytes - intents, err = buildScanIntents(mvccScanner.intents.Repr()) + res.Intents, err = buildScanIntents(mvccScanner.intents.Repr()) if err != nil { - return nil, 0, nil, nil, err + return MVCCScanResult{}, err } - if !opts.Inconsistent && len(intents) > 0 { - return nil, 0, resumeSpan, nil, &roachpb.WriteIntentError{Intents: intents} + if !opts.Inconsistent && len(res.Intents) > 0 { + // TODO(tbg): don't return resume span. 
See: + // https://github.com/cockroachdb/cockroach/pull/44542 + return MVCCScanResult{ResumeSpan: res.ResumeSpan}, &roachpb.WriteIntentError{Intents: res.Intents} } - return + return res, nil } // mvccScanToKvs converts the raw key/value pairs returned by Iterator.MVCCScan @@ -2365,12 +2371,15 @@ func mvccScanToKvs( max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, -) ([]roachpb.KeyValue, *roachpb.Span, []roachpb.Intent, error) { - kvData, numKVs, resumeSpan, intents, err := mvccScanToBytes(ctx, iter, key, endKey, max, timestamp, opts) +) (MVCCScanResult, error) { + res, err := mvccScanToBytes(ctx, iter, key, endKey, max, timestamp, opts) if err != nil { - return nil, nil, nil, err + return MVCCScanResult{}, err } - kvs := make([]roachpb.KeyValue, numKVs) + res.KVs = make([]roachpb.KeyValue, res.NumKeys) + kvData := res.KVData + res.KVData = nil + var k MVCCKey var rawBytes []byte var i int @@ -2378,15 +2387,15 @@ func mvccScanToKvs( for len(data) > 0 { k, rawBytes, data, err = MVCCScanDecodeKeyValue(data) if err != nil { - return nil, nil, nil, err + return MVCCScanResult{}, err } - kvs[i].Key = k.Key - kvs[i].Value.RawBytes = rawBytes - kvs[i].Value.Timestamp = k.Timestamp + res.KVs[i].Key = k.Key + res.KVs[i].Value.RawBytes = rawBytes + res.KVs[i].Value.Timestamp = k.Timestamp i++ } } - return kvs, resumeSpan, intents, err + return res, err } func buildScanIntents(data []byte) ([]roachpb.Intent, error) { @@ -2455,6 +2464,21 @@ func (opts *MVCCScanOptions) validate() error { return nil } +// MVCCScanResult groups the values returned from an MVCCScan operation. Depending +// on the operation invoked, KVData or KVs is populated, but never both. +type MVCCScanResult struct { + KVData [][]byte + KVs []roachpb.KeyValue + NumKeys int64 + // NumBytes is the number of bytes this scan result accrued in terms of the + // MVCCScanOptions.TargetBytes parameter. This roughly measures the bytes + // used for encoding the uncompressed kv pairs contained in the result. + NumBytes int64 + + ResumeSpan *roachpb.Span + Intents []roachpb.Intent +} + // MVCCScan scans the key range [key, endKey) in the provided reader up to some // maximum number of results in ascending order. If it hits max, it returns a // "resume span" to be used in the next call to this function. If the limit is @@ -2499,7 +2523,7 @@ func MVCCScan( max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, -) ([]roachpb.KeyValue, *roachpb.Span, []roachpb.Intent, error) { +) (MVCCScanResult, error) { iter := reader.NewIterator(IterOptions{LowerBound: key, UpperBound: endKey}) defer iter.Close() return mvccScanToKvs(ctx, iter, key, endKey, max, timestamp, opts) @@ -2513,7 +2537,7 @@ func MVCCScanToBytes( max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, -) ([][]byte, int64, *roachpb.Span, []roachpb.Intent, error) { +) (MVCCScanResult, error) { iter := reader.NewIterator(IterOptions{LowerBound: key, UpperBound: endKey}) defer iter.Close() return mvccScanToBytes(ctx, iter, key, endKey, max, timestamp, opts) @@ -2539,12 +2563,17 @@ func MVCCIterate( for { const maxKeysPerScan = 1000 - kvs, resume, newIntents, err := mvccScanToKvs( + res, err := mvccScanToKvs( ctx, iter, key, endKey, maxKeysPerScan, timestamp, opts) if err != nil { switch tErr := err.(type) { case *roachpb.WriteIntentError: // In the case of WriteIntentErrors, accumulate affected keys but continue scan. 
+ // + // TODO(tbg): this code must have been written that way to use res.Intents even + // on this error, but this is already not populated any more by mvccScanToKvs. + // Explicitly zero out `res` here after: + // https://github.com/cockroachdb/cockroach/pull/44542. if wiErr == nil { wiErr = tErr } else { @@ -2556,16 +2585,16 @@ func MVCCIterate( } } - if len(newIntents) > 0 { + if len(res.Intents) > 0 { if intents == nil { - intents = newIntents + intents = res.Intents } else { - intents = append(intents, newIntents...) + intents = append(intents, res.Intents...) } } - for i := range kvs { - done, err := f(kvs[i]) + for i := range res.KVs { + done, err := f(res.KVs[i]) if err != nil { return nil, err } @@ -2578,13 +2607,13 @@ func MVCCIterate( } } - if resume == nil { + if res.ResumeSpan == nil { break } if opts.Reverse { - endKey = resume.EndKey + endKey = res.ResumeSpan.EndKey } else { - key = resume.Key + key = res.ResumeSpan.Key } } diff --git a/pkg/storage/engine/mvcc_history_test.go b/pkg/storage/engine/mvcc_history_test.go index d0c6c1af3116..7e771cd9340a 100644 --- a/pkg/storage/engine/mvcc_history_test.go +++ b/pkg/storage/engine/mvcc_history_test.go @@ -708,20 +708,23 @@ func cmdScan(e *evalCtx) error { e.scanArg(key, &tb) opts.TargetBytes = int64(tb) } - vals, resumeSpan, intents, err := MVCCScan(e.ctx, e.engine, key, endKey, max, ts, opts) + res, err := MVCCScan(e.ctx, e.engine, key, endKey, max, ts, opts) // NB: the error is returned below. This ensures the test can // ascertain no result is populated in the intents when an error // occurs. - for _, intent := range intents { + for _, intent := range res.Intents { fmt.Fprintf(e.results.buf, "scan: %v -> intent {%s} %s\n", key, intent.Txn, intent.Status) } - for _, val := range vals { + for _, val := range res.KVs { fmt.Fprintf(e.results.buf, "scan: %v -> %v @%v\n", val.Key, val.Value.PrettyPrint(), val.Value.Timestamp) } - if resumeSpan != nil { - fmt.Fprintf(e.results.buf, "scan: resume span [%s,%s)\n", resumeSpan.Key, resumeSpan.EndKey) + if res.ResumeSpan != nil { + fmt.Fprintf(e.results.buf, "scan: resume span [%s,%s)\n", res.ResumeSpan.Key, res.ResumeSpan.EndKey) } - if len(vals) == 0 { + if opts.TargetBytes > 0 { + fmt.Fprintf(e.results.buf, "scan: %d bytes (target %d)\n", res.NumBytes, opts.TargetBytes) + } + if len(res.KVs) == 0 { fmt.Fprintf(e.results.buf, "scan: %v-%v -> \n", key, endKey) } return err diff --git a/pkg/storage/engine/mvcc_test.go b/pkg/storage/engine/mvcc_test.go index c6dbd9f4bf2f..15ec823e4910 100644 --- a/pkg/storage/engine/mvcc_test.go +++ b/pkg/storage/engine/mvcc_test.go @@ -77,6 +77,18 @@ var ( }...) ) +func unwrapMVCCScan( + ctx context.Context, + reader Reader, + key, endKey roachpb.Key, + max int64, + timestamp hlc.Timestamp, + opts MVCCScanOptions, +) ([]roachpb.KeyValue, *roachpb.Span, []roachpb.Intent, error) { + res, err := MVCCScan(ctx, reader, key, endKey, max, timestamp, opts) + return res.KVs, res.ResumeSpan, res.Intents, err +} + // createTestRocksDBEngine returns a new in-memory RocksDB engine with 1MB of // storage capacity. 
func createTestRocksDBEngine() Engine { @@ -470,7 +482,7 @@ func TestMVCCGetUncertainty(t *testing.T) { } else if val == nil || !bytes.Equal(val.RawBytes, value1.RawBytes) { t.Fatalf("wanted %q, got %v", value1.RawBytes, val) } - if kvs, _, _, err := MVCCScan( + if kvs, _, _, err := unwrapMVCCScan( ctx, engine, testKey1, testKey1.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxn, ); err != nil { t.Fatal(err) @@ -499,7 +511,7 @@ func TestMVCCGetUncertainty(t *testing.T) { } else if _, ok := err.(*roachpb.ReadWithinUncertaintyIntervalError); !ok { t.Fatalf("wanted a ReadWithinUncertaintyIntervalError, got %+v", err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey2, testKey2.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxn, ); err == nil { t.Fatal("wanted an error") @@ -520,7 +532,7 @@ func TestMVCCGetUncertainty(t *testing.T) { } else if _, ok := err.(*roachpb.ReadWithinUncertaintyIntervalError); !ok { t.Fatalf("wanted a ReadWithinUncertaintyIntervalError, got %+v", err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey2, testKey2.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxnMaxTS9, ); err == nil { t.Fatal("wanted an error") @@ -538,7 +550,7 @@ func TestMVCCGetUncertainty(t *testing.T) { if _, _, err := mvccGet(ctx, engine, testKey2, hlc.Timestamp{WallTime: 7}, getOptsTxnMaxTS7); err != nil { t.Fatal(err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey2, testKey2.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxnMaxTS7, ); err != nil { t.Fatal(err) @@ -569,7 +581,7 @@ func TestMVCCGetUncertainty(t *testing.T) { } else if _, ok := err.(*roachpb.WriteIntentError); !ok { t.Fatalf("wanted a WriteIntentError, got %+v", err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey3, testKey3.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxn, ); err == nil { t.Fatal("wanted an error") @@ -589,7 +601,7 @@ func TestMVCCGetUncertainty(t *testing.T) { } else if _, ok := err.(*roachpb.WriteIntentError); !ok { t.Fatalf("wanted a WriteIntentError, got %+v", err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey3, testKey3.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxnMaxTS9, ); err == nil { t.Fatal("wanted an error") @@ -607,7 +619,7 @@ func TestMVCCGetUncertainty(t *testing.T) { if _, _, err := mvccGet(ctx, engine, testKey3, hlc.Timestamp{WallTime: 7}, getOptsTxnMaxTS7); err != nil { t.Fatal(err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey3, testKey3.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxnMaxTS7, ); err != nil { t.Fatal(err) @@ -637,7 +649,7 @@ func TestMVCCGetUncertainty(t *testing.T) { } else if _, ok := err.(*roachpb.ReadWithinUncertaintyIntervalError); !ok { t.Fatalf("wanted a ReadWithinUncertaintyIntervalError, got %+v", err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey4, testKey4.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxn, ); err == nil { t.Fatal("wanted an error") @@ -660,7 +672,7 @@ func TestMVCCGetUncertainty(t *testing.T) { } else if _, ok := err.(*roachpb.ReadWithinUncertaintyIntervalError); !ok { t.Fatalf("wanted a ReadWithinUncertaintyIntervalError, got %+v", err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey4, testKey4.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, 
scanOptsTxnMaxTS9, ); err == nil { t.Fatal("wanted an error") @@ -680,7 +692,7 @@ func TestMVCCGetUncertainty(t *testing.T) { if _, _, err := mvccGet(ctx, engine, testKey4, hlc.Timestamp{WallTime: 7}, getOptsTxnMaxTS7); err != nil { t.Fatal(err) } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey4, testKey4.PrefixEnd(), 10, hlc.Timestamp{WallTime: 7}, scanOptsTxnMaxTS7, ); err != nil { t.Fatal(err) @@ -1058,7 +1070,7 @@ func TestMVCCScanWriteIntentError(t *testing.T) { if scan.consistent { cStr = "consistent" } - kvs, _, intents, err := MVCCScan(ctx, engine, testKey1, testKey4.Next(), math.MaxInt64, + kvs, _, intents, err := unwrapMVCCScan(ctx, engine, testKey1, testKey4.Next(), math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Inconsistent: !scan.consistent, Txn: scan.txn}) wiErr, _ := err.(*roachpb.WriteIntentError) if (err == nil) != (wiErr == nil) { @@ -1316,7 +1328,7 @@ func TestMVCCInvalidateIterator(t *testing.T) { case "get": _, _, err = MVCCGet(ctx, batch, key, ts2, MVCCGetOptions{}) case "scan": - _, _, _, err = MVCCScan(ctx, batch, key, roachpb.KeyMax, math.MaxInt64, ts2, MVCCScanOptions{}) + _, _, _, err = unwrapMVCCScan(ctx, batch, key, roachpb.KeyMax, math.MaxInt64, ts2, MVCCScanOptions{}) case "findSplitKey": _, err = MVCCFindSplitKey(ctx, batch, roachpb.RKeyMin, roachpb.RKeyMax, 64<<20) case "computeStats": @@ -1366,7 +1378,7 @@ func mvccScanTest(ctx context.Context, t *testing.T, engine Engine) { t.Fatal(err) } - kvs, resumeSpan, _, err := MVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, + kvs, resumeSpan, _, err := unwrapMVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1382,7 +1394,7 @@ func mvccScanTest(ctx context.Context, t *testing.T, engine Engine) { t.Fatalf("resumeSpan = %+v", resumeSpan) } - kvs, resumeSpan, _, err = MVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, + kvs, resumeSpan, _, err = unwrapMVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 4}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1398,7 +1410,7 @@ func mvccScanTest(ctx context.Context, t *testing.T, engine Engine) { t.Fatalf("resumeSpan = %+v", resumeSpan) } - kvs, resumeSpan, _, err = MVCCScan(ctx, engine, testKey4, keyMax, math.MaxInt64, + kvs, resumeSpan, _, err = unwrapMVCCScan(ctx, engine, testKey4, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1417,7 +1429,7 @@ func mvccScanTest(ctx context.Context, t *testing.T, engine Engine) { }); err != nil { t.Fatal(err) } - kvs, _, _, err = MVCCScan(ctx, engine, keyMin, testKey2, math.MaxInt64, + kvs, _, _, err = unwrapMVCCScan(ctx, engine, keyMin, testKey2, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1468,7 +1480,7 @@ func TestMVCCScanMaxNum(t *testing.T) { t.Fatal(err) } - kvs, resumeSpan, _, err := MVCCScan(ctx, engine, testKey2, testKey4, 1, + kvs, resumeSpan, _, err := unwrapMVCCScan(ctx, engine, testKey2, testKey4, 1, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1482,7 +1494,7 @@ func TestMVCCScanMaxNum(t *testing.T) { t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan) } - kvs, resumeSpan, _, err = MVCCScan(ctx, engine, testKey2, testKey4, 0, + kvs, resumeSpan, _, err = unwrapMVCCScan(ctx, engine, testKey2, testKey4, 0, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1496,7 
+1508,7 @@ func TestMVCCScanMaxNum(t *testing.T) { // Note: testKey6, though not scanned directly, is important in testing that // the computed resume span does not extend beyond the upper bound of a scan. - kvs, resumeSpan, _, err = MVCCScan(ctx, engine, testKey4, testKey5, 1, + kvs, resumeSpan, _, err = unwrapMVCCScan(ctx, engine, testKey4, testKey5, 1, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1508,7 +1520,7 @@ func TestMVCCScanMaxNum(t *testing.T) { t.Fatalf("resumeSpan = %+v", resumeSpan) } - kvs, resumeSpan, _, err = MVCCScan(ctx, engine, testKey5, testKey6.Next(), 1, + kvs, resumeSpan, _, err = unwrapMVCCScan(ctx, engine, testKey5, testKey6.Next(), 1, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true}) if err != nil { t.Fatal(err) @@ -1559,7 +1571,7 @@ func TestMVCCScanWithKeyPrefix(t *testing.T) { t.Fatal(err) } - kvs, _, _, err := MVCCScan(ctx, engine, roachpb.Key("/a"), roachpb.Key("/b"), math.MaxInt64, + kvs, _, _, err := unwrapMVCCScan(ctx, engine, roachpb.Key("/a"), roachpb.Key("/b"), math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -1598,7 +1610,7 @@ func TestMVCCScanInTxn(t *testing.T) { t.Fatal(err) } - kvs, _, _, err := MVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, + kvs, _, _, err := unwrapMVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Txn: txn1}) if err != nil { t.Fatal(err) @@ -1611,7 +1623,7 @@ func TestMVCCScanInTxn(t *testing.T) { t.Fatal("the value should not be empty") } - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, testKey2, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{}, ); err == nil { t.Fatal("expected error on uncommitted write intent") @@ -1632,7 +1644,7 @@ func TestMVCCScanInconsistent(t *testing.T) { defer engine.Close() // A scan with consistent=false should fail in a txn. - if _, _, _, err := MVCCScan( + if _, _, _, err := unwrapMVCCScan( ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Inconsistent: true, Txn: txn1}, ); err == nil { @@ -1670,7 +1682,7 @@ func TestMVCCScanInconsistent(t *testing.T) { {Span: roachpb.Span{Key: testKey1}, Txn: txn1ts2.TxnMeta}, {Span: roachpb.Span{Key: testKey3}, Txn: txn2ts5.TxnMeta}, } - kvs, _, intents, err := MVCCScan( + kvs, _, intents, err := unwrapMVCCScan( ctx, engine, testKey1, testKey4.Next(), math.MaxInt64, hlc.Timestamp{WallTime: 7}, MVCCScanOptions{Inconsistent: true}, ) @@ -1697,7 +1709,7 @@ func TestMVCCScanInconsistent(t *testing.T) { // Now try a scan at a historical timestamp. 
expIntents = expIntents[:1] - kvs, _, intents, err = MVCCScan(ctx, engine, testKey1, testKey4.Next(), math.MaxInt64, + kvs, _, intents, err = unwrapMVCCScan(ctx, engine, testKey1, testKey4.Next(), math.MaxInt64, hlc.Timestamp{WallTime: 3}, MVCCScanOptions{Inconsistent: true}) if !reflect.DeepEqual(intents, expIntents) { t.Fatal(err) @@ -1757,7 +1769,7 @@ func TestMVCCDeleteRange(t *testing.T) { if expected := (roachpb.Span{Key: testKey4, EndKey: testKey6}); !resumeSpan.EqualValue(expected) { t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan) } - kvs, _, _, _ := MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, + kvs, _, _, _ := unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 4 || !bytes.Equal(kvs[0].Key, testKey1) || @@ -1813,7 +1825,7 @@ func TestMVCCDeleteRange(t *testing.T) { if expected := (roachpb.Span{Key: testKey2, EndKey: testKey6}); !resumeSpan.EqualValue(expected) { t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan) } - kvs, _, _, _ = MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, _ = unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 4 || !bytes.Equal(kvs[0].Key, testKey1) || @@ -1841,7 +1853,7 @@ func TestMVCCDeleteRange(t *testing.T) { if resumeSpan != nil { t.Fatalf("wrong resume key: expected nil, found %v", resumeSpan) } - kvs, _, _, _ = MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, _ = unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey1) || @@ -1863,7 +1875,7 @@ func TestMVCCDeleteRange(t *testing.T) { if resumeSpan != nil { t.Fatalf("wrong resume key: expected nil, found %v", resumeSpan) } - kvs, _, _, _ = MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, _ = unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 0 { t.Fatal("the value should be empty") @@ -1921,7 +1933,7 @@ func TestMVCCDeleteRangeReturnKeys(t *testing.T) { if expected := (roachpb.Span{Key: testKey4, EndKey: testKey6}); !resumeSpan.EqualValue(expected) { t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan) } - kvs, _, _, _ := MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, _ := unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 4 || !bytes.Equal(kvs[0].Key, testKey1) || @@ -1950,7 +1962,7 @@ func TestMVCCDeleteRangeReturnKeys(t *testing.T) { if expected := (roachpb.Span{Key: testKey2, EndKey: testKey6}); !resumeSpan.EqualValue(expected) { t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan) } - kvs, _, _, _ = MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, _ = unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 4 || !bytes.Equal(kvs[0].Key, testKey1) || @@ -1987,7 +1999,7 @@ func TestMVCCDeleteRangeReturnKeys(t *testing.T) { if resumeSpan != nil { t.Fatalf("wrong resume key: expected nil, found %v", resumeSpan) } - kvs, _, _, _ = MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, _ = unwrapMVCCScan(ctx, engine, keyMin, keyMax, 
math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, testKey1) || @@ -2012,7 +2024,7 @@ func TestMVCCDeleteRangeReturnKeys(t *testing.T) { if resumeSpan != nil { t.Fatalf("wrong resume key: %v", resumeSpan) } - kvs, _, _, _ = MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, _ = unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if len(kvs) != 0 { t.Fatal("the value should be empty") @@ -2137,7 +2149,7 @@ func TestMVCCUncommittedDeleteRangeVisible(t *testing.T) { } txn.Epoch++ - kvs, _, _, _ := MVCCScan(ctx, engine, testKey1, testKey4, math.MaxInt64, + kvs, _, _, _ := unwrapMVCCScan(ctx, engine, testKey1, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 3}, MVCCScanOptions{Txn: txn}) if e := 2; len(kvs) != e { t.Fatalf("e = %d, got %d", e, len(kvs)) @@ -2235,7 +2247,7 @@ func TestMVCCDeleteRangeInline(t *testing.T) { Value: value6, }, } - kvs, _, _, err := MVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, + kvs, _, _, err := unwrapMVCCScan(ctx, engine, keyMin, keyMax, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{}) if err != nil { t.Fatal(err) @@ -2317,7 +2329,7 @@ func TestMVCCClearTimeRange(t *testing.T) { assertKVs := func(t *testing.T, reader Reader, at hlc.Timestamp, expected []roachpb.KeyValue) { t.Helper() - actual, _, _, err := MVCCScan(ctx, reader, keyMin, keyMax, 100, at, MVCCScanOptions{}) + actual, _, _, err := unwrapMVCCScan(ctx, reader, keyMin, keyMax, 100, at, MVCCScanOptions{}) require.NoError(t, err) require.Equal(t, expected, actual) } @@ -2444,17 +2456,17 @@ func TestMVCCClearTimeRange(t *testing.T) { // Scan (< k3 to avoid intent) to confirm that k2 was indeed reverted to // value as of ts3 (i.e. v4 was cleared to expose v2). - actual, _, _, err := MVCCScan(ctx, e, keyMin, testKey3, 100, ts5, MVCCScanOptions{}) + actual, _, _, err := unwrapMVCCScan(ctx, e, keyMin, testKey3, 100, ts5, MVCCScanOptions{}) require.NoError(t, err) require.Equal(t, ts3Content[:2], actual) // Verify the intent was left alone. - _, _, _, err = MVCCScan(ctx, e, testKey3, testKey4, 100, ts5, MVCCScanOptions{}) + _, _, _, err = unwrapMVCCScan(ctx, e, testKey3, testKey4, 100, ts5, MVCCScanOptions{}) require.Error(t, err) // Scan (> k3 to avoid intent) to confirm that k5 was indeed reverted to // value as of ts3 (i.e. v4 was cleared to expose v2). - actual, _, _, err = MVCCScan(ctx, e, testKey4, keyMax, 100, ts5, MVCCScanOptions{}) + actual, _, _, err = unwrapMVCCScan(ctx, e, testKey4, keyMax, 100, ts5, MVCCScanOptions{}) require.NoError(t, err) require.Equal(t, ts3Content[2:], actual) }) @@ -2568,7 +2580,7 @@ func TestMVCCClearTimeRangeOnRandomData(t *testing.T) { t.Run(fmt.Sprintf("revert-%d", i), func(t *testing.T) { revertTo := hlc.Timestamp{WallTime: int64(reverts[i])} // MVCC-Scan at the revert time. - scannedBefore, _, _, err := MVCCScan(ctx, e, keyMin, keyMax, numKVs, revertTo, MVCCScanOptions{}) + scannedBefore, _, _, err := unwrapMVCCScan(ctx, e, keyMin, keyMax, numKVs, revertTo, MVCCScanOptions{}) require.NoError(t, err) // Revert to the revert time. @@ -2585,7 +2597,7 @@ func TestMVCCClearTimeRangeOnRandomData(t *testing.T) { require.Equal(t, computeStats(t, e, keyMin, keyMax, 2000), ms) // Scanning at "now" post-revert should yield the same result as scanning // at revert-time pre-revert. 
- scannedAfter, _, _, err := MVCCScan(ctx, e, keyMin, keyMax, numKVs, now, MVCCScanOptions{}) + scannedAfter, _, _, err := unwrapMVCCScan(ctx, e, keyMin, keyMax, numKVs, now, MVCCScanOptions{}) require.NoError(t, err) require.Equal(t, scannedBefore, scannedAfter) }) @@ -2782,7 +2794,7 @@ func TestMVCCReverseScan(t *testing.T) { t.Fatal(err) } - kvs, resumeSpan, _, err := MVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, + kvs, resumeSpan, _, err := unwrapMVCCScan(ctx, engine, testKey2, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true}) if err != nil { @@ -2799,7 +2811,7 @@ func TestMVCCReverseScan(t *testing.T) { t.Fatalf("resumeSpan = %+v", resumeSpan) } - kvs, resumeSpan, _, err = MVCCScan(ctx, engine, testKey2, testKey4, 1, hlc.Timestamp{WallTime: 1}, + kvs, resumeSpan, _, err = unwrapMVCCScan(ctx, engine, testKey2, testKey4, 1, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true}) if err != nil { @@ -2814,7 +2826,7 @@ func TestMVCCReverseScan(t *testing.T) { t.Fatalf("expected = %+v, resumeSpan = %+v", expected, resumeSpan) } - kvs, resumeSpan, _, err = MVCCScan(ctx, engine, testKey2, testKey4, 0, hlc.Timestamp{WallTime: 1}, + kvs, resumeSpan, _, err = unwrapMVCCScan(ctx, engine, testKey2, testKey4, 0, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true}) if err != nil { @@ -2829,7 +2841,7 @@ func TestMVCCReverseScan(t *testing.T) { // The first key we encounter has multiple versions and we need to read the // latest. - kvs, _, _, err = MVCCScan(ctx, engine, testKey2, testKey3, 1, hlc.Timestamp{WallTime: 4}, + kvs, _, _, err = unwrapMVCCScan(ctx, engine, testKey2, testKey3, 1, hlc.Timestamp{WallTime: 4}, MVCCScanOptions{Reverse: true}) if err != nil { @@ -2843,7 +2855,7 @@ func TestMVCCReverseScan(t *testing.T) { // The first key we encounter is newer than our read timestamp and we need to // back up to the previous key. - kvs, _, _, err = MVCCScan(ctx, engine, testKey4, testKey6, 1, hlc.Timestamp{WallTime: 1}, + kvs, _, _, err = unwrapMVCCScan(ctx, engine, testKey4, testKey6, 1, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true}) if err != nil { @@ -2856,7 +2868,7 @@ func TestMVCCReverseScan(t *testing.T) { } // Scan only the first key in the key space. 
- kvs, _, _, err = MVCCScan(ctx, engine, testKey1, testKey1.Next(), 1, hlc.Timestamp{WallTime: 1}, + kvs, _, _, err = unwrapMVCCScan(ctx, engine, testKey1, testKey1.Next(), 1, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true}) if err != nil { @@ -2899,7 +2911,7 @@ func TestMVCCReverseScanFirstKeyInFuture(t *testing.T) { t.Fatal(err) } - kvs, _, _, err := MVCCScan(ctx, engine, testKey1, testKey4, math.MaxInt64, + kvs, _, _, err := unwrapMVCCScan(ctx, engine, testKey1, testKey4, math.MaxInt64, hlc.Timestamp{WallTime: 2}, MVCCScanOptions{Reverse: true}) if err != nil { t.Fatal(err) @@ -2940,7 +2952,7 @@ func TestMVCCReverseScanSeeksOverRepeatedKeys(t *testing.T) { t.Fatal(err) } - kvs, _, _, err := MVCCScan(ctx, engine, testKey1, testKey3, math.MaxInt64, + kvs, _, _, err := unwrapMVCCScan(ctx, engine, testKey1, testKey3, math.MaxInt64, hlc.Timestamp{WallTime: 1}, MVCCScanOptions{Reverse: true}) if err != nil { t.Fatal(err) @@ -2987,7 +2999,7 @@ func TestMVCCReverseScanStopAtSmallestKey(t *testing.T) { } } - kvs, _, _, err := MVCCScan(ctx, engine, testKey1, testKey3, math.MaxInt64, + kvs, _, _, err := unwrapMVCCScan(ctx, engine, testKey1, testKey3, math.MaxInt64, hlc.Timestamp{WallTime: ts}, MVCCScanOptions{Reverse: true}) if err != nil { t.Fatal(err) diff --git a/pkg/storage/engine/rocksdb.go b/pkg/storage/engine/rocksdb.go index 6583c80bb328..ce66dcf591e5 100644 --- a/pkg/storage/engine/rocksdb.go +++ b/pkg/storage/engine/rocksdb.go @@ -1585,7 +1585,7 @@ func (r *batchIterator) MVCCGet( func (r *batchIterator) MVCCScan( start, end roachpb.Key, max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, -) (kvData [][]byte, numKVs int64, resumeSpan *roachpb.Span, intents []roachpb.Intent, err error) { +) (MVCCScanResult, error) { r.batch.flushMutations() return r.iter.MVCCScan(start, end, max, timestamp, opts) } @@ -2471,16 +2471,16 @@ func (r *rocksDBIterator) MVCCGet( func (r *rocksDBIterator) MVCCScan( start, end roachpb.Key, max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, -) (kvData [][]byte, numKVs int64, resumeSpan *roachpb.Span, intents []roachpb.Intent, err error) { +) (MVCCScanResult, error) { if opts.Inconsistent && opts.Txn != nil { - return nil, 0, nil, nil, errors.Errorf("cannot allow inconsistent reads within a transaction") + return MVCCScanResult{}, errors.Errorf("cannot allow inconsistent reads within a transaction") } if len(end) == 0 { - return nil, 0, nil, nil, emptyKeyError() + return MVCCScanResult{}, emptyKeyError() } if max == 0 { - resumeSpan = &roachpb.Span{Key: start, EndKey: end} - return nil, 0, resumeSpan, nil, nil + resumeSpan := &roachpb.Span{Key: start, EndKey: end} + return MVCCScanResult{ResumeSpan: resumeSpan}, nil } r.clearState() @@ -2493,18 +2493,20 @@ func (r *rocksDBIterator) MVCCScan( ) if err := statusToError(state.status); err != nil { - return nil, 0, nil, nil, err + return MVCCScanResult{}, err } if err := writeTooOldToError(timestamp, state.write_too_old_timestamp); err != nil { - return nil, 0, nil, nil, err + return MVCCScanResult{}, err } if err := uncertaintyToError(timestamp, state.uncertainty_timestamp, opts.Txn); err != nil { - return nil, 0, nil, nil, err + return MVCCScanResult{}, err } - kvData = [][]byte{copyFromSliceVector(state.data.bufs, state.data.len)} - numKVs = int64(state.data.count) + kvData := [][]byte{copyFromSliceVector(state.data.bufs, state.data.len)} + numKVs := int64(state.data.count) + numBytes := int64(state.data.bytes) + var resumeSpan *roachpb.Span if resumeKey := cSliceToGoBytes(state.resume_key); 
resumeKey != nil { if opts.Reverse { resumeSpan = &roachpb.Span{Key: start, EndKey: roachpb.Key(resumeKey).Next()} @@ -2513,17 +2515,26 @@ func (r *rocksDBIterator) MVCCScan( } } - intents, err = buildScanIntents(cSliceToGoBytes(state.intents)) + intents, err := buildScanIntents(cSliceToGoBytes(state.intents)) if err != nil { - return nil, 0, nil, nil, err + return MVCCScanResult{}, err } if !opts.Inconsistent && len(intents) > 0 { // When encountering intents during a consistent scan we still need to // return the resume key. - return nil, 0, resumeSpan, nil, &roachpb.WriteIntentError{Intents: intents} + // + // TODO(tbg): this is a lie? See: + // https://github.com/cockroachdb/cockroach/pull/44542 + return MVCCScanResult{ResumeSpan: resumeSpan}, &roachpb.WriteIntentError{Intents: intents} } - return kvData, numKVs, resumeSpan, intents, nil + return MVCCScanResult{ + KVData: kvData, + NumKeys: numKVs, + NumBytes: numBytes, + ResumeSpan: resumeSpan, + Intents: intents, + }, nil } func (r *rocksDBIterator) SetUpperBound(key roachpb.Key) { diff --git a/pkg/storage/engine/rocksdb_iter_stats_test.go b/pkg/storage/engine/rocksdb_iter_stats_test.go index 0e57a2f83f40..903f12337f6a 100644 --- a/pkg/storage/engine/rocksdb_iter_stats_test.go +++ b/pkg/storage/engine/rocksdb_iter_stats_test.go @@ -64,7 +64,7 @@ func TestIterStats(t *testing.T) { } // Scanning a key range containing the tombstone sees it. for i := 0; i < 10; i++ { - if _, _, _, err := mvccScanToKvs( + if _, err := mvccScanToKvs( ctx, iter, roachpb.KeyMin, roachpb.KeyMax, math.MaxInt64, hlc.Timestamp{}, MVCCScanOptions{}, ); err != nil { t.Fatal(err) diff --git a/pkg/storage/engine/tee.go b/pkg/storage/engine/tee.go index 003974c2854a..e3635d42cb5d 100644 --- a/pkg/storage/engine/tee.go +++ b/pkg/storage/engine/tee.go @@ -1223,45 +1223,48 @@ func kvDataEqual(ctx context.Context, data1 []byte, data2 [][]byte) bool { // MVCCScan implements the MVCCIterator interface. func (t *TeeEngineIter) MVCCScan( start, end roachpb.Key, max int64, timestamp hlc.Timestamp, opts MVCCScanOptions, -) (kvData [][]byte, numKVs int64, resumeSpan *roachpb.Span, intents []roachpb.Intent, err error) { - kvData1, numKvs1, resumeSpan1, intents1, err := mvccScanToBytes(t.ctx, t.iter1, start, end, max, timestamp, opts) - kvData2, numKvs2, resumeSpan2, intents2, err2 := mvccScanToBytes(t.ctx, t.iter2, start, end, max, timestamp, opts) +) (MVCCScanResult, error) { + res1, err := mvccScanToBytes(t.ctx, t.iter1, start, end, max, timestamp, opts) + res2, err2 := mvccScanToBytes(t.ctx, t.iter2, start, end, max, timestamp, opts) - if err = fatalOnErrorMismatch(t.ctx, err, err2); err != nil { - return nil, 0, nil, nil, err + if err := fatalOnErrorMismatch(t.ctx, err, err2); err != nil { + return MVCCScanResult{}, err } - if numKvs1 != numKvs2 { - log.Fatalf(t.ctx, "mismatching number of KVs returned from engines MVCCScan: %d != %d", numKvs1, numKvs2) + if res1.NumKeys != res2.NumKeys { + log.Fatalf(t.ctx, "mismatching number of KVs returned from engines MVCCScan: %d != %d", res1.NumKeys, res2.NumKeys) + } + if res1.NumBytes != res2.NumBytes { + log.Fatalf(t.ctx, "mismatching NumBytes returned from engines MVCCScan: %d != %d", res1.NumBytes, res2.NumBytes) } // At least one side is expected to have only one contiguous slice inside it. // This lets us simplify the checking code below. 
equal := false - if len(kvData2) != 1 && len(kvData1) != 1 { + if len(res1.KVData) != 1 && len(res2.KVData) != 1 { panic("unsupported multiple-slice result from both iterators in MVCCScan") - } else if len(kvData2) == 1 { + } else if len(res2.KVData) == 1 { // Swap the two slices so that the first argument is the one with only one // slice inside it. - equal = kvDataEqual(t.ctx, kvData2[0], kvData1) + equal = kvDataEqual(t.ctx, res2.KVData[0], res1.KVData) } else { - equal = kvDataEqual(t.ctx, kvData1[0], kvData2) + equal = kvDataEqual(t.ctx, res1.KVData[0], res2.KVData) } if !equal { - log.Fatalf(t.ctx, "mismatching kv data returned by engines: %v != %v", kvData1, kvData2) + log.Fatalf(t.ctx, "mismatching kv data returned by engines: %v != %v", res1.KVData, res2.KVData) } - if !resumeSpan1.Equal(resumeSpan2) { - log.Fatalf(t.ctx, "mismatching resume spans returned by engines: %v != %v", resumeSpan1, resumeSpan2) + if !res1.ResumeSpan.Equal(res2.ResumeSpan) { + log.Fatalf(t.ctx, "mismatching resume spans returned by engines: %v != %v", res1.ResumeSpan, res2.ResumeSpan) } - if len(intents1) != len(intents2) { - log.Fatalf(t.ctx, "mismatching number of intents returned by engines: %v != %v", len(intents1), len(intents2)) + if len(res1.Intents) != len(res2.Intents) { + log.Fatalf(t.ctx, "mismatching number of intents returned by engines: %v != %v", len(res1.Intents), len(res2.Intents)) } - for i := range intents1 { - if !intents1[i].Equal(intents2[i]) { - log.Fatalf(t.ctx, "mismatching intents returned by engines: %v != %v", intents1[i], intents2[i]) + for i := range res1.Intents { + if !res1.Intents[i].Equal(res2.Intents[i]) { + log.Fatalf(t.ctx, "mismatching intents returned by engines: %v != %v", res1.Intents[i], res2.Intents[i]) } } - return kvData1, numKvs1, resumeSpan1, intents1, err + return res1, err } diff --git a/pkg/storage/engine/testdata/mvcc_histories/target_bytes b/pkg/storage/engine/testdata/mvcc_histories/target_bytes index 81fcfea5e59f..148da0306a47 100644 --- a/pkg/storage/engine/testdata/mvcc_histories/target_bytes +++ b/pkg/storage/engine/testdata/mvcc_histories/target_bytes @@ -63,9 +63,11 @@ with ts=300,0 k=a end=z targetbytes=10000000 scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: "e" -> /BYTES/mnopqr @0.000000123,45 +scan: 108 bytes (target 10000000) scan: "e" -> /BYTES/mnopqr @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: "a" -> /BYTES/abcdef @0.000000123,45 +scan: 108 bytes (target 10000000) run ok @@ -86,12 +88,14 @@ scan k=a end=z ts=300,0 targetbytes=1 ---- scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: resume span ["aa","z") +scan: 34 bytes (target 1) run ok scan k=a end=z ts=300,0 targetbytes=34 ---- scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: resume span ["aa","z") +scan: 34 bytes (target 34) run ok scan k=a end=z ts=300,0 targetbytes=35 @@ -99,6 +103,7 @@ scan k=a end=z ts=300,0 targetbytes=35 scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: resume span ["e","z") +scan: 74 bytes (target 35) run ok scan k=a end=z ts=300,0 targetbytes=74 @@ -106,6 +111,7 @@ scan k=a end=z ts=300,0 targetbytes=74 scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: resume span ["e","z") +scan: 74 bytes (target 74) run ok scan k=a end=z ts=300,0 targetbytes=75 @@ -113,6 +119,7 @@ scan k=a end=z ts=300,0 targetbytes=75 scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg 
@0.000000123,45 scan: "e" -> /BYTES/mnopqr @0.000000123,45 +scan: 108 bytes (target 75) # Works just the same when not starting on an existing key. run ok @@ -120,6 +127,7 @@ scan k=b end=z ts=300 targetbytes=1 ---- scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: resume span ["e","z") +scan: 40 bytes (target 1) # Reverse scans. @@ -128,12 +136,14 @@ scan k=a end=z ts=300,0 targetbytes=1 reverse=true ---- scan: "e" -> /BYTES/mnopqr @0.000000123,45 scan: resume span ["a","c\x00") +scan: 34 bytes (target 1) run ok scan k=a end=z ts=300,0 targetbytes=34 reverse=true ---- scan: "e" -> /BYTES/mnopqr @0.000000123,45 scan: resume span ["a","c\x00") +scan: 34 bytes (target 34) run ok scan k=a end=z ts=300,0 targetbytes=35 reverse=true @@ -141,6 +151,7 @@ scan k=a end=z ts=300,0 targetbytes=35 reverse=true scan: "e" -> /BYTES/mnopqr @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: resume span ["a","aa\x00") +scan: 74 bytes (target 35) run ok scan k=a end=z ts=300,0 targetbytes=74 reverse=true @@ -148,6 +159,7 @@ scan k=a end=z ts=300,0 targetbytes=74 reverse=true scan: "e" -> /BYTES/mnopqr @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: resume span ["a","aa\x00") +scan: 74 bytes (target 74) run ok scan k=a end=z ts=300,0 targetbytes=75 reverse=true @@ -155,6 +167,7 @@ scan k=a end=z ts=300,0 targetbytes=75 reverse=true scan: "e" -> /BYTES/mnopqr @0.000000123,45 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: "a" -> /BYTES/abcdef @0.000000123,45 +scan: 108 bytes (target 75) # Scans that return the tombstone (at aa@250,1). The kv pair at a has 34 bytes, # aa has 24 (just a key). @@ -164,6 +177,7 @@ scan k=a end=z ts=300,0 targetbytes=34 tombstones=true ---- scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: resume span ["aa","z") +scan: 34 bytes (target 34) run ok scan k=a end=z ts=300,0 targetbytes=35 tombstones=true @@ -171,6 +185,7 @@ scan k=a end=z ts=300,0 targetbytes=35 tombstones=true scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: "aa" -> / @0.000000250,1 scan: resume span ["c","z") +scan: 58 bytes (target 35) run ok scan k=a end=z ts=300,0 targetbytes=58 tombstones=true @@ -178,6 +193,7 @@ scan k=a end=z ts=300,0 targetbytes=58 tombstones=true scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: "aa" -> / @0.000000250,1 scan: resume span ["c","z") +scan: 58 bytes (target 58) run ok scan k=a end=z ts=300,0 targetbytes=59 tombstones=true @@ -186,6 +202,7 @@ scan: "a" -> /BYTES/abcdef @0.000000123,45 scan: "aa" -> / @0.000000250,1 scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: resume span ["e","z") +scan: 98 bytes (target 59) # ... and similarly in reverse. 
@@ -194,6 +211,7 @@ scan k=a end=d ts=300,0 targetbytes=40 reverse=true tombstones=true ---- scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: resume span ["a","aa\x00") +scan: 40 bytes (target 40) run ok scan k=a end=d ts=300,0 targetbytes=41 reverse=true tombstones=true @@ -201,6 +219,7 @@ scan k=a end=d ts=300,0 targetbytes=41 reverse=true tombstones=true scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: "aa" -> / @0.000000250,1 scan: resume span ["a","a\x00") +scan: 64 bytes (target 41) run ok scan k=a end=d ts=300,0 targetbytes=64 reverse=true tombstones=true @@ -208,6 +227,7 @@ scan k=a end=d ts=300,0 targetbytes=64 reverse=true tombstones=true scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: "aa" -> / @0.000000250,1 scan: resume span ["a","a\x00") +scan: 64 bytes (target 64) run ok scan k=a end=d ts=300,0 targetbytes=65 reverse=true tombstones=true @@ -215,3 +235,4 @@ scan k=a end=d ts=300,0 targetbytes=65 reverse=true tombstones=true scan: "c" -> /BYTES/ghijkllkjihg @0.000000123,45 scan: "aa" -> / @0.000000250,1 scan: "a" -> /BYTES/abcdef @0.000000123,45 +scan: 98 bytes (target 65) diff --git a/pkg/storage/spanset/batch.go b/pkg/storage/spanset/batch.go index 949c9bb6c959..bdda9ce40c4a 100644 --- a/pkg/storage/spanset/batch.go +++ b/pkg/storage/spanset/batch.go @@ -248,14 +248,14 @@ func (i *Iterator) MVCCGet( // MVCCScan is part of the engine.MVCCIterator interface. func (i *Iterator) MVCCScan( start, end roachpb.Key, max int64, timestamp hlc.Timestamp, opts engine.MVCCScanOptions, -) (kvData [][]byte, numKVs int64, resumeSpan *roachpb.Span, intents []roachpb.Intent, err error) { +) (engine.MVCCScanResult, error) { if i.spansOnly { if err := i.spans.CheckAllowed(SpanReadOnly, roachpb.Span{Key: start, EndKey: end}); err != nil { - return nil, 0, nil, nil, err + return engine.MVCCScanResult{}, err } } else { if err := i.spans.CheckAllowedAt(SpanReadOnly, roachpb.Span{Key: start, EndKey: end}, timestamp); err != nil { - return nil, 0, nil, nil, err + return engine.MVCCScanResult{}, err } } return i.i.(engine.MVCCIterator).MVCCScan(start, end, max, timestamp, opts) diff --git a/pkg/ts/db_test.go b/pkg/ts/db_test.go index cad10ca0ab49..fa20184ccccb 100644 --- a/pkg/ts/db_test.go +++ b/pkg/ts/db_test.go @@ -122,13 +122,13 @@ func (tm *testModelRunner) getActualData() map[string]roachpb.Value { // Scan over all TS Keys stored in the engine startKey := keys.TimeseriesPrefix endKey := startKey.PrefixEnd() - keyValues, _, _, err := engine.MVCCScan(context.Background(), tm.Eng, startKey, endKey, math.MaxInt64, tm.Clock.Now(), engine.MVCCScanOptions{}) + res, err := engine.MVCCScan(context.Background(), tm.Eng, startKey, endKey, math.MaxInt64, tm.Clock.Now(), engine.MVCCScanOptions{}) if err != nil { tm.t.Fatalf("error scanning TS data from engine: %s", err) } kvMap := make(map[string]roachpb.Value) - for _, kv := range keyValues { + for _, kv := range res.KVs { kvMap[string(kv.Key)] = kv.Value }
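A note on the numbers in the target_bytes testdata above: the reported totals (34, 40, 24, 74, 108) are consistent with charging, for each returned pair, a fixed 8-byte length header plus the encoded MVCC key (key bytes, a sentinel byte, 8-byte walltime, 4-byte logical, and a 1-byte timestamp length) plus the raw value (4-byte checksum, 1-byte tag, payload). The snippet below is a back-of-the-envelope check rather than code from this change, and the exact split of the 8-byte header is an assumption; it does reproduce the testdata, including why targetbytes=34 stops after "a" (34 >= 34) while targetbytes=35 also returns "c" before stopping.

package main

import "fmt"

// approxKVBytes estimates the byte charge for one key/value pair written at a
// timestamp with a non-zero logical component (the testdata writes at 123,45).
// The 8-byte header and the encoding breakdown are assumptions that match the
// totals printed by the test driver above.
func approxKVBytes(key string, valuePayload int) int {
	const lenHeader = 8                // assumed: two 32-bit length prefixes per pair
	encKey := len(key) + 1 + 8 + 4 + 1 // key + sentinel + walltime + logical + ts-length byte
	encVal := 0
	if valuePayload > 0 {
		encVal = 4 + 1 + valuePayload // checksum + value tag + payload
	}
	return lenHeader + encKey + encVal
}

func main() {
	a := approxKVBytes("a", len("abcdef"))       // 34
	aa := approxKVBytes("aa", 0)                 // 24 (tombstone: key only)
	c := approxKVBytes("c", len("ghijkllkjihg")) // 40
	e := approxKVBytes("e", len("mnopqr"))       // 34
	fmt.Println(a, aa, c, e, a+c, a+c+e)         // 34 24 40 34 74 108
}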