metamorphic: allow snapshots in multi-instance, remove dbObjID
This change deprecates and removes `generator.db` in the metamorphic
package, as it is duplicated by the `dbs` slice that was added to
support multi-instance mode. All uses of `dbObjID` outside of tests
are also updated to support multi-instance mode, and now rely on
deriving DB IDs or having them passed in rather than assuming a
single one.
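
For illustration, a self-contained sketch of the new derivation flow. The `objID` encoding below (a type tag packed into the high bits of a uint32) and the map names are simplified stand-ins assumed for this example; `deriveDB` mirrors the updated generator method in the diff further down:

package main

import "fmt"

// Simplified stand-ins for the metamorphic package's types: an objID
// packs an object tag into the high 4 bits and a slot number into the
// rest (an assumption made for this sketch).
type objID uint32

const (
	dbTag uint32 = iota + 1
	batchTag
	snapTag
	iterTag
)

func makeObjID(tag, slot uint32) objID { return objID(tag<<28 | slot) }
func (id objID) tag() uint32           { return uint32(id) >> 28 }

var (
	objDB        = map[objID]objID{} // batch/snapshot -> DB it was created on
	iterReaderID = map[objID]objID{} // iterator -> reader it was opened on
)

// deriveDB mirrors generator.deriveDB below: resolve an object to the
// DB that backs it, instead of assuming a single hard-coded dbObjID.
func deriveDB(readerID objID) objID {
	if readerID.tag() == iterTag {
		readerID = iterReaderID[readerID]
	}
	dbParentID := readerID
	if dbParentID.tag() != dbTag {
		dbParentID = objDB[dbParentID]
	}
	objDB[readerID] = dbParentID
	return dbParentID
}

func main() {
	db2 := makeObjID(dbTag, 2)       // second DB instance
	batch1 := makeObjID(batchTag, 1) // a batch created on db2
	objDB[batch1] = db2
	iter1 := makeObjID(iterTag, 1) // an iterator reading from batch1
	iterReaderID[iter1] = batch1
	fmt.Println(deriveDB(iter1) == db2) // true
}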

Snapshot operations are also updated to track their underlying DB
IDs, and are re-enabled in multi-instance mode, albeit with some
limitations that will be addressed in cockroachdb#2885.
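
The multi-instance limitation amounts to a simple policy, distilled below as a hypothetical standalone helper (the real logic lives in `newSnapshot` in generator.go further down): with more than one DB, snapshots always get bounds so that an EventuallyFileOnlySnapshot (EFOS) can be used, and each one is immediately followed by a flush of its backing DB.

package main

import (
	"fmt"
	"math/rand"
)

// snapshotPlan distills the policy from newSnapshot (generator.go,
// below). roll is a uniform [0,1) sample; numDBs is the DB count.
func snapshotPlan(roll float64, numDBs int) (useBounds, flushAfter bool) {
	// Bounds let a run substitute an EventuallyFileOnlySnapshot for a
	// regular Snapshot; with multiple DBs they are mandatory so that
	// excise-based operations (e.g. replicateOp) remain compatible.
	useBounds = roll < 0.75 || numDBs > 1
	// Until cockroachdb/pebble#2885, every EFOS in multi-instance mode
	// is followed by a flush of the DB it was taken on.
	flushAfter = numDBs > 1
	return useBounds, flushAfter
}

func main() {
	rng := rand.New(rand.NewSource(1))
	fmt.Println(snapshotPlan(rng.Float64(), 1)) // single DB: bounds only when roll < 0.75, no forced flush
	fmt.Println(snapshotPlan(rng.Float64(), 2)) // multi-instance: true true
}
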
itsbilal committed Nov 2, 2023
1 parent 424019f commit 0f93da9
Showing 11 changed files with 101 additions and 100 deletions.
metamorphic/config.go: 7 changes (2 additions, 5 deletions)
@@ -172,13 +172,10 @@ func defaultConfig() config {
 func multiInstanceConfig() config {
 	cfg := defaultConfig()
 	cfg.ops[replicate] = 5
+	// Single deletes and merges are disabled in multi-instance mode, as
+	// replicateOp doesn't support them.
 	cfg.ops[writerSingleDelete] = 0
 	cfg.ops[writerMerge] = 0
-	// TODO(bilal): The disabled operations below should also be supported
-	// in the two-instance test, once they're updated to work in multi-instance
-	// mode.
-	cfg.ops[newSnapshot] = 0
-	cfg.ops[snapshotClose] = 0
 	return cfg
 }

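For context, the `cfg.ops` entries behave as relative weights, so an operation with weight 0 is never drawn. A minimal sketch of that selection mechanism follows; the weighted-draw details here are assumptions for illustration, not the package's exact code:

package main

import (
	"fmt"
	"math/rand"
)

// pick draws one op name with probability proportional to its weight;
// zero-weight ops can never be selected, which is how the config above
// disables writerSingleDelete and writerMerge in multi-instance mode.
func pick(rng *rand.Rand, ops []string, weights []int) string {
	total := 0
	for _, w := range weights {
		total += w
	}
	n := rng.Intn(total)
	for i, w := range weights {
		if n < w {
			return ops[i]
		}
		n -= w
	}
	panic("unreachable")
}

func main() {
	rng := rand.New(rand.NewSource(1))
	ops := []string{"replicate", "writerSet", "writerSingleDelete"}
	weights := []int{5, 100, 0}
	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		counts[pick(rng, ops, weights)]++
	}
	fmt.Println(counts["writerSingleDelete"]) // always 0
}
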
metamorphic/generator.go: 62 changes (37 additions, 25 deletions)
@@ -272,8 +272,8 @@ func suffixFromInt(suffix int64) []byte {
 	return testkeys.Suffix(suffix)
 }
 
-func (g *generator) randKeyToSingleDelete(id objID) []byte {
-	keys := g.keyManager.eligibleSingleDeleteKeys(id)
+func (g *generator) randKeyToSingleDelete(id, dbID objID) []byte {
+	keys := g.keyManager.eligibleSingleDeleteKeys(id, dbID)
 	length := len(keys)
 	if length == 0 {
 		return nil
@@ -562,7 +562,9 @@ func (g *generator) dbCheckpoint() {
 		spans[i].Start = start
 		spans[i].End = end
 	}
+	dbID := g.dbs.rand(g.rng)
 	g.add(&checkpointOp{
+		dbID:  dbID,
 		spans: spans,
 	})
 }
@@ -584,7 +586,7 @@ func (g *generator) dbCompact() {
 }
 
 func (g *generator) dbFlush() {
-	g.add(&flushOp{dbObjID})
+	g.add(&flushOp{g.dbs.rand(g.rng)})
 }
 
 func (g *generator) dbRatchetFormatMajorVersion() {
@@ -680,7 +682,7 @@ func (g *generator) newIter() {
 		// closes.
 	}
 	g.iterReaderID[iterID] = readerID
-	g.deriveDB(iterID, readerID)
+	dbID := g.deriveDB(iterID)
 
 	var opts iterOpts
 	if !g.maybeSetSnapshotIterBounds(readerID, &opts) {
@@ -721,16 +723,11 @@ func (g *generator) newIter() {
 	g.itersLastOpts[iterID] = opts
 	g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
 	g.iterReaderID[iterID] = readerID
-	var derivedDBID objID
-	if readerID.tag() == batchTag {
-		g.deriveDB(iterID, readerID)
-		derivedDBID = g.objDB[iterID]
-	}
 	g.add(&newIterOp{
 		readerID:    readerID,
 		iterID:      iterID,
 		iterOpts:    opts,
-		derivedDBID: derivedDBID,
+		derivedDBID: dbID,
 	})
 }

@@ -752,12 +749,16 @@ func (g *generator) randKeyTypesAndMask() (keyTypes uint32, maskSuffix []byte) {
 	return keyTypes, maskSuffix
 }
 
-func (g *generator) deriveDB(readerID, parentID objID) {
-	dbParentID := parentID
+func (g *generator) deriveDB(readerID objID) objID {
+	if readerID.tag() == iterTag {
+		readerID = g.iterReaderID[readerID]
+	}
+	dbParentID := readerID
 	if dbParentID.tag() != dbTag {
 		dbParentID = g.objDB[dbParentID]
 	}
 	g.objDB[readerID] = dbParentID
+	return dbParentID
 }
 
 func (g *generator) newIterUsingClone() {
@@ -778,7 +779,7 @@ func (g *generator) newIterUsingClone() {
 	}
 	readerID := g.iterReaderID[existingIterID]
 	g.iterReaderID[iterID] = readerID
-	g.deriveDB(iterID, readerID)
+	g.deriveDB(iterID)
 
 	var refreshBatch bool
 	if readerID.tag() == batchTag {
@@ -815,7 +816,8 @@ func (g *generator) iterClose(iterID objID) {
 		// closes.
 	}
 
-	g.add(&closeOp{objID: iterID, derivedDBID: g.objDB[iterID]})
+	readerID := g.iterReaderID[iterID]
+	g.add(&closeOp{objID: iterID, derivedDBID: g.objDB[readerID]})
 }
 
 func (g *generator) iterSetBounds(iterID objID) {
@@ -1003,7 +1005,7 @@ func (g *generator) iterSeekPrefixGE(iterID objID) {
 	// random key.
 	if g.rng.Intn(10) >= 1 {
 		possibleKeys := make([][]byte, 0, 100)
-		inRangeKeys := g.randKeyToReadWithinBounds(lower, upper, dbObjID)
+		inRangeKeys := g.randKeyToReadWithinBounds(lower, upper, g.objDB[iterID])
 		for _, keyMeta := range inRangeKeys {
 			posKey := keyMeta.key
 			var foundWriteWithoutDelete bool
@@ -1144,13 +1146,8 @@ func (g *generator) readerGet() {
 		key = g.randKeyToRead(0.001) // 0.1% new keys
 	}
 	derivedDBID := objID(0)
-	if dbID, ok := g.objDB[readerID]; ok && readerID.tag() == batchTag {
+	if dbID, ok := g.objDB[readerID]; ok && (readerID.tag() == batchTag || readerID.tag() == snapTag) {
 		derivedDBID = dbID
-	} else if readerID.tag() == snapTag {
-		// TODO(bilal): This is legacy behaviour as snapshots aren't supported in
-		// two-instance mode yet. Track snapshots in g.objDB and objToDB and remove this
-		// conditional.
-		derivedDBID = dbObjID
 	}
 	g.add(&getOp{readerID: readerID, key: key, derivedDBID: derivedDBID})
 }
@@ -1211,26 +1208,40 @@ func (g *generator) newSnapshot() {
 	g.init.snapshotSlots++
 	g.liveSnapshots = append(g.liveSnapshots, snapID)
 	g.liveReaders = append(g.liveReaders, snapID)
+	dbID := g.dbs.rand(g.rng)
+	g.objDB[snapID] = dbID
 
 	iters := make(objIDSet)
 	g.snapshots[snapID] = iters
 	g.readers[snapID] = iters
 
 	s := &newSnapshotOp{
+		dbID:   dbID,
 		snapID: snapID,
 	}
 
 	// With 75% probability, impose bounds on the keys that may be read with the
 	// snapshot. Setting bounds allows some runs of the metamorphic test to use
 	// a EventuallyFileOnlySnapshot instead of a Snapshot, testing equivalence
 	// between the two for reads within those bounds.
-	if g.rng.Float64() < 0.75 {
+	//
+	// If we're in multi-instance mode, we must always create bounds, as we will
+	// always create EventuallyFileOnlySnapshots to allow commands that use excises
+	// (eg. replicateOp) to work.
+	if g.rng.Float64() < 0.75 || g.dbs.Len() > 1 {
 		s.bounds = g.generateDisjointKeyRanges(
 			g.rng.Intn(5) + 1, /* between 1-5 */
 		)
 		g.snapshotBounds[snapID] = s.bounds
 	}
 	g.add(s)
+	if g.dbs.Len() > 1 {
+		// Do a flush after each EFOS, if we're in multi-instance mode. This limits
+		// the testing area of EFOS, but allows them to be used alongside operations
+		// that do an excise (eg. replicateOp). This will be revisited when
+		// https://github.com/cockroachdb/pebble/issues/2885 is implemented.
+		g.add(&flushOp{dbID})
+	}
 }
 
 func (g *generator) snapshotClose() {
@@ -1248,10 +1259,10 @@ func (g *generator) snapshotClose() {
 	for _, id := range iters.sorted() {
 		g.liveIters.remove(id)
 		delete(g.iters, id)
-		g.add(&closeOp{objID: id, derivedDBID: g.objDB[id]})
+		g.add(&closeOp{objID: id, derivedDBID: g.objDB[snapID]})
 	}
 
-	g.add(&closeOp{objID: snapID})
+	g.add(&closeOp{objID: snapID, derivedDBID: g.objDB[snapID]})
 }
 
 func (g *generator) writerApply() {
@@ -1469,7 +1480,8 @@ func (g *generator) writerSingleDelete() {
 	}
 
 	writerID := g.liveWriters.rand(g.rng)
-	key := g.randKeyToSingleDelete(writerID)
+	dbID := g.objDB[writerID]
+	key := g.randKeyToSingleDelete(writerID, dbID)
 	if key == nil {
 		return
 	}
metamorphic/generator_test.go: 12 changes (6 additions, 6 deletions)
@@ -18,7 +18,7 @@ import (

 func TestGenerator(t *testing.T) {
 	rng := randvar.NewRand()
-	g := newGenerator(rng, defaultConfig(), newKeyManager())
+	g := newGenerator(rng, defaultConfig(), newKeyManager(1 /* numInstances */))
 
 	g.newBatch()
 	g.newBatch()
@@ -63,7 +63,7 @@ func TestGenerator(t *testing.T) {
 		t.Logf("\n%s", g)
 	}
 
-	g = newGenerator(rng, defaultConfig(), newKeyManager())
+	g = newGenerator(rng, defaultConfig(), newKeyManager(1 /* numInstances */))
 
 	g.newSnapshot()
 	g.newSnapshot()
@@ -96,7 +96,7 @@ func TestGenerator(t *testing.T) {
 		t.Logf("\n%s", g)
 	}
 
-	g = newGenerator(rng, defaultConfig(), newKeyManager())
+	g = newGenerator(rng, defaultConfig(), newKeyManager(1 /* numInstances */))
 
 	g.newIndexedBatch()
 	g.newIndexedBatch()
@@ -133,7 +133,7 @@ func TestGeneratorRandom(t *testing.T) {
 	generateFromSeed := func(cfg config) string {
 		rng := rand.New(rand.NewSource(seed))
 		count := ops.Uint64(rng)
-		return formatOps(generate(rng, count, cfg, newKeyManager()))
+		return formatOps(generate(rng, count, cfg, newKeyManager(cfg.numInstances)))
 	}
 
 	for i := range cfgs {
@@ -170,7 +170,7 @@ func TestGeneratorRandom(t *testing.T) {

 func TestGenerateRandKeyToReadInRange(t *testing.T) {
 	rng := randvar.NewRand()
-	g := newGenerator(rng, defaultConfig(), newKeyManager())
+	g := newGenerator(rng, defaultConfig(), newKeyManager(1 /* numInstances */))
 	// Seed 100 initial keys.
 	for i := 0; i < 100; i++ {
 		_ = g.randKeyToWrite(1.0)
@@ -199,7 +199,7 @@ func TestGenerateRandKeyToReadInRange(t *testing.T) {

 func TestGenerateDisjointKeyRanges(t *testing.T) {
 	rng := randvar.NewRand()
-	g := newGenerator(rng, defaultConfig(), newKeyManager())
+	g := newGenerator(rng, defaultConfig(), newKeyManager(1 /* numInstances */))
 
 	for i := 0; i < 10; i++ {
 		keyRanges := g.generateDisjointKeyRanges(5)
metamorphic/key_manager.go: 34 changes (17 additions, 17 deletions)
@@ -207,20 +207,20 @@ func (k *keyManager) nextMetaTimestamp() int {
 	return ret
 }
 
-var dbObjID objID = makeObjID(dbTag, 1)
-
 // newKeyManager returns a pointer to a new keyManager. Callers should
 // interact with this using addNewKey, eligible*Keys, update,
 // canTolerateApplyFailure methods only.
-func newKeyManager() *keyManager {
+func newKeyManager(numInstances int) *keyManager {
 	m := &keyManager{
 		comparer:             testkeys.Comparer,
 		byObjKey:             make(map[string]*keyMeta),
 		byObj:                make(map[objID][]*keyMeta),
 		globalKeysMap:        make(map[string]*keyMeta),
 		globalKeyPrefixesMap: make(map[string]struct{}),
 	}
-	m.byObj[dbObjID] = []*keyMeta{}
+	for i := 1; i <= max(numInstances, 1); i++ {
+		m.byObj[makeObjID(dbTag, uint32(i))] = []*keyMeta{}
+	}
 	return m
 }

@@ -345,8 +345,8 @@ func (k *keyManager) checkForDelOrSingleDelTransition(dbMeta *keyMeta, globalMet
 	}
 }
 
-func (k *keyManager) checkForDelOrSingleDelTransitionInDB() {
-	keys := k.byObj[dbObjID]
+func (k *keyManager) checkForDelOrSingleDelTransitionInDB(dbID objID) {
+	keys := k.byObj[dbID]
 	for _, dbMeta := range keys {
 		globalMeta := k.globalKeysMap[string(dbMeta.key)]
 		k.checkForDelOrSingleDelTransition(dbMeta, globalMeta)
@@ -385,7 +385,7 @@ func (k *keyManager) update(o op) {
 		meta.dels++
 		globalMeta.dels++
 		meta.updateOps = append(meta.updateOps, keyUpdate{true, k.nextMetaTimestamp()})
-		if s.writerID == dbObjID {
+		if s.writerID.tag() == dbTag {
 			k.checkForDelOrSingleDelTransition(meta, globalMeta)
 		}
 	case *singleDeleteOp:
@@ -397,25 +397,25 @@
 		meta.singleDel = true
 		globalMeta.singleDel = true
 		meta.updateOps = append(meta.updateOps, keyUpdate{true, k.nextMetaTimestamp()})
-		if s.writerID == dbObjID {
+		if s.writerID.tag() == dbTag {
 			k.checkForDelOrSingleDelTransition(meta, globalMeta)
 		}
 	case *ingestOp:
 		// For each batch, merge all keys with the keys in the DB.
 		for _, batchID := range s.batchIDs {
-			k.mergeKeysInto(batchID, dbObjID)
+			k.mergeKeysInto(batchID, s.dbID)
 		}
-		k.checkForDelOrSingleDelTransitionInDB()
+		k.checkForDelOrSingleDelTransitionInDB(s.dbID)
 	case *applyOp:
 		// Merge the keys from this writer into the parent writer.
 		k.mergeKeysInto(s.batchID, s.writerID)
-		if s.writerID == dbObjID {
-			k.checkForDelOrSingleDelTransitionInDB()
+		if s.writerID.tag() == dbTag {
+			k.checkForDelOrSingleDelTransitionInDB(s.writerID)
 		}
 	case *batchCommitOp:
 		// Merge the keys from the batch with the keys from the DB.
-		k.mergeKeysInto(s.batchID, dbObjID)
-		k.checkForDelOrSingleDelTransitionInDB()
+		k.mergeKeysInto(s.batchID, s.dbID)
+		k.checkForDelOrSingleDelTransitionInDB(s.dbID)
 	}
 }

@@ -461,7 +461,7 @@ func (k *keyManager) eligibleWriteKeys() (keys [][]byte) {

 // eligibleSingleDeleteKeys returns a slice of keys that can be safely single
 // deleted, given the writer id.
-func (k *keyManager) eligibleSingleDeleteKeys(id objID) (keys [][]byte) {
+func (k *keyManager) eligibleSingleDeleteKeys(id, dbID objID) (keys [][]byte) {
 	// Creating and sorting this slice of keys is wasteful given that the
 	// caller will pick one, but makes it simpler for unit testing.
 	addForObjID := func(id objID) {
@@ -472,8 +472,8 @@ func (k *keyManager) eligibleSingleDeleteKeys(id objID) (keys [][]byte) {
 		}
 	}
 	addForObjID(id)
-	if id != dbObjID {
-		addForObjID(dbObjID)
+	if id.tag() != dbTag {
+		addForObjID(dbID)
 	}
 	slices.SortFunc(keys, k.comparer.Compare)
 	return keys
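
To make the key_manager.go changes concrete, here is a self-contained sketch of the two updated pieces: per-instance seeding in `newKeyManager` and the new `dbID` parameter of `eligibleSingleDeleteKeys`. The types and key storage below are simplified stand-ins for illustration, not the real structures:

package main

import "fmt"

// Simplified objID encoding, as in the earlier sketch (an assumption
// made for illustration).
type objID uint32

const (
	dbTag    uint32 = 1
	batchTag uint32 = 2
)

func makeObjID(tag, slot uint32) objID { return objID(tag<<28 | slot) }
func (id objID) tag() uint32           { return uint32(id) >> 28 }

// byObj stands in for keyManager.byObj: the keys tracked per object.
var byObj = map[objID][]string{}

// seedInstances mirrors the newKeyManager change: one entry per DB
// instance instead of a single hard-coded dbObjID slot.
func seedInstances(numInstances int) {
	for i := 1; i <= max(numInstances, 1); i++ { // max is the Go 1.21 builtin
		byObj[makeObjID(dbTag, uint32(i))] = []string{}
	}
}

// eligibleSingleDeleteKeys mirrors the updated signature: candidates
// come from the writer itself plus the specific DB backing it, rather
// than from a fixed dbObjID.
func eligibleSingleDeleteKeys(id, dbID objID) []string {
	keys := append([]string(nil), byObj[id]...)
	if id.tag() != dbTag {
		keys = append(keys, byObj[dbID]...)
	}
	return keys
}

func main() {
	seedInstances(2)
	db2 := makeObjID(dbTag, 2)
	batch := makeObjID(batchTag, 1)
	byObj[batch] = []string{"a"}
	byObj[db2] = append(byObj[db2], "b")
	fmt.Println(eligibleSingleDeleteKeys(batch, db2)) // [a b]
}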
