From 8b976f5bf84db512f715e7d7052d2d2d21318653 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Tue, 26 Mar 2024 20:04:09 -0400 Subject: [PATCH] CBG-3764 create multi-xattr subdoc APIs (#6739) Co-authored-by: adamcfraser --- auth/auth.go | 4 +- base/bucket_gocb_test.go | 562 +++++++++++------- base/collection.go | 1 + base/collection_gocb.go | 2 +- base/collection_xattr.go | 353 ++++++----- base/collection_xattr_common.go | 140 ++--- base/error.go | 5 +- base/heartbeat.go | 2 +- base/leaky_datastore.go | 82 +-- base/rosmar_cluster.go | 6 +- base/sg_cluster_cfg.go | 2 +- db/attachment_compaction.go | 8 +- db/attachment_compaction_test.go | 98 +-- db/attachment_test.go | 27 +- db/background_mgr.go | 2 +- db/crud.go | 45 +- db/crud_test.go | 17 +- db/database.go | 38 +- db/database_collection.go | 10 + db/database_test.go | 12 +- db/import.go | 42 +- db/import_test.go | 35 +- db/util_testing.go | 2 +- go.mod | 10 +- go.sum | 18 +- rest/adminapitest/admin_api_test.go | 1 + rest/api_test.go | 10 +- .../attachment_compaction_api_test.go | 2 +- rest/importtest/collections_import_test.go | 12 +- rest/importtest/import_test.go | 24 +- rest/importuserxattrtest/import_test.go | 74 ++- rest/importuserxattrtest/rawdoc_test.go | 7 +- rest/importuserxattrtest/revcache_test.go | 29 +- rest/importuserxattrtest/revid_import_test.go | 22 +- rest/indextest/index_test.go | 8 +- rest/revocation_test.go | 15 +- rest/user_api_test.go | 21 +- rest/utilities_testing_attachment.go | 13 +- rest/xattr_upgrade_test.go | 6 +- xdcr/cbs_xdcr_test.go | 16 +- 40 files changed, 1023 insertions(+), 760 deletions(-) diff --git a/auth/auth.go b/auth/auth.go index c399084e2c..aa3d22ed4a 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -489,7 +489,7 @@ func (auth *Authenticator) Save(p Principal) error { return err } - casOut, writeErr := auth.datastore.WriteCas(p.DocID(), 0, 0, p.Cas(), p, 0) + casOut, writeErr := auth.datastore.WriteCas(p.DocID(), 0, p.Cas(), p, 0) if writeErr != nil { return writeErr } @@ -512,7 +512,7 @@ func (auth *Authenticator) Save(p Principal) error { // Used for resync func (auth *Authenticator) UpdateSequenceNumber(p Principal, seq uint64) error { p.SetSequence(seq) - casOut, writeErr := auth.datastore.WriteCas(p.DocID(), 0, 0, p.Cas(), p, 0) + casOut, writeErr := auth.datastore.WriteCas(p.DocID(), 0, p.Cas(), p, 0) if writeErr != nil { return writeErr } diff --git a/base/bucket_gocb_test.go b/base/bucket_gocb_test.go index 7dbee78b60..ea0ff5a606 100644 --- a/base/bucket_gocb_test.go +++ b/base/bucket_gocb_test.go @@ -20,7 +20,6 @@ import ( "github.com/couchbase/gocb/v2" sgbucket "github.com/couchbase/sg-bucket" - pkgerrors "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -109,10 +108,10 @@ func TestWriteCasBasic(t *testing.T) { val := []byte("bar2") cas := uint64(0) - cas, err := dataStore.WriteCas(key, 0, 0, cas, []byte("bar"), sgbucket.Raw) + cas, err := dataStore.WriteCas(key, 0, cas, []byte("bar"), sgbucket.Raw) require.NoError(t, err) - casOut, err := dataStore.WriteCas(key, 0, 0, cas, val, sgbucket.Raw) + casOut, err := dataStore.WriteCas(key, 0, cas, val, sgbucket.Raw) require.NoError(t, err) require.NotEqual(t, cas, casOut) @@ -134,16 +133,16 @@ func TestWriteCasAdvanced(t *testing.T) { casZero := uint64(0) // write doc to bucket, giving cas value of 0 - _, err := dataStore.WriteCas(key, 0, 0, casZero, []byte("bar"), sgbucket.Raw) + _, err := dataStore.WriteCas(key, 0, casZero, []byte("bar"), sgbucket.Raw) require.NoError(t, err) // try to 
write doc to bucket, giving cas value of 0 again -- exepct a failure - secondWriteCas, err := dataStore.WriteCas(key, 0, 0, casZero, []byte("bar"), sgbucket.Raw) + secondWriteCas, err := dataStore.WriteCas(key, 0, casZero, []byte("bar"), sgbucket.Raw) require.Error(t, err) // try to write doc to bucket again, giving invalid cas value -- expect a failure // also, expect no retries, however there is currently no easy way to detect that. - _, err = dataStore.WriteCas(key, 0, 0, secondWriteCas-1, []byte("bar"), sgbucket.Raw) + _, err = dataStore.WriteCas(key, 0, secondWriteCas-1, []byte("bar"), sgbucket.Raw) require.Error(t, err) require.NoError(t, dataStore.Delete(key)) @@ -389,15 +388,20 @@ func TestXattrWriteCasSimple(t *testing.T) { xattrVal["rev"] = "1-1234" cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, syncMutateInOpts()) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, syncMutateInOpts()) require.NoError(t, err) log.Printf("Post-write, cas is %d", cas) - var retrievedVal map[string]interface{} - var retrievedXattr map[string]interface{} - getCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + require.Contains(t, xattrs, xattrName) + marshalledXattr, ok := xattrs[xattrName] + require.True(t, ok) + var retrievedVal map[string]any + var retrievedXattr map[string]any + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) assert.Equal(t, cas, getCas) assert.Equal(t, val["body_field"], retrievedVal["body_field"]) assert.Equal(t, xattrVal["seq"], retrievedXattr["seq"]) @@ -410,9 +414,12 @@ func TestXattrWriteCasSimple(t *testing.T) { assert.Equal(t, Crc32cHashString(valBytes), macroBodyHashString) // Validate against $document.value_crc32c - var retrievedVxattr map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key, "$document", "", &retrievedVal, &retrievedVxattr, nil) + _, xattrs, _, err = dataStore.GetWithXattrs(ctx, key, []string{"$document"}) require.NoError(t, err) + + var retrievedVxattr map[string]interface{} + require.NoError(t, json.Unmarshal(xattrs["$document"], &retrievedVxattr)) + vxattrCrc32c, ok := retrievedVxattr["value_crc32c"].(string) assert.True(t, ok, "Unable to retrieve virtual xattr crc32c as string") @@ -441,16 +448,21 @@ func TestXattrWriteCasUpsert(t *testing.T) { xattrVal["rev"] = "1-1234" cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) log.Printf("Post-write, cas is %d", cas) var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - getCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) - log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal, retrievedXattr) assert.Equal(t, cas, getCas) + err = JSONUnmarshal(xattrs[xattrName], &retrievedXattr) + require.NoError(t, err) + + err = JSONUnmarshal(rawVal, &retrievedVal) + require.NoError(t, err) + assert.Equal(t, 
val["body_field"], retrievedVal["body_field"]) assert.Equal(t, xattrVal["seq"], retrievedXattr["seq"]) assert.Equal(t, xattrVal["rev"], retrievedXattr["rev"]) @@ -460,14 +472,19 @@ func TestXattrWriteCasUpsert(t *testing.T) { xattrVal2 := make(map[string]interface{}) xattrVal2["seq"] = float64(124) xattrVal2["rev"] = "2-5678" - cas, err = dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, getCas, val2, xattrVal2, nil) + cas, err = dataStore.WriteWithXattrs(ctx, key, 0, getCas, MustJSONMarshal(t, val2), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal2)}, nil) assert.NoError(t, err, "WriteCasWithXattr error") log.Printf("Post-write, cas is %d", cas) var retrievedVal2 map[string]interface{} var retrievedXattr2 map[string]interface{} - getCas, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal2, &retrievedXattr2, nil) + rawVal2, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + err = JSONUnmarshal(rawVal2, &retrievedVal2) + require.NoError(t, err) + + require.NoError(t, JSONUnmarshal(xattrs[xattrName], &retrievedXattr2)) + log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal2, retrievedXattr2) assert.Equal(t, cas, getCas) assert.Equal(t, val2["body_field"], retrievedVal2["body_field"]) @@ -496,15 +513,18 @@ func TestXattrWriteCasWithXattrCasCheck(t *testing.T) { xattrVal["rev"] = "1-1234" cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) - require.NoError(t, err) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) + require.NoError(t, err, "WriteCasWithXattr error") log.Printf("Post-write, cas is %d", cas) var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - getCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, "") + rawVal, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) - log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal, retrievedXattr) + marshalledXattr, ok := xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) assert.Equal(t, cas, getCas) assert.Equal(t, val["sg_field"], retrievedVal["sg_field"]) assert.Equal(t, xattrVal["seq"], retrievedXattr["seq"]) @@ -518,15 +538,18 @@ func TestXattrWriteCasWithXattrCasCheck(t *testing.T) { // Attempt to update with the previous CAS val["sg_field"] = "sg_value_mod" xattrVal["rev"] = "2-1234" - _, err = dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, getCas, val, xattrVal, nil) + _, err = dataStore.WriteWithXattrs(ctx, key, 0, getCas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) assert.True(t, IsCasMismatch(err), "error is %v", err) // Retrieve again, ensure we get the SDK value, SG xattr retrievedVal = nil retrievedXattr = nil - _, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, _, err = dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) - log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal, retrievedXattr) + marshalledXattr, ok = xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) assert.Equal(t, 
nil, retrievedVal["sg_field"]) assert.Equal(t, updatedVal["sdk_field"], retrievedVal["sdk_field"]) assert.Equal(t, xattrVal["seq"], retrievedXattr["seq"]) @@ -548,24 +571,25 @@ func TestXattrWriteCasRaw(t *testing.T) { xattrName := SyncXattrName val := make(map[string]interface{}) val["body_field"] = "1234" - xattrVal := make(map[string]interface{}) xattrVal["seq"] = float64(123) xattrVal["rev"] = "1-1234" + xattrValRaw := MustJSONMarshal(t, xattrVal) + cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, MustJSONMarshal(t, val), MustJSONMarshal(t, xattrVal), nil) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: xattrValRaw}, nil) require.NoError(t, err) - var retrievedValByte []byte - var retrievedXattrByte []byte - getCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedValByte, &retrievedXattrByte, nil) + rawVal, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) var retrievedVal map[string]interface{} + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) + retrievedXattrByte, ok := xattrs[xattrName] + require.True(t, ok) var retrievedXattr map[string]interface{} - _ = json.Unmarshal(retrievedValByte, &retrievedVal) - _ = json.Unmarshal(retrievedXattrByte, &retrievedXattr) + require.NoError(t, json.Unmarshal(retrievedXattrByte, &retrievedXattr)) log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal, retrievedXattr) assert.Equal(t, cas, getCas) assert.Equal(t, val["body_field"], retrievedVal["body_field"]) @@ -597,14 +621,18 @@ func TestXattrWriteCasTombstoneResurrect(t *testing.T) { // Write document with xattr cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) log.Printf("Post-write, cas is %d", cas) var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - getCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + marshalledXattr, ok := xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal, retrievedXattr) assert.Equal(t, cas, getCas) assert.Equal(t, val["body_field"], retrievedVal["body_field"]) @@ -620,12 +648,17 @@ func TestXattrWriteCasTombstoneResurrect(t *testing.T) { xattrVal = make(map[string]interface{}) xattrVal["seq"] = float64(456) xattrVal["rev"] = "2-2345" - _, err = dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + + _, err = dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // Verify retrieval - _, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, _, err = dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + marshalledXattr, ok = xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) 
log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal, retrievedXattr) assert.Equal(t, val["body_field"], retrievedVal["body_field"]) @@ -655,14 +688,22 @@ func TestXattrWriteCasTombstoneUpdate(t *testing.T) { // Write document with xattr cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) + log.Printf("Wrote document") log.Printf("Post-write, cas is %d", cas) var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - getCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + + rawVal, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + marshalledXattr, ok := xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) + + log.Printf("Retrieved document") log.Printf("TestWriteCasXATTR retrieved: %s, %s", retrievedVal, retrievedXattr) assert.Equal(t, cas, getCas) assert.Equal(t, val["body_field"], retrievedVal["body_field"]) @@ -676,15 +717,21 @@ func TestXattrWriteCasTombstoneUpdate(t *testing.T) { xattrVal = make(map[string]interface{}) xattrVal["seq"] = float64(456) xattrVal["rev"] = "2-2345" - _, err = dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, nil, xattrVal, nil) + + _, err = dataStore.WriteWithXattrs(ctx, key, 0, cas, nil, map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) log.Printf("Updated tombstoned document") // Verify retrieval var modifiedVal map[string]interface{} var modifiedXattr map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &modifiedVal, &modifiedXattr, nil) + + rawVal, xattrs, _, err = dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + xattrBytes, ok := xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(xattrBytes, &modifiedXattr)) + require.NoError(t, JSONUnmarshal(rawVal, &modifiedVal)) log.Printf("Retrieved tombstoned document") log.Printf("TestWriteCasXATTR retrieved modified: %s, %s", modifiedVal, modifiedXattr) assert.Equal(t, xattrVal["seq"], modifiedXattr["seq"]) @@ -693,7 +740,6 @@ func TestXattrWriteCasTombstoneUpdate(t *testing.T) { // TestXattrWriteUpdateXattr. Validates basic write of document with xattr, and retrieval of the same doc w/ xattr. 
func TestXattrWriteUpdateXattr(t *testing.T) { - SkipXattrTestsIfNotEnabled(t) ctx := TestCtx(t) @@ -711,28 +757,23 @@ func TestXattrWriteUpdateXattr(t *testing.T) { xattrVal["rev"] = "1-1234" // Dummy write update function that increments 'counter' in the doc and 'seq' in the xattr - writeUpdateFunc := func(doc []byte, xattr []byte, userXattr []byte, cas uint64) ( - updatedDoc []byte, updatedXattr []byte, isDelete bool, updatedExpiry *uint32, updatedSpec []sgbucket.MacroExpansionSpec, err error) { - + writeUpdateFunc := func(doc []byte, xattrs map[string][]byte, cas uint64) (sgbucket.UpdatedDoc, error) { var docMap map[string]interface{} var xattrMap map[string]interface{} // Marshal the doc if len(doc) > 0 { - err = JSONUnmarshal(doc, &docMap) + err := JSONUnmarshal(doc, &docMap) if err != nil { - return nil, nil, false, nil, nil, pkgerrors.Wrapf(err, "Unable to unmarshal incoming doc") + return sgbucket.UpdatedDoc{}, fmt.Errorf("Unable to unmarshal incoming doc: %w", err) } } else { // No incoming doc, treat as insert. docMap = make(map[string]interface{}) } - + xattr := xattrs[xattrName] // Marshal the xattr if len(xattr) > 0 { - err = JSONUnmarshal(xattr, &xattrMap) - if err != nil { - return nil, nil, false, nil, nil, pkgerrors.Wrapf(err, "Unable to unmarshal incoming xattr") - } + require.NoError(t, JSONUnmarshal(xattr, &xattrMap)) } else { // No incoming xattr, treat as insert. xattrMap = make(map[string]interface{}) @@ -753,30 +794,40 @@ func TestXattrWriteUpdateXattr(t *testing.T) { } else { xattrMap["seq"] = float64(1) } - - updatedDoc = MustJSONMarshal(t, docMap) - updatedXattr = MustJSONMarshal(t, xattrMap) - return updatedDoc, updatedXattr, false, nil, updatedSpec, nil + updatedDoc := sgbucket.UpdatedDoc{ + Doc: MustJSONMarshal(t, docMap), + Xattrs: map[string][]byte{xattrName: MustJSONMarshal(t, xattrMap)}, + } + return updatedDoc, nil } // Insert - _, err := dataStore.WriteUpdateWithXattr(ctx, key, xattrName, "", 0, nil, nil, writeUpdateFunc) + _, err := dataStore.WriteUpdateWithXattrs(ctx, key, []string{xattrName}, 0, nil, nil, writeUpdateFunc) require.NoError(t, err) var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) - log.Printf("Retrieval after WriteUpdate insert: doc: %v, xattr: %v", retrievedVal, retrievedXattr) + rawVal, xattrs, _, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + marshalledXattr, ok := xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) + + log.Printf("Retrieval after WriteUpdate insert: doc: %v, xattr: %v", retrievedVal, retrievedXattr) assert.Equal(t, float64(1), retrievedVal["counter"]) assert.Equal(t, float64(1), retrievedXattr["seq"]) // Update - _, err = dataStore.WriteUpdateWithXattr(ctx, key, xattrName, "", 0, nil, nil, writeUpdateFunc) + _, err = dataStore.WriteUpdateWithXattrs(ctx, key, []string{xattrName}, 0, nil, nil, writeUpdateFunc) require.NoError(t, err) - _, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, _, err = dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + marshalledXattr, ok = xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(marshalledXattr, &retrievedXattr)) + require.NoError(t, JSONUnmarshal(rawVal, &retrievedVal)) 
log.Printf("Retrieval after WriteUpdate update: doc: %v, xattr: %v", retrievedVal, retrievedXattr) assert.Equal(t, float64(2), retrievedVal["counter"]) @@ -795,61 +846,56 @@ func TestWriteUpdateWithXattrUserXattr(t *testing.T) { xattrKey := SyncXattrName userXattrKey := "UserXattr" - writeUpdateFunc := func(doc []byte, xattr []byte, userXattr []byte, cas uint64) (updatedDoc []byte, updatedXattr []byte, isDelete bool, updatedExpiry *uint32, updatedSpec []sgbucket.MacroExpansionSpec, err error) { + writeUpdateFunc := func(doc []byte, xattrs map[string][]byte, cas uint64) (sgbucket.UpdatedDoc, error) { + xattr := xattrs[xattrKey] var docMap map[string]interface{} var xattrMap map[string]interface{} if len(doc) > 0 { - err = JSONUnmarshal(xattr, &docMap) - if err != nil { - return nil, nil, false, nil, nil, err - } + require.NoError(t, JSONUnmarshal(xattr, &docMap)) } else { docMap = make(map[string]interface{}) } if len(xattr) > 0 { - err = JSONUnmarshal(xattr, &xattrMap) - if err != nil { - return nil, nil, false, nil, nil, err - } + require.NoError(t, JSONUnmarshal(xattr, &xattrMap)) } else { xattrMap = make(map[string]interface{}) } + userXattr := xattrs[userXattrKey] var userXattrMap map[string]interface{} if len(userXattr) > 0 { - err = JSONUnmarshal(userXattr, &userXattrMap) - if err != nil { - return nil, nil, false, nil, nil, err - } + require.NoError(t, JSONUnmarshal(userXattr, &userXattrMap)) } else { userXattrMap = nil } docMap["userXattrVal"] = userXattrMap - updatedDoc = MustJSONMarshal(t, docMap) - updatedXattr = MustJSONMarshal(t, xattrMap) - - return updatedDoc, updatedXattr, false, nil, updatedSpec, nil + return sgbucket.UpdatedDoc{ + Doc: MustJSONMarshal(t, docMap), + Xattrs: map[string][]byte{ + xattrKey: MustJSONMarshal(t, xattrMap), + }, + }, nil } - _, err := dataStore.WriteUpdateWithXattr(ctx, key, xattrKey, userXattrKey, 0, nil, nil, writeUpdateFunc) - assert.NoError(t, err) + _, err := dataStore.WriteUpdateWithXattrs(ctx, key, []string{xattrKey, userXattrKey}, 0, nil, nil, writeUpdateFunc) + require.NoError(t, err) var gotBody map[string]interface{} - _, err = dataStore.Get(key, &gotBody) + cas, err := dataStore.Get(key, &gotBody) assert.NoError(t, err) assert.Equal(t, nil, gotBody["userXattrVal"]) userXattrVal := map[string]interface{}{"val": "val"} - _, err = dataStore.WriteUserXattr(key, userXattrKey, userXattrVal) + _, err = dataStore.UpdateXattrs(ctx, key, 0, cas, map[string][]byte{userXattrKey: MustJSONMarshal(t, userXattrVal)}, nil) assert.NoError(t, err) - _, err = dataStore.WriteUpdateWithXattr(ctx, key, xattrKey, userXattrKey, 0, nil, nil, writeUpdateFunc) + _, err = dataStore.WriteUpdateWithXattrs(ctx, key, []string{xattrKey, userXattrKey}, 0, nil, nil, writeUpdateFunc) assert.NoError(t, err) _, err = dataStore.Get(key, &gotBody) @@ -877,9 +923,10 @@ func TestXattrDeleteDocument(t *testing.T) { xattrVal["rev"] = "1-1234" key := t.Name() + // Create w/ XATTR, delete doc and XATTR, retrieve doc (expect fail), retrieve XATTR (expect success) cas := uint64(0) - _, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + _, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // Delete the document. 
@@ -888,9 +935,12 @@ func TestXattrDeleteDocument(t *testing.T) { // Verify delete of body was successful, retrieve XATTR var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, _, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + require.Nil(t, rawVal) assert.Len(t, retrievedVal, 0) + assert.Equal(t, 0, len(retrievedVal)) + require.NoError(t, JSONUnmarshal(xattrs[xattrName], &retrievedXattr)) assert.Equal(t, float64(123), retrievedXattr["seq"]) } @@ -918,7 +968,7 @@ func TestXattrDeleteDocumentUpdate(t *testing.T) { // Create w/ XATTR, delete doc and XATTR, retrieve doc (expect fail), retrieve XATTR (expect success) cas := uint64(0) - _, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + _, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // Delete the document. @@ -927,9 +977,13 @@ func TestXattrDeleteDocumentUpdate(t *testing.T) { // Verify delete of body was successful, retrieve XATTR var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - getCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, getCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) + require.Nil(t, rawVal) + assert.Len(t, retrievedVal, 0) + assert.Equal(t, 0, len(retrievedVal)) + require.NoError(t, JSONUnmarshal(xattrs[xattrName], &retrievedXattr)) assert.Equal(t, float64(1), retrievedXattr["seq"]) log.Printf("Post-delete xattr (1): %s", retrievedXattr) log.Printf("Post-delete cas (1): %x", getCas) @@ -937,14 +991,17 @@ func TestXattrDeleteDocumentUpdate(t *testing.T) { // Update the xattr only xattrVal["seq"] = 2 xattrVal["rev"] = "1-1234" - casOut, writeErr := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, getCas, nil, xattrVal, nil) + casOut, writeErr := dataStore.WriteWithXattrs(ctx, key, 0, getCas, nil, map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) assert.NoError(t, writeErr, "Error updating xattr post-delete") log.Printf("WriteCasWithXattr cas: %d", casOut) // Retrieve the document, validate cas values - var postDeleteVal map[string]interface{} var postDeleteXattr map[string]interface{} - getCas2, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &postDeleteVal, &postDeleteXattr, nil) + postDeleteVal, xattrs, getCas2, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) + postDeleteXattrBytes, ok := xattrs[xattrName] + require.True(t, ok) + require.Nil(t, postDeleteVal) + require.NoError(t, JSONUnmarshal(postDeleteXattrBytes, &postDeleteXattr)) assert.NoError(t, err, "Error getting document post-delete") assert.Equal(t, float64(2), postDeleteXattr["seq"]) assert.Len(t, postDeleteVal, 0) @@ -973,20 +1030,22 @@ func TestXattrDeleteDocumentAndUpdateXattr(t *testing.T) { // Create w/ XATTR, delete doc and XATTR, retrieve doc (expect fail), retrieve XATTR (expect fail) cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) - _, mutateErr := dataStore.UpdateXattrDeleteBody(ctx, key, xattrName, 0, cas, xattrVal, 
nil) - assert.NoError(t, mutateErr) + const deleteBody = true + _, err = dataStore.WriteTombstoneWithXattrs(ctx, key, 0, cas, map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, deleteBody, nil) + require.NoError(t, err) // Verify delete of body and update of XATTR - var retrievedVal map[string]interface{} var retrievedXattr map[string]interface{} - mutateCas, err := dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + rawVal, xattrs, mutateCas, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) + require.Nil(t, rawVal) require.NoError(t, err) - assert.Len(t, retrievedVal, 0) + retrievedXattrBytes, ok := xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(retrievedXattrBytes, &retrievedXattr)) assert.Equal(t, float64(123), retrievedXattr["seq"]) - log.Printf("value: %v, xattr: %v", retrievedVal, retrievedXattr) log.Printf("MutateInEx cas: %v", mutateCas) } @@ -1021,13 +1080,13 @@ func TestXattrTombstoneDocAndUpdateXattr(t *testing.T) { // Create w/ XATTR cas1 := uint64(0) - cas1, err = dataStore.WriteCasWithXattr(ctx, key1, xattrName, 0, cas1, val, xattrVal, nil) + cas1, err = dataStore.WriteWithXattrs(ctx, key1, 0, cas1, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // 2. Create document with no XATTR val = make(map[string]interface{}) val["type"] = key2 - cas2, writeErr := dataStore.WriteCas(key2, 0, 0, 0, val, 0) + cas2, writeErr := dataStore.WriteCas(key2, 0, 0, val, 0) assert.NoError(t, writeErr) // 3. Xattr, no document @@ -1040,7 +1099,7 @@ func TestXattrTombstoneDocAndUpdateXattr(t *testing.T) { // Create w/ XATTR cas3int := uint64(0) - cas3int, err = dataStore.WriteCasWithXattr(ctx, key3, xattrName, 0, cas3int, val, xattrVal, nil) + cas3int, err = dataStore.WriteWithXattrs(ctx, key3, 0, cas3int, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // Delete the doc body cas3, removeErr := dataStore.Remove(key3, cas3int) @@ -1067,21 +1126,33 @@ func TestXattrTombstoneDocAndUpdateXattr(t *testing.T) { log.Printf("Delete testing for key: %v", key) - // First attempt to update with a bad cas value, and ensure we're getting the expected error - _, errCasMismatch := dataStore.WriteWithXattr(ctx, key, xattrName, 0, uint64(1234), nil, xattrValBytes, true, shouldDeleteBody[i], nil) + if key != key3 { - assert.True(t, IsCasMismatch(errCasMismatch), fmt.Sprintf("Expected cas mismatch for %s", key)) + // First attempt to update with a bad cas value, and ensure we're getting the expected error + _, errCasMismatch := dataStore.WriteTombstoneWithXattrs(ctx, key, 0, uint64(1234), map[string][]byte{xattrName: xattrValBytes}, shouldDeleteBody[i], nil) - _, errDelete := dataStore.WriteWithXattr(ctx, key, xattrName, 0, casValues[i], nil, xattrValBytes, true, shouldDeleteBody[i], nil) - log.Printf("Delete error: %v", errDelete) + require.True(t, IsCasMismatch(errCasMismatch), fmt.Sprintf("Expected cas mismatch for %s", key)) - assert.NoError(t, errDelete, fmt.Sprintf("Unexpected error deleting %s", key)) + _, errDelete := dataStore.WriteTombstoneWithXattrs(ctx, key, 0, casValues[i], map[string][]byte{xattrName: xattrValBytes}, shouldDeleteBody[i], nil) + log.Printf("Delete error: %v", errDelete) + + require.NoError(t, errDelete, fmt.Sprintf("Unexpected error deleting %s", key)) + } else { + _, errCasMismatch := dataStore.WriteWithXattrs(ctx, key, 0, uint64(1234), nil, map[string][]byte{xattrName: 
xattrValBytes}, nil) + + require.True(t, IsCasMismatch(errCasMismatch), fmt.Sprintf("Expected cas mismatch for %s", key)) + + _, errDelete := dataStore.WriteWithXattrs(ctx, key, 0, casValues[i], nil, map[string][]byte{xattrName: xattrValBytes}, nil) + log.Printf("Delete error: %v", errDelete) + + require.NoError(t, errDelete, fmt.Sprintf("Unexpected error deleting %s", key)) + } assert.True(t, verifyDocDeletedXattrExists(ctx, dataStore, key, xattrName), fmt.Sprintf("Expected doc %s to be deleted", key)) } // Now attempt to tombstone key4 (NoDocNoXattr), should not return an error (per SG #3307). Should save xattr metadata. log.Printf("Deleting key: %v", key4) - _, errDelete := dataStore.WriteWithXattr(ctx, key4, xattrName, 0, uint64(0), nil, xattrValBytes, true, false, nil) + _, errDelete := dataStore.WriteTombstoneWithXattrs(ctx, key4, 0, 0, map[string][]byte{xattrName: xattrValBytes}, false, nil) assert.NoError(t, errDelete, "Unexpected error tombstoning non-existent doc") assert.True(t, verifyDocDeletedXattrExists(ctx, dataStore, key4, xattrName), "Expected doc to be deleted, but xattrs to exist") @@ -1118,7 +1189,7 @@ func TestXattrDeleteDocAndXattr(t *testing.T) { // Create w/ XATTR cas1 := uint64(0) - _, err = dataStore.WriteCasWithXattr(ctx, key1, xattrName, 0, cas1, val, xattrVal, nil) + _, err = dataStore.WriteWithXattrs(ctx, key1, 0, cas1, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // 2. Create document with no XATTR @@ -1137,8 +1208,9 @@ func TestXattrDeleteDocAndXattr(t *testing.T) { // Create w/ XATTR cas3int := uint64(0) - _, err = dataStore.WriteCasWithXattr(ctx, key3, xattrName, 0, cas3int, val, xattrVal, nil) + _, err = dataStore.WriteWithXattrs(ctx, key3, 0, cas3int, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) + // Delete the doc body require.NoError(t, dataStore.Delete(key3)) @@ -1149,17 +1221,17 @@ func TestXattrDeleteDocAndXattr(t *testing.T) { keys := []string{key1, key2, key3} for _, key := range keys { log.Printf("Deleting key: %v", key) - errDelete := dataStore.DeleteWithXattr(ctx, key, xattrName) - assert.NoError(t, errDelete, fmt.Sprintf("Unexpected error deleting %s", key)) - assert.True(t, verifyDocAndXattrDeleted(ctx, dataStore, key, xattrName), "Expected doc to be deleted") + errDelete := dataStore.DeleteWithXattrs(ctx, key, []string{xattrName}) + require.NoError(t, errDelete, fmt.Sprintf("Unexpected error deleting %s", key)) + requireDocAndXattrDeleted(t, dataStore, key, xattrName) } // Now attempt to delete key4 (NoDocNoXattr), which is expected to return a Key Not Found error log.Printf("Deleting key: %v", key4) - errDelete := dataStore.DeleteWithXattr(ctx, key4, xattrName) + errDelete := dataStore.DeleteWithXattrs(ctx, key4, []string{xattrName}) assert.Error(t, errDelete) assert.Truef(t, IsDocNotFoundError(errDelete), "Exepcted keynotfound error but got %v", errDelete) - assert.True(t, verifyDocAndXattrDeleted(ctx, dataStore, key4, xattrName), "Expected doc to be deleted") + requireDocAndXattrDeleted(t, dataStore, key4, xattrName) } // This simulates a race condition by calling deleteWithXattrInternal() and passing a custom @@ -1181,7 +1253,7 @@ func TestDeleteWithXattrWithSimulatedRaceResurrect(t *testing.T) { createTombstonedDoc(t, dataStore, key, xattrName) numTimesCalledBack := 0 - callback := func(k string, xattrKey string) { + callback := func(k string, _ []string) { // Only want the callback to execute once. 
Should be called multiple times (twice) due to expected // cas failure due to using stale cas @@ -1196,7 +1268,7 @@ func TestDeleteWithXattrWithSimulatedRaceResurrect(t *testing.T) { xattrVal := make(map[string]interface{}) xattrVal["seq"] = float64(456) xattrVal["rev"] = "2-2345" - _, writeErr := dataStore.WriteCasWithXattr(ctx, k, xattrKey, 0, 0, updatedVal, xattrVal, nil) + _, writeErr := dataStore.WriteWithXattrs(ctx, k, 0, 0, MustJSONMarshal(t, updatedVal), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, writeErr) } @@ -1204,7 +1276,7 @@ func TestDeleteWithXattrWithSimulatedRaceResurrect(t *testing.T) { // case to KvXattrStore to pass to deleteWithXattrInternal collection, ok := dataStore.(*Collection) require.True(t, ok) - deleteErr := deleteWithXattrInternal(ctx, collection, key, xattrName, callback) + deleteErr := deleteWithXattrInternal(ctx, collection, key, []string{xattrName}, callback) assert.Equal(t, 1, numTimesCalledBack) assert.Error(t, deleteErr) } @@ -1237,7 +1309,7 @@ func TestXattrRetrieveDocumentAndXattr(t *testing.T) { // Create w/ XATTR cas := uint64(0) - _, err = dataStore.WriteCasWithXattr(ctx, key1, xattrName, 0, cas, val, xattrVal, nil) + _, err = dataStore.WriteWithXattrs(ctx, key1, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // 2. Create document with no XATTR @@ -1256,8 +1328,9 @@ func TestXattrRetrieveDocumentAndXattr(t *testing.T) { // Create w/ XATTR cas = uint64(0) - _, err = dataStore.WriteCasWithXattr(ctx, key3, xattrName, 0, cas, val, xattrVal, nil) + _, err = dataStore.WriteWithXattrs(ctx, key3, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) + // Delete the doc require.NoError(t, dataStore.Delete(key3)) @@ -1266,31 +1339,37 @@ func TestXattrRetrieveDocumentAndXattr(t *testing.T) { // Attempt to retrieve all 4 docs var key1DocResult map[string]interface{} var key1XattrResult map[string]interface{} - _, key1err := dataStore.GetWithXattr(ctx, key1, xattrName, "", &key1DocResult, &key1XattrResult, nil) + rawVal, key1xattrs, _, key1err := dataStore.GetWithXattrs(ctx, key1, []string{xattrName}) assert.NoError(t, key1err, "Unexpected error retrieving doc w/ xattr") + key1xattrBytes, ok := key1xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(key1xattrBytes, &key1XattrResult)) + require.NoError(t, JSONUnmarshal(rawVal, &key1DocResult)) assert.Equal(t, key1, key1DocResult["type"]) assert.Equal(t, "1-1234", key1XattrResult["rev"]) var key2DocResult map[string]interface{} - var key2XattrResult map[string]interface{} - _, key2err := dataStore.GetWithXattr(ctx, key2, xattrName, "", &key2DocResult, &key2XattrResult, nil) + rawVal, key2xattrs, _, key2err := dataStore.GetWithXattrs(ctx, key2, []string{xattrName}) assert.NoError(t, key2err, "Unexpected error retrieving doc w/out xattr") + require.NotContains(t, key2xattrs, xattrName) + require.NoError(t, JSONUnmarshal(rawVal, &key2DocResult)) assert.Equal(t, key2, key2DocResult["type"]) - assert.Nil(t, key2XattrResult) var key3DocResult map[string]interface{} var key3XattrResult map[string]interface{} - _, key3err := dataStore.GetWithXattr(ctx, key3, xattrName, "", &key3DocResult, &key3XattrResult, nil) + rawVal, key3xattrs, _, key3err := dataStore.GetWithXattrs(ctx, key3, []string{xattrName}) + require.Nil(t, rawVal) + key3xattrBytes, ok := key3xattrs[xattrName] + require.True(t, ok) + require.NoError(t, 
JSONUnmarshal(key3xattrBytes, &key3XattrResult)) assert.NoError(t, key3err, "Unexpected error retrieving doc w/out xattr") assert.Nil(t, key3DocResult) assert.Equal(t, "1-1234", key3XattrResult["rev"]) - var key4DocResult map[string]interface{} - var key4XattrResult map[string]interface{} - _, key4err := dataStore.GetWithXattr(ctx, key4, xattrName, "", &key4DocResult, &key4XattrResult, nil) - assert.True(t, IsDocNotFoundError(key4err)) - assert.Nil(t, key4DocResult) - assert.Nil(t, key4XattrResult) + key4DocRaw, key4xattrs, _, key4err := dataStore.GetWithXattrs(ctx, key4, []string{xattrName}) + RequireDocNotFoundError(t, key4err) + require.NotContains(t, key4xattrs, xattrName) + assert.Nil(t, key4DocRaw) } @@ -1326,13 +1405,13 @@ func TestXattrMutateDocAndXattr(t *testing.T) { // Create w/ XATTR cas1 := uint64(0) - cas1, err = dataStore.WriteCasWithXattr(ctx, key1, xattrName, 0, cas1, val, xattrVal, nil) + cas1, err = dataStore.WriteWithXattrs(ctx, key1, 0, cas1, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // 2. Create document with no XATTR val = make(map[string]interface{}) val["type"] = key2 - cas2, err := dataStore.WriteCas(key2, 0, 0, 0, val, 0) + cas2, err := dataStore.WriteCas(key2, 0, 0, val, 0) require.NoError(t, err) // 3. Xattr, no document @@ -1345,7 +1424,7 @@ func TestXattrMutateDocAndXattr(t *testing.T) { // Create w/ XATTR cas3int := uint64(0) - cas3int, err = dataStore.WriteCasWithXattr(ctx, key3, xattrName, 0, cas3int, val, xattrVal, nil) + cas3int, err = dataStore.WriteWithXattrs(ctx, key3, 0, cas3int, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // Delete the doc body require.NoError(t, dataStore.Delete(key3)) @@ -1362,42 +1441,58 @@ func TestXattrMutateDocAndXattr(t *testing.T) { // Attempt to mutate all 4 docs exp := uint32(0) updatedVal["type"] = fmt.Sprintf("updated_%s", key1) - _, key1err := dataStore.WriteCasWithXattr(ctx, key1, xattrName, exp, cas1, &updatedVal, &updatedXattrVal, nil) + _, key1err := dataStore.WriteWithXattrs(ctx, key1, exp, cas1, MustJSONMarshal(t, updatedVal), map[string][]byte{xattrName: MustJSONMarshal(t, updatedXattrVal)}, nil) assert.NoError(t, key1err, fmt.Sprintf("Unexpected error mutating %s", key1)) var key1DocResult map[string]interface{} var key1XattrResult map[string]interface{} - _, key1err = dataStore.GetWithXattr(ctx, key1, xattrName, "", &key1DocResult, &key1XattrResult, nil) + docRaw, key1xattrs, _, key1err := dataStore.GetWithXattrs(ctx, key1, []string{xattrName}) require.NoError(t, key1err) + key1xattrBytes, ok := key1xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(key1xattrBytes, &key1XattrResult)) + require.NoError(t, JSONUnmarshal(docRaw, &key1DocResult)) assert.Equal(t, fmt.Sprintf("updated_%s", key1), key1DocResult["type"]) assert.Equal(t, "2-1234", key1XattrResult["rev"]) updatedVal["type"] = fmt.Sprintf("updated_%s", key2) - _, key2err := dataStore.WriteCasWithXattr(ctx, key2, xattrName, exp, cas2, &updatedVal, &updatedXattrVal, nil) + _, key2err := dataStore.WriteWithXattrs(ctx, key2, exp, cas2, MustJSONMarshal(t, updatedVal), map[string][]byte{xattrName: MustJSONMarshal(t, &updatedXattrVal)}, nil) assert.NoError(t, key2err, fmt.Sprintf("Unexpected error mutating %s", key2)) var key2DocResult map[string]interface{} var key2XattrResult map[string]interface{} - _, key2err = dataStore.GetWithXattr(ctx, key2, xattrName, "", &key2DocResult, &key2XattrResult, 
nil) + docRaw, key2xattrs, _, key2err := dataStore.GetWithXattrs(ctx, key2, []string{xattrName}) require.NoError(t, key2err) + key2xattrBytes, ok := key2xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(key2xattrBytes, &key2XattrResult)) + require.NoError(t, JSONUnmarshal(docRaw, &key2DocResult)) assert.Equal(t, fmt.Sprintf("updated_%s", key2), key2DocResult["type"]) assert.Equal(t, "2-1234", key2XattrResult["rev"]) updatedVal["type"] = fmt.Sprintf("updated_%s", key3) - _, key3err := dataStore.WriteCasWithXattr(ctx, key3, xattrName, exp, cas3int, &updatedVal, &updatedXattrVal, nil) + _, key3err := dataStore.WriteWithXattrs(ctx, key3, exp, cas3int, MustJSONMarshal(t, updatedVal), map[string][]byte{xattrName: MustJSONMarshal(t, updatedXattrVal)}, nil) assert.NoError(t, key3err, fmt.Sprintf("Unexpected error mutating %s", key3)) var key3DocResult map[string]interface{} var key3XattrResult map[string]interface{} - _, key3err = dataStore.GetWithXattr(ctx, key3, xattrName, "", &key3DocResult, &key3XattrResult, nil) + docRaw, key3xattrs, _, key3err := dataStore.GetWithXattrs(ctx, key3, []string{xattrName}) require.NoError(t, key3err) + key3xattrBytes, ok := key3xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(key3xattrBytes, &key3XattrResult)) + require.NoError(t, JSONUnmarshal(docRaw, &key3DocResult)) assert.Equal(t, fmt.Sprintf("updated_%s", key3), key3DocResult["type"]) assert.Equal(t, "2-1234", key3XattrResult["rev"]) updatedVal["type"] = fmt.Sprintf("updated_%s", key4) - _, key4err := dataStore.WriteCasWithXattr(ctx, key4, xattrName, exp, uint64(cas4), &updatedVal, &updatedXattrVal, nil) + _, key4err := dataStore.WriteWithXattrs(ctx, key4, exp, uint64(cas4), MustJSONMarshal(t, updatedVal), map[string][]byte{xattrName: MustJSONMarshal(t, updatedXattrVal)}, nil) assert.NoError(t, key4err, fmt.Sprintf("Unexpected error mutating %s", key4)) var key4DocResult map[string]interface{} var key4XattrResult map[string]interface{} - _, key4err = dataStore.GetWithXattr(ctx, key4, xattrName, "", &key4DocResult, &key4XattrResult, nil) + docRaw, key4xattrs, _, key4err := dataStore.GetWithXattrs(ctx, key4, []string{xattrName}) require.NoError(t, key4err) + key4xattrBytes, ok := key4xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(key4xattrBytes, &key4XattrResult)) + require.NoError(t, JSONUnmarshal(docRaw, &key4DocResult)) assert.Equal(t, fmt.Sprintf("updated_%s", key4), key4DocResult["type"]) assert.Equal(t, "2-1234", key4XattrResult["rev"]) @@ -1443,55 +1538,50 @@ func TestGetXattr(t *testing.T) { // Create w/ XATTR cas := uint64(0) - _, err = dataStore.WriteCasWithXattr(ctx, key1, xattrName1, 0, cas, val1, xattrVal1, nil) + _, err = dataStore.WriteWithXattrs(ctx, key1, 0, cas, MustJSONMarshal(t, val1), map[string][]byte{xattrName1: MustJSONMarshal(t, xattrVal1)}, nil) require.NoError(t, err) - var response map[string]interface{} - // Get Xattr From Existing Doc with Existing Xattr - _, err = dataStore.GetXattr(ctx, key1, xattrName1, &response) + xattrs, _, err := dataStore.GetXattrs(ctx, key1, []string{xattrName1}) assert.NoError(t, err) + xattr1ResultBytes, ok := xattrs[xattrName1] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(xattr1ResultBytes, &xattrVal1)) - assert.Equal(t, xattrVal1["seq"], response["seq"]) - assert.Equal(t, xattrVal1["rev"], response["rev"]) + assert.Equal(t, xattrVal1["seq"], xattrVal1["seq"]) + assert.Equal(t, xattrVal1["rev"], xattrVal1["rev"]) // Get Xattr From Existing Doc With Non-Existent 
Xattr -> ErrSubDocBadMulti - _, err = dataStore.GetXattr(ctx, key1, "non-exist", &response) + _, _, err = dataStore.GetXattrs(ctx, key1, []string{"non-exist"}) assert.Error(t, err) - assert.True(t, IsXattrNotFoundError(err)) + require.True(t, IsXattrNotFoundError(err)) // Get Xattr From Non-Existent Doc With Non-Existent Xattr - _, err = dataStore.GetXattr(ctx, "non-exist", "non-exist", &response) + _, _, err = dataStore.GetXattrs(ctx, "non-exist", []string{"non-exist"}) assert.Error(t, err) assert.True(t, IsDocNotFoundError(err)) // Get Xattr From Tombstoned Doc With Existing System Xattr (ErrSubDocSuccessDeleted) - cas, err = dataStore.WriteCasWithXattr(ctx, key2, SyncXattrName, 0, uint64(0), val2, xattrVal2, nil) + cas, err = dataStore.WriteWithXattrs(ctx, key2, 0, uint64(0), MustJSONMarshal(t, val2), map[string][]byte{SyncXattrName: MustJSONMarshal(t, xattrVal2)}, nil) require.NoError(t, err) _, err = dataStore.Remove(key2, cas) require.NoError(t, err) - _, err = dataStore.GetXattr(ctx, key2, SyncXattrName, &response) - assert.NoError(t, err) // Get Xattr From Tombstoned Doc With Non-Existent System Xattr -> SubDocMultiPathFailureDeleted - _, err = dataStore.GetXattr(ctx, key2, "_non-exist", &response) - assert.Error(t, err) - assert.True(t, IsXattrNotFoundError(err)) + _, _, err = dataStore.GetXattrs(ctx, key2, []string{"_non-exist"}) + requireXattrNotFoundError(t, err) // Get Xattr and Body From Tombstoned Doc With Non-Existent System Xattr -> SubDocMultiPathFailureDeleted - var v, xv, userXv map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key2, "_non-exist", "", &v, &xv, &userXv) - assert.Error(t, err) - assert.True(t, IsDocNotFoundError(err)) + _, _, _, err = dataStore.GetWithXattrs(ctx, key2, []string{"_non-exist"}) + RequireDocNotFoundError(t, err) // Get Xattr From Tombstoned Doc With Deleted User Xattr - cas, err = dataStore.WriteCasWithXattr(ctx, key3, xattrName3, 0, uint64(0), val3, xattrVal3, nil) + cas, err = dataStore.WriteWithXattrs(ctx, key3, 0, uint64(0), MustJSONMarshal(t, val3), map[string][]byte{xattrName3: MustJSONMarshal(t, xattrVal3)}, nil) require.NoError(t, err) _, err = dataStore.Remove(key3, cas) require.NoError(t, err) - _, err = dataStore.GetXattr(ctx, key3, xattrName3, &response) - assert.Error(t, err) - assert.True(t, IsXattrNotFoundError(err)) + _, _, err = dataStore.GetXattrs(ctx, key3, []string{xattrName3}) + requireXattrNotFoundError(t, err) } func TestGetXattrAndBody(t *testing.T) { @@ -1534,46 +1624,55 @@ func TestGetXattrAndBody(t *testing.T) { // Create w/ XATTR cas := uint64(0) - _, err = dataStore.WriteCasWithXattr(ctx, key1, xattrName1, 0, cas, val1, xattrVal1, nil) + _, err = dataStore.WriteWithXattrs(ctx, key1, 0, cas, MustJSONMarshal(t, val1), map[string][]byte{xattrName1: MustJSONMarshal(t, xattrVal1)}, nil) require.NoError(t, err) // Get Xattr From Existing Doc with Existing Xattr - var v, xv, userXv map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key1, xattrName1, "", &v, &xv, &userXv) + _, xattrs, _, err := dataStore.GetWithXattrs(ctx, key1, []string{xattrName1}) assert.NoError(t, err) - assert.Equal(t, xattrVal1["seq"], xv["seq"]) - assert.Equal(t, xattrVal1["rev"], xv["rev"]) + xattr1ResultBytes, ok := xattrs[xattrName1] + require.True(t, ok) + var xattr1Result map[string]interface{} + require.NoError(t, JSONUnmarshal(xattr1ResultBytes, &xattr1Result)) + assert.Equal(t, xattrVal1["seq"], xattr1Result["seq"]) + assert.Equal(t, xattrVal1["rev"], xattr1Result["rev"]) // Get body and Xattr From Existing Doc With 
Non-Existent Xattr -> returns body only - _, err = dataStore.GetWithXattr(ctx, key1, "non-exist", "", &v, &xv, &userXv) + var v map[string]any + var docRaw []byte + docRaw, xattrs, _, err = dataStore.GetWithXattrs(ctx, key1, []string{"non-exist"}) + + require.NoError(t, JSONUnmarshal(docRaw, &v)) assert.NoError(t, err) assert.Equal(t, val1["type"], v["type"]) + require.Empty(t, xattrs) // Get Xattr From Non-Existent Doc With Non-Existent Xattr - _, err = dataStore.GetWithXattr(ctx, "non-exist", "non-exist", "", &v, &xv, &userXv) + _, _, _, err = dataStore.GetWithXattrs(ctx, "non-exist", []string{"non-exist"}) assert.Error(t, err) assert.True(t, IsDocNotFoundError(err)) // Get Xattr From Tombstoned Doc With Existing System Xattr (ErrSubDocSuccessDeleted) - cas, err = dataStore.WriteCasWithXattr(ctx, key2, SyncXattrName, 0, uint64(0), val2, xattrVal2, nil) + cas, err = dataStore.WriteWithXattrs(ctx, key2, 0, uint64(0), MustJSONMarshal(t, val2), map[string][]byte{SyncXattrName: MustJSONMarshal(t, xattrVal2)}, nil) require.NoError(t, err) _, err = dataStore.Remove(key2, cas) require.NoError(t, err) - _, err = dataStore.GetWithXattr(ctx, key2, SyncXattrName, "", &v, &xv, &userXv) + _, xattrs, _, err = dataStore.GetWithXattrs(ctx, key2, []string{SyncXattrName}) assert.NoError(t, err) + require.Contains(t, xattrs, SyncXattrName) // Get Xattr From Tombstoned Doc With Non-Existent System Xattr -> returns not found - _, err = dataStore.GetWithXattr(ctx, key2, "_non-exist", "", &v, &xv, &userXv) + _, _, _, err = dataStore.GetWithXattrs(ctx, key2, []string{"_non-exist"}) assert.Error(t, err) assert.True(t, IsDocNotFoundError(err)) // Get Xattr From Tombstoned Doc With Deleted User Xattr -> returns not found - cas, err = dataStore.WriteCasWithXattr(ctx, key3, xattrName3, 0, uint64(0), val3, xattrVal3, nil) + cas, err = dataStore.WriteWithXattrs(ctx, key3, 0, uint64(0), MustJSONMarshal(t, val3), map[string][]byte{xattrName3: MustJSONMarshal(t, xattrVal3)}, nil) require.NoError(t, err) _, err = dataStore.Remove(key3, cas) require.NoError(t, err) - _, err = dataStore.GetWithXattr(ctx, key3, xattrName3, "", &v, &xv, &userXv) + _, _, _, err = dataStore.GetWithXattrs(ctx, key3, []string{xattrName3}) assert.Error(t, err) assert.True(t, IsDocNotFoundError(err)) } @@ -1862,11 +1961,11 @@ func createTombstonedDoc(t *testing.T, dataStore sgbucket.DataStore, key, xattrN ctx := TestCtx(t) // Create w/ doc and XATTR cas := uint64(0) - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrName, 0, cas, val, xattrVal, nil) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrName: MustJSONMarshal(t, xattrVal)}, nil) require.NoError(t, err) // Create tombstone revision which deletes doc body but preserves XATTR - _, mutateErr := dataStore.DeleteBody(ctx, key, xattrName, 0, cas, nil) + _, mutateErr := dataStore.WriteTombstoneWithXattrs(ctx, key, 0, cas, nil, true, nil) /* flags := gocb.SubdocDocFlagAccessDeleted _, mutateErr := dataStore.dataStore.MutateInEx(key, flags, gocb.Cas(cas), uint32(0)). 
@@ -1878,30 +1977,29 @@ func createTombstonedDoc(t *testing.T, dataStore sgbucket.DataStore, key, xattrN require.NoError(t, mutateErr) // Verify delete of body and XATTR - var retrievedVal map[string]interface{} - var retrievedXattr map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + docRaw, xattrs, _, err := dataStore.GetWithXattrs(ctx, key, []string{xattrName}) require.NoError(t, err) - - require.Len(t, retrievedVal, 0) + require.Nil(t, docRaw) + var retrievedXattr map[string]interface{} + retrieveXattrBytes, ok := xattrs[xattrName] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(retrieveXattrBytes, &retrievedXattr)) require.Equal(t, float64(123), retrievedXattr["seq"]) } -func verifyDocAndXattrDeleted(ctx context.Context, store sgbucket.XattrStore, key, xattrName string) bool { - var retrievedVal map[string]interface{} - var retrievedXattr map[string]interface{} - _, err := store.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) - return IsDocNotFoundError(err) +func requireDocAndXattrDeleted(t *testing.T, store sgbucket.XattrStore, key, xattrName string) { + _, _, _, err := store.GetWithXattrs(TestCtx(t), key, []string{xattrName}) + RequireDocNotFoundError(t, err) } func verifyDocDeletedXattrExists(ctx context.Context, store sgbucket.XattrStore, key, xattrName string) bool { - var retrievedVal map[string]interface{} - var retrievedXattr map[string]interface{} - _, err := store.GetWithXattr(ctx, key, xattrName, "", &retrievedVal, &retrievedXattr, nil) + docRaw, xattrs, _, err := store.GetWithXattrs(ctx, key, []string{xattrName}) - log.Printf("verification for key: %s body: %s xattr: %s", key, retrievedVal, retrievedXattr) - if err != nil || len(retrievedVal) > 0 || len(retrievedXattr) == 0 { + retrievedXattr := xattrs[xattrName] + + log.Printf("verification for key: %s body: %s xattr: %s", key, docRaw, retrievedXattr) + if err != nil || len(docRaw) > 0 || len(retrievedXattr) == 0 { return false } return true @@ -1928,7 +2026,7 @@ func TestUpdateXattrWithDeleteBodyAndIsDelete(t *testing.T) { cas := uint64(0) // CAS-safe write of the document and it's associated named extended attributes - cas, err := dataStore.WriteCasWithXattr(ctx, key, xattrKey, 0, cas, val, xattrVal, syncMutateInOpts()) + cas, err := dataStore.WriteWithXattrs(ctx, key, 0, cas, MustJSONMarshal(t, val), map[string][]byte{xattrKey: MustJSONMarshal(t, xattrVal)}, syncMutateInOpts()) require.NoError(t, err) updatedXattrVal := make(map[string]interface{}) @@ -1936,17 +2034,19 @@ func TestUpdateXattrWithDeleteBodyAndIsDelete(t *testing.T) { updatedXattrVal["rev"] = "2-EmDC" // Attempt to delete the document body (deleteBody = true); isDelete is true to mark this doc as a tombstone. 
- + const deleteBody = true xattrValBytes := MustJSONMarshal(t, updatedXattrVal) - _, errDelete := dataStore.WriteWithXattr(ctx, key, xattrKey, 0, cas, nil, xattrValBytes, true, true, syncMutateInOpts()) + _, errDelete := dataStore.WriteTombstoneWithXattrs(ctx, key, 0, cas, map[string][]byte{xattrKey: xattrValBytes}, deleteBody, syncMutateInOpts()) assert.NoError(t, errDelete, fmt.Sprintf("Unexpected error deleting %s", key)) assert.True(t, verifyDocDeletedXattrExists(ctx, dataStore, key, xattrKey), fmt.Sprintf("Expected doc %s to be deleted", key)) - var docResult map[string]interface{} var xattrResult map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, key, xattrKey, "", &docResult, &xattrResult, nil) + docRaw, xattrs, _, err := dataStore.GetWithXattrs(ctx, key, []string{xattrKey}) assert.NoError(t, err) - assert.Len(t, docResult, 0) + require.Nil(t, docRaw) + xattrBytes, ok := xattrs[xattrKey] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(xattrBytes, &xattrResult)) assert.Equal(t, "2-EmDC", xattrResult["rev"]) assert.Equal(t, "0x00000000", xattrResult[xattrMacroValueCrc32c]) } @@ -1966,21 +2066,28 @@ func TestUserXattrGetWithXattr(t *testing.T) { syncXattrVal := map[string]interface{}{"val": "syncVal"} userXattrVal := map[string]interface{}{"val": "userXattrVal"} - err := dataStore.Set(docKey, 0, nil, docVal) + cas, err := dataStore.WriteCas(docKey, 0, 0, MustJSONMarshal(t, docVal), sgbucket.Raw) assert.NoError(t, err) - _, err = dataStore.WriteUserXattr(docKey, "_sync", syncXattrVal) - assert.NoError(t, err) - - _, err = dataStore.WriteUserXattr(docKey, "test", userXattrVal) - assert.NoError(t, err) - - var docValRet, syncXattrValRet, userXattrValRet map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, docKey, SyncXattrName, "test", &docValRet, &syncXattrValRet, &userXattrValRet) + xattrs := map[string][]byte{ + "_sync": MustJSONMarshal(t, syncXattrVal), + "test": MustJSONMarshal(t, userXattrVal), + } + if bucket.IsSupported(sgbucket.BucketStoreFeatureMultiXattrSubdocOperations) { + _, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, xattrs, nil) + require.NoError(t, err) + } else { + for k, v := range xattrs { + cas, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{k: v}, nil) + require.NoError(t, err) + } + } + var docValRet map[string]any + docRaw, xattrsResult, _, err := dataStore.GetWithXattrs(ctx, docKey, []string{SyncXattrName, "test"}) + require.NoError(t, JSONUnmarshal(docRaw, &docValRet)) assert.NoError(t, err) assert.Equal(t, docVal, docValRet) - assert.Equal(t, syncXattrVal, syncXattrValRet) - assert.Equal(t, userXattrVal, userXattrValRet) + assert.Equal(t, xattrs, xattrsResult) } func TestUserXattrGetWithXattrNil(t *testing.T) { @@ -1998,16 +2105,22 @@ func TestUserXattrGetWithXattrNil(t *testing.T) { syncXattrVal := map[string]interface{}{"val": "syncVal"} err := dataStore.Set(docKey, 0, nil, docVal) - assert.NoError(t, err) + require.NoError(t, err) - _, err = dataStore.WriteUserXattr(docKey, "_sync", syncXattrVal) + _, err = dataStore.SetXattrs(ctx, docKey, map[string][]byte{"_sync": MustJSONMarshal(t, syncXattrVal)}) assert.NoError(t, err) - var docValRet, syncXattrValRet, userXattrValRet map[string]interface{} - _, err = dataStore.GetWithXattr(ctx, docKey, SyncXattrName, "test", &docValRet, &syncXattrValRet, &userXattrValRet) + var docValRet map[string]any + docRaw, xattrs, _, err := dataStore.GetWithXattrs(ctx, docKey, []string{SyncXattrName, "test"}) + require.NoError(t, JSONUnmarshal(docRaw, &docValRet)) 
assert.NoError(t, err) assert.Equal(t, docVal, docValRet) + syncXattrValBytes, ok := xattrs[SyncXattrName] + require.True(t, ok) + var syncXattrValRet map[string]any + require.NoError(t, JSONUnmarshal(syncXattrValBytes, &syncXattrValRet)) assert.Equal(t, syncXattrVal, syncXattrValRet) + assert.NotContains(t, xattrs, "test") } func TestInsertTombstoneWithXattr(t *testing.T) { @@ -2030,18 +2143,21 @@ func TestInsertTombstoneWithXattr(t *testing.T) { xattrVal["rev"] = "1-EmDC" cas := uint64(0) - // Attempt to delete the document body (deleteBody = true); isDelete is true to mark this doc as a tombstone. + // Document doesn't exist, so write tombstone with (deleteBody = false) to create this doc as a tombstone. xattrValBytes := MustJSONMarshal(t, xattrVal) - _, errDelete := dataStore.WriteWithXattr(ctx, key, xattrKey, 0, cas, nil, xattrValBytes, true, false, syncMutateInOpts()) + _, errDelete := dataStore.WriteTombstoneWithXattrs(ctx, key, 0, cas, map[string][]byte{xattrKey: xattrValBytes}, false, syncMutateInOpts()) assert.NoError(t, errDelete, fmt.Sprintf("Unexpected error deleting %s", key)) assert.True(t, verifyDocDeletedXattrExists(ctx, dataStore, key, xattrKey), fmt.Sprintf("Expected doc %s to be deleted", key)) - var docResult map[string]interface{} var xattrResult map[string]interface{} - _, err := dataStore.GetWithXattr(ctx, key, xattrKey, "", &docResult, &xattrResult, nil) + docRaw, xattrs, _, err := dataStore.GetWithXattrs(ctx, key, []string{xattrKey}) assert.NoError(t, err) - assert.Len(t, docResult, 0) + require.Nil(t, docRaw) + require.NoError(t, JSONUnmarshal(xattrs[xattrKey], &xattrResult)) assert.Equal(t, "1-EmDC", xattrResult["rev"]) + xattrBytes, ok := xattrs[xattrKey] + require.True(t, ok) + require.NoError(t, JSONUnmarshal(xattrBytes, &xattrResult)) assert.Equal(t, "0x00000000", xattrResult[xattrMacroValueCrc32c]) } @@ -2295,6 +2411,13 @@ func TestMobileSystemCollectionCRUD(t *testing.T) { require.NoError(t, err) } +func TestDeleteBody(t *testing.T) { + ctx := TestCtx(t) + b := GetTestBucket(t) + defer b.Close(ctx) + +} + // Used to test standard sync mutateInOpts from the base package func syncMutateInOpts() *sgbucket.MutateInOptions { return &sgbucket.MutateInOptions{ @@ -2304,3 +2427,8 @@ func syncMutateInOpts() *sgbucket.MutateInOptions { }, } } + +func requireXattrNotFoundError(t *testing.T, err error) { + require.Error(t, err) + assert.True(t, IsXattrNotFoundError(err), "Expected an XattrMissingError but got %v", err) +} diff --git a/base/collection.go b/base/collection.go index a6edd3710a..994c3d0735 100644 --- a/base/collection.go +++ b/base/collection.go @@ -242,6 +242,7 @@ func (b *GocbV2Bucket) IsSupported(feature sgbucket.BucketStoreFeature) bool { return false } return len(agent.N1qlEps()) > 0 + // added in Couchbase Server 6.6 case sgbucket.BucketStoreFeatureCreateDeletedWithXattr: status, err := b.bucket.Internal().CapabilityStatus(gocb.CapabilityCreateAsDeleted) if err != nil { diff --git a/base/collection_gocb.go b/base/collection_gocb.go index 39171bf477..4fa194ea1d 100644 --- a/base/collection_gocb.go +++ b/base/collection_gocb.go @@ -219,7 +219,7 @@ func (c *Collection) SetRaw(k string, exp uint32, opts *sgbucket.UpsertOptions, return err } -func (c *Collection) WriteCas(k string, flags int, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (casOut uint64, err error) { +func (c *Collection) WriteCas(k string, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (casOut uint64, err error) { c.Bucket.waitForAvailKvOp() defer 
c.Bucket.releaseKvOp() diff --git a/base/collection_xattr.go b/base/collection_xattr.go index 41d1762864..2c5ac02c98 100644 --- a/base/collection_xattr.go +++ b/base/collection_xattr.go @@ -12,6 +12,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "github.com/couchbase/gocb/v2" "github.com/couchbase/gocbcore/v10" @@ -32,7 +33,6 @@ func (c *Collection) IsSupported(feature sgbucket.BucketStoreFeature) bool { } var _ sgbucket.XattrStore = &Collection{} -var _ sgbucket.UserXattrStore = &Collection{} func init() { LookupOptsAccessDeleted = &gocb.LookupInOptions{} @@ -43,21 +43,47 @@ func (c *Collection) GetSpec() BucketSpec { return c.Bucket.Spec } -// Implementation of the XattrStore interface primarily invokes common wrappers that in turn invoke SDK-specific SubdocXattrStore API -func (c *Collection) WriteCasWithXattr(ctx context.Context, k string, xattrKey string, exp uint32, cas uint64, v interface{}, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { - return WriteCasWithXattr(ctx, c, k, xattrKey, exp, cas, opts, v, xv) -} +// InsertTombstoneWithXattrs inserts a new server tombstone with the specified system xattrs +func (c *Collection) InsertTombstoneWithXattrs(ctx context.Context, k string, exp uint32, xattrValue map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { -func (c *Collection) WriteWithXattr(ctx context.Context, k string, xattrKey string, exp uint32, cas uint64, v []byte, xv []byte, isDelete bool, deleteBody bool, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { // If this is a tombstone, we want to delete the document and update the xattr - return WriteWithXattr(ctx, c, k, xattrKey, exp, cas, opts, v, xv, isDelete, deleteBody) + c.Bucket.waitForAvailKvOp() + defer c.Bucket.releaseKvOp() + + supportsTombstoneCreation := c.IsSupported(sgbucket.BucketStoreFeatureCreateDeletedWithXattr) + + var docFlags gocb.SubdocDocFlag + if supportsTombstoneCreation { + docFlags = gocb.SubdocDocFlagCreateAsDeleted | gocb.SubdocDocFlagAccessDeleted | gocb.SubdocDocFlagAddDoc + } else { + docFlags = gocb.SubdocDocFlagMkDoc + } + + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrValue)) + for xattrKey, value := range xattrValue { + mutateOps = append(mutateOps, gocb.UpsertSpec(xattrKey, bytesToRawMessage(value), UpsertSpecXattr)) + } + + mutateOps = appendMacroExpansions(mutateOps, opts) + options := &gocb.MutateInOptions{ + StoreSemantic: gocb.StoreSemanticsReplace, // set replace here, as we're explicitly setting SubdocDocFlagMkDoc above if tombstone creation is not supported + Expiry: CbsExpiryToDuration(exp), + Cas: gocb.Cas(0), + } + options.Internal.DocFlags = docFlags + result, mutateErr := c.Collection.MutateIn(k, mutateOps, options) + if mutateErr != nil { + return 0, mutateErr + } + return uint64(result.Cas()), nil } -func (c *Collection) DeleteWithXattr(ctx context.Context, k string, xattrKey string) error { - return DeleteWithXattr(ctx, c, k, xattrKey) +func (c *Collection) DeleteWithXattrs(ctx context.Context, k string, xattrKeys []string) error { + return DeleteWithXattrs(ctx, c, k, xattrKeys) } -func (c *Collection) GetXattr(ctx context.Context, k string, xattrKey string, xv interface{}) (casOut uint64, err error) { - return c.SubdocGetXattr(ctx, k, xattrKey, xv) +func (c *Collection) GetXattrs(ctx context.Context, k string, xattrKeys []string) (xattrs map[string][]byte, casOut uint64, err error) { + _, xattrs, casOut, err = c.subdocGetBodyAndXattrs(ctx, k, xattrKeys, false) + return xattrs, casOut, err } 
func (c *Collection) GetSubDocRaw(ctx context.Context, k string, subdocKey string) ([]byte, uint64, error) { @@ -68,53 +94,24 @@ func (c *Collection) WriteSubDoc(ctx context.Context, k string, subdocKey string return c.SubdocWrite(ctx, k, subdocKey, cas, value) } -func (c *Collection) GetWithXattr(ctx context.Context, k string, xattrKey string, userXattrKey string, rv interface{}, xv interface{}, uxv interface{}) (cas uint64, err error) { - return c.SubdocGetBodyAndXattr(ctx, k, xattrKey, userXattrKey, rv, xv, uxv) +func (c *Collection) GetWithXattrs(ctx context.Context, k string, xattrKeys []string) ([]byte, map[string][]byte, uint64, error) { + return c.subdocGetBodyAndXattrs(ctx, k, xattrKeys, true) } -func (c *Collection) WriteUpdateWithXattr(ctx context.Context, k string, xattrKey string, userXattrKey string, exp uint32, previous *sgbucket.BucketDocument, opts *sgbucket.MutateInOptions, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error) { - return WriteUpdateWithXattr(ctx, c, k, xattrKey, userXattrKey, exp, previous, opts, callback) +func (c *Collection) SetXattrs(ctx context.Context, k string, xattrs map[string][]byte) (casOut uint64, err error) { + return c.SubdocSetXattrs(k, xattrs) } -func (c *Collection) SetXattr(ctx context.Context, k string, xattrKey string, xv []byte) (casOut uint64, err error) { - return SetXattr(ctx, c, k, xattrKey, xv) +func (c *Collection) RemoveXattrs(ctx context.Context, k string, xattrKeys []string, cas uint64) (err error) { + return RemoveXattrs(ctx, c, k, xattrKeys, cas) } -func (c *Collection) RemoveXattr(ctx context.Context, k string, xattrKey string, cas uint64) (err error) { - return RemoveXattr(ctx, c, k, xattrKey, cas) +func (c *Collection) DeleteSubDocPaths(ctx context.Context, k string, paths ...string) (err error) { + return removeSubdocPaths(ctx, c, k, paths...) } func (c *Collection) DeleteXattrs(ctx context.Context, k string, xattrKeys ...string) (err error) { - return DeleteXattrs(ctx, c, k, xattrKeys...) -} - -// SubdocGetXattr retrieves the named xattr -// Notes on error handling -// - gocb v2 returns subdoc errors at the op level, in the ContentAt response -// - 'successful' error codes, like SucDocSuccessDeleted, aren't returned, and instead just set the internal.Deleted property on the response -func (c *Collection) SubdocGetXattr(ctx context.Context, k string, xattrKey string, xv interface{}) (casOut uint64, err error) { - c.Bucket.waitForAvailKvOp() - defer c.Bucket.releaseKvOp() - - ops := []gocb.LookupInSpec{ - gocb.GetSpec(xattrKey, GetSpecXattr), - } - res, lookupErr := c.Collection.LookupIn(k, ops, LookupOptsAccessDeleted) - if lookupErr == nil { - xattrContErr := res.ContentAt(0, xv) - // On error here, treat as the xattr wasn't found - if xattrContErr != nil { - DebugfCtx(ctx, KeyCRUD, "No xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), xattrContErr) - return 0, ErrXattrNotFound - } - cas := uint64(res.Cas()) - return cas, nil - } else if errors.Is(lookupErr, gocbcore.ErrDocumentNotFound) { - TracefCtx(ctx, KeyCRUD, "No document found for key=%s", UD(k)) - return 0, ErrNotFound - } else { - return 0, lookupErr - } + return removeSubdocPaths(ctx, c, k, xattrKeys...) 
} func (c *Collection) SubdocGetRaw(ctx context.Context, k string, subdocKey string) ([]byte, uint64, error) { @@ -191,53 +188,97 @@ func (c *Collection) SubdocWrite(ctx context.Context, k string, subdocKey string return casOut, err } -// SubdocGetBodyAndXattr retrieves the document body and xattr in a single LookupIn subdoc operation. Does not require both to exist. -func (c *Collection) SubdocGetBodyAndXattr(ctx context.Context, k string, xattrKey string, userXattrKey string, rv interface{}, xv interface{}, uxv interface{}) (cas uint64, err error) { +// subdocGetBodyAndXattr retrieves the document body and xattrs in a single LookupIn subdoc operation. Does not require both to exist. +func (c *Collection) subdocGetBodyAndXattrs(ctx context.Context, k string, xattrKeys []string, fetchBody bool) (rawBody []byte, xattrs map[string][]byte, cas uint64, err error) { + xattrKey2 := "" + // Backward compatibility for one system xattr and one user xattr support. + if !c.IsSupported(sgbucket.BucketStoreFeatureMultiXattrSubdocOperations) { + if len(xattrKeys) > 2 { + return nil, nil, 0, fmt.Errorf("subdocGetBodyAndXattrs: more than 2 xattrKeys %+v not supported in this version of Couchbase Server", xattrKeys) + } + if len(xattrKeys) == 2 { + xattrKey2 = xattrKeys[1] + xattrKeys = []string{xattrKeys[0]} + } + } + xattrs = make(map[string][]byte, len(xattrKeys)) worker := func() (shouldRetry bool, err error, value uint64) { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() - // First, attempt to get the document and xattr in one shot. - ops := []gocb.LookupInSpec{ - gocb.GetSpec(xattrKey, GetSpecXattr), - gocb.GetSpec("", &gocb.GetSpecOptions{}), + ops := make([]gocb.LookupInSpec, 0, len(xattrKeys)+1) + for _, xattrKey := range xattrKeys { + ops = append(ops, gocb.GetSpec(xattrKey, GetSpecXattr)) + } + if fetchBody { + ops = append(ops, gocb.GetSpec("", &gocb.GetSpecOptions{})) } res, lookupErr := c.Collection.LookupIn(k, ops, LookupOptsAccessDeleted) - // There are two 'partial success' error codes: // ErrMemdSubDocBadMulti - one of the subdoc operations failed. Occurs when doc exists but xattr does not // ErrMemdSubDocMultiPathFailureDeleted - one of the subdoc operations failed, and the doc is deleted. 
Occurs when xattr exists but doc is deleted (tombstone) switch lookupErr { case nil, gocbcore.ErrMemdSubDocBadMulti: // Attempt to retrieve the document body, if present - docContentErr := res.ContentAt(1, rv) - xattrContentErr := res.ContentAt(0, xv) + var docContentErr error + if fetchBody { + docContentErr = res.ContentAt(uint(len(xattrKeys)), &rawBody) + } + cas = uint64(res.Cas()) + var xattrErrors []error + for i, xattrKey := range xattrKeys { + var xattr []byte + xattrContentErr := res.ContentAt(uint(i), &xattr) + if xattrContentErr != nil { + xattrErrors = append(xattrErrors, xattrContentErr) + DebugfCtx(ctx, KeyCRUD, "No xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), xattrContentErr) + continue + } + xattrs[xattrKey] = xattr + } cas = uint64(res.Cas()) - if isKVError(docContentErr, memd.StatusSubDocMultiPathFailureDeleted) && isKVError(xattrContentErr, memd.StatusSubDocMultiPathFailureDeleted) { - // No doc, no xattr can be treated as NotFound from Sync Gateway's perspective, even if it is a server tombstone, but should return cas - DebugfCtx(ctx, KeyCRUD, "No xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), xattrContentErr) + // If doc and all xattrs are not found, treat as ErrNotFound + if isKVError(docContentErr, memd.StatusSubDocMultiPathFailureDeleted) && len(xattrErrors) == len(xattrKeys) { return false, ErrNotFound, cas } - if docContentErr != nil { - DebugfCtx(ctx, KeyCRUD, "No document body found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), docContentErr) + // If doc not requested and no xattrs are found, treat as ErrXattrNotFound + if !fetchBody && len(xattrErrors) == len(xattrKeys) { + return false, ErrXattrNotFound, cas } - // Attempt to retrieve the xattr, if present - if xattrContentErr != nil { - DebugfCtx(ctx, KeyCRUD, "No xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), xattrContentErr) + + if docContentErr != nil { + DebugfCtx(ctx, KeyCRUD, "No document body found for key=%s, xattrKeys=%s: %v", UD(k), UD(xattrKeys), docContentErr) } case gocbcore.ErrMemdSubDocMultiPathFailureDeleted: // ErrSubDocMultiPathFailureDeleted - one of the subdoc operations failed, and the doc is deleted. 
Occurs when xattr may exist but doc is deleted (tombstone)
-		xattrContentErr := res.ContentAt(0, xv)
 		cas = uint64(res.Cas())
-		if xattrContentErr != nil {
-			// No doc, no xattr means the doc isn't found
-			DebugfCtx(ctx, KeyCRUD, "No xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), xattrContentErr)
+		var xattrErrors []error
+		for i, xattrKey := range xattrKeys {
+			var xattr []byte
+			xattrContentErr := res.ContentAt(uint(i), &xattr)
+			if xattrContentErr != nil {
+				xattrErrors = append(xattrErrors, xattrContentErr)
+				DebugfCtx(ctx, KeyCRUD, "No xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), xattrContentErr)
+				continue
+			}
+			xattrs[xattrKey] = xattr
+		}
+
+		if len(xattrErrors) == len(xattrKeys) {
+			// No doc, no xattrs means the doc isn't found
+			DebugfCtx(ctx, KeyCRUD, "No xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKeys), xattrErrors)
 			return false, ErrNotFound, cas
 		}
+
+		if len(xattrErrors) > 0 {
+			DebugfCtx(ctx, KeyCRUD, "Partial xattr content found for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKeys), xattrErrors)
+			return false, ErrXattrPartialFound, cas
+		}
+
 		return false, nil, cas
 	default:
 		// KeyNotFound is returned as KVError
@@ -247,42 +288,43 @@ func (c *Collection) SubdocGetBodyAndXattr(ctx context.Context, k string, xattrK
 		shouldRetry = c.isRecoverableReadError(lookupErr)
 		return shouldRetry, lookupErr, uint64(0)
 	}
-
-	// TODO: We may be able to improve in the future by having this secondary op as part of the first. At present
-	// there is no support to obtain more than one xattr in a single operation however MB-28041 is filed for this.
-	if userXattrKey != "" {
-		userXattrCas, userXattrErr := c.SubdocGetXattr(ctx, k, userXattrKey, uxv)
-		switch pkgerrors.Cause(userXattrErr) {
+	// If BucketStoreFeatureMultiXattrSubdocOperations is not supported, do a second get for the second xattr.
+	if xattrKey2 != "" {
+		xattrs2, xattr2Cas, xattr2Err := c.GetXattrs(ctx, k, []string{xattrKey2})
+		switch pkgerrors.Cause(xattr2Err) {
 		case gocb.ErrDocumentNotFound:
 			// If key not found it has been deleted in between the first op and this op.
-			return false, err, userXattrCas
+			return false, err, xattr2Cas
 		case ErrXattrNotFound:
 			// Xattr doesn't exist, can skip
 		case nil:
-			if cas != userXattrCas {
+			if cas != xattr2Cas {
 				return true, errors.New("cas mismatch between user xattr and document body"), uint64(0)
 			}
 		default:
 			// Unknown error occurred
-			// Shouldn't retry as any recoverable error will have been retried already in SubdocGetXattr
-			return false, userXattrErr, uint64(0)
+			// Shouldn't retry as any recoverable error will have been retried already in GetXattrs
+			return false, xattr2Err, uint64(0)
+		}
+		xattr2, ok := xattrs2[xattrKey2]
+		if ok {
+			xattrs[xattrKey2] = xattr2
 		}
 	}
 		return false, nil, cas
 	}

 	// Kick off retry loop
-	err, cas = RetryLoopCas(ctx, "SubdocGetBodyAndXattr", worker, DefaultRetrySleeper())
+	err, cas = RetryLoopCas(ctx, "subdocGetBodyAndXattrs", worker, DefaultRetrySleeper())
 	if err != nil {
-		err = pkgerrors.Wrapf(err, "SubdocGetBodyAndXattr %v", UD(k).Redact())
+		err = pkgerrors.Wrapf(err, "subdocGetBodyAndXattrs %v", UD(k).Redact())
 	}
-	return cas, err
+	return rawBody, xattrs, cas, err
 }

-// InsertXattr inserts a new server tombstone with an associated mobile xattr. Writes cas and crc32c to the xattr using
-// macro expansion.
-func (c *Collection) InsertXattr(_ context.Context, k string, xattrKey string, exp uint32, cas uint64, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { +// createTombstone inserts a new server tombstone with associated xattrs. Writes cas and crc32c to the xattr using macro expansion. +func (c *Collection) createTombstone(_ context.Context, k string, exp uint32, cas uint64, xattrs map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() @@ -295,8 +337,9 @@ func (c *Collection) InsertXattr(_ context.Context, k string, xattrKey string, e docFlags = gocb.SubdocDocFlagMkDoc } - mutateOps := []gocb.MutateInSpec{ - gocb.UpsertSpec(xattrKey, bytesToRawMessage(xv), UpsertSpecXattr), + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrs)) + for xattrKey, xattrVal := range xattrs { + mutateOps = append(mutateOps, gocb.UpsertSpec(xattrKey, bytesToRawMessage(xattrVal), UpsertSpecXattr)) } mutateOps = appendMacroExpansions(mutateOps, opts) options := &gocb.MutateInOptions{ @@ -312,16 +355,16 @@ func (c *Collection) InsertXattr(_ context.Context, k string, xattrKey string, e return uint64(result.Cas()), nil } -// InsertBodyAndXattr inserts a document and associated mobile xattr in a single mutateIn operation. Writes cas and crc32c to the xattr using -// macro expansion. -func (c *Collection) InsertBodyAndXattr(_ context.Context, k string, xattrKey string, exp uint32, v interface{}, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { +// insertBodyAndXattrs inserts a document and associated xattrs in a single mutateIn operation. Writes cas and crc32c to the xattr using macro expansion. +func (c *Collection) insertBodyAndXattrs(_ context.Context, k string, exp uint32, v interface{}, xattrs map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() - mutateOps := []gocb.MutateInSpec{ - gocb.UpsertSpec(xattrKey, bytesToRawMessage(xv), UpsertSpecXattr), - gocb.ReplaceSpec("", bytesToRawMessage(v), nil), + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrs)+1) + for xattrKey, xv := range xattrs { + mutateOps = append(mutateOps, gocb.UpsertSpec(xattrKey, bytesToRawMessage(xv), UpsertSpecXattr)) } + mutateOps = append(mutateOps, gocb.ReplaceSpec("", bytesToRawMessage(v), nil)) mutateOps = appendMacroExpansions(mutateOps, opts) options := &gocb.MutateInOptions{ Expiry: CbsExpiryToDuration(exp), @@ -332,7 +375,6 @@ func (c *Collection) InsertBodyAndXattr(_ context.Context, k string, xattrKey st return 0, mutateErr } return uint64(result.Cas()), nil - } // SubdocInsert performs a subdoc insert operation to the specified path in the document body. @@ -365,9 +407,11 @@ func (c *Collection) SubdocInsert(_ context.Context, k string, fieldPath string, } // SubdocSetXattr performs a set of the given xattr. Does a straight set with no cas. 
-func (c *Collection) SubdocSetXattr(k string, xattrKey string, xv interface{}) (casOut uint64, err error) { - mutateOps := []gocb.MutateInSpec{ - gocb.UpsertSpec(xattrKey, bytesToRawMessage(xv), UpsertSpecXattr), +func (c *Collection) SubdocSetXattrs(k string, xvs map[string][]byte) (casOut uint64, err error) { + + mutateOps := make([]gocb.MutateInSpec, 0, len(xvs)) + for xattrKey, xv := range xvs { + mutateOps = append(mutateOps, gocb.UpsertSpec(xattrKey, bytesToRawMessage(xv), UpsertSpecXattr)) } options := &gocb.MutateInOptions{ StoreSemantic: gocb.StoreSemanticsUpsert, @@ -382,14 +426,17 @@ func (c *Collection) SubdocSetXattr(k string, xattrKey string, xv interface{}) ( return uint64(result.Cas()), nil } -// UpdateXattr updates the xattr on an existing document. Writes cas and crc32c to the xattr using -// macro expansion. -func (c *Collection) UpdateXattr(_ context.Context, k string, xattrKey string, exp uint32, cas uint64, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { +// UpdateXattrs updates the xattrs on an existing document. Writes cas and crc32c to the xattr using macro expansion. +func (c *Collection) UpdateXattrs(ctx context.Context, k string, exp uint32, cas uint64, xattrs map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { + if !c.IsSupported(sgbucket.BucketStoreFeatureMultiXattrSubdocOperations) && len(xattrs) >= 2 { + return 0, fmt.Errorf("UpdateXattrs: more than 1 xattr %v not supported in UpdateXattrs in this version of Couchbase Server", xattrs) + } c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() - mutateOps := []gocb.MutateInSpec{ - gocb.UpsertSpec(xattrKey, bytesToRawMessage(xv), UpsertSpecXattr), + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrs)) + for xattrKey, xattrVal := range xattrs { + mutateOps = append(mutateOps, gocb.UpsertSpec(xattrKey, bytesToRawMessage(xattrVal), UpsertSpecXattr)) } mutateOps = appendMacroExpansions(mutateOps, opts) @@ -407,16 +454,16 @@ func (c *Collection) UpdateXattr(_ context.Context, k string, xattrKey string, e return uint64(result.Cas()), nil } -// UpdateBodyAndXattr updates the document body and xattr of an existing document. Writes cas and crc32c to the xattr using -// macro expansion. -func (c *Collection) UpdateBodyAndXattr(ctx context.Context, k string, xattrKey string, exp uint32, cas uint64, opts *sgbucket.MutateInOptions, v interface{}, xv interface{}) (casOut uint64, err error) { +// updateBodyAndXattr updates the document body and xattrs of an existing document. Writes cas and crc32c to the xattr using macro expansion. 
+func (c *Collection) updateBodyAndXattrs(ctx context.Context, k string, exp uint32, cas uint64, opts *sgbucket.MutateInOptions, v interface{}, xattrs map[string][]byte) (casOut uint64, err error) { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() - mutateOps := []gocb.MutateInSpec{ - gocb.UpsertSpec(xattrKey, bytesToRawMessage(xv), UpsertSpecXattr), - gocb.ReplaceSpec("", bytesToRawMessage(v), nil), + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrs)+1) + for xattrKey, xattrVal := range xattrs { + mutateOps = append(mutateOps, gocb.UpsertSpec(xattrKey, bytesToRawMessage(xattrVal), UpsertSpecXattr)) } + mutateOps = append(mutateOps, gocb.ReplaceSpec("", bytesToRawMessage(v), nil)) mutateOps = appendMacroExpansions(mutateOps, opts) options := &gocb.MutateInOptions{ @@ -432,6 +479,29 @@ func (c *Collection) UpdateBodyAndXattr(ctx context.Context, k string, xattrKey return uint64(result.Cas()), nil } +// updateXattrDeleteBody deletes the document body and updates the xattrs of an existing document. Writes cas and crc32c to the xattr using macro expansion. +func (c *Collection) updateXattrsDeleteBody(_ context.Context, k string, exp uint32, cas uint64, xattrs map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { + c.Bucket.waitForAvailKvOp() + defer c.Bucket.releaseKvOp() + + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrs)+1) + for xattrKey, xattrVal := range xattrs { + mutateOps = append(mutateOps, gocb.UpsertSpec(xattrKey, bytesToRawMessage(xattrVal), UpsertSpecXattr)) + } + mutateOps = append(mutateOps, gocb.RemoveSpec("", nil)) + mutateOps = appendMacroExpansions(mutateOps, opts) + options := &gocb.MutateInOptions{ + StoreSemantic: gocb.StoreSemanticsReplace, + Expiry: CbsExpiryToDuration(exp), + Cas: gocb.Cas(cas), + } + result, mutateErr := c.Collection.MutateIn(k, mutateOps, options) + if mutateErr != nil { + return 0, mutateErr + } + return uint64(result.Cas()), nil +} + // UpdateXattrDeleteBody deletes the document body and updates the xattr of an existing document. Writes cas and crc32c to the xattr using // macro expansion. func (c *Collection) UpdateXattrDeleteBody(_ context.Context, k, xattrKey string, exp uint32, cas uint64, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { @@ -455,14 +525,16 @@ func (c *Collection) UpdateXattrDeleteBody(_ context.Context, k, xattrKey string return uint64(result.Cas()), nil } -// SubdocDeleteXattr deletes an xattr of an existing document (or document tombstone) -func (c *Collection) SubdocDeleteXattr(k string, xattrKey string, cas uint64) (err error) { +// subdocDeleteXattrs deletes xattrs of an existing document (or document tombstone) +func (c *Collection) subdocDeleteXattrs(k string, xattrKeys []string, cas uint64) (err error) { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() - mutateOps := []gocb.MutateInSpec{ - gocb.RemoveSpec(xattrKey, RemoveSpecXattr), + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrKeys)) + for _, xattrKey := range xattrKeys { + mutateOps = append(mutateOps, gocb.RemoveSpec(xattrKey, RemoveSpecXattr)) } + options := &gocb.MutateInOptions{ Cas: gocb.Cas(cas), } @@ -472,8 +544,8 @@ func (c *Collection) SubdocDeleteXattr(k string, xattrKey string, cas uint64) (e return mutateErr } -// SubdocDeleteXattrs will delete the supplied xattr keys from a document. Not a cas safe operation. -func (c *Collection) SubdocDeleteXattrs(k string, xattrKeys ...string) error { +// subdocRemovePaths will delete the supplied xattr keys from a document. 
Not a cas safe operation. +func (c *Collection) subdocRemovePaths(k string, xattrKeys ...string) error { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() @@ -488,14 +560,16 @@ func (c *Collection) SubdocDeleteXattrs(k string, xattrKeys ...string) error { } // SubdocDeleteXattr deletes the document body and associated xattr of an existing document. -func (c *Collection) DeleteBodyAndXattr(_ context.Context, k string, xattrKey string) (err error) { +func (c *Collection) deleteBodyAndXattrs(_ context.Context, k string, xattrKeys []string) (err error) { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() - mutateOps := []gocb.MutateInSpec{ - gocb.RemoveSpec(xattrKey, RemoveSpecXattr), - gocb.RemoveSpec("", nil), + mutateOps := make([]gocb.MutateInSpec, 0, len(xattrKeys)+1) + + for _, xattrKey := range xattrKeys { + mutateOps = append(mutateOps, gocb.RemoveSpec(xattrKey, RemoveSpecXattr)) } + mutateOps = append(mutateOps, gocb.RemoveSpec("", nil)) options := &gocb.MutateInOptions{ StoreSemantic: gocb.StoreSemanticsReplace, } @@ -516,8 +590,8 @@ func (c *Collection) DeleteBodyAndXattr(_ context.Context, k string, xattrKey st return mutateErr } -// DeleteBody deletes the document body of an existing document, and updates cas and crc32c in the associated xattr. -func (c *Collection) DeleteBody(_ context.Context, k string, xattrKey string, exp uint32, cas uint64, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { +// deleteBody deletes the document body of an existing document, and updates cas and crc32c in the associated xattr. Used in Couchbase Server < 6.6 +func (c *Collection) deleteBody(_ context.Context, k string, exp uint32, cas uint64, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { c.Bucket.waitForAvailKvOp() defer c.Bucket.releaseKvOp() @@ -579,43 +653,6 @@ func bytesToRawMessage(v interface{}) interface{} { } } -func (c *Collection) WriteUserXattr(k string, xattrKey string, xattrVal interface{}) (uint64, error) { - c.Bucket.waitForAvailKvOp() - defer c.Bucket.releaseKvOp() - - mutateOps := []gocb.MutateInSpec{ - gocb.UpsertSpec(xattrKey, bytesToRawMessage(xattrVal), UpsertSpecXattr), - } - options := &gocb.MutateInOptions{ - StoreSemantic: gocb.StoreSemanticsUpsert, - } - - result, mutateErr := c.Collection.MutateIn(k, mutateOps, options) - if mutateErr != nil { - return 0, mutateErr - } - return uint64(result.Cas()), nil -} - -func (c *Collection) DeleteUserXattr(k string, xattrKey string) (uint64, error) { - c.Bucket.waitForAvailKvOp() - defer c.Bucket.releaseKvOp() - - mutateOps := []gocb.MutateInSpec{ - gocb.RemoveSpec(xattrKey, RemoveSpecXattr), - } - options := &gocb.MutateInOptions{ - Cas: gocb.Cas(0), - } - options.Internal.DocFlags = gocb.SubdocDocFlagAccessDeleted - - result, mutateErr := c.Collection.MutateIn(k, mutateOps, options) - if mutateErr != nil { - return 0, mutateErr - } - return uint64(result.Cas()), nil -} - // appendMacroExpansions will append macro expansions defined in MutateInOptions to the provided // gocb MutateInSpec. 
func appendMacroExpansions(mutateInSpec []gocb.MutateInSpec, opts *sgbucket.MutateInOptions) []gocb.MutateInSpec { diff --git a/base/collection_xattr_common.go b/base/collection_xattr_common.go index 312f9a537e..87608df48f 100644 --- a/base/collection_xattr_common.go +++ b/base/collection_xattr_common.go @@ -23,15 +23,14 @@ const ( ) // CAS-safe write of a document and it's associated named xattr -func WriteCasWithXattr(ctx context.Context, store *Collection, k string, xattrKey string, exp uint32, cas uint64, opts *sgbucket.MutateInOptions, v interface{}, xv interface{}) (casOut uint64, err error) { +func (c *Collection) WriteWithXattrs(ctx context.Context, k string, exp uint32, cas uint64, v []byte, xattrs map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { worker := func() (shouldRetry bool, err error, value uint64) { - // cas=0 specifies an insert if cas == 0 { - casOut, err = store.InsertBodyAndXattr(ctx, k, xattrKey, exp, v, xv, opts) + casOut, err = c.insertBodyAndXattrs(ctx, k, exp, v, xattrs, opts) if err != nil { - shouldRetry = store.isRecoverableWriteError(err) + shouldRetry = c.isRecoverableWriteError(err) return shouldRetry, err, uint64(0) } return false, nil, casOut @@ -40,16 +39,16 @@ func WriteCasWithXattr(ctx context.Context, store *Collection, k string, xattrKe // Otherwise, replace existing value if v != nil { // Have value and xattr value - update both - casOut, err = store.UpdateBodyAndXattr(ctx, k, xattrKey, exp, cas, opts, v, xv) + casOut, err = c.updateBodyAndXattrs(ctx, k, exp, cas, opts, v, xattrs) if err != nil { - shouldRetry = store.isRecoverableWriteError(err) + shouldRetry = c.isRecoverableWriteError(err) return shouldRetry, err, uint64(0) } } else { // Update xattr only - casOut, err = store.UpdateXattr(ctx, k, xattrKey, exp, cas, xv, opts) + casOut, err = c.UpdateXattrs(ctx, k, exp, cas, xattrs, opts) if err != nil { - shouldRetry = store.isRecoverableWriteError(err) + shouldRetry = c.isRecoverableWriteError(err) return shouldRetry, err, uint64(0) } } @@ -65,21 +64,8 @@ func WriteCasWithXattr(ctx context.Context, store *Collection, k string, xattrKe return cas, err } -// Single attempt to update a document and xattr. Setting isDelete=true and value=nil will delete the document body. Both -// update types (UpdateTombstoneXattr, WriteCasWithXattr) include recoverable error retry. - -func WriteWithXattr(ctx context.Context, store *Collection, k string, xattrKey string, exp uint32, cas uint64, opts *sgbucket.MutateInOptions, value []byte, xattrValue []byte, isDelete bool, deleteBody bool) (casOut uint64, err error) { - // If this is a tombstone, we want to delete the document and update the xattr - if isDelete { - return UpdateTombstoneXattr(ctx, store, k, xattrKey, exp, cas, xattrValue, deleteBody, opts) - } else { - // Not a delete - update the body and xattr - return WriteCasWithXattr(ctx, store, k, xattrKey, exp, cas, opts, value, xattrValue) - } -} - // CAS-safe update of a document's xattr (only). Deletes the document body if deleteBody is true. 
-func UpdateTombstoneXattr(ctx context.Context, store *Collection, k string, xattrKey string, exp uint32, cas uint64, xv interface{}, deleteBody bool, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { +func (c *Collection) WriteTombstoneWithXattrs(ctx context.Context, k string, exp uint32, cas uint64, xattrs map[string][]byte, deleteBody bool, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { requiresBodyRemoval := false worker := func() (shouldRetry bool, err error, value uint64) { @@ -89,30 +75,29 @@ func UpdateTombstoneXattr(ctx context.Context, store *Collection, k string, xatt // If deleteBody == true, remove the body and update xattr if deleteBody { - casOut, tombstoneErr = store.UpdateXattrDeleteBody(ctx, k, xattrKey, exp, cas, xv, opts) + casOut, tombstoneErr = c.updateXattrsDeleteBody(ctx, k, exp, cas, xattrs, opts) } else { if cas == 0 { // if cas == 0, create a new server tombstone with xattr - casOut, tombstoneErr = store.InsertXattr(ctx, k, xattrKey, exp, cas, xv, opts) - // If one-step tombstone creation is not supported, set flag for document body removal - requiresBodyRemoval = !store.IsSupported(sgbucket.BucketStoreFeatureCreateDeletedWithXattr) + casOut, tombstoneErr = c.createTombstone(ctx, k, exp, cas, xattrs, opts) + requiresBodyRemoval = !c.IsSupported(sgbucket.BucketStoreFeatureCreateDeletedWithXattr) } else { // If cas is non-zero, this is an already existing tombstone. Update xattr only - casOut, tombstoneErr = store.UpdateXattr(ctx, k, xattrKey, exp, cas, xv, opts) + casOut, tombstoneErr = c.UpdateXattrs(ctx, k, exp, cas, xattrs, opts) } } if tombstoneErr != nil { - shouldRetry = store.isRecoverableWriteError(tombstoneErr) + shouldRetry = c.isRecoverableWriteError(tombstoneErr) return shouldRetry, tombstoneErr, uint64(0) } return false, nil, casOut } // Kick off retry loop - err, cas = RetryLoopCas(ctx, "UpdateTombstoneXattr", worker, DefaultRetrySleeper()) + err, cas = RetryLoopCas(ctx, "UpdateTombstoneXattrs", worker, DefaultRetrySleeper()) if err != nil { - err = pkgerrors.Wrapf(err, "Error during UpdateTombstoneXattr with key %v", UD(k).Redact()) + err = pkgerrors.Wrapf(err, "Error during UpdateTombstoneXattrs with key %v", UD(k).Redact()) return cas, err } @@ -123,7 +108,7 @@ func UpdateTombstoneXattr(ctx context.Context, store *Collection, k string, xatt if requiresBodyRemoval { worker := func() (shouldRetry bool, err error, value uint64) { - casOut, removeErr := store.DeleteBody(ctx, k, xattrKey, exp, cas, opts) + casOut, removeErr := c.deleteBody(ctx, k, exp, cas, opts) if removeErr != nil { // If there is a cas mismatch the body has since been updated and so we don't need to bother removing // body in this operation @@ -131,7 +116,7 @@ func UpdateTombstoneXattr(ctx context.Context, store *Collection, k string, xatt return false, nil, cas } - shouldRetry = store.isRecoverableWriteError(removeErr) + shouldRetry = c.isRecoverableWriteError(removeErr) return shouldRetry, removeErr, uint64(0) } return false, nil, casOut @@ -148,18 +133,17 @@ func UpdateTombstoneXattr(ctx context.Context, store *Collection, k string, xatt return cas, err } -// WriteUpdateWithXattr retrieves the existing doc from the bucket, invokes the callback to update +// WriteUpdateWithXattrs retrieves the existing doc from the bucket, invokes the callback to update // the document, then writes the new document to the bucket. Will repeat this process on cas // failure. 
// // If previous document is provided, will use it on 1st iteration instead of retrieving from bucket. // A zero CAS in `previous` is interpreted as no document existing; this can be used to short- // circuit the initial Get when the document is unlikely to already exist. -func WriteUpdateWithXattr(ctx context.Context, store *Collection, k string, xattrKey string, userXattrKey string, exp uint32, previous *sgbucket.BucketDocument, opts *sgbucket.MutateInOptions, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error) { +func (c *Collection) WriteUpdateWithXattrs(ctx context.Context, k string, xattrKeys []string, exp uint32, previous *sgbucket.BucketDocument, opts *sgbucket.MutateInOptions, callback sgbucket.WriteUpdateWithXattrsFunc) (casOut uint64, err error) { var value []byte - var xattrValue []byte - var userXattrValue []byte + var xattrs map[string][]byte var cas uint64 emptyCas := uint64(0) @@ -170,30 +154,30 @@ func WriteUpdateWithXattr(ctx context.Context, store *Collection, k string, xatt // A zero CAS is interpreted as no document existing. if previous.Cas != 0 { value = previous.Body - xattrValue = previous.Xattr + xattrs = previous.Xattrs cas = previous.Cas - userXattrValue = previous.UserXattr } previous = nil // a retry will get value from bucket, as below } else { // If no existing value has been provided, or on a retry, // retrieve the current value from the bucket - cas, err = store.SubdocGetBodyAndXattr(ctx, k, xattrKey, userXattrKey, &value, &xattrValue, &userXattrValue) + value, xattrs, cas, err = c.subdocGetBodyAndXattrs(ctx, k, xattrKeys, true) if err != nil { if pkgerrors.Cause(err) != ErrNotFound { // Unexpected error, cancel writeupdate - DebugfCtx(ctx, KeyCRUD, "Retrieval of existing doc failed during WriteUpdateWithXattr for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), err) + DebugfCtx(ctx, KeyCRUD, "Retrieval of existing doc failed during WriteUpdateWithXattr for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKeys), err) return emptyCas, err } // Key not found - initialize values value = nil - xattrValue = nil + xattrs = nil } } // Invoke callback to get updated value - updatedValue, updatedXattrValue, isDelete, callbackExpiry, updatedSpec, err := callback(value, xattrValue, userXattrValue, cas) + updatedDoc, err := callback(value, xattrs, cas) + //updatedValue, updatedXattrValue, isDelete, callbackExpiry, updatedSpec, err := callback(value, xattrs, cas) // If it's an ErrCasFailureShouldRetry, then retry by going back through the for loop if err == ErrCasFailureShouldRetry { @@ -204,16 +188,21 @@ func WriteUpdateWithXattr(ctx context.Context, store *Collection, k string, xatt if err != nil { return emptyCas, err } - if callbackExpiry != nil { - exp = *callbackExpiry + if updatedDoc.Expiry != nil { + exp = *updatedDoc.Expiry } - if updatedSpec != nil { - opts.MacroExpansion = append(opts.MacroExpansion, updatedSpec...) + if updatedDoc.Spec != nil { + opts.MacroExpansion = append(opts.MacroExpansion, updatedDoc.Spec...) } + var writeErr error // Attempt to write the updated document to the bucket. 
Mark body for deletion if previous body was non-empty
 	deleteBody := value != nil
-	casOut, writeErr := WriteWithXattr(ctx, store, k, xattrKey, exp, cas, opts, updatedValue, updatedXattrValue, isDelete, deleteBody)
+	if updatedDoc.IsTombstone {
+		casOut, writeErr = c.WriteTombstoneWithXattrs(ctx, k, exp, cas, updatedDoc.Xattrs, deleteBody, opts)
+	} else {
+		casOut, writeErr = c.WriteWithXattrs(ctx, k, exp, cas, updatedDoc.Doc, updatedDoc.Xattrs, opts)
+	}

 	if writeErr == nil {
 		return casOut, nil
@@ -225,22 +214,22 @@
 		// conflict/duplicate handling on retry.
 	} else {
 		// WriteWithXattr already handles retry on recoverable errors, so fail on any errors other than ErrKeyExists
-		WarnfCtx(ctx, "Failed to update doc with xattr for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKey), writeErr)
+		WarnfCtx(ctx, "Failed to update doc with xattr for key=%s, xattrKey=%s: %v", UD(k), UD(xattrKeys), writeErr)
 		return emptyCas, writeErr
 	}

 	// Reset value, xattr, cas for cas retry
 	value = nil
-	xattrValue = nil
+	xattrs = nil
 	cas = 0
 	}
 }

-// SetXattr performs a subdoc set on the supplied xattrKey. Implements a retry for recoverable failures.
-func SetXattr(ctx context.Context, store *Collection, k string, xattrKey string, xv []byte) (casOut uint64, err error) {
+// SetXattrs performs a subdoc set on the supplied xattrs. Implements a retry for recoverable failures.
+func SetXattrs(ctx context.Context, store *Collection, k string, xvs map[string][]byte) (casOut uint64, err error) {
 	worker := func() (shouldRetry bool, err error, value uint64) {
-		casOut, writeErr := store.SubdocSetXattr(k, xattrKey, xv)
+		casOut, writeErr := store.SubdocSetXattrs(k, xvs)
 		if writeErr == nil {
 			return false, nil, casOut
 		}
@@ -263,9 +252,9 @@ func SetXattr(ctx context.Context, store *Collection, k string, xattrKey string,
 }

 // RemoveXattr performs a cas safe subdoc delete of the provided key. Will retry if a recoverable failure occurs.
-func RemoveXattr(ctx context.Context, store *Collection, k string, xattrKey string, cas uint64) error {
+func RemoveXattrs(ctx context.Context, store *Collection, k string, xattrKeys []string, cas uint64) error {
 	worker := func() (shouldRetry bool, err error, value interface{}) {
-		writeErr := store.SubdocDeleteXattr(k, xattrKey, cas)
+		writeErr := store.subdocDeleteXattrs(k, xattrKeys, cas)
 		if writeErr == nil {
 			return false, nil, nil
 		}
@@ -278,19 +267,18 @@ func RemoveXattr(ctx context.Context, store *Collection, k string, xattrKey stri
 		return false, err, nil
 	}

-	err, _ := RetryLoop(ctx, "RemoveXattr", worker, DefaultRetrySleeper())
+	err, _ := RetryLoop(ctx, "RemoveXattrs", worker, DefaultRetrySleeper())
 	if err != nil {
-		err = pkgerrors.Wrapf(err, "RemoveXattr with key %v xattr %v", UD(k).Redact(), UD(xattrKey).Redact())
+		err = pkgerrors.Wrapf(err, "RemoveXattr with key %v xattr %v", UD(k).Redact(), UD(xattrKeys).Redact())
 	}
 	return err
 }

-// DeleteXattrs performs a subdoc delete of the provided keys. Retries any recoverable failures. Not cas safe does a
-// straight delete.
-func DeleteXattrs(ctx context.Context, store *Collection, k string, xattrKeys ...string) error {
+// removeSubdocPaths performs a subdoc delete of the provided keys. Retries any recoverable failures. Not cas safe.
+func removeSubdocPaths(ctx context.Context, store *Collection, k string, subdocPaths ...string) error {
 	worker := func() (shouldRetry bool, err error, value interface{}) {
-		writeErr := store.SubdocDeleteXattrs(k, xattrKeys...)
+ writeErr := store.subdocRemovePaths(k, subdocPaths...) if writeErr == nil { return false, nil, nil } @@ -303,9 +291,9 @@ func DeleteXattrs(ctx context.Context, store *Collection, k string, xattrKeys .. return false, err, nil } - err, _ := RetryLoop(ctx, "DeleteXattrs", worker, DefaultRetrySleeper()) + err, _ := RetryLoop(ctx, "RemoveSubdocPaths", worker, DefaultRetrySleeper()) if err != nil { - err = pkgerrors.Wrapf(err, "DeleteXattrs with keys %q xattr %v", UD(k).Redact(), UD(strings.Join(xattrKeys, ",")).Redact()) + err = pkgerrors.Wrapf(err, "DeleteXattrs with keys %q xattr %v", UD(k).Redact(), UD(strings.Join(subdocPaths, ",")).Redact()) } return err @@ -325,35 +313,35 @@ func DeleteXattrs(ctx context.Context, store *Collection, k string, xattrKeys .. // Expected errors: // - Temporary server overloaded errors, in which case the caller should retry // - If the doc is in the the NoDoc and NoXattr state, it will return a KeyNotFound error -func DeleteWithXattr(ctx context.Context, store *Collection, k string, xattrKey string) error { +func DeleteWithXattrs(ctx context.Context, store *Collection, k string, xattrKeys []string) error { // Delegate to internal method that can take a testing-related callback - return deleteWithXattrInternal(ctx, store, k, xattrKey, nil) + return deleteWithXattrInternal(ctx, store, k, xattrKeys, nil) } // A function that will be called back after the first delete attempt but before second delete attempt // to simulate the doc having changed state (artificially injected race condition) -type deleteWithXattrRaceInjection func(k string, xattrKey string) +type deleteWithXattrRaceInjection func(k string, xattrKeys []string) -func deleteWithXattrInternal(ctx context.Context, store *Collection, k string, xattrKey string, callback deleteWithXattrRaceInjection) error { +func deleteWithXattrInternal(ctx context.Context, store *Collection, k string, xattrKeys []string, callback deleteWithXattrRaceInjection) error { - DebugfCtx(ctx, KeyCRUD, "DeleteWithXattr called with key: %v xattrKey: %v", UD(k), UD(xattrKey)) + DebugfCtx(ctx, KeyCRUD, "DeleteWithXattr called with key: %v xattrKey: %v", UD(k), UD(xattrKeys)) // Try to delete body and xattrs in single op // NOTE: ongoing discussion w/ KV Engine team on whether this should handle cases where the body // doesn't exist (eg, a tombstoned xattr doc) by just ignoring the "delete body" mutation, rather // than current behavior of returning gocb.ErrKeyNotFound - mutateErr := store.DeleteBodyAndXattr(ctx, k, xattrKey) + mutateErr := store.deleteBodyAndXattrs(ctx, k, xattrKeys) if IsDocNotFoundError(mutateErr) { // Invoke the testing related callback. This is a no-op in non-test contexts. if callback != nil { - callback(k, xattrKey) + callback(k, xattrKeys) } // KeyNotFound indicates there is no doc body. Try to delete only the xattr. - return deleteDocXattrOnly(ctx, store, k, xattrKey, callback) + return deleteDocXattrOnly(ctx, store, k, xattrKeys, callback) } else if IsXattrNotFoundError(mutateErr) { // Invoke the testing related callback. This is a no-op in non-test contexts. if callback != nil { - callback(k, xattrKey) + callback(k, xattrKeys) } // ErrXattrNotFound indicates there is no XATTR. Try to delete only the body. 
return store.Delete(k) @@ -364,19 +352,17 @@ func deleteWithXattrInternal(ctx context.Context, store *Collection, k string, x } -func deleteDocXattrOnly(ctx context.Context, store *Collection, k string, xattrKey string, callback deleteWithXattrRaceInjection) error { +func deleteDocXattrOnly(ctx context.Context, store *Collection, k string, xattrKeys []string, callback deleteWithXattrRaceInjection) error { // Do get w/ xattr in order to get cas - var retrievedVal map[string]interface{} - var retrievedXattr map[string]interface{} - getCas, err := store.SubdocGetBodyAndXattr(ctx, k, xattrKey, "", &retrievedVal, &retrievedXattr, nil) + rawBody, _, getCas, err := store.GetWithXattrs(ctx, k, xattrKeys) if err != nil { return err } // If the doc body is non-empty at this point, then give up because it seems that a doc update has been // interleaved with the purge. Return error to the caller and cancel the purge. - if len(retrievedVal) != 0 { + if rawBody != nil { return fmt.Errorf("DeleteWithXattr was unable to delete the doc. Another update " + "was received which resurrected the doc by adding a new revision, in which case this delete operation is " + "considered as cancelled.") @@ -384,7 +370,7 @@ func deleteDocXattrOnly(ctx context.Context, store *Collection, k string, xattrK // Cas-safe delete of just the XATTR. Use SubdocDocFlagAccessDeleted since presumably the document body // has been deleted. - deleteXattrErr := store.SubdocDeleteXattr(k, xattrKey, getCas) + deleteXattrErr := store.subdocDeleteXattrs(k, xattrKeys, getCas) if deleteXattrErr != nil { // If the cas-safe delete of XATTR fails, return an error to the caller. // This might happen if there was a concurrent update interleaved with the purge (someone resurrected doc) diff --git a/base/error.go b/base/error.go index 033a4698f6..1f21ee027e 100644 --- a/base/error.go +++ b/base/error.go @@ -47,9 +47,12 @@ var ( ErrPathNotFound = sgbucket.ErrPathNotFound ErrPathExists = sgbucket.ErrPathExists - // ErrXattrNotFound is returned if a requested xattr is not present on a DCP event + // ErrXattrNotFound is returned if all requested xattrs are not present ErrXattrNotFound = &sgError{"Xattr Not Found"} + // ErrXattrPartialFound is returned if only a subset of requested xattrs are found + ErrXattrPartialFound = &sgError{"Some Requested Xattrs Not Found"} + // ErrXattrInvalidLen is returned if the xattr is corrupt. 
ErrXattrInvalidLen = &sgError{"Xattr stream length"} diff --git a/base/heartbeat.go b/base/heartbeat.go index f670772856..e1b5cb7186 100644 --- a/base/heartbeat.go +++ b/base/heartbeat.go @@ -449,7 +449,7 @@ func (dh *documentBackedListener) updateNodeList(ctx context.Context, nodeID str InfofCtx(ctx, KeyCluster, "Updating nodeList document (%s) with node IDs: %v", dh.nodeListKey, dh.nodeIDs) - casOut, err := dh.datastore.WriteCas(dh.nodeListKey, 0, 0, dh.cas, dh.nodeIDs, 0) + casOut, err := dh.datastore.WriteCas(dh.nodeListKey, 0, dh.cas, dh.nodeIDs, 0) if err == nil { // Successful update dh.cas = casOut diff --git a/base/leaky_datastore.go b/base/leaky_datastore.go index 0e67936b10..ba1b581a43 100644 --- a/base/leaky_datastore.go +++ b/base/leaky_datastore.go @@ -96,13 +96,13 @@ func (lds *LeakyDataStore) GetRaw(k string) (v []byte, cas uint64, err error) { } return lds.dataStore.GetRaw(k) } -func (lds *LeakyDataStore) GetWithXattr(ctx context.Context, k string, xattr string, userXattrKey string, rv interface{}, xv interface{}, uxv interface{}) (cas uint64, err error) { +func (lds *LeakyDataStore) GetWithXattrs(ctx context.Context, k string, xattrKeys []string) (body []byte, xattrs map[string][]byte, cas uint64, err error) { if lds.config.GetWithXattrCallback != nil { if err := lds.config.GetWithXattrCallback(k); err != nil { - return 0, err + return nil, nil, 0, err } } - return lds.dataStore.GetWithXattr(ctx, k, xattr, userXattrKey, rv, xv, uxv) + return lds.dataStore.GetWithXattrs(ctx, k, xattrKeys) } func (lds *LeakyDataStore) GetAndTouchRaw(k string, exp uint32) (v []byte, cas uint64, err error) { @@ -134,8 +134,8 @@ func (lds *LeakyDataStore) Delete(k string) error { func (lds *LeakyDataStore) Remove(k string, cas uint64) (casOut uint64, err error) { return lds.dataStore.Remove(k, cas) } -func (lds *LeakyDataStore) WriteCas(k string, flags int, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error) { - return lds.dataStore.WriteCas(k, flags, exp, cas, v, opt) +func (lds *LeakyDataStore) WriteCas(k string, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error) { + return lds.dataStore.WriteCas(k, exp, cas, v, opt) } func (lds *LeakyDataStore) Update(k string, exp uint32, callback sgbucket.UpdateFunc) (casOut uint64, err error) { if lds.config.UpdateCallback != nil { @@ -244,64 +244,52 @@ func (lds *LeakyDataStore) GetMaxVbno() (uint16, error) { return lds.bucket.GetMaxVbno() } -func (lds *LeakyDataStore) WriteCasWithXattr(ctx context.Context, k string, xattr string, exp uint32, cas uint64, v interface{}, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { - return lds.dataStore.WriteCasWithXattr(ctx, k, xattr, exp, cas, v, xv, opts) -} - -func (lds *LeakyDataStore) WriteWithXattr(ctx context.Context, k string, xattrKey string, exp uint32, cas uint64, value []byte, xattrValue []byte, isDelete bool, deleteBody bool, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { +func (lds *LeakyDataStore) WriteWithXattrs(ctx context.Context, k string, exp uint32, cas uint64, value []byte, xattrs map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { if lds.config.WriteWithXattrCallback != nil { lds.config.WriteWithXattrCallback(k) } - return lds.dataStore.WriteWithXattr(ctx, k, xattrKey, exp, cas, value, xattrValue, isDelete, deleteBody, opts) + return lds.dataStore.WriteWithXattrs(ctx, k, exp, cas, value, xattrs, opts) } -func (lds *LeakyDataStore) WriteUpdateWithXattr(ctx 
context.Context, k string, xattr string, userXattrKey string, exp uint32, previous *sgbucket.BucketDocument, opts *sgbucket.MutateInOptions, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error) { +func (lds *LeakyDataStore) WriteUpdateWithXattrs(ctx context.Context, k string, xattrKeys []string, exp uint32, previous *sgbucket.BucketDocument, opts *sgbucket.MutateInOptions, callback sgbucket.WriteUpdateWithXattrsFunc) (casOut uint64, err error) { if lds.config.UpdateCallback != nil { - wrapperCallback := func(current []byte, xattr []byte, userXattr []byte, cas uint64) (updated []byte, updatedXattr []byte, deletedDoc bool, expiry *uint32, updatedSpec []sgbucket.MacroExpansionSpec, err error) { - updated, updatedXattr, deletedDoc, expiry, updatedSpec, err = callback(current, xattr, userXattr, cas) + wrapperCallback := func(current []byte, xattrs map[string][]byte, cas uint64) (sgbucket.UpdatedDoc, error) { + updatedDoc, err := callback(current, xattrs, cas) lds.config.UpdateCallback(k) - return updated, updatedXattr, deletedDoc, expiry, updatedSpec, err + return updatedDoc, err } - return lds.dataStore.WriteUpdateWithXattr(ctx, k, xattr, userXattrKey, exp, previous, opts, wrapperCallback) + return lds.dataStore.WriteUpdateWithXattrs(ctx, k, xattrKeys, exp, previous, opts, wrapperCallback) } - return lds.dataStore.WriteUpdateWithXattr(ctx, k, xattr, userXattrKey, exp, previous, opts, callback) + return lds.dataStore.WriteUpdateWithXattrs(ctx, k, xattrKeys, exp, previous, opts, callback) } -func (lds *LeakyDataStore) SetXattr(ctx context.Context, k string, xattrKey string, xv []byte) (casOut uint64, err error) { +func (lds *LeakyDataStore) SetXattrs(ctx context.Context, k string, xv map[string][]byte) (casOut uint64, err error) { if lds.config.SetXattrCallback != nil { if err := lds.config.SetXattrCallback(k); err != nil { return 0, err } } - return lds.dataStore.SetXattr(ctx, k, xattrKey, xv) + return lds.dataStore.SetXattrs(ctx, k, xv) } -func (lds *LeakyDataStore) RemoveXattr(ctx context.Context, k string, xattrKey string, cas uint64) (err error) { - return lds.dataStore.RemoveXattr(ctx, k, xattrKey, cas) +func (lds *LeakyDataStore) RemoveXattrs(ctx context.Context, k string, xattrKeys []string, cas uint64) (err error) { + return lds.dataStore.RemoveXattrs(ctx, k, xattrKeys, cas) } -func (lds *LeakyDataStore) DeleteXattrs(ctx context.Context, k string, xattrKeys ...string) (err error) { - return lds.dataStore.DeleteXattrs(ctx, k, xattrKeys...) +func (lds *LeakyDataStore) DeleteSubDocPaths(ctx context.Context, k string, xattrKeys ...string) (err error) { + return lds.dataStore.DeleteSubDocPaths(ctx, k, xattrKeys...) 
} func (lds *LeakyDataStore) SubdocInsert(ctx context.Context, docID string, fieldPath string, cas uint64, value interface{}) error { return lds.dataStore.SubdocInsert(ctx, docID, fieldPath, cas, value) } -func (lds *LeakyDataStore) DeleteWithXattr(ctx context.Context, k string, xattr string) error { - return lds.dataStore.DeleteWithXattr(ctx, k, xattr) -} - -func (lds *LeakyDataStore) WriteUserXattr(docKey string, xattrKey string, xattrVal interface{}) (uint64, error) { - return lds.dataStore.WriteUserXattr(docKey, xattrKey, xattrVal) -} - -func (lds *LeakyDataStore) DeleteUserXattr(docKey string, xattrKey string) (uint64, error) { - return lds.dataStore.DeleteUserXattr(docKey, xattrKey) +func (lds *LeakyDataStore) DeleteWithXattrs(ctx context.Context, k string, xattrKeys []string) error { + return lds.dataStore.DeleteWithXattrs(ctx, k, xattrKeys) } -func (lds *LeakyDataStore) GetXattr(ctx context.Context, k string, xattr string, xv interface{}) (cas uint64, err error) { - return lds.dataStore.GetXattr(ctx, k, xattr, xv) +func (lds *LeakyDataStore) GetXattrs(ctx context.Context, k string, xattrKeys []string) (xattrs map[string][]byte, cas uint64, err error) { + return lds.dataStore.GetXattrs(ctx, k, xattrKeys) } func (lds *LeakyDataStore) GetSubDocRaw(ctx context.Context, k string, subdocKey string) ([]byte, uint64, error) { @@ -342,29 +330,13 @@ func (lds *LeakyDataStore) IsSupported(feature sgbucket.BucketStoreFeature) bool return lds.dataStore.IsSupported(feature) } -func (lds *LeakyDataStore) SubdocSetXattr(ctx context.Context, k string, xattrKey string, xv interface{}) (casOut uint64, err error) { - raw, err := JSONMarshal(xv) - if err == nil { - casOut, err = lds.dataStore.SetXattr(ctx, k, xattrKey, raw) - } - return -} - -func (lds *LeakyDataStore) UpdateXattr(ctx context.Context, k string, xattrKey string, exp uint32, cas uint64, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { - return lds.dataStore.UpdateXattr(ctx, k, xattrKey, exp, cas, xv, opts) +func (lds *LeakyDataStore) UpdateXattrs(ctx context.Context, k string, exp uint32, cas uint64, xv map[string][]byte, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { + return lds.dataStore.UpdateXattrs(ctx, k, exp, cas, xv, opts) } -func (lds *LeakyDataStore) UpdateXattrDeleteBody(ctx context.Context, k, xattrKey string, exp uint32, cas uint64, xv interface{}, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { - return lds.dataStore.UpdateXattrDeleteBody(ctx, k, xattrKey, exp, cas, xv, opts) - -} - -func (lds *LeakyDataStore) DeleteXattr(ctx context.Context, k string, xattrKey string, cas uint64) error { - return lds.dataStore.RemoveXattr(ctx, k, xattrKey, cas) -} +func (lds *LeakyDataStore) WriteTombstoneWithXattrs(ctx context.Context, k string, exp uint32, cas uint64, xv map[string][]byte, deleteBody bool, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { + return lds.dataStore.WriteTombstoneWithXattrs(ctx, k, exp, cas, xv, deleteBody, opts) -func (lds *LeakyDataStore) DeleteBody(ctx context.Context, k string, xattrKey string, exp uint32, cas uint64, opts *sgbucket.MutateInOptions) (casOut uint64, err error) { - return lds.dataStore.DeleteBody(ctx, k, xattrKey, exp, cas, opts) } func (lds *LeakyDataStore) GetSpec() BucketSpec { diff --git a/base/rosmar_cluster.go b/base/rosmar_cluster.go index 5b0f115dc1..ea260c0170 100644 --- a/base/rosmar_cluster.go +++ b/base/rosmar_cluster.go @@ -54,7 +54,7 @@ func (c *RosmarCluster) InsertMetadataDocument(ctx context.Context, 
location, ke } defer bucket.Close(ctx) - return bucket.DefaultDataStore().WriteCas(key, 0, 0, 0, value, 0) + return bucket.DefaultDataStore().WriteCas(key, 0, 0, value, 0) } // WriteMetadataDocument writes a metadata document, and fails on CAS mismatch @@ -65,7 +65,7 @@ func (c *RosmarCluster) WriteMetadataDocument(ctx context.Context, location, doc } defer bucket.Close(ctx) - return bucket.DefaultDataStore().WriteCas(docID, 0, 0, cas, value, 0) + return bucket.DefaultDataStore().WriteCas(docID, 0, cas, value, 0) } // TouchMetadataDocument sets the specified property in a bootstrap metadata document for a given bucket and key. Used to @@ -124,7 +124,7 @@ func (c *RosmarCluster) UpdateMetadataDocument(ctx context.Context, location, do return removeCasOut, nil } - replaceCfgCasOut, err := bucket.DefaultDataStore().WriteCas(docID, 0, 0, cas, newConfig, 0) + replaceCfgCasOut, err := bucket.DefaultDataStore().WriteCas(docID, 0, cas, newConfig, 0) if err != nil { if errors.As(err, &sgbucket.CasMismatchErr{}) { // retry on cas failure diff --git a/base/sg_cluster_cfg.go b/base/sg_cluster_cfg.go index 9e356f31d9..dcd8a20dd3 100644 --- a/base/sg_cluster_cfg.go +++ b/base/sg_cluster_cfg.go @@ -96,7 +96,7 @@ func (c *CfgSG) Set(cfgKey string, val []byte, cas uint64) (uint64, error) { } bucketKey := c.sgCfgBucketKey(cfgKey) - casOut, err := c.datastore.WriteCas(bucketKey, 0, 0, cas, val, 0) + casOut, err := c.datastore.WriteCas(bucketKey, 0, cas, val, 0) if IsCasMismatch(err) { InfofCtx(c.loggingCtx, KeyCluster, "cfg_sg: Set, ErrKeyExists key: %s, cas: %d", cfgKey, cas) diff --git a/db/attachment_compaction.go b/db/attachment_compaction.go index 35e97b2e85..a0c4c96ba8 100644 --- a/db/attachment_compaction.go +++ b/db/attachment_compaction.go @@ -116,7 +116,9 @@ func attachmentCompactMarkPhase(ctx context.Context, dataStore base.DataStore, c for attachmentName, attachmentDocID := range attachmentKeys { // Stamp the current compaction ID into the attachment xattr. This is performing the actual marking - _, err = dataStore.SetXattr(ctx, attachmentDocID, getCompactionIDSubDocPath(compactionID), []byte(strconv.Itoa(int(time.Now().Unix())))) + _, err = dataStore.SetXattrs(ctx, attachmentDocID, map[string][]byte{ + getCompactionIDSubDocPath(compactionID): []byte(strconv.Itoa(int(time.Now().Unix())))}, + ) // If an error occurs while stamping in that ID we need to fail this process and then the entire compaction // process. Otherwise, an attachment could end up getting erroneously deleted in the later sweep phase. @@ -469,7 +471,7 @@ func attachmentCompactCleanupPhase(ctx context.Context, dataStore base.DataStore // Note that if this operation fails with a cas mismatch we will fall through to the following per ID // delete. This can occur if another compact process ends up mutating / deleting the xattr. if len(compactIDSyncMap) == len(toDeleteCompactIDPaths) { - err = dataStore.RemoveXattr(ctx, docID, base.AttachmentCompactionXattrName, event.Cas) + err = dataStore.RemoveXattrs(ctx, docID, []string{base.AttachmentCompactionXattrName}, event.Cas) if err == nil { return true } @@ -481,7 +483,7 @@ func attachmentCompactCleanupPhase(ctx context.Context, dataStore base.DataStore } // If we only want to remove select compact IDs delete each one through a subdoc operation - err = dataStore.DeleteXattrs(ctx, docID, toDeleteCompactIDPaths...) + err = dataStore.DeleteSubDocPaths(ctx, docID, toDeleteCompactIDPaths...) 
if err != nil && !errors.Is(err, base.ErrXattrNotFound) { base.WarnfCtx(ctx, "[%s] Failed to delete compaction IDs %s for doc %s: %v", compactionLoggingID, strings.Join(toDeleteCompactIDPaths, ","), base.UD(docID), err) return true diff --git a/db/attachment_compaction_test.go b/db/attachment_compaction_test.go index 2d27e5fba1..9be94bb0fb 100644 --- a/db/attachment_compaction_test.go +++ b/db/attachment_compaction_test.go @@ -10,7 +10,6 @@ package db import ( "context" - "errors" "fmt" "strconv" "strings" @@ -67,10 +66,11 @@ func TestAttachmentMark(t *testing.T) { assert.Equal(t, int64(13), attachmentsMarked) for _, attDocKey := range attKeys { - var attachmentData Body - _, err = dataStore.GetXattr(ctx, attDocKey, base.AttachmentCompactionXattrName, &attachmentData) + xattrs, _, err := dataStore.GetXattrs(ctx, attDocKey, []string{base.AttachmentCompactionXattrName}) assert.NoError(t, err) - + require.Contains(t, xattrs, base.AttachmentCompactionXattrName) + var attachmentData Body + require.NoError(t, base.JSONUnmarshal(xattrs[base.AttachmentCompactionXattrName], &attachmentData)) compactIDSection, ok := attachmentData[CompactionIDKey] require.True(t, ok) require.NotNil(t, compactIDSection) @@ -97,7 +97,9 @@ func TestAttachmentSweep(t *testing.T) { makeMarkedDoc := func(docid string, compactID string) { err := dataStore.SetRaw(docid, 0, nil, []byte("{}")) assert.NoError(t, err) - _, err = dataStore.SetXattr(ctx, docid, getCompactionIDSubDocPath(compactID), []byte(strconv.Itoa(int(time.Now().Unix())))) + _, err = dataStore.SetXattrs(ctx, docid, map[string][]byte{ + getCompactionIDSubDocPath(compactID): []byte(strconv.Itoa(int(time.Now().Unix())))}, + ) assert.NoError(t, err) } @@ -144,7 +146,9 @@ func TestAttachmentCleanup(t *testing.T) { makeMarkedDoc := func(docid string, compactID string) { err := dataStore.SetRaw(docid, 0, nil, []byte("{}")) assert.NoError(t, err) - _, err = dataStore.SetXattr(ctx, docid, getCompactionIDSubDocPath(compactID), []byte(strconv.Itoa(int(time.Now().Unix())))) + _, err = dataStore.SetXattrs(ctx, docid, map[string][]byte{ + getCompactionIDSubDocPath(compactID): []byte(strconv.Itoa(int(time.Now().Unix())))}, + ) assert.NoError(t, err) } @@ -153,7 +157,9 @@ func TestAttachmentCleanup(t *testing.T) { assert.NoError(t, err) compactIDsJSON, err := base.JSONMarshal(compactIDs) assert.NoError(t, err) - _, err = dataStore.SetXattr(ctx, docid, base.AttachmentCompactionXattrName+"."+CompactionIDKey, compactIDsJSON) + _, err = dataStore.SetXattrs(ctx, docid, map[string][]byte{ + base.AttachmentCompactionXattrName + "." + CompactionIDKey: compactIDsJSON, + }) assert.NoError(t, err) } @@ -202,32 +208,35 @@ func TestAttachmentCleanup(t *testing.T) { assert.NoError(t, err) for _, docID := range singleMarkedAttIDs { - var xattr map[string]interface{} - _, err := dataStore.GetXattr(ctx, docID, base.AttachmentCompactionXattrName, &xattr) + _, _, err := dataStore.GetXattrs(ctx, docID, []string{base.AttachmentCompactionXattrName}) assert.Error(t, err) - assert.True(t, errors.Is(err, base.ErrXattrNotFound)) + require.ErrorIs(t, err, base.ErrXattrNotFound) } for _, docID := range recentMultiMarkedAttIDs { - var xattr map[string]interface{} - _, err := dataStore.GetXattr(ctx, docID, base.AttachmentCompactionXattrName+"."+CompactionIDKey, &xattr) - assert.NoError(t, err) - + xattrName := base.AttachmentCompactionXattrName + "." 
+ CompactionIDKey + xattrs, _, err := dataStore.GetXattrs(ctx, docID, []string{xattrName}) + require.NoError(t, err) + require.Contains(t, xattrs, xattrName) + var xattr map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[xattrName], &xattr)) assert.NotContains(t, xattr, t.Name()) assert.Contains(t, xattr, "rand") } for _, docID := range oldMultiMarkedAttIDs { - var xattr map[string]interface{} - _, err := dataStore.GetXattr(ctx, docID, CompactionIDKey, &xattr) + _, _, err := dataStore.GetXattrs(ctx, docID, []string{CompactionIDKey}) assert.Error(t, err) - assert.True(t, errors.Is(err, base.ErrXattrNotFound)) + require.ErrorIs(t, err, base.ErrXattrNotFound) } for _, docID := range oneRecentOneOldMultiMarkedAttIDs { - var xattr map[string]interface{} - _, err := dataStore.GetXattr(ctx, docID, base.AttachmentCompactionXattrName+"."+CompactionIDKey, &xattr) - assert.NoError(t, err) + xattrName := base.AttachmentCompactionXattrName + "." + CompactionIDKey + xattrs, _, err := dataStore.GetXattrs(ctx, docID, []string{xattrName}) + require.NoError(t, err) + + var xattr map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[xattrName], &xattr)) assert.NotContains(t, xattr, t.Name()) assert.NotContains(t, xattr, "old") @@ -255,7 +264,9 @@ func TestAttachmentCleanupRollback(t *testing.T) { makeMarkedDoc := func(docid string, compactID string) { err := dataStore.SetRaw(docid, 0, nil, []byte("{}")) assert.NoError(t, err) - _, err = dataStore.SetXattr(ctx, docid, getCompactionIDSubDocPath(compactID), []byte(strconv.Itoa(int(time.Now().Unix())))) + _, err = dataStore.SetXattrs(ctx, docid, map[string][]byte{ + getCompactionIDSubDocPath(compactID): []byte(strconv.Itoa(int(time.Now().Unix())))}, + ) assert.NoError(t, err) } @@ -269,9 +280,8 @@ func TestAttachmentCleanupRollback(t *testing.T) { // assert there are marked attachments to clean up for _, docID := range singleMarkedAttIDs { - var xattr map[string]interface{} - _, err := dataStore.GetXattr(ctx, docID, base.AttachmentCompactionXattrName, &xattr) - assert.NoError(t, err) + _, _, err := dataStore.GetXattrs(ctx, docID, []string{base.AttachmentCompactionXattrName}) + require.NoError(t, err) } bucket, err := base.AsGocbV2Bucket(testDb.Bucket) @@ -312,10 +322,8 @@ func TestAttachmentCleanupRollback(t *testing.T) { // assert that the marked attachments have been "cleaned up" for _, docID := range singleMarkedAttIDs { - var xattr map[string]interface{} - _, err := dataStore.GetXattr(ctx, docID, base.AttachmentCompactionXattrName, &xattr) - assert.Error(t, err) - assert.True(t, errors.Is(err, base.ErrXattrNotFound)) + _, _, err := dataStore.GetXattrs(ctx, docID, []string{base.AttachmentCompactionXattrName}) + require.ErrorIs(t, err, base.ErrXattrNotFound) } } @@ -372,9 +380,12 @@ func TestAttachmentMarkAndSweepAndCleanup(t *testing.T) { assert.Error(t, err) } else { assert.NoError(t, err) - var xattr map[string]interface{} - _, err = dataStore.GetXattr(ctx, attDocKey, base.AttachmentCompactionXattrName+"."+CompactionIDKey, &xattr) - assert.NoError(t, err) + xattrName := base.AttachmentCompactionXattrName + "." 
+ CompactionIDKey + xattrs, _, err := dataStore.GetXattrs(ctx, attDocKey, []string{xattrName}) + require.NoError(t, err) + require.Contains(t, xattrs, xattrName) + var xattr map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[xattrName], &xattr)) assert.Contains(t, xattr, t.Name()) } } @@ -387,10 +398,8 @@ func TestAttachmentMarkAndSweepAndCleanup(t *testing.T) { _, err = dataStore.Get(attDocKey, &back) if !strings.Contains(attDocKey, "unmarked") { assert.NoError(t, err) - var xattr map[string]interface{} - _, err = dataStore.GetXattr(ctx, attDocKey, base.AttachmentCompactionXattrName+"."+CompactionIDKey, &xattr) - assert.Error(t, err) - assert.True(t, errors.Is(err, base.ErrXattrNotFound)) + _, _, err := dataStore.GetXattrs(ctx, attDocKey, []string{base.AttachmentCompactionXattrName + "." + CompactionIDKey}) + require.ErrorIs(t, err, base.ErrXattrNotFound) } } } @@ -753,7 +762,7 @@ func CreateLegacyAttachmentDoc(t *testing.T, ctx context.Context, db *DatabaseCo _, _, err = db.Put(ctx, docID, unmarshalledBody) require.NoError(t, err) - _, err = db.dataStore.WriteUpdateWithXattr(ctx, docID, base.SyncXattrName, "", 0, nil, nil, func(doc []byte, xattr []byte, userXattr []byte, cas uint64) (updatedDoc []byte, updatedXattr []byte, deletedDoc bool, expiry *uint32, updatedSpec []sgbucket.MacroExpansionSpec, err error) { + _, err = db.dataStore.WriteUpdateWithXattrs(ctx, docID, []string{base.SyncXattrName}, 0, nil, nil, func(doc []byte, xattrs map[string][]byte, cas uint64) (updatedDoc sgbucket.UpdatedDoc, err error) { attachmentSyncData := map[string]interface{}{ attID: map[string]interface{}{ "content_type": "application/json", @@ -767,13 +776,18 @@ func CreateLegacyAttachmentDoc(t *testing.T, ctx context.Context, db *DatabaseCo attachmentSyncDataBytes, err := base.JSONMarshal(attachmentSyncData) require.NoError(t, err) + xattr := xattrs[base.SyncXattrName] + xattr, err = base.InjectJSONPropertiesFromBytes(xattr, base.KVPairBytes{ Key: "attachments", Val: attachmentSyncDataBytes, }) require.NoError(t, err) - return doc, xattr, false, nil, updatedSpec, nil + return sgbucket.UpdatedDoc{ + Doc: doc, + Xattrs: map[string][]byte{base.SyncXattrName: xattr}, + }, nil }) require.NoError(t, err) @@ -818,7 +832,10 @@ func createConflictingDocOneLeafHasAttachmentBodyMap(t *testing.T, docID string, "time_saved": "2021-10-14T16:38:11.359443+01:00" }` - _, err := db.dataStore.WriteWithXattr(base.TestCtx(t), docID, base.SyncXattrName, 0, 0, []byte(`{"Winning Rev": true}`), []byte(syncData), false, false, nil) + xattrs := map[string][]byte{ + base.SyncXattrName: []byte(syncData), + } + _, err := db.dataStore.WriteWithXattrs(base.TestCtx(t), docID, 0, 0, []byte(`{"Winning Rev": true}`), xattrs, nil) assert.NoError(t, err) attDocID := MakeAttachmentKey(AttVersion1, docID, attDigest) @@ -867,7 +884,10 @@ func createConflictingDocOneLeafHasAttachmentBodyKey(t *testing.T, docID string, "time_saved": "2021-10-21T12:48:39.549095+01:00" }` - _, err := db.dataStore.WriteWithXattr(base.TestCtx(t), docID, base.SyncXattrName, 0, 0, []byte(`{"Winning Rev": true}`), []byte(syncData), false, false, nil) + xattrs := map[string][]byte{ + base.SyncXattrName: []byte(syncData), + } + _, err := db.dataStore.WriteWithXattrs(base.TestCtx(t), docID, 0, 0, []byte(`{"Winning Rev": true}`), xattrs, nil) assert.NoError(t, err) attDocID := MakeAttachmentKey(AttVersion1, docID, attDigest) diff --git a/db/attachment_test.go b/db/attachment_test.go index 2cd2adbad5..33dd298782 100644 --- a/db/attachment_test.go +++ 
b/db/attachment_test.go @@ -800,15 +800,8 @@ func TestMigrateBodyAttachments(t *testing.T) { } }` - var bodyVal map[string]interface{} - var xattrVal map[string]interface{} - err = base.JSONUnmarshal([]byte(bodyPre25), &bodyVal) - assert.NoError(t, err) - err = base.JSONUnmarshal([]byte(syncData), &xattrVal) - assert.NoError(t, err) - if base.TestUseXattrs() { - _, err = collection.dataStore.WriteCasWithXattr(ctx, docKey, base.SyncXattrName, 0, 0, bodyVal, xattrVal, DefaultMutateInOpts()) + _, err = collection.dataStore.WriteWithXattrs(ctx, docKey, 0, 0, []byte(bodyPre25), map[string][]byte{base.SyncXattrName: []byte(syncData)}, DefaultMutateInOpts()) assert.NoError(t, err) } else { newBody, err := base.InjectJSONPropertiesFromBytes([]byte(bodyPre25), base.KVPairBytes{Key: base.SyncPropertyName, Val: []byte(syncData)}) @@ -1094,15 +1087,8 @@ func TestMigrateBodyAttachmentsMerge(t *testing.T) { } }` - var bodyVal map[string]interface{} - var xattrVal map[string]interface{} - err = base.JSONUnmarshal([]byte(bodyPre25), &bodyVal) - assert.NoError(t, err) - err = base.JSONUnmarshal([]byte(syncData), &xattrVal) - assert.NoError(t, err) - if base.TestUseXattrs() { - _, err = collection.dataStore.WriteCasWithXattr(ctx, docKey, base.SyncXattrName, 0, 0, bodyVal, xattrVal, DefaultMutateInOpts()) + _, err = collection.dataStore.WriteWithXattrs(ctx, docKey, 0, 0, []byte(bodyPre25), map[string][]byte{base.SyncXattrName: []byte(syncData)}, DefaultMutateInOpts()) assert.NoError(t, err) } else { newBody, err := base.InjectJSONPropertiesFromBytes([]byte(bodyPre25), base.KVPairBytes{Key: base.SyncPropertyName, Val: []byte(syncData)}) @@ -1268,15 +1254,8 @@ func TestMigrateBodyAttachmentsMergeConflicting(t *testing.T) { } }` - var bodyVal map[string]interface{} - var xattrVal map[string]interface{} - err = base.JSONUnmarshal([]byte(bodyPre25), &bodyVal) - assert.NoError(t, err) - err = base.JSONUnmarshal([]byte(syncData), &xattrVal) - assert.NoError(t, err) - if base.TestUseXattrs() { - _, err = collection.dataStore.WriteCasWithXattr(ctx, docKey, base.SyncXattrName, 0, 0, bodyVal, xattrVal, DefaultMutateInOpts()) + _, err = collection.dataStore.WriteWithXattrs(ctx, docKey, 0, 0, []byte(bodyPre25), map[string][]byte{base.SyncXattrName: []byte(syncData)}, DefaultMutateInOpts()) assert.NoError(t, err) } else { newBody, err := base.InjectJSONPropertiesFromBytes([]byte(bodyPre25), base.KVPairBytes{Key: base.SyncPropertyName, Val: []byte(syncData)}) diff --git a/db/background_mgr.go b/db/background_mgr.go index ce2b5021d3..e37e66545b 100644 --- a/db/background_mgr.go +++ b/db/background_mgr.go @@ -193,7 +193,7 @@ func (b *BackgroundManager) markStart(ctx context.Context) error { // If we're running in cluster aware 'mode' base the check off of a heartbeat doc if b.isClusterAware() { - _, err := b.clusterAwareOptions.metadataStore.WriteCas(b.clusterAwareOptions.HeartbeatDocID(), 0, BackgroundManagerHeartbeatExpirySecs, 0, []byte("{}"), sgbucket.Raw) + _, err := b.clusterAwareOptions.metadataStore.WriteCas(b.clusterAwareOptions.HeartbeatDocID(), BackgroundManagerHeartbeatExpirySecs, 0, []byte("{}"), sgbucket.Raw) if base.IsCasMismatch(err) { // Check if markStop has been called but not yet processed var status HeartbeatDoc diff --git a/db/crud.go b/db/crud.go index bca0835aa1..d2cc1383dc 100644 --- a/db/crud.go +++ b/db/crud.go @@ -72,7 +72,7 @@ func (c *DatabaseCollection) GetDocumentWithRaw(ctx context.Context, docid strin // If existing doc wasn't an SG Write, import the doc. 
if !isSgWrite { var importErr error - doc, importErr = c.OnDemandImportForGet(ctx, docid, rawBucketDoc.Body, rawBucketDoc.Xattr, rawBucketDoc.UserXattr, rawBucketDoc.Cas) + doc, importErr = c.OnDemandImportForGet(ctx, docid, rawBucketDoc.Body, rawBucketDoc.Xattrs[base.SyncXattrName], rawBucketDoc.Xattrs[c.userXattrKey()], rawBucketDoc.Cas) if importErr != nil { return nil, nil, importErr } @@ -116,13 +116,13 @@ func (c *DatabaseCollection) GetDocumentWithRaw(ctx context.Context, docid strin func (c *DatabaseCollection) GetDocWithXattr(ctx context.Context, key string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, rawBucketDoc *sgbucket.BucketDocument, err error) { rawBucketDoc = &sgbucket.BucketDocument{} var getErr error - rawBucketDoc.Cas, getErr = c.dataStore.GetWithXattr(ctx, key, base.SyncXattrName, c.userXattrKey(), &rawBucketDoc.Body, &rawBucketDoc.Xattr, &rawBucketDoc.UserXattr) + rawBucketDoc.Body, rawBucketDoc.Xattrs, rawBucketDoc.Cas, getErr = c.dataStore.GetWithXattrs(ctx, key, c.syncAndUserXattrKeys()) if getErr != nil { return nil, nil, getErr } var unmarshalErr error - doc, unmarshalErr = unmarshalDocumentWithXattr(ctx, key, rawBucketDoc.Body, rawBucketDoc.Xattr, rawBucketDoc.UserXattr, rawBucketDoc.Cas, unmarshalLevel) + doc, unmarshalErr = unmarshalDocumentWithXattr(ctx, key, rawBucketDoc.Body, rawBucketDoc.Xattrs[base.SyncXattrName], rawBucketDoc.Xattrs[c.userXattrKey()], rawBucketDoc.Cas, unmarshalLevel) if unmarshalErr != nil { return nil, nil, unmarshalErr } @@ -142,12 +142,14 @@ func (c *DatabaseCollection) GetDocSyncData(ctx context.Context, docid string) ( if c.UseXattrs() { // Retrieve doc and xattr from bucket, unmarshal only xattr. // Triggers on-demand import when document xattr doesn't match cas. - var rawDoc, rawXattr, rawUserXattr []byte - cas, getErr := c.dataStore.GetWithXattr(ctx, key, base.SyncXattrName, c.userXattrKey(), &rawDoc, &rawXattr, &rawUserXattr) + rawDoc, xattrs, cas, getErr := c.dataStore.GetWithXattrs(ctx, key, c.syncAndUserXattrKeys()) if getErr != nil { return emptySyncData, getErr } + rawXattr := xattrs[base.SyncXattrName] + rawUserXattr := xattrs[c.userXattrKey()] + // Unmarshal xattr only doc, unmarshalErr := unmarshalDocumentWithXattr(ctx, docid, nil, rawXattr, rawUserXattr, cas, DocUnmarshalSync) if unmarshalErr != nil { @@ -195,11 +197,12 @@ func (c *DatabaseCollection) GetDocSyncData(ctx context.Context, docid string) ( // need to read the doc body from the bucket. func (db *DatabaseCollection) GetDocSyncDataNoImport(ctx context.Context, docid string, level DocumentUnmarshalLevel) (syncData SyncData, err error) { if db.UseXattrs() { + var xattrs map[string][]byte var cas uint64 - var xattrValue []byte - if cas, err = db.dataStore.GetXattr(ctx, docid, base.SyncXattrName, &xattrValue); err == nil { + xattrs, cas, err = db.dataStore.GetXattrs(ctx, docid, []string{base.SyncXattrName}) + if err == nil { var doc *Document - doc, err = unmarshalDocumentWithXattr(ctx, docid, nil, xattrValue, nil, cas, level) + doc, err = unmarshalDocumentWithXattr(ctx, docid, nil, xattrs[base.SyncXattrName], nil, cas, level) if err == nil { syncData = doc.SyncData } @@ -1899,7 +1902,7 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do // If we can't find sync metadata in the document body, check for upgrade. 
If upgrade, retry write using WriteUpdateWithXattr if err != nil && err.Error() == "409 Not imported" { _, bucketDocument := db.checkForUpgrade(ctx, key, DocUnmarshalAll) - if bucketDocument != nil && bucketDocument.Xattr != nil { + if bucketDocument != nil && bucketDocument.Xattrs[base.SyncXattrName] != nil { existingDoc = bucketDocument upgradeInProgress = true } @@ -1917,8 +1920,10 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do if expiry != nil { initialExpiry = *expiry } - casOut, err = db.dataStore.WriteUpdateWithXattr(ctx, key, base.SyncXattrName, db.userXattrKey(), initialExpiry, existingDoc, opts, func(currentValue []byte, currentXattr []byte, currentUserXattr []byte, cas uint64) (raw []byte, rawXattr []byte, deleteDoc bool, syncFuncExpiry *uint32, updatedSpec []sgbucket.MacroExpansionSpec, err error) { + casOut, err = db.dataStore.WriteUpdateWithXattrs(ctx, key, db.syncAndUserXattrKeys(), initialExpiry, existingDoc, opts, func(currentValue []byte, currentXattrs map[string][]byte, cas uint64) (updatedDoc sgbucket.UpdatedDoc, err error) { // Be careful: this block can be invoked multiple times if there are races! + currentXattr := currentXattrs[base.SyncXattrName] + currentUserXattr := currentXattrs[db.userXattrKey()] if doc, err = unmarshalDocumentWithXattr(ctx, docid, currentValue, currentXattr, currentUserXattr, cas, DocUnmarshalAll); err != nil { return } @@ -1936,12 +1941,12 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do } docExists := currentValue != nil - syncFuncExpiry, newRevID, storedDoc, oldBodyJSON, unusedSequences, changedAccessPrincipals, changedRoleAccessUsers, createNewRevIDSkipped, err = db.documentUpdateFunc(ctx, docExists, doc, allowImport, docSequence, unusedSequences, callback, expiry) + updatedDoc.Expiry, newRevID, storedDoc, oldBodyJSON, unusedSequences, changedAccessPrincipals, changedRoleAccessUsers, createNewRevIDSkipped, err = db.documentUpdateFunc(ctx, docExists, doc, allowImport, docSequence, unusedSequences, callback, expiry) if err != nil { return } // If importing and the sync function has modified the expiry, allow sgbucket.MutateInOptions to modify the expiry - if db.dataStore.IsSupported(sgbucket.BucketStoreFeaturePreserveExpiry) && syncFuncExpiry != nil { + if db.dataStore.IsSupported(sgbucket.BucketStoreFeaturePreserveExpiry) && updatedDoc.Expiry != nil { opts.PreserveExpiry = false } docSequence = doc.Sequence @@ -1952,12 +1957,14 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do return } - deleteDoc = currentRevFromHistory.Deleted + updatedDoc.IsTombstone = currentRevFromHistory.Deleted // Return the new raw document value for the bucket to store. 
doc.SetCrc32cUserXattrHash() - raw, rawXattr, err = doc.MarshalWithXattr() - docBytes = len(raw) + var rawXattr []byte + updatedDoc.Doc, rawXattr, err = doc.MarshalWithXattr() + docBytes = len(updatedDoc.Doc) + updatedDoc.Xattrs = map[string][]byte{base.SyncXattrName: rawXattr} // Warn when sync data is larger than a configured threshold if db.unsupportedOptions() != nil && db.unsupportedOptions().WarningThresholds != nil { @@ -1976,7 +1983,7 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do } base.DebugfCtx(ctx, base.KeyCRUD, "Saving doc (seq: #%d, id: %v rev: %v)", doc.Sequence, base.UD(doc.ID), doc.CurrentRev) - return raw, rawXattr, deleteDoc, syncFuncExpiry, updatedSpec, err + return updatedDoc, err }) if err != nil { if err == base.ErrDocumentMigrated { @@ -2285,7 +2292,7 @@ func (db *DatabaseCollectionWithUser) Purge(ctx context.Context, key string) err } if db.UseXattrs() { - return db.dataStore.DeleteWithXattr(ctx, key, base.SyncXattrName) + return db.dataStore.DeleteWithXattrs(ctx, key, []string{base.SyncXattrName}) } else { return db.dataStore.Delete(key) } @@ -2517,7 +2524,7 @@ func (db *DatabaseCollectionWithUser) RevDiff(ctx context.Context, docid string, doc, err := db.GetDocSyncDataNoImport(ctx, docid, DocUnmarshalHistory) if err != nil { - if !base.IsDocNotFoundError(err) && err != base.ErrXattrNotFound { + if !base.IsDocNotFoundError(err) && !errors.Is(err, base.ErrXattrNotFound) { base.WarnfCtx(ctx, "RevDiff(%q) --> %T %v", base.UD(docid), err, err) } missing = revids @@ -2579,7 +2586,7 @@ func (db *DatabaseCollectionWithUser) CheckProposedRev(ctx context.Context, doci } doc, err := db.GetDocSyncDataNoImport(ctx, docid, level) if err != nil { - if !base.IsDocNotFoundError(err) && err != base.ErrXattrNotFound { + if !base.IsDocNotFoundError(err) && !errors.Is(err, base.ErrXattrNotFound) { base.WarnfCtx(ctx, "CheckProposedRev(%q) --> %T %v", base.UD(docid), err, err) return ProposedRev_Error, "" } diff --git a/db/crud_test.go b/db/crud_test.go index 981e7a3d03..93d2df6744 100644 --- a/db/crud_test.go +++ b/db/crud_test.go @@ -35,8 +35,11 @@ type treeMeta struct { func getRevTreeList(ctx context.Context, dataStore sgbucket.DataStore, key string, useXattrs bool) (revTreeList, error) { switch useXattrs { case true: - var rawDoc, rawXattr []byte - _, getErr := dataStore.GetWithXattr(ctx, key, base.SyncXattrName, "", &rawDoc, &rawXattr, nil) + _, xattrs, _, getErr := dataStore.GetWithXattrs(ctx, key, []string{base.SyncXattrName}) + rawXattr, ok := xattrs[base.SyncXattrName] + if !ok { + return revTreeList{}, base.ErrXattrNotFound + } if getErr != nil { return revTreeList{}, getErr } @@ -1625,10 +1628,12 @@ func TestPutStampClusterUUID(t *testing.T) { require.NoError(t, err) require.Len(t, doc.ClusterUUID, 32) - var xattr map[string]string - _, err = collection.dataStore.GetWithXattr(ctx, key, base.SyncXattrName, "", &body, &xattr, nil) + _, xattrs, _, err := collection.dataStore.GetWithXattrs(ctx, key, []string{base.SyncXattrName}) require.NoError(t, err) - require.Len(t, xattr["cluster_uuid"], 32) + require.Contains(t, xattrs, base.SyncXattrName) + var xattr map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &xattr)) + require.Len(t, xattr["cluster_uuid"].(string), 32) } // TestAssignSequenceReleaseLoop repros conditions seen in CBG-3516 (where each sequence between nextSequence and docSequence has an unusedSeq doc) @@ -1660,7 +1665,7 @@ func TestAssignSequenceReleaseLoop(t *testing.T) { err = json.Unmarshal(sd, 
&newSyncData) require.NoError(t, err) newSyncData["sequence"] = doc.SyncData.Sequence + otherClusterSequenceOffset - _, err = collection.dataStore.UpdateXattr(ctx, doc.ID, base.SyncXattrName, 0, doc.Cas, newSyncData, DefaultMutateInOpts()) + _, err = collection.dataStore.UpdateXattrs(ctx, doc.ID, 0, doc.Cas, map[string][]byte{base.SyncXattrName: base.MustJSONMarshal(t, newSyncData)}, DefaultMutateInOpts()) require.NoError(t, err) _, doc, err = collection.Put(ctx, "doc1", Body{"foo": "buzz", BodyRev: rev}) diff --git a/db/database.go b/db/database.go index 57e8abd711..ad337cbdb0 100644 --- a/db/database.go +++ b/db/database.go @@ -1795,37 +1795,41 @@ func (db *DatabaseCollectionWithUser) resyncDocument(ctx context.Context, docid, db.dbStats().ServerlessStats.ImportProcessCompute.Add(stat) }() if db.UseXattrs() { - writeUpdateFunc := func(currentValue []byte, currentXattr []byte, currentUserXattr []byte, cas uint64) ( - raw []byte, rawXattr []byte, deleteDoc bool, expiry *uint32, updatedSpec []sgbucket.MacroExpansionSpec, err error) { + writeUpdateFunc := func(currentValue []byte, currentXattrs map[string][]byte, cas uint64) (sgbucket.UpdatedDoc, error) { // There's no scenario where a doc should from non-deleted to deleted during UpdateAllDocChannels processing, // so deleteDoc is always returned as false. if currentValue == nil || len(currentValue) == 0 { - return nil, nil, deleteDoc, nil, nil, base.ErrUpdateCancel + return sgbucket.UpdatedDoc{}, base.ErrUpdateCancel } - doc, err := unmarshalDocumentWithXattr(ctx, docid, currentValue, currentXattr, currentUserXattr, cas, DocUnmarshalAll) + doc, err := unmarshalDocumentWithXattr(ctx, docid, currentValue, currentXattrs[base.SyncXattrName], currentXattrs[db.userXattrKey()], cas, DocUnmarshalAll) if err != nil { - return nil, nil, deleteDoc, nil, nil, err + return sgbucket.UpdatedDoc{}, err } updatedDoc, shouldUpdate, updatedExpiry, updatedHighSeq, unusedSequences, err = db.getResyncedDocument(ctx, doc, regenerateSequences, unusedSequences) if err != nil { - return nil, nil, deleteDoc, nil, nil, err + return sgbucket.UpdatedDoc{}, err } - if shouldUpdate { - base.InfofCtx(ctx, base.KeyAccess, "Saving updated channels and access grants of %q", base.UD(docid)) - if updatedExpiry != nil { - updatedDoc.UpdateExpiry(*updatedExpiry) - } - doc.SetCrc32cUserXattrHash() - raw, rawXattr, err = updatedDoc.MarshalWithXattr() - return raw, rawXattr, deleteDoc, updatedExpiry, updatedSpec, err - } else { - return nil, nil, deleteDoc, nil, nil, base.ErrUpdateCancel + if !shouldUpdate { + return sgbucket.UpdatedDoc{}, base.ErrUpdateCancel + } + base.InfofCtx(ctx, base.KeyAccess, "Saving updated channels and access grants of %q", base.UD(docid)) + if updatedExpiry != nil { + updatedDoc.UpdateExpiry(*updatedExpiry) } + doc.SetCrc32cUserXattrHash() + raw, rawXattr, err := updatedDoc.MarshalWithXattr() + return sgbucket.UpdatedDoc{ + Doc: raw, + Xattrs: map[string][]byte{ + base.SyncXattrName: rawXattr, + }, + Expiry: updatedExpiry, + }, err } opts := &sgbucket.MutateInOptions{ MacroExpansion: macroExpandSpec(base.SyncXattrName), } - _, err = db.dataStore.WriteUpdateWithXattr(ctx, key, base.SyncXattrName, db.userXattrKey(), 0, nil, opts, writeUpdateFunc) + _, err = db.dataStore.WriteUpdateWithXattrs(ctx, key, db.syncAndUserXattrKeys(), 0, nil, opts, writeUpdateFunc) } else { _, err = db.dataStore.Update(key, 0, func(currentValue []byte) ([]byte, *uint32, bool, error) { // Be careful: this block can be invoked multiple times if there are races! 
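Note: the resync hunk above is representative of the overall migration in this patch — callbacks passed to WriteUpdateWithXattrs now receive the full map of requested xattrs and return a single sgbucket.UpdatedDoc instead of the old multi-value return. The sketch below is illustrative only and not part of the patch; it assumes the document already has a _sync xattr, and the helper name touchSyncXattr and the "example_last_processed" property it writes are hypothetical.

```go
package example

import (
	"context"
	"time"

	sgbucket "github.com/couchbase/sg-bucket"

	"github.com/couchbase/sync_gateway/base"
)

// touchSyncXattr is a hypothetical helper showing the new callback shape:
// read the current sync xattr from the xattrs map, mutate it, and hand the
// result back via sgbucket.UpdatedDoc.
func touchSyncXattr(ctx context.Context, dataStore base.DataStore, key string) (uint64, error) {
	callback := func(current []byte, xattrs map[string][]byte, cas uint64) (sgbucket.UpdatedDoc, error) {
		if len(current) == 0 {
			// No body to update - cancel the write, as the resync callback does.
			return sgbucket.UpdatedDoc{}, base.ErrUpdateCancel
		}
		// Add an illustrative property to the existing sync xattr.
		updatedXattr, err := base.InjectJSONPropertiesFromBytes(xattrs[base.SyncXattrName], base.KVPairBytes{
			Key: "example_last_processed",
			Val: []byte(`"` + time.Now().UTC().Format(time.RFC3339) + `"`),
		})
		if err != nil {
			return sgbucket.UpdatedDoc{}, err
		}
		// Body is returned unchanged; only the sync xattr is rewritten.
		return sgbucket.UpdatedDoc{
			Doc:    current,
			Xattrs: map[string][]byte{base.SyncXattrName: updatedXattr},
		}, nil
	}
	return dataStore.WriteUpdateWithXattrs(ctx, key, []string{base.SyncXattrName}, 0, nil, nil, callback)
}
```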
diff --git a/db/database_collection.go b/db/database_collection.go index 61452206a6..75f4248ca2 100644 --- a/db/database_collection.go +++ b/db/database_collection.go @@ -257,6 +257,16 @@ func (c *DatabaseCollection) unsupportedOptions() *UnsupportedOptions { return c.dbCtx.Options.UnsupportedOptions } +// syncAndUserXattrKeys returns the xattr keys for the user and sync xattrs. +func (c *DatabaseCollection) syncAndUserXattrKeys() []string { + xattrKeys := []string{base.SyncXattrName} + userXattrKey := c.userXattrKey() + if userXattrKey != "" { + xattrKeys = append(xattrKeys, userXattrKey) + } + return xattrKeys +} + // Returns the xattr key that will be accessible from the sync function. This is controlled at a database level. func (c *DatabaseCollection) userXattrKey() string { return c.dbCtx.Options.UserXattrKey diff --git a/db/database_test.go b/db/database_test.go index 54b910a485..00c42e8a68 100644 --- a/db/database_test.go +++ b/db/database_test.go @@ -2422,7 +2422,6 @@ func TestRepairUnorderedRecentSequences(t *testing.T) { } func TestDeleteWithNoTombstoneCreationSupport(t *testing.T) { - if !base.TestUseXattrs() { t.Skip("Xattrs required") } @@ -2447,12 +2446,15 @@ func TestDeleteWithNoTombstoneCreationSupport(t *testing.T) { var doc Body var xattr Body + var xattrs map[string][]byte // Ensure document has been added - waitAndAssertCondition(t, func() bool { - _, err = collection.dataStore.GetWithXattr(ctx, "doc", "_sync", "", &doc, &xattr, nil) - return err == nil - }) + assert.EventuallyWithT(t, func(c *assert.CollectT) { + _, xattrs, _, err = collection.dataStore.GetWithXattrs(ctx, "doc", []string{base.SyncXattrName}) + assert.NoError(c, err) + }, time.Second*5, time.Millisecond*100) + require.Contains(t, xattrs, base.SyncXattrName) + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &xattr)) assert.Equal(t, int64(1), db.DbStats.SharedBucketImport().ImportCount.Value()) assert.Nil(t, doc) diff --git a/db/import.go b/db/import.go index bf7bb8e073..45855c192b 100644 --- a/db/import.go +++ b/db/import.go @@ -52,10 +52,14 @@ func (db *DatabaseCollectionWithUser) ImportDocRaw(ctx context.Context, docid st } existingBucketDoc := &sgbucket.BucketDocument{ - Body: value, - Xattr: xattrValue, - UserXattr: userXattrValue, - Cas: cas, + Body: value, + Xattrs: map[string][]byte{ + base.SyncXattrName: xattrValue, + }, + Cas: cas, + } + if db.userXattrKey() != "" { + existingBucketDoc.Xattrs[db.userXattrKey()] = userXattrValue } return db.importDoc(ctx, docid, body, expiry, isDelete, existingBucketDoc, mode) @@ -71,20 +75,23 @@ func (db *DatabaseCollectionWithUser) ImportDoc(ctx context.Context, docid strin // TODO: We need to remarshal the existing doc into bytes. Less performance overhead than the previous bucket op to get the value in WriteUpdateWithXattr, // but should refactor import processing to support using the already-unmarshalled doc. 
existingBucketDoc := &sgbucket.BucketDocument{ - Cas: existingDoc.Cas, - UserXattr: existingDoc.rawUserXattr, + Cas: existingDoc.Cas, + Xattrs: make(map[string][]byte), + } + if db.userXattrKey() != "" { + existingBucketDoc.Xattrs[db.userXattrKey()] = existingDoc.rawUserXattr } // If we marked this as having inline Sync Data ensure that the existingBucketDoc we pass to importDoc has syncData // in the body so we can detect this and perform the migrate if existingDoc.inlineSyncData { existingBucketDoc.Body, err = existingDoc.MarshalJSON() - existingBucketDoc.Xattr = nil + existingBucketDoc.Xattrs = nil } else { if existingDoc.Deleted { - existingBucketDoc.Xattr, err = base.JSONMarshal(existingDoc.SyncData) + existingBucketDoc.Xattrs[base.SyncXattrName], err = base.JSONMarshal(existingDoc.SyncData) } else { - existingBucketDoc.Body, existingBucketDoc.Xattr, err = existingDoc.MarshalWithXattr() + existingBucketDoc.Body, existingBucketDoc.Xattrs[base.SyncXattrName], err = existingDoc.MarshalWithXattr() } } @@ -270,7 +277,7 @@ func (db *DatabaseCollectionWithUser) importDoc(ctx context.Context, docid strin } } - shouldGenerateNewRev := bodyChanged || len(existingDoc.UserXattr) == 0 + shouldGenerateNewRev := bodyChanged || len(existingDoc.Xattrs[db.userXattrKey()]) == 0 // If the body has changed then the document has been updated and we should generate a new revision. Otherwise // the import was triggered by a user xattr mutation and therefore should not generate a new revision. @@ -386,9 +393,18 @@ func (db *DatabaseCollectionWithUser) migrateMetadata(ctx context.Context, docid } // Use WriteWithXattr to handle both normal migration and tombstone migration (xattr creation, body delete) - isDelete := doc.hasFlag(channels.Deleted) - deleteBody := isDelete && len(existingDoc.Body) > 0 - casOut, writeErr := db.dataStore.WriteWithXattr(ctx, docid, base.SyncXattrName, existingDoc.Expiry, existingDoc.Cas, value, xattrValue, isDelete, deleteBody, opts) + xattrs := map[string][]byte{ + base.SyncXattrName: xattrValue, + } + var casOut uint64 + var writeErr error + if doc.hasFlag(channels.Deleted) { + // Migration of tombstone. 
Delete body, update xattrs + casOut, writeErr = db.dataStore.WriteTombstoneWithXattrs(ctx, docid, existingDoc.Expiry, existingDoc.Cas, xattrs, true, opts) + } else { + // Non-tombstone - update doc and xattrs + casOut, writeErr = db.dataStore.WriteWithXattrs(ctx, docid, existingDoc.Expiry, existingDoc.Cas, value, xattrs, opts) + } if writeErr == nil { doc.Cas = casOut base.InfofCtx(ctx, base.KeyMigrate, "Successfully migrated doc %q", base.UD(docid)) diff --git a/db/import_test.go b/db/import_test.go index 4efbf84ca0..64ed50930a 100644 --- a/db/import_test.go +++ b/db/import_test.go @@ -247,15 +247,13 @@ func TestImportWithCasFailureUpdate(t *testing.T) { collection := GetSingleDatabaseCollectionWithUser(t, db) cas, _ := collection.dataStore.Get(key, &body) - _, err := collection.dataStore.WriteCas(key, 0, 0, cas, []byte(valStr), sgbucket.Raw) + _, err := collection.dataStore.WriteCas(key, 0, cas, []byte(valStr), sgbucket.Raw) assert.NoError(t, err) } } syncDataInXattrCallback := func(key string) { if runOnce { - var body map[string]interface{} - var xattr map[string]interface{} runOnce = false valStr := `{ @@ -288,9 +286,10 @@ func TestImportWithCasFailureUpdate(t *testing.T) { }` collection := GetSingleDatabaseCollectionWithUser(t, db) - cas, _ := collection.dataStore.GetWithXattr(ctx, key, base.SyncXattrName, "", &body, &xattr, nil) - _, err := collection.dataStore.WriteCasWithXattr(ctx, key, base.SyncXattrName, 0, cas, []byte(valStr), []byte(xattrStr), DefaultMutateInOpts()) - assert.NoError(t, err) + + _, _, cas, _ := collection.dataStore.GetWithXattrs(ctx, key, []string{base.SyncXattrName}) + _, err := collection.dataStore.WriteWithXattrs(ctx, key, 0, cas, []byte(valStr), map[string][]byte{base.SyncXattrName: []byte(xattrStr)}, DefaultMutateInOpts()) + require.NoError(t, err) } } @@ -338,13 +337,15 @@ func TestImportWithCasFailureUpdate(t *testing.T) { // Check document has the rev and new body var bodyOut map[string]interface{} - var xattrOut map[string]interface{} - - _, err = collection.dataStore.GetWithXattr(ctx, testcase.docname, base.SyncXattrName, "", &bodyOut, &xattrOut, nil) + rawDoc, xattrs, _, err := collection.dataStore.GetWithXattrs(ctx, testcase.docname, []string{base.SyncXattrName}) assert.NoError(t, err) - assert.Equal(t, "2-abc", xattrOut["rev"]) + require.Contains(t, xattrs, base.SyncXattrName) + var xattrOut map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &xattrOut)) + require.NoError(t, base.JSONUnmarshal(rawDoc, &bodyOut)) assert.Equal(t, "val2", bodyOut["field2"]) + assert.Equal(t, "2-abc", xattrOut["rev"]) }) } } @@ -424,9 +425,11 @@ func TestImportNullDocRaw(t *testing.T) { } func assertXattrSyncMetaRevGeneration(t *testing.T, dataStore base.DataStore, key string, expectedRevGeneration int) { - xattr := map[string]interface{}{} - _, err := dataStore.GetWithXattr(base.TestCtx(t), key, base.SyncXattrName, "", nil, &xattr, nil) + _, xattrs, _, err := dataStore.GetWithXattrs(base.TestCtx(t), key, []string{base.SyncXattrName}) assert.NoError(t, err, "Error Getting Xattr") + require.Contains(t, xattrs, base.SyncXattrName) + var xattr map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &xattr)) revision, ok := xattr["rev"] assert.True(t, ok) generation, _ := ParseRevID(base.TestCtx(t), revision.(string)) @@ -523,10 +526,12 @@ func TestImportStampClusterUUID(t *testing.T) { require.Len(t, importedDoc.ClusterUUID, 32) } - var xattr map[string]string - _, err = collection.dataStore.GetWithXattr(ctx, key, 
base.SyncXattrName, "", &body, &xattr, nil) + xattrs, _, err := collection.dataStore.GetXattrs(ctx, key, []string{base.SyncXattrName}) require.NoError(t, err) - require.Len(t, xattr["cluster_uuid"], 32) + require.Contains(t, xattrs, base.SyncXattrName) + var xattr map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &xattr)) + require.Len(t, xattr["cluster_uuid"].(string), 32) } // TestImporNonZeroStart makes sure docs written before sync gateway start get imported diff --git a/db/util_testing.go b/db/util_testing.go index 245fce4aec..336943206e 100644 --- a/db/util_testing.go +++ b/db/util_testing.go @@ -244,7 +244,7 @@ FROM ` + base.KeyspaceQueryToken + ` AS ks USE INDEX (sg_allDocs_x1)` // First, attempt to purge. var purgeErr error if base.TestUseXattrs() { - purgeErr = dataStore.DeleteWithXattr(ctx, row.Id, base.SyncXattrName) + purgeErr = dataStore.DeleteWithXattrs(ctx, row.Id, []string{base.SyncXattrName}) } else { purgeErr = dataStore.Delete(row.Id) } diff --git a/go.mod b/go.mod index d698aea8fd..c708f6cc2a 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,10 @@ require ( github.com/couchbase/gocbcore/v10 v10.3.1 github.com/couchbase/gomemcached v0.2.1 github.com/couchbase/goutils v0.1.2 - github.com/couchbase/sg-bucket v0.0.0-20240206113827-752ae6de6855 + github.com/couchbase/sg-bucket v0.0.0-20240326230241-0b197e169b27 github.com/couchbaselabs/go-fleecedelta v0.0.0-20220909152808-6d09efa7a338 github.com/couchbaselabs/gocbconnstr v1.0.5 - github.com/couchbaselabs/rosmar v0.0.0-20231220172938-669667777223 + github.com/couchbaselabs/rosmar v0.0.0-20240326232309-04dfb3337b60 github.com/elastic/gosigar v0.14.2 github.com/felixge/fgprof v0.9.3 github.com/google/uuid v1.6.0 @@ -34,7 +34,7 @@ require ( github.com/samuel/go-metrics v0.0.0-20150819231912-7ccf3e0e1fb1 github.com/shirou/gopsutil v3.21.11+incompatible github.com/shirou/gopsutil/v3 v3.23.10 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/net v0.22.0 @@ -66,7 +66,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/klauspost/compress v1.17.3 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mattn/go-sqlite3 v1.14.17 // indirect + github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -78,7 +78,7 @@ require ( github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect diff --git a/go.sum b/go.sum index 8ec20c642e..79c58203b2 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,8 @@ github.com/couchbase/goprotostellar v1.0.1 h1:mtDVYTgnnDSQ3t7mQRG6jl/tOXKOuuFM9P github.com/couchbase/goprotostellar v1.0.1/go.mod h1:gs1eioLVOHETTFWxDY4v7Q/kRPMgqmX6t/TPcI429ls= github.com/couchbase/goutils v0.1.2 h1:gWr8B6XNWPIhfalHNog3qQKfGiYyh4K4VhO3P2o9BCs= github.com/couchbase/goutils v0.1.2/go.mod h1:h89Ek/tiOxxqjz30nPPlwZdQbdB8BwgnuBxeoUe/ViE= -github.com/couchbase/sg-bucket 
v0.0.0-20240206113827-752ae6de6855 h1:mATYI89sJnZUL+99NLlgYcjZAo8hsOdUUYfean04IFQ= -github.com/couchbase/sg-bucket v0.0.0-20240206113827-752ae6de6855/go.mod h1:hy6J0RXx/Ry+5EiI8VVMetsVfBXQq5/djQLbvfRau0k= +github.com/couchbase/sg-bucket v0.0.0-20240326230241-0b197e169b27 h1:FGNvJsAQk6JZzuVXvoLXcoSQzOnQxWkywzYJFQqzXEg= +github.com/couchbase/sg-bucket v0.0.0-20240326230241-0b197e169b27/go.mod h1:5me3TJLTPfR0s3aMJZcPoTu5FT8oelaINz5l7Q3cApE= github.com/couchbase/tools-common/cloud v1.0.0 h1:SQZIccXoedbrThehc/r9BJbpi/JhwJ8X00PDjZ2gEBE= github.com/couchbase/tools-common/cloud v1.0.0/go.mod h1:6KVlRpbcnDWrvickUJ+xpqCWx1vgYYlEli/zL4xmZAg= github.com/couchbase/tools-common/fs v1.0.0 h1:HFA4xCF/r3BtZShFJUxzVvGuXtDkqGnaPzYJP3Kp1mw= @@ -72,8 +72,8 @@ github.com/couchbaselabs/gocbconnstr v1.0.5 h1:e0JokB5qbcz7rfnxEhNRTKz8q1svoRvDo github.com/couchbaselabs/gocbconnstr v1.0.5/go.mod h1:KV3fnIKMi8/AzX0O9zOrO9rofEqrRF1d2rG7qqjxC7o= github.com/couchbaselabs/gocbconnstr/v2 v2.0.0-20230515165046-68b522a21131 h1:2EAfFswAfgYn3a05DVcegiw6DgMgn1Mv5eGz6IHt1Cw= github.com/couchbaselabs/gocbconnstr/v2 v2.0.0-20230515165046-68b522a21131/go.mod h1:o7T431UOfFVHDNvMBUmUxpHnhivwv7BziUao/nMl81E= -github.com/couchbaselabs/rosmar v0.0.0-20231220172938-669667777223 h1:o56DcFusNJsIiCbnCdCLk9lNQmFDZVTLUQzmLQkJncg= -github.com/couchbaselabs/rosmar v0.0.0-20231220172938-669667777223/go.mod h1:+AjMZkAOGCeQRLjIBwehXKyWsNCPFrMKYz6lIaZ1idc= +github.com/couchbaselabs/rosmar v0.0.0-20240326232309-04dfb3337b60 h1:w9E8CEvQia8BPA+2Ai6dJh64wYTmxNUrXNPkKhtPpGw= +github.com/couchbaselabs/rosmar v0.0.0-20240326232309-04dfb3337b60/go.mod h1:MnlZ8BXE9Z7rUQEyb069P/6E9+YVkUxcqW5cmN23h0I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -145,8 +145,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= -github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -199,8 +199,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -209,8 +210,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= diff --git a/rest/adminapitest/admin_api_test.go b/rest/adminapitest/admin_api_test.go index 6dfa8def03..6f8b1e65da 100644 --- a/rest/adminapitest/admin_api_test.go +++ b/rest/adminapitest/admin_api_test.go @@ -3184,6 +3184,7 @@ func TestConfigsIncludeDefaults(t *testing.T) { func TestLegacyCredentialInheritance(t *testing.T) { rest.RequireBucketSpecificCredentials(t) base.SetUpTestLogging(t, base.LevelInfo, base.KeyHTTP) + base.SkipInvalidAuthForCouchbaseServer76(t) ctx := base.TestCtx(t) config := rest.BootstrapStartupConfigForTest(t) diff --git a/rest/api_test.go b/rest/api_test.go index 8f021d478a..3d92cbdcba 100644 --- a/rest/api_test.go +++ b/rest/api_test.go @@ -1658,9 +1658,11 @@ func TestWriteTombstonedDocUsingXattrs(t *testing.T) { } // Fetch the xattr and make sure it contains the above value - var retrievedVal map[string]interface{} - var retrievedXattr map[string]interface{} - _, err = rt.GetSingleDataStore().GetWithXattr(rt.Context(), "-21SK00U-ujxUO9fU2HezxL", base.SyncXattrName, "", &retrievedVal, &retrievedXattr, nil) + xattrs, _, err := rt.GetSingleDataStore().GetXattrs(rt.Context(), "-21SK00U-ujxUO9fU2HezxL", []string{base.SyncXattrName}) + require.NoError(t, err) + require.Contains(t, xattrs, base.SyncXattrName) + var retrievedXattr map[string]any + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &retrievedXattr)) assert.NoError(t, err, "Unexpected Error") assert.Equal(t, "2-466a1fab90a810dc0a63565b70680e4e", retrievedXattr["rev"]) @@ -2480,7 +2482,7 @@ func TestTombstonedBulkDocsWithExistingTombstone(t *testing.T) { // Create the document to trigger cas failure value := make(map[string]interface{}) value["foo"] = "bar" - insCas, err := bucket.DefaultDataStore().WriteCas(t.Name(), 0, 0, 0, value, 0) + insCas, err := bucket.DefaultDataStore().WriteCas(t.Name(), 0, 0, value, 0) require.NoError(t, err) // Delete document diff --git a/rest/attachmentcompactiontest/attachment_compaction_api_test.go b/rest/attachmentcompactiontest/attachment_compaction_api_test.go index d1f165fd70..7bff896f66 100644 --- 
a/rest/attachmentcompactiontest/attachment_compaction_api_test.go +++ b/rest/attachmentcompactiontest/attachment_compaction_api_test.go @@ -403,7 +403,7 @@ func TestAttachmentCompactionAbort(t *testing.T) { attID := fmt.Sprintf("testAtt-%d", i) attBody := map[string]interface{}{"value": strconv.Itoa(i)} attJSONBody, err := base.JSONMarshal(attBody) - assert.NoError(t, err) + require.NoError(t, err) rest.CreateLegacyAttachmentDoc(t, ctx, collection, dataStore, docID, []byte("{}"), attID, attJSONBody) } diff --git a/rest/importtest/collections_import_test.go b/rest/importtest/collections_import_test.go index 89b2d7c64c..27f16fc856 100644 --- a/rest/importtest/collections_import_test.go +++ b/rest/importtest/collections_import_test.go @@ -413,12 +413,14 @@ func TestMultiCollectionImportRemoveCollection(t *testing.T) { } func requireSyncData(rt *rest.RestTester, dataStore base.DataStore, docName string, hasSyncData bool) { - var rawDoc, rawXattr, rawUserXattr []byte - _, err := dataStore.GetWithXattr(rt.Context(), docName, base.SyncXattrName, rt.GetDatabase().Options.UserXattrKey, &rawDoc, &rawXattr, &rawUserXattr) - require.NoError(rt.TB, err) + xattrs, _, err := dataStore.GetXattrs(rt.Context(), docName, []string{base.SyncXattrName}) if hasSyncData { - require.NotEqual(rt.TB, "", string(rawXattr), "Expected data for %s %s", dataStore.GetName(), docName) + require.NoError(rt.TB, err) + require.Contains(rt.TB, xattrs, base.SyncXattrName) + require.NotEqual(rt.TB, "", string(xattrs[base.SyncXattrName]), "Expected data for %s %s", dataStore.GetName(), docName) } else { - require.Equal(rt.TB, "", string(rawXattr), "Expected no data for %s %s", dataStore.GetName(), docName) + require.Error(rt.TB, err) + require.True(rt.TB, base.IsXattrNotFoundError(err), "Expected xattr missing error but got %+v", err) + require.NotContains(rt.TB, xattrs, base.SyncXattrName) } } diff --git a/rest/importtest/import_test.go b/rest/importtest/import_test.go index 28cab5a917..22c1dc0818 100644 --- a/rest/importtest/import_test.go +++ b/rest/importtest/import_test.go @@ -627,7 +627,7 @@ func TestXattrImportMultipleActorOnDemandGet(t *testing.T) { xattrVal["actor"] = "not mobile" ctx := base.TestCtx(t) - _, mutateErr := dataStore.UpdateXattr(ctx, mobileKey, "_nonmobile", uint32(0), cas, xattrVal, nil) + _, mutateErr := dataStore.UpdateXattrs(ctx, mobileKey, uint32(0), cas, map[string][]byte{"_nonmobile": base.MustJSONMarshal(t, xattrVal)}, nil) assert.NoError(t, mutateErr, "Error updating non-mobile xattr for multi-actor document") @@ -679,7 +679,7 @@ func TestXattrImportMultipleActorOnDemandPut(t *testing.T) { // Modify the document via the SDK to add a new, non-mobile xattr xattrVal := make(map[string]interface{}) xattrVal["actor"] = "not mobile" - _, mutateErr := dataStore.UpdateXattr(ctx, mobileKey, "_nonmobile", uint32(0), cas, xattrVal, nil) + _, mutateErr := dataStore.UpdateXattrs(ctx, mobileKey, uint32(0), cas, map[string][]byte{"_nonmobile": base.MustJSONMarshal(t, xattrVal)}, nil) assert.NoError(t, mutateErr, "Error updating non-mobile xattr for multi-actor document") // Attempt to update the document again via Sync Gateway. 
Should not trigger import, PUT should be successful, @@ -740,7 +740,7 @@ func TestXattrImportMultipleActorOnDemandFeed(t *testing.T) { // Modify the document via the SDK to add a new, non-mobile xattr xattrVal := make(map[string]interface{}) xattrVal["actor"] = "not mobile" - _, mutateErr := dataStore.UpdateXattr(ctx, mobileKey, "_nonmobile", uint32(0), cas, xattrVal, nil) + _, mutateErr := dataStore.UpdateXattrs(ctx, mobileKey, uint32(0), cas, map[string][]byte{"_nonmobile": base.MustJSONMarshal(t, xattrVal)}, nil) assert.NoError(t, mutateErr, "Error updating non-mobile xattr for multi-actor document") // Wait until crc match count changes @@ -1492,10 +1492,11 @@ func TestImportZeroValueDecimalPlaces(t *testing.T) { for i := minDecimalPlaces; i <= maxDecimalPlaces; i++ { docID := "TestImportDecimalScale" + strconv.Itoa(i) - var docBody []byte var syncData db.SyncData - _, err := dataStore.GetWithXattr(ctx, docID, base.SyncXattrName, "", &docBody, &syncData, nil) + docBody, xattrs, _, err := dataStore.GetWithXattrs(ctx, docID, []string{base.SyncXattrName}) require.NoError(t, err) + require.Contains(t, xattrs, base.SyncXattrName) + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData)) assert.NotEqualf(t, "", syncData.CurrentRev, "Expecting non-empty rev ID for imported doc %v", docID) assert.Truef(t, strings.HasPrefix(syncData.CurrentRev, "1-"), "Expecting rev 1 for imported doc %v", docID) @@ -1557,10 +1558,11 @@ func TestImportZeroValueDecimalPlacesScientificNotation(t *testing.T) { ctx := base.TestCtx(t) for i := minDecimalPlaces; i <= maxDecimalPlaces; i++ { docID := "TestImportDecimalPlacesScientificNotation" + strconv.Itoa(i) - var docBody []byte var syncData db.SyncData - _, err := dataStore.GetWithXattr(ctx, docID, base.SyncXattrName, "", &docBody, &syncData, nil) + docBody, xattrs, _, err := dataStore.GetWithXattrs(ctx, docID, []string{base.SyncXattrName}) require.NoError(t, err) + require.Contains(t, xattrs, base.SyncXattrName) + require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData)) assert.NotEqualf(t, "", syncData.CurrentRev, "Expecting non-empty rev ID for imported doc %v", docID) assert.Truef(t, strings.HasPrefix(syncData.CurrentRev, "1-"), "Expecting rev 1 for imported doc %v", docID) @@ -2048,7 +2050,7 @@ func TestUnexpectedBodyOnTombstone(t *testing.T) { // Modify the document via the SDK to add the body back xattrVal := make(map[string]interface{}) xattrVal["actor"] = "not mobile" - _, mutateErr := dataStore.UpdateXattr(ctx, mobileKey, "_nonmobile", uint32(0), cas, xattrVal, nil) + _, mutateErr := dataStore.UpdateXattrs(ctx, mobileKey, uint32(0), cas, map[string][]byte{"_nonmobile": base.MustJSONMarshal(t, xattrVal)}, nil) assert.NoError(t, mutateErr, "Error updating non-mobile xattr for multi-actor document") // Attempt to get the document again via Sync Gateway. Should not trigger import. 
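Note: the test hunks above replace the removed WriteUserXattr helper with the same three-step pattern each time — fetch the current CAS with Get, write the user xattr with UpdateXattrs, then read it back with GetXattrs. The standalone sketch below shows that pattern outside a test; it is illustrative only, and the "channels" xattr key and "ABC" value are hypothetical.

```go
package example

import (
	"context"
	"fmt"

	"github.com/couchbase/sync_gateway/base"
)

// writeAndReadUserXattr is a hypothetical helper showing the CAS-protected
// user-xattr write pattern used by the updated tests.
func writeAndReadUserXattr(ctx context.Context, dataStore base.DataStore, docKey string) error {
	const userXattrKey = "channels" // hypothetical user xattr key

	// UpdateXattrs requires the current CAS of the document.
	cas, err := dataStore.Get(docKey, nil)
	if err != nil {
		return err
	}

	xattrVal, err := base.JSONMarshal("ABC") // hypothetical channel value
	if err != nil {
		return err
	}
	if _, err := dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{userXattrKey: xattrVal}, nil); err != nil {
		return err
	}

	// GetXattrs returns the raw bytes of each requested xattr, keyed by name.
	xattrs, _, err := dataStore.GetXattrs(ctx, docKey, []string{userXattrKey})
	if err != nil {
		return err
	}
	var channel string
	if err := base.JSONUnmarshal(xattrs[userXattrKey], &channel); err != nil {
		return err
	}
	fmt.Printf("user xattr %q on doc %q: %s\n", userXattrKey, docKey, channel)
	return nil
}
```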
@@ -2099,9 +2101,11 @@ func rawDocWithSyncMeta() string {
 }
 
 func assertXattrSyncMetaRevGeneration(t *testing.T, dataStore base.DataStore, key string, expectedRevGeneration int) {
-	xattr := map[string]interface{}{}
-	_, err := dataStore.GetWithXattr(base.TestCtx(t), key, base.SyncXattrName, "", nil, &xattr, nil)
+	xattrs, _, err := dataStore.GetXattrs(base.TestCtx(t), key, []string{base.SyncXattrName})
 	assert.NoError(t, err, "Error Getting Xattr")
+	xattr := map[string]interface{}{}
+	require.Contains(t, xattrs, base.SyncXattrName)
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &xattr))
 	revision, ok := xattr["rev"]
 	assert.True(t, ok)
 	generation, _ := db.ParseRevID(base.TestCtx(t), revision.(string))
diff --git a/rest/importuserxattrtest/import_test.go b/rest/importuserxattrtest/import_test.go
index 0154d044a2..80d7f3c301 100644
--- a/rest/importuserxattrtest/import_test.go
+++ b/rest/importuserxattrtest/import_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/couchbase/sync_gateway/db"
 	"github.com/couchbase/sync_gateway/rest"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestUserXattrAutoImport(t *testing.T) {
@@ -47,8 +48,12 @@ func TestUserXattrAutoImport(t *testing.T) {
 	resp := rt.SendAdminRequest("PUT", "/{{.keyspace}}/"+docKey, "{}")
 	rest.RequireStatus(t, resp, http.StatusCreated)
 
+	ctx := rt.Context()
+	cas, err := dataStore.Get(docKey, nil)
+
+	require.NoError(t, err)
 	// Add xattr to doc
-	_, err := dataStore.WriteUserXattr(docKey, xattrKey, channelName)
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
 	assert.NoError(t, err)
 
 	// Wait for doc to be imported
@@ -60,26 +65,29 @@ func TestUserXattrAutoImport(t *testing.T) {
 	// Ensure sync function has ran twice (once for PUT and once for xattr addition)
 	assert.Equal(t, int64(2), rt.GetDatabase().DbStats.Database().SyncFunctionCount.Value())
 
-	ctx := base.TestCtx(t)
 	// Get Xattr and ensure channel value set correctly
-	var syncData db.SyncData
-	_, err = dataStore.GetXattr(ctx, docKey, base.SyncXattrName, &syncData)
+	xattrs, cas, err := dataStore.GetXattrs(ctx, docKey, []string{base.SyncXattrName})
 	assert.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	var syncData db.SyncData
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData))
 
 	assert.Equal(t, []string{channelName}, syncData.Channels.KeySet())
 
 	// Update xattr again but same value and ensure it isn't imported again (crc32 hash should match)
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, channelName)
-	assert.NoError(t, err)
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
+	require.NoError(t, err)
 
 	err = rt.WaitForCondition(func() bool {
 		return rt.GetDatabase().DbStats.Database().Crc32MatchCount.Value() == 1
 	})
 	assert.NoError(t, err)
 
-	var syncData2 db.SyncData
-	_, err = dataStore.GetXattr(ctx, docKey, base.SyncXattrName, &syncData2)
+	xattrs, _, err = dataStore.GetXattrs(ctx, docKey, []string{base.SyncXattrName})
 	assert.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	var syncData2 db.SyncData
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData2))
 
 	assert.Equal(t, syncData.Crc32c, syncData2.Crc32c)
 	assert.Equal(t, syncData.Crc32cUserXattr, syncData2.Crc32cUserXattr)
@@ -96,8 +104,10 @@ func TestUserXattrAutoImport(t *testing.T) {
 	assert.NoError(t, err)
 
 	var syncData3 db.SyncData
-	_, err = dataStore.GetXattr(ctx, docKey, base.SyncXattrName, &syncData3)
+	xattrs, _, err = dataStore.GetXattrs(ctx, docKey, []string{base.SyncXattrName})
 	assert.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData3))
 
 	assert.Equal(t, syncData2.Crc32c, syncData3.Crc32c)
 	assert.Equal(t, syncData2.Crc32cUserXattr, syncData3.Crc32cUserXattr)
@@ -117,8 +127,10 @@ func TestUserXattrAutoImport(t *testing.T) {
 	assert.Equal(t, int64(3), rt.GetDatabase().DbStats.Database().SyncFunctionCount.Value())
 
 	var syncData4 db.SyncData
-	_, err = dataStore.GetXattr(ctx, docKey, base.SyncXattrName, &syncData4)
+	xattrs, _, err = dataStore.GetXattrs(ctx, docKey, []string{base.SyncXattrName})
 	assert.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData4))
 
 	assert.Equal(t, base.Crc32cHashString(updateVal), syncData4.Crc32c)
 	assert.Equal(t, syncData3.Crc32cUserXattr, syncData4.Crc32cUserXattr)
@@ -166,9 +178,13 @@ func TestUserXattrOnDemandImportGET(t *testing.T) {
 	// Ensure sync function has been ran on import
 	assert.Equal(t, int64(1), rt.GetDatabase().DbStats.Database().SyncFunctionCount.Value())
 
+	cas, err := dataStore.Get(docKey, nil)
+	require.NoError(t, err)
+
+	ctx := base.TestCtx(t)
 	// Write user xattr
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, channelName)
-	assert.NoError(t, err)
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
+	require.NoError(t, err)
 
 	// GET to trigger import
 	resp = rt.SendAdminRequest("GET", "/{{.keyspace}}/"+docKey, "")
@@ -183,26 +199,31 @@ func TestUserXattrOnDemandImportGET(t *testing.T) {
 	// Ensure sync function has ran on import
 	assert.Equal(t, int64(2), rt.GetDatabase().DbStats.Database().SyncFunctionCount.Value())
 
-	ctx := base.TestCtx(t)
 	// Get sync data for doc and ensure user xattr has been used correctly to set channel
+	xattrs, cas, err := dataStore.GetXattrs(ctx, docKey, []string{base.SyncXattrName})
+	require.NoError(t, err)
+
+	require.Contains(t, xattrs, base.SyncXattrName)
 	var syncData db.SyncData
-	_, err = dataStore.GetXattr(ctx, docKey, base.SyncXattrName, &syncData)
-	assert.NoError(t, err)
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData))
 
 	assert.Equal(t, []string{channelName}, syncData.Channels.KeySet())
 
 	// Write same xattr value
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, channelName)
-	assert.NoError(t, err)
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
+	require.NoError(t, err)
 
 	// Perform GET and ensure import isn't triggered as crc32 hash is the same
 	resp = rt.SendAdminRequest("GET", "/{{.keyspace}}/"+docKey, "")
 	rest.RequireStatus(t, resp, http.StatusOK)
 
-	var syncData2 db.SyncData
-	_, err = dataStore.GetXattr(ctx, docKey, base.SyncXattrName, &syncData2)
+	xattrs, _, err = dataStore.GetXattrs(ctx, docKey, []string{base.SyncXattrName})
 	assert.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	var syncData2 db.SyncData
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData2))
+
 	assert.Equal(t, syncData.Crc32c, syncData2.Crc32c)
 	assert.Equal(t, syncData.Crc32cUserXattr, syncData2.Crc32cUserXattr)
 	assert.Equal(t, int64(2), rt.GetDatabase().DbStats.Database().SyncFunctionCount.Value())
@@ -253,10 +274,13 @@ func TestUserXattrOnDemandImportWrite(t *testing.T) {
 	// Ensure sync function has ran on import
 	assert.Equal(t, int64(2), rt.GetDatabase().DbStats.Database().SyncFunctionCount.Value())
 
+	cas, err := dataStore.Get(docKey, nil)
+	require.NoError(t, err)
+	ctx := base.TestCtx(t)
 	// Write user xattr
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, channelName)
-	assert.NoError(t, err)
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
+	require.NoError(t, err)
 
 	// Trigger import
 	resp = rt.SendAdminRequest("PUT", "/{{.keyspace}}/"+docKey, `{"update": "update"}`)
@@ -271,10 +295,10 @@ func TestUserXattrOnDemandImportWrite(t *testing.T) {
 	// Ensure sync function has ran on import
 	assert.Equal(t, int64(3), rt.GetDatabase().DbStats.Database().SyncFunctionCount.Value())
 
-	ctx := base.TestCtx(t)
+	xattrs, _, err := dataStore.GetXattrs(ctx, docKey, []string{base.SyncXattrName})
+	require.NoError(t, err)
 	var syncData db.SyncData
-	_, err = dataStore.GetXattr(ctx, docKey, base.SyncXattrName, &syncData)
-	assert.NoError(t, err)
-
+	require.Contains(t, xattrs, base.SyncXattrName)
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData))
 	assert.Equal(t, []string{channelName}, syncData.Channels.KeySet())
 }
diff --git a/rest/importuserxattrtest/rawdoc_test.go b/rest/importuserxattrtest/rawdoc_test.go
index 1383ea0d2e..26463e1ee0 100644
--- a/rest/importuserxattrtest/rawdoc_test.go
+++ b/rest/importuserxattrtest/rawdoc_test.go
@@ -13,6 +13,7 @@ import (
 	"net/http"
 	"testing"
 
+	"github.com/couchbase/sync_gateway/base"
 	"github.com/couchbase/sync_gateway/rest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -36,7 +37,11 @@ func TestUserXattrsRawGet(t *testing.T) {
 	rest.RequireStatus(t, resp, http.StatusCreated)
 	require.NoError(t, rt.WaitForPendingChanges())
 
-	_, err := rt.GetSingleDataStore().WriteUserXattr(docKey, xattrKey, "val")
+	cas, err := rt.GetSingleDataStore().Get(docKey, nil)
+	require.NoError(t, err)
+
+	ctx := rt.Context()
+	_, err = rt.GetSingleDataStore().UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, "val")}, nil)
 	assert.NoError(t, err)
 
 	err = rt.WaitForCondition(func() bool {
diff --git a/rest/importuserxattrtest/revcache_test.go b/rest/importuserxattrtest/revcache_test.go
index 02e93c98da..d0738847d7 100644
--- a/rest/importuserxattrtest/revcache_test.go
+++ b/rest/importuserxattrtest/revcache_test.go
@@ -74,8 +74,12 @@ func TestUserXattrRevCache(t *testing.T) {
 	resp := rt.SendAdminRequest("PUT", "/{{.keyspace}}/"+docKey, `{}`)
 	rest.RequireStatus(t, resp, http.StatusCreated)
 	require.NoError(t, rt.WaitForPendingChanges())
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, "DEF")
-	assert.NoError(t, err)
+
+	cas, err := rt.GetSingleDataStore().Get(docKey, nil)
+	require.NoError(t, err)
+
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, "DEF")}, nil)
+	require.NoError(t, err)
 
 	_, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes", "userDEF", false)
 	assert.NoError(t, err)
@@ -83,9 +87,13 @@ func TestUserXattrRevCache(t *testing.T) {
 	resp = rt2.SendUserRequest("GET", "/{{.keyspace}}/"+docKey, ``, "userDEF")
 	rest.RequireStatus(t, resp, http.StatusOK)
 
+	// get new cas to pass to UpdateXattrs
+	cas, err = rt.GetSingleDataStore().Get(docKey, nil)
+	require.NoError(t, err)
+
 	// Add channel ABC to the userXattr
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, channelName)
-	assert.NoError(t, err)
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
+	require.NoError(t, err)
 
 	// wait for import of the xattr change on both nodes
 	_, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes", "userABC", false)
@@ -153,8 +161,11 @@ func TestUserXattrDeleteWithRevCache(t *testing.T) {
 	rest.RequireStatus(t, resp, http.StatusCreated)
 	require.NoError(t, rt.WaitForPendingChanges())
 
+	cas, err := rt.GetSingleDataStore().Get(docKey, nil)
+	require.NoError(t, err)
+
 	// Write DEF to the userXattrStore to give userDEF access
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, "DEF")
+	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, "DEF")}, nil)
 	assert.NoError(t, err)
 
 	_, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes", "userDEF", false)
@@ -163,9 +174,13 @@ func TestUserXattrDeleteWithRevCache(t *testing.T) {
 	resp = rt2.SendUserRequest("GET", "/{{.keyspace}}/"+docKey, ``, "userDEF")
 	rest.RequireStatus(t, resp, http.StatusOK)
 
+	// FIXME, why is cas different after import?
+	cas, err = rt.GetSingleDataStore().Get(docKey, nil)
+	require.NoError(t, err)
+
 	// Delete DEF from the userXattr, removing the doc from channel DEF
-	_, err = dataStore.DeleteUserXattr(docKey, xattrKey)
-	assert.NoError(t, err)
+	err = dataStore.RemoveXattrs(ctx, docKey, []string{xattrKey}, cas)
+	require.NoError(t, err)
 
 	// wait for import of the xattr change on both nodes
 	_, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes", "userDEF", false)
diff --git a/rest/importuserxattrtest/revid_import_test.go b/rest/importuserxattrtest/revid_import_test.go
index b1d8938371..4612199997 100644
--- a/rest/importuserxattrtest/revid_import_test.go
+++ b/rest/importuserxattrtest/revid_import_test.go
@@ -54,8 +54,10 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) {
 	// Get current sync data
 	var syncData db.SyncData
-	_, err := dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData)
-	assert.NoError(t, err)
+	xattrs, cas, err := dataStore.GetXattrs(rt.Context(), docKey, []string{base.SyncXattrName})
+	require.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	assert.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData))
 
 	docRev, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), docKey, syncData.CurrentRev, true, false)
 	assert.NoError(t, err)
@@ -63,8 +65,8 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) {
 	assert.Equal(t, syncData.CurrentRev, docRev.RevID)
 
 	// Write xattr to trigger import of user xattr
-	_, err = dataStore.WriteUserXattr(docKey, xattrKey, channelName)
-	assert.NoError(t, err)
+	_, err = dataStore.UpdateXattrs(rt.Context(), docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
+	require.NoError(t, err)
 
 	// Wait for import
 	err = rt.WaitForCondition(func() bool {
@@ -74,8 +76,10 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) {
 
 	// Ensure import worked and sequence incremented but that sequence did not
 	var syncData2 db.SyncData
-	_, err = dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData2)
-	assert.NoError(t, err)
+	xattrs, _, err = dataStore.GetXattrs(rt.Context(), docKey, []string{base.SyncXattrName})
+	require.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	assert.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData2))
 
 	docRev2, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), docKey, syncData.CurrentRev, true, false)
 	assert.NoError(t, err)
@@ -96,8 +100,10 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) {
 	assert.NoError(t, err)
 
 	var syncData3 db.SyncData
-	_, err = dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData2)
-	assert.NoError(t, err)
+	xattrs, _, err = dataStore.GetXattrs(rt.Context(), docKey, []string{base.SyncXattrName})
+	require.NoError(t, err)
+	require.Contains(t, xattrs, base.SyncXattrName)
+	require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData3))
 
 	assert.NotEqual(t, syncData2.CurrentRev, syncData3.CurrentRev)
 }
diff --git a/rest/indextest/index_test.go b/rest/indextest/index_test.go
index af0b7bd711..6113785bac 100644
--- a/rest/indextest/index_test.go
+++ b/rest/indextest/index_test.go
@@ -655,10 +655,12 @@ func waitAndRequireDBState(t *testing.T, sc *rest.ServerContext, dbName string,
 }
 
 func requireActiveChannel(t *testing.T, dataStore base.DataStore, key string, channelName string) {
-	xattr := db.SyncData{}
-	_, err := dataStore.GetWithXattr(base.TestCtx(t), key, base.SyncXattrName, "", nil, &xattr, nil)
+	xattrs, _, err := dataStore.GetXattrs(base.TestCtx(t), key, []string{base.SyncXattrName})
 	require.NoError(t, err, "Error Getting Xattr as sync data")
+	require.Contains(t, xattrs, base.SyncXattrName)
+	var xattr db.SyncData
+	require.NoError(t, json.Unmarshal(xattrs[base.SyncXattrName], &xattr), "Error unmarshalling sync data")
 	channel, ok := xattr.Channels[channelName]
 	require.True(t, ok)
-	require.True(t, channel == nil)
+	require.Nil(t, channel)
 }
diff --git a/rest/revocation_test.go b/rest/revocation_test.go
index 7fa07f9fb8..411b41f19f 100644
--- a/rest/revocation_test.go
+++ b/rest/revocation_test.go
@@ -1499,16 +1499,23 @@ func TestRevocationWithUserXattrs(t *testing.T) {
 	resp := rt.SendAdminRequest("PUT", "/{{.keyspace}}/accessDoc", `{}`)
 	RequireStatus(t, resp, http.StatusCreated)
 
-	_, err := data.WriteUserXattr("accessDoc", xattrKey, map[string]interface{}{"userChannels": map[string]interface{}{"user": "a"}})
-	assert.NoError(t, err)
+	ctx := rt.Context()
+
+	cas, err := data.Get("accessDoc", nil)
+	require.NoError(t, err)
+
+	_, err = data.UpdateXattrs(ctx, "accessDoc", 0, cas, map[string][]byte{xattrKey: []byte(`{"userChannels" : {"user": "a"}}`)}, nil)
+	require.NoError(t, err)
 
 	_ = rt.PutDoc("doc", `{"channels": "a"}`)
 
 	changes := revocationTester.getChanges(0, 2)
 	assert.Len(t, changes.Results, 2)
 
-	_, err = data.WriteUserXattr("accessDoc", xattrKey, map[string]interface{}{})
-	assert.NoError(t, err)
+	cas, err = data.Get("accessDoc", nil)
+	require.NoError(t, err)
+
+	require.NoError(t, data.RemoveXattrs(ctx, "accessDoc", []string{xattrKey}, cas))
 
 	changes = revocationTester.getChanges(changes.Last_Seq, 1)
 	assert.Len(t, changes.Results, 1)
diff --git a/rest/user_api_test.go b/rest/user_api_test.go
index a56ed4ccfd..1d84bcc56b 100644
--- a/rest/user_api_test.go
+++ b/rest/user_api_test.go
@@ -1147,8 +1147,12 @@ func TestRemovingUserXattr(t *testing.T) {
 		RequireStatus(t, resp, http.StatusCreated)
 
 		dataStore := rt.GetSingleDataStore()
+
+		cas, err := dataStore.Get(docKey, nil)
+		require.NoError(t, err)
+		ctx := rt.Context()
 		// Add xattr
-		_, err := dataStore.WriteUserXattr(docKey, xattrKey, channelName)
+		_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: base.MustJSONMarshal(t, channelName)}, nil)
 		assert.NoError(t, err)
 
 		// Trigger import
@@ -1159,14 +1163,17 @@ func TestRemovingUserXattr(t *testing.T) {
 		assert.NoError(t, err)
 
 		// Get sync data for doc and ensure user xattr has been used correctly to set channel
+		xattrs, cas, err := dataStore.GetXattrs(rt.Context(), docKey, []string{base.SyncXattrName})
+		require.NoError(t, err)
+		require.Contains(t, xattrs, base.SyncXattrName)
+
 		var syncData db.SyncData
-		_, err = dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData)
-		assert.NoError(t, err)
+		require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData))
 
 		assert.Equal(t, []string{channelName}, syncData.Channels.KeySet())
 
 		// Delete user xattr
-		_, err = dataStore.DeleteUserXattr(docKey, xattrKey)
+		err = dataStore.RemoveXattrs(ctx, docKey, []string{xattrKey}, cas)
 		assert.NoError(t, err)
 
 		// Trigger import
@@ -1177,9 +1184,11 @@ func TestRemovingUserXattr(t *testing.T) {
 		assert.NoError(t, err)
 
 		// Ensure old channel set with user xattr has been removed
+		xattrs, _, err = dataStore.GetXattrs(rt.Context(), docKey, []string{base.SyncXattrName})
+		require.NoError(t, err)
+		require.Contains(t, xattrs, base.SyncXattrName)
 		var syncData2 db.SyncData
-		_, err = dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData2)
-		assert.NoError(t, err)
+		require.NoError(t, base.JSONUnmarshal(xattrs[base.SyncXattrName], &syncData2))
 
 		assert.Equal(t, uint64(3), syncData2.Channels[channelName].Seq)
 	})
diff --git a/rest/utilities_testing_attachment.go b/rest/utilities_testing_attachment.go
index ec2924ce7b..b4add2461a 100644
--- a/rest/utilities_testing_attachment.go
+++ b/rest/utilities_testing_attachment.go
@@ -53,7 +53,7 @@ func CreateLegacyAttachmentDoc(t *testing.T, ctx context.Context, collection *db
 	_, _, err = collection.Put(ctx, docID, unmarshalledBody)
 	require.NoError(t, err)
 
-	_, err = dataStore.WriteUpdateWithXattr(ctx, docID, base.SyncXattrName, "", 0, nil, nil, func(doc []byte, xattr []byte, userXattr []byte, cas uint64) (updatedDoc []byte, updatedXattr []byte, deletedDoc bool, expiry *uint32, updatedSpec []sgbucket.MacroExpansionSpec, err error) {
+	_, err = dataStore.WriteUpdateWithXattrs(ctx, docID, []string{base.SyncXattrName}, 0, nil, nil, func(doc []byte, xattrs map[string][]byte, cas uint64) (sgbucket.UpdatedDoc, error) {
 		attachmentSyncData := map[string]interface{}{
 			attID: map[string]interface{}{
 				"content_type": "application/json",
@@ -66,14 +66,19 @@ func CreateLegacyAttachmentDoc(t *testing.T, ctx context.Context, collection *db
 
 		attachmentSyncDataBytes, err := base.JSONMarshal(attachmentSyncData)
 		require.NoError(t, err)
-
+		require.Contains(t, xattrs, base.SyncXattrName)
+		xattr := xattrs[base.SyncXattrName]
 		xattr, err = base.InjectJSONPropertiesFromBytes(xattr, base.KVPairBytes{
 			Key: "attachments",
 			Val: attachmentSyncDataBytes,
 		})
 		require.NoError(t, err)
-
-		return doc, xattr, false, nil, updatedSpec, nil
+		return sgbucket.UpdatedDoc{
+			Doc: doc,
+			Xattrs: map[string][]byte{
+				base.SyncXattrName: xattr,
+			},
+		}, nil
 	})
 	require.NoError(t, err)
diff --git a/rest/xattr_upgrade_test.go b/rest/xattr_upgrade_test.go
index b6fda5e916..4aeaf3f2ac 100644
--- a/rest/xattr_upgrade_test.go
+++ b/rest/xattr_upgrade_test.go
@@ -74,7 +74,7 @@ func TestCheckForUpgradeOnRead(t *testing.T) {
 	ctx := base.TestCtx(t)
 
 	// Create via the SDK with sync metadata intact
-	_, err := dataStore.WriteCasWithXattr(ctx, key, base.SyncXattrName, 0, 0, []byte(bodyString), []byte(xattrString), nil)
+	_, err := dataStore.WriteWithXattrs(ctx, key, 0, 0, []byte(bodyString), map[string][]byte{base.SyncXattrName: []byte(xattrString)}, nil)
 	assert.NoError(t, err, "Error writing doc w/ xattr")
 
 	// Attempt to get the documents via Sync Gateway. Should successfully retrieve doc by triggering
@@ -151,7 +151,7 @@ func TestCheckForUpgradeOnWrite(t *testing.T) {
 	ctx := base.TestCtx(t)
 
 	// Create via the SDK with sync metadata intact
-	_, err := dataStore.WriteCasWithXattr(ctx, key, base.SyncXattrName, 0, 0, []byte(bodyString), []byte(xattrString), nil)
+	_, err := dataStore.WriteWithXattrs(ctx, key, 0, 0, []byte(bodyString), map[string][]byte{base.SyncXattrName: []byte(xattrString)}, nil)
 	assert.NoError(t, err, "Error writing doc w/ xattr")
 	require.NoError(t, rt.WaitForSequence(5))
 
@@ -220,7 +220,7 @@ func TestCheckForUpgradeFeed(t *testing.T) {
 	ctx := base.TestCtx(t)
 
 	// Create via the SDK with sync metadata intact
-	_, err := dataStore.WriteCasWithXattr(ctx, key, base.SyncXattrName, 0, 0, []byte(bodyString), []byte(xattrString), nil)
+	_, err := dataStore.WriteWithXattrs(ctx, key, 0, 0, []byte(bodyString), map[string][]byte{base.SyncXattrName: []byte(xattrString)}, nil)
 	assert.NoError(t, err, "Error writing doc w/ xattr")
 	require.NoError(t, rt.WaitForSequence(1))
 
diff --git a/xdcr/cbs_xdcr_test.go b/xdcr/cbs_xdcr_test.go
index 07666d7257..4f116ec0ca 100644
--- a/xdcr/cbs_xdcr_test.go
+++ b/xdcr/cbs_xdcr_test.go
@@ -97,14 +97,16 @@ func TestMobileXDCRNoSyncDataCopied(t *testing.T) {
 	// verify VV is written to docs that are replicated
 	for _, doc := range []string{normalDoc, attachmentDoc} {
 		require.EventuallyWithT(t, func(c *assert.CollectT) {
-			var response map[string]interface{}
-			_, err := bucket2.DefaultDataStore().GetXattr(ctx, doc, "_vv", &response)
+			xattrs, _, err := bucket2.DefaultDataStore().GetXattrs(ctx, doc, []string{"_vv"})
 			assert.NoError(c, err, "Could not get doc %s", doc)
-			for range response {
-				assert.NotNil(c, response[version])
-				assert.NotNil(c, response[source])
-				assert.NotNil(c, response[curCAS])
-			}
+			vvXattrBytes, ok := xattrs["_vv"]
+			require.True(t, ok)
+			var vvXattrVal map[string]any
+			require.NoError(t, base.JSONUnmarshal(vvXattrBytes, &vvXattrVal))
+			assert.NotNil(c, vvXattrVal[version])
+			assert.NotNil(c, vvXattrVal[source])
+			assert.NotNil(c, vvXattrVal[curCAS])
+
 		}, time.Second*5, time.Millisecond*100)
 	}
 }
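The test updates above all follow the same migration pattern from the removed single-xattr helpers (WriteUserXattr, DeleteUserXattr, GetXattr) to the new multi-xattr APIs (GetXattrs, UpdateXattrs, RemoveXattrs): fetch the document's current cas, marshal each xattr value to raw JSON, and pass the values keyed by xattr name. The following is a minimal sketch of that flow, using only the call shapes that appear in this patch; the helper name setUserXattr and its error-returning form are illustrative and not part of the change.

package example

import (
	"context"

	"github.com/couchbase/sync_gateway/base"
)

// setUserXattr is an illustrative helper (not part of this patch) showing the
// multi-xattr write flow used by the updated tests: read the document to get
// its current cas, marshal the xattr value to JSON, and write it keyed by
// xattr name via UpdateXattrs.
func setUserXattr(ctx context.Context, dataStore base.DataStore, docKey, xattrKey string, val interface{}) error {
	// UpdateXattrs requires the document's current cas.
	cas, err := dataStore.Get(docKey, nil)
	if err != nil {
		return err
	}
	// Xattr values are passed as raw JSON bytes.
	raw, err := base.JSONMarshal(val)
	if err != nil {
		return err
	}
	// Expiry of 0 (no expiry); the map allows several xattrs to be updated in one call.
	_, err = dataStore.UpdateXattrs(ctx, docKey, 0, cas, map[string][]byte{xattrKey: raw}, nil)
	return err
}

Removal follows the same shape: re-fetch the cas and call RemoveXattrs(ctx, docKey, []string{xattrKey}, cas), as the revcache and user API tests above do.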