diff --git a/beacon-chain/p2p/discovery_test.go b/beacon-chain/p2p/discovery_test.go
index 35a628e9510e..faf6dfcb867a 100644
--- a/beacon-chain/p2p/discovery_test.go
+++ b/beacon-chain/p2p/discovery_test.go
@@ -237,7 +237,7 @@ func TestCreateLocalNode(t *testing.T) {
 			// Check custody_subnet_count config.
 			custodySubnetCount := new(uint64)
 			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, custodySubnetCount)))
-			require.Equal(t, uint64(1), *custodySubnetCount)
+			require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodySubnetCount)
 		})
 	}
 }
@@ -565,7 +565,7 @@ func TestRefreshPersistentSubnets(t *testing.T) {
 		eip7594ForkEpoch = 10
 	)
 
-	custodySubnetCount := uint64(1)
+	custodySubnetCount := params.BeaconConfig().CustodyRequirement
 
 	// Set up epochs.
 	defaultCfg := params.BeaconConfig()
diff --git a/beacon-chain/p2p/testing/p2p.go b/beacon-chain/p2p/testing/p2p.go
index a3df4951025a..842b490cef8e 100644
--- a/beacon-chain/p2p/testing/p2p.go
+++ b/beacon-chain/p2p/testing/p2p.go
@@ -48,6 +48,7 @@ const (
 type TestP2P struct {
 	t               *testing.T
 	BHost           host.Host
+	EnodeID         enode.ID
 	pubsub          *pubsub.PubSub
 	joinedTopics    map[string]*pubsub.Topic
 	BroadcastCalled atomic.Bool
@@ -292,8 +293,8 @@ func (*TestP2P) ENR() *enr.Record {
 }
 
 // NodeID returns the node id of the local peer.
-func (*TestP2P) NodeID() enode.ID {
-	return [32]byte{}
+func (p *TestP2P) NodeID() enode.ID {
+	return p.EnodeID
 }
 
 // DiscoveryAddresses --
diff --git a/beacon-chain/sync/data_columns_sampling_test.go b/beacon-chain/sync/data_columns_sampling_test.go
index 46c03dab49d3..55e5620f6462 100644
--- a/beacon-chain/sync/data_columns_sampling_test.go
+++ b/beacon-chain/sync/data_columns_sampling_test.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/binary"
+	"fmt"
 	"testing"
 
 	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
@@ -203,12 +204,14 @@ func TestDataColumnSampler1D_PeerManagement(t *testing.T) {
 	testCases := []struct {
 		numPeers           int
 		custodyRequirement uint64
+		subnetCount        uint64
 		expectedColumns    [][]uint64
 		prunePeers         map[int]bool // Peers to prune.
 	}{
 		{
 			numPeers:           3,
 			custodyRequirement: 1,
+			subnetCount:        32,
 			expectedColumns: [][]uint64{
 				{6, 38, 70, 102},
 				{3, 35, 67, 99},
@@ -221,6 +224,7 @@ func TestDataColumnSampler1D_PeerManagement(t *testing.T) {
 		{
 			numPeers:           3,
 			custodyRequirement: 2,
+			subnetCount:        32,
 			expectedColumns: [][]uint64{
 				{6, 16, 38, 48, 70, 80, 102, 112},
 				{3, 13, 35, 45, 67, 77, 99, 109},
@@ -232,7 +236,12 @@ func TestDataColumnSampler1D_PeerManagement(t *testing.T) {
 		},
 	}
 
+	params.SetupTestConfigCleanup(t)
 	for _, tc := range testCases {
+		cfg := params.BeaconConfig()
+		cfg.CustodyRequirement = tc.custodyRequirement
+		cfg.DataColumnSidecarSubnetCount = tc.subnetCount
+		params.OverrideBeaconConfig(cfg)
 		test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
 		for i := 0; i < tc.numPeers; i++ {
 			p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
@@ -282,12 +291,14 @@ func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
 	testCases := []struct {
 		numPeers             int
 		custodyRequirement   uint64
+		subnetCount          uint64
 		columnsToDistribute  [][]uint64
 		expectedDistribution []map[int][]uint64
 	}{
 		{
 			numPeers:           3,
 			custodyRequirement: 1,
+			subnetCount:        32,
 			// peer custody maps
 			// p0: {6, 38, 70, 102},
 			// p1: {3, 35, 67, 99},
@@ -318,6 +329,7 @@ func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
 		{
 			numPeers:           3,
 			custodyRequirement: 2,
+			subnetCount:        32,
 			// peer custody maps
 			// p0: {6, 16, 38, 48, 70, 80, 102, 112},
 			// p1: {3, 13, 35, 45, 67, 77, 99, 109},
@@ -340,8 +352,12 @@ func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
 				},
 			},
 		},
 	}
-
+	params.SetupTestConfigCleanup(t)
 	for _, tc := range testCases {
+		cfg := params.BeaconConfig()
+		cfg.CustodyRequirement = tc.custodyRequirement
+		cfg.DataColumnSidecarSubnetCount = tc.subnetCount
+		params.OverrideBeaconConfig(cfg)
 		test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
 		for i := 0; i < tc.numPeers; i++ {
 			p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
@@ -351,7 +367,7 @@ func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
 
 		for idx, columns := range tc.columnsToDistribute {
 			result := sampler.distributeSamplesToPeer(columns)
-			require.Equal(t, len(tc.expectedDistribution[idx]), len(result))
+			require.Equal(t, len(tc.expectedDistribution[idx]), len(result), fmt.Sprintf("%v - %v", tc.expectedDistribution[idx], result))
 
 			for peerIdx, dist := range tc.expectedDistribution[idx] {
 				for _, column := range dist {
@@ -364,6 +380,10 @@
 }
 
 func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) {
+	params.SetupTestConfigCleanup(t)
+	cfg := params.BeaconConfig()
+	cfg.DataColumnSidecarSubnetCount = 32
+	params.OverrideBeaconConfig(cfg)
 	test, sampler := setupDefaultDataColumnSamplerTest(t)
 	sampler.refreshPeerInfo()
 
@@ -391,6 +411,11 @@
 }
 
 func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) {
+	params.SetupTestConfigCleanup(t)
+	cfg := params.BeaconConfig()
+	cfg.DataColumnSidecarSubnetCount = 32
+	params.OverrideBeaconConfig(cfg)
+
 	testCases := []struct {
 		name          string
 		samplesCount  uint64
@@ -450,9 +475,9 @@
 
 	for _, tc := range testCases {
 		test, sampler := setupDataColumnSamplerTest(t, 3)
-		p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, 1, tc.columnsNotToRespond, 1)
-		p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, 1, tc.columnsNotToRespond, 2)
-		p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, 1, tc.columnsNotToRespond, 3)
+		p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 1)
+		p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 2)
+		p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 3)
 		test.peers = []*p2ptest.TestP2P{p1, p2, p3}
 		sampler.refreshPeerInfo()
 
diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go
index 46a9b3105ed7..01dac9256585 100644
--- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go
+++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/binary"
+	"encoding/hex"
 	"fmt"
 	"math"
 	"sort"
@@ -1364,7 +1365,11 @@ func TestCustodyAllNeededColumns(t *testing.T) {
 		dataColumns[uint64(i)] = true
 	}
 
-	custodyCounts := [...]uint64{4, 32, 4, 32}
+	custodyCounts := [...]uint64{
+		4 * params.BeaconConfig().CustodyRequirement,
+		32 * params.BeaconConfig().CustodyRequirement,
+		4 * params.BeaconConfig().CustodyRequirement,
+		32 * params.BeaconConfig().CustodyRequirement}
 
 	peersID := make([]peer.ID, 0, len(custodyCounts))
 	for _, custodyCount := range custodyCounts {
@@ -1390,15 +1395,12 @@ func TestCustodyColumns(t *testing.T) {
 		p2p: p2ptest.NewTestP2P(t),
 	})
 
-	expected := map[uint64]bool{6: true, 38: true, 70: true, 102: true}
+	expected := params.BeaconConfig().CustodyRequirement
 
 	actual, err := blocksFetcher.custodyColumns()
 	require.NoError(t, err)
 
-	require.Equal(t, len(expected), len(actual))
-	for column := range expected {
-		require.Equal(t, true, actual[column])
-	}
+	require.Equal(t, int(expected), len(actual))
 }
 
 func TestMinInt(t *testing.T) {
@@ -1725,7 +1727,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 			},
 			peersParams: []peerParams{
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 34,
@@ -1750,7 +1752,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 					},
 				},
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 34,
@@ -1775,7 +1777,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 					},
 				},
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 34,
@@ -1787,7 +1789,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 					},
 				},
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 34,
@@ -1841,7 +1843,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 			},
 			peersParams: []peerParams{
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 34,
@@ -1901,7 +1903,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 			},
 			peersParams: []peerParams{
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 38,
@@ -1917,7 +1919,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 					},
 				},
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 38,
@@ -1950,7 +1952,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 			},
 			peersParams: []peerParams{
 				{
-					csc: 32,
+					csc: 128,
 					toRespond: map[string][][]responseParams{
 						(&ethpb.DataColumnSidecarsByRangeRequest{
 							StartSlot: 38,
@@ -1981,144 +1983,160 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
-		// Consistency checks.
-		require.Equal(t, len(tc.blocksParams), len(tc.addedRODataColumns))
+		t.Run(tc.name, func(t *testing.T) {
+			// Consistency checks.
+			require.Equal(t, len(tc.blocksParams), len(tc.addedRODataColumns))
 
-		// Create a context.
-		ctx := context.Background()
+			// Create a context.
+			ctx := context.Background()
 
-		// Initialize the trusted setup.
-		err := kzg.Start()
-		require.NoError(t, err)
+			// Initialize the trusted setup.
+			err := kzg.Start()
+			require.NoError(t, err)
+
+			// Create blocks, RO data columns and data columns sidecar from slot.
+			roBlocks := make([]blocks.ROBlock, len(tc.blocksParams))
+			roDatasColumns := make([][]blocks.RODataColumn, len(tc.blocksParams))
+			dataColumnsSidecarFromSlot := make(map[primitives.Slot][]*ethpb.DataColumnSidecar, len(tc.blocksParams))
+
+			for i, blockParams := range tc.blocksParams {
+				pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
+				pbSignedBeaconBlock.Block.Slot = blockParams.slot
+
+				if blockParams.hasBlobs {
+					blobs := make([]kzg.Blob, blobsCount)
+					blobKzgCommitments := make([][]byte, blobsCount)
-		// Create blocks, RO data columns and data columns sidecar from slot.
-		roBlocks := make([]blocks.ROBlock, len(tc.blocksParams))
-		roDatasColumns := make([][]blocks.RODataColumn, len(tc.blocksParams))
-		dataColumnsSidecarFromSlot := make(map[primitives.Slot][]*ethpb.DataColumnSidecar, len(tc.blocksParams))
+					for j := range blobsCount {
+						blob := getRandBlob(t, int64(i+j))
+						blobs[j] = blob
-		for i, blockParams := range tc.blocksParams {
-			pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
-			pbSignedBeaconBlock.Block.Slot = blockParams.slot
+						blobKzgCommitment, err := kzg.BlobToKZGCommitment(&blob)
+						require.NoError(t, err)
-			if blockParams.hasBlobs {
-				blobs := make([]kzg.Blob, blobsCount)
-				blobKzgCommitments := make([][]byte, blobsCount)
+						blobKzgCommitments[j] = blobKzgCommitment[:]
+					}
-				for j := range blobsCount {
-					blob := getRandBlob(t, int64(i+j))
-					blobs[j] = blob
+					pbSignedBeaconBlock.Block.Body.BlobKzgCommitments = blobKzgCommitments
+					signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
+					require.NoError(t, err)
-					blobKzgCommitment, err := kzg.BlobToKZGCommitment(&blob)
+					pbDataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
 					require.NoError(t, err)
-					blobKzgCommitments[j] = blobKzgCommitment[:]
+					dataColumnsSidecarFromSlot[blockParams.slot] = pbDataColumnsSidecar
+
+					roDataColumns := make([]blocks.RODataColumn, 0, len(pbDataColumnsSidecar))
+					for _, pbDataColumnSidecar := range pbDataColumnsSidecar {
+						roDataColumn, err := blocks.NewRODataColumn(pbDataColumnSidecar)
+						require.NoError(t, err)
+
+						roDataColumns = append(roDataColumns, roDataColumn)
+					}
+
+					roDatasColumns[i] = roDataColumns
 				}
-				pbSignedBeaconBlock.Block.Body.BlobKzgCommitments = blobKzgCommitments
 				signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
 				require.NoError(t, err)
-				pbDataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
+				roBlock, err := blocks.NewROBlock(signedBeaconBlock)
 				require.NoError(t, err)
-				dataColumnsSidecarFromSlot[blockParams.slot] = pbDataColumnsSidecar
-
-				roDataColumns := make([]blocks.RODataColumn, 0, len(pbDataColumnsSidecar))
-				for _, pbDataColumnSidecar := range pbDataColumnsSidecar {
-					roDataColumn, err := blocks.NewRODataColumn(pbDataColumnSidecar)
-					require.NoError(t, err)
-
-					roDataColumns = append(roDataColumns, roDataColumn)
-				}
-
-				roDatasColumns[i] = roDataColumns
+				roBlocks[i] = roBlock
 			}
-			signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
-			require.NoError(t, err)
+			// Set the Deneb fork epoch.
+			params.BeaconConfig().DenebForkEpoch = tc.denebForkEpoch
-			roBlock, err := blocks.NewROBlock(signedBeaconBlock)
-			require.NoError(t, err)
-
-			roBlocks[i] = roBlock
-		}
+			// Set the EIP-7594 fork epoch.
+			params.BeaconConfig().Eip7594ForkEpoch = tc.eip7954ForkEpoch
-		// Set the Deneb fork epoch.
-		params.BeaconConfig().DenebForkEpoch = tc.denebForkEpoch
+			// Save the blocks in the store.
+			storage := make(map[[fieldparams.RootLength]byte][]int)
+			for index, columns := range tc.storedDataColumns {
+				root := roBlocks[index].Root()
-		// Set the EIP-7594 fork epoch.
-		params.BeaconConfig().Eip7594ForkEpoch = tc.eip7954ForkEpoch
-
-		// Save the blocks in the store.
-		storage := make(map[[fieldparams.RootLength]byte][]int)
-		for index, columns := range tc.storedDataColumns {
-			root := roBlocks[index].Root()
+				columnsSlice := make([]int, 0, len(columns))
+				for column := range columns {
+					columnsSlice = append(columnsSlice, column)
+				}
-			columnsSlice := make([]int, 0, len(columns))
-			for column := range columns {
-				columnsSlice = append(columnsSlice, column)
+				storage[root] = columnsSlice
+			}
-			storage[root] = columnsSlice
-		}
+			blobStorageSummarizer := filesystem.NewMockBlobStorageSummarizer(t, storage)
-		blobStorageSummarizer := filesystem.NewMockBlobStorageSummarizer(t, storage)
+			// Create a chain and a clock.
+			chain, clock := defaultMockChain(t, tc.currentSlot)
-		// Create a chain and a clock.
-		chain, clock := defaultMockChain(t, tc.currentSlot)
+			// Create the P2P service.
+			p2pSvc := p2ptest.NewTestP2P(t, libp2p.Identity(genFixedCustodyPeer(t)))
+			nodeID, err := p2p.ConvertPeerIDToNodeID(p2pSvc.PeerID())
+			require.NoError(t, err)
+			p2pSvc.EnodeID = nodeID
-		// Create the P2P service.
-		p2p := p2ptest.NewTestP2P(t)
+			// Connect the peers.
+			peers := make([]*p2ptest.TestP2P, 0, len(tc.peersParams))
+			for i, peerParams := range tc.peersParams {
+				peer := createAndConnectPeer(t, p2pSvc, chain, dataColumnsSidecarFromSlot, peerParams, i)
+				peers = append(peers, peer)
+			}
-		// Connect the peers.
-		peers := make([]*p2ptest.TestP2P, 0, len(tc.peersParams))
-		for i, peerParams := range tc.peersParams {
-			peer := createAndConnectPeer(t, p2p, chain, dataColumnsSidecarFromSlot, peerParams, i)
-			peers = append(peers, peer)
-		}
+			peersID := make([]peer.ID, 0, len(peers))
+			for _, peer := range peers {
+				peerID := peer.PeerID()
+				peersID = append(peersID, peerID)
+			}
-		peersID := make([]peer.ID, 0, len(peers))
-		for _, peer := range peers {
-			peerID := peer.PeerID()
-			peersID = append(peersID, peerID)
-		}
+			// Create `bwb`.
+			bwb := make([]blocks.BlockWithROBlobs, 0, len(tc.blocksParams))
+			for _, roBlock := range roBlocks {
+				bwb = append(bwb, blocks.BlockWithROBlobs{Block: roBlock})
+			}
-		// Create `bwb`.
-		bwb := make([]blocks.BlockWithROBlobs, 0, len(tc.blocksParams))
-		for _, roBlock := range roBlocks {
-			bwb = append(bwb, blocks.BlockWithROBlobs{Block: roBlock})
-		}
+			// Create the block fetcher.
+			blocksFetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
+				clock:  clock,
+				ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Deneb},
+				p2p:    p2pSvc,
+				bs:     blobStorageSummarizer,
+			})
-		// Create the block fetcher.
-		blocksFetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
-			clock:  clock,
-			ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Deneb},
-			p2p:    p2p,
-			bs:     blobStorageSummarizer,
-		})
+			// Fetch the data columns from the peers.
+			err = blocksFetcher.fetchDataColumnsFromPeers(ctx, bwb, peersID)
+			require.NoError(t, err)
-		// Fetch the data columns from the peers.
-		err = blocksFetcher.fetchDataColumnsFromPeers(ctx, bwb, peersID)
-		require.NoError(t, err)
+			// Check the added RO data columns.
+			for i := range bwb {
+				blockWithROBlobs := bwb[i]
+				addedRODataColumns := tc.addedRODataColumns[i]
-		// Check the added RO data columns.
-		for i := range bwb {
-			blockWithROBlobs := bwb[i]
-			addedRODataColumns := tc.addedRODataColumns[i]
+				if addedRODataColumns == nil {
+					require.Equal(t, 0, len(blockWithROBlobs.Columns))
+					continue
+				}
-			if addedRODataColumns == nil {
-				require.Equal(t, 0, len(blockWithROBlobs.Columns))
-				continue
-			}
+				expectedRODataColumns := make([]blocks.RODataColumn, 0, len(tc.addedRODataColumns[i]))
+				for _, column := range addedRODataColumns {
+					roDataColumn := roDatasColumns[i][column]
+					expectedRODataColumns = append(expectedRODataColumns, roDataColumn)
-			expectedRODataColumns := make([]blocks.RODataColumn, 0, len(tc.addedRODataColumns[i]))
-			for _, column := range addedRODataColumns {
-				roDataColumn := roDatasColumns[i][column]
-				expectedRODataColumns = append(expectedRODataColumns, roDataColumn)
+				actualRODataColumns := blockWithROBlobs.Columns
+				require.DeepSSZEqual(t, expectedRODataColumns, actualRODataColumns)
 			}
+		})
+	}
+}
-			actualRODataColumns := blockWithROBlobs.Columns
-			require.DeepSSZEqual(t, expectedRODataColumns, actualRODataColumns)
-		}
+// This generates a peer which custodies the columns of 6,38,70 and 102.
+func genFixedCustodyPeer(t *testing.T) crypto.PrivKey {
+	rawObj, err := hex.DecodeString("58f40e5010e67d07e5fb37c62d6027964de2bef532acf06cf4f1766f5273ae95")
+	if err != nil {
+		t.Fatal(err)
 	}
+	pkey, err := crypto.UnmarshalSecp256k1PrivateKey(rawObj)
+	require.NoError(t, err)
+	return pkey
 }
diff --git a/config/params/mainnet_config.go b/config/params/mainnet_config.go
index 4815b4724f09..1f9551cd387b 100644
--- a/config/params/mainnet_config.go
+++ b/config/params/mainnet_config.go
@@ -299,7 +299,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
 	NumberOfColumns:                       128,
 	MaxCellsInExtendedMatrix:              768,
 	SamplesPerSlot:                        8,
-	CustodyRequirement:                    1,
+	CustodyRequirement:                    4,
 	MinEpochsForDataColumnSidecarsRequest: 4096,
 
 	// Values related to networking parameters.