Merge branch 'develop' into omerfirmak/defer-txpool-reorgs
omerfirmak authored Jul 25, 2024
2 parents d011a4b + e3bfb5f commit 33231f5
Showing 8 changed files with 51 additions and 34 deletions.
2 changes: 1 addition & 1 deletion go.mod
@@ -50,7 +50,7 @@ require (
github.com/prometheus/tsdb v0.7.1
github.com/rjeczalik/notify v0.9.1
github.com/rs/cors v1.7.0
github.com/scroll-tech/da-codec v0.1.1-0.20240716101216-c55ed9455cf4
github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923
github.com/scroll-tech/zktrie v0.8.4
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
4 changes: 2 additions & 2 deletions go.sum
@@ -392,8 +392,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.1-0.20240716101216-c55ed9455cf4 h1:40Lby3huKNFZ2EXzxqVpADB+caepDRrNRoUgTsCKN88=
github.com/scroll-tech/da-codec v0.1.1-0.20240716101216-c55ed9455cf4/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923 h1:A1ItzpnFDCHMh4g6cpeBZf7/fPf2lfwHbhjr/FSpk2w=
github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
7 changes: 4 additions & 3 deletions miner/scroll_worker.go
@@ -544,12 +544,13 @@ func (w *worker) handlePipelineResult(res *pipeline.Result) error {
w.currentPipeline.Release()
w.currentPipeline = nil

if res.FinalBlock != nil {
w.updateSnapshot(res.FinalBlock)
}

// Rows being nil without an OverflowingTx means that the block didn't go through CCC,
// which means that we are not the sequencer. Do not attempt to commit.
if res.Rows == nil && res.OverflowingTx == nil {
if res.FinalBlock != nil {
w.updateSnapshot(res.FinalBlock)
}
return nil
}
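
For readability, here is a minimal sketch of how handlePipelineResult looks after this hunk. The worker fields and helpers are the ones visible in the diff context; the sequencer commit path is elided rather than reproduced from the full file.

// Minimal sketch of the resulting flow (names taken from the diff context;
// the sequencer commit path is an assumption and is elided).
func (w *worker) handlePipelineResult(res *pipeline.Result) error {
	w.currentPipeline.Release()
	w.currentPipeline = nil

	// Not the sequencer: the block never went through CCC, so only refresh
	// the pending-block snapshot and return without attempting to commit.
	if res.Rows == nil && res.OverflowingTx == nil {
		if res.FinalBlock != nil {
			w.updateSnapshot(res.FinalBlock)
		}
		return nil
	}

	// ... sequencer path: commit the block and its row consumption (elided).
	return nil
}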

2 changes: 1 addition & 1 deletion params/version.go
@@ -24,7 +24,7 @@ import (
const (
VersionMajor = 5 // Major version component of the current release
VersionMinor = 5 // Minor version component of the current release
VersionPatch = 14 // Patch version component of the current release
VersionPatch = 17 // Patch version component of the current release
VersionMeta = "mainnet" // Version metadata to append to the version string
)

2 changes: 1 addition & 1 deletion rollup/circuitcapacitychecker/libzkp/Cargo.toml
@@ -33,7 +33,7 @@ log = "0.4"
once_cell = "1.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
serde_json = { version = "1.0.66", features = ["unbounded_depth"] }

[profile.test]
opt-level = 3
32 changes: 22 additions & 10 deletions rollup/circuitcapacitychecker/libzkp/src/lib.rs
@@ -12,6 +12,8 @@ pub mod checker {
use std::panic;
use std::ptr::null;
use std::ffi::CStr;
use serde::Deserialize as Deserializea;
use serde_json::Deserializer;

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CommonResult {
@@ -48,9 +50,19 @@ pub mod checker {
#[no_mangle]
pub unsafe extern "C" fn parse_json_to_rust_trace(trace_json_ptr: *const c_char) -> *mut BlockTrace {
let trace_json_cstr = unsafe { CStr::from_ptr(trace_json_ptr) };
let trace = serde_json::from_slice::<BlockTrace>(trace_json_cstr.to_bytes());
let trace_json_bytes = trace_json_cstr.to_bytes();
let mut deserializer = Deserializer::from_slice(trace_json_bytes);
deserializer.disable_recursion_limit();
let trace = BlockTrace::deserialize(&mut deserializer);
match trace {
Err(_) => return null_mut(),
Err(e) => {
log::warn!(
"failed to parse trace in parse_json_to_rust_trace, error: {:?}, trace_json_cstr: {:?}",
e,
trace_json_cstr,
);
return null_mut();
}
Ok(t) => return Box::into_raw(Box::new(t))
}
}
@@ -226,10 +238,10 @@ pub mod checker {
))?
.get_tx_num() as u64)
})
.map_or_else(
|e| bail!("circuit capacity checker (id: {id}) error in get_tx_num: {e:?}"),
|result| result,
)
.map_or_else(
|e| bail!("circuit capacity checker (id: {id}) error in get_tx_num: {e:?}"),
|result| result,
)
}

/// # Safety
@@ -260,10 +272,10 @@ pub mod checker {
.set_light_mode(light_mode);
Ok(())
})
.map_or_else(
|e| bail!("circuit capacity checker (id: {id}) error in set_light_mode: {e:?}"),
|result| result,
)
.map_or_else(
|e| bail!("circuit capacity checker (id: {id}) error in set_light_mode: {e:?}"),
|result| result,
)
}
}

8 changes: 6 additions & 2 deletions rollup/pipeline/pipeline.go
@@ -362,7 +362,7 @@ func (p *Pipeline) encodeStage(traces <-chan *BlockCandidate) <-chan *BlockCandi
trace.RustTrace = circuitcapacitychecker.MakeRustTrace(trace.LastTrace, buffer)
if trace.RustTrace == nil {
log.Error("making rust trace", "txHash", trace.LastTrace.Transactions[0].TxHash)
return
// ignore the error here, CCC stage will catch it and treat it as a CCC error
}
}
encodeTimer.UpdateSince(encodeStart)
@@ -431,7 +431,11 @@ func (p *Pipeline) cccStage(candidates <-chan *BlockCandidate, deadline time.Tim
var accRows *types.RowConsumption
var err error
if candidate != nil && p.ccc != nil {
accRows, err = p.ccc.ApplyTransactionRustTrace(candidate.RustTrace)
if candidate.RustTrace != nil {
accRows, err = p.ccc.ApplyTransactionRustTrace(candidate.RustTrace)
} else {
err = errors.New("no rust trace")
}
lastTxn := candidate.Txs[candidate.Txs.Len()-1]
cccTimer.UpdateSince(cccStart)
if err != nil {
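
Taken together, the two pipeline.go hunks change how a failed trace conversion is reported: encodeStage no longer bails out when MakeRustTrace returns nil, and cccStage turns the missing trace into an ordinary CCC error. A minimal sketch of the resulting branch in cccStage, using only names visible in the diff; the surrounding channel plumbing and error handling are elided.

// Sketch of the nil-trace handling in cccStage after this change (names from
// the diff context; everything around this branch is elided).
var accRows *types.RowConsumption
var err error
if candidate != nil && p.ccc != nil {
	if candidate.RustTrace != nil {
		accRows, err = p.ccc.ApplyTransactionRustTrace(candidate.RustTrace)
	} else {
		// encodeStage already logged the failure and forwarded the candidate;
		// surface it here as a regular CCC error so the usual error handling applies.
		err = errors.New("no rust trace")
	}
	// ... existing timer update and error handling continue here (elided).
}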
28 changes: 14 additions & 14 deletions rollup/rollup_sync_service/rollup_sync_service.go
@@ -241,10 +241,15 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
log.Warn("got nil when reading last finalized batch index. This should happen only once.")
}

parentBatchMeta := &rawdb.FinalizedBatchMeta{}
if startBatchIndex > 0 {
parentBatchMeta = rawdb.ReadFinalizedBatchMeta(s.db, startBatchIndex-1)
}

var highestFinalizedBlockNumber uint64
batchWriter := s.db.NewBatch()
for index := startBatchIndex; index <= batchIndex; index++ {
parentBatchMeta, chunks, err := s.getLocalInfoForBatch(index)
chunks, err := s.getLocalChunksForBatch(index)
if err != nil {
return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err)
}
@@ -256,6 +261,7 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB

rawdb.WriteFinalizedBatchMeta(batchWriter, index, finalizedBatchMeta)
highestFinalizedBlockNumber = endBlock
parentBatchMeta = finalizedBatchMeta

if index%100 == 0 {
log.Info("finalized batch progress", "batch index", index, "finalized l2 block height", endBlock)
@@ -283,17 +289,17 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
return nil
}

func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.FinalizedBatchMeta, []*encoding.Chunk, error) {
func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encoding.Chunk, error) {
chunkBlockRanges := rawdb.ReadBatchChunkRanges(s.db, batchIndex)
if len(chunkBlockRanges) == 0 {
return nil, nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges")
return nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges")
}

endBlockNumber := chunkBlockRanges[len(chunkBlockRanges)-1].EndBlockNumber
for i := 0; i < defaultMaxRetries; i++ {
if s.ctx.Err() != nil {
log.Info("Context canceled", "reason", s.ctx.Err())
return nil, nil, s.ctx.Err()
return nil, s.ctx.Err()
}

localSyncedBlockHeight := s.bc.CurrentBlock().Number().Uint64()
@@ -308,7 +314,7 @@ func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.Fina

localSyncedBlockHeight := s.bc.CurrentBlock().Number().Uint64()
if localSyncedBlockHeight < endBlockNumber {
return nil, nil, fmt.Errorf("local node is not synced up to the required block height: %v, local synced block height: %v", endBlockNumber, localSyncedBlockHeight)
return nil, fmt.Errorf("local node is not synced up to the required block height: %v, local synced block height: %v", endBlockNumber, localSyncedBlockHeight)
}

chunks := make([]*encoding.Chunk, len(chunkBlockRanges))
@@ -317,12 +323,12 @@ func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.Fina
for j := cr.StartBlockNumber; j <= cr.EndBlockNumber; j++ {
block := s.bc.GetBlockByNumber(j)
if block == nil {
return nil, nil, fmt.Errorf("failed to get block by number: %v", i)
return nil, fmt.Errorf("failed to get block by number: %v", i)
}
txData := encoding.TxsToTxsData(block.Transactions())
state, err := s.bc.StateAt(block.Root())
if err != nil {
return nil, nil, fmt.Errorf("failed to get block state, block: %v, err: %w", block.Hash().Hex(), err)
return nil, fmt.Errorf("failed to get block state, block: %v, err: %w", block.Hash().Hex(), err)
}
withdrawRoot := withdrawtrie.ReadWTRSlot(rcfg.L2MessageQueueAddress, state)
chunks[i].Blocks[j-cr.StartBlockNumber] = &encoding.Block{
@@ -333,13 +339,7 @@ func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.Fina
}
}

// get metadata of parent batch: default to genesis batch metadata.
parentBatchMeta := &rawdb.FinalizedBatchMeta{}
if batchIndex > 0 {
parentBatchMeta = rawdb.ReadFinalizedBatchMeta(s.db, batchIndex-1)
}

return parentBatchMeta, chunks, nil
return chunks, nil
}

func (s *RollupSyncService) getChunkRanges(batchIndex uint64, vLog *types.Log) ([]*rawdb.ChunkBlockRange, error) {
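
Read together, the rollup_sync_service.go hunks move the parent-batch metadata lookup out of the per-batch helper (now getLocalChunksForBatch): the metadata is read from the database once before the loop and then carried forward in memory after each batch is finalized. Since new metadata is staged in batchWriter rather than committed immediately, this also avoids re-reading entries mid-loop that may not be visible yet. A condensed sketch of the loop follows; identifiers are taken from the diff, while chunk validation and the derivation of finalizedBatchMeta are elided as assumptions about the surrounding code.

// Condensed sketch of the finalize loop after this refactor.
parentBatchMeta := &rawdb.FinalizedBatchMeta{} // genesis default
if startBatchIndex > 0 {
	parentBatchMeta = rawdb.ReadFinalizedBatchMeta(s.db, startBatchIndex-1)
}

batchWriter := s.db.NewBatch()
for index := startBatchIndex; index <= batchIndex; index++ {
	chunks, err := s.getLocalChunksForBatch(index) // no longer returns parent metadata
	if err != nil {
		return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err)
	}

	// ... validate chunks against parentBatchMeta and compute finalizedBatchMeta (elided)

	rawdb.WriteFinalizedBatchMeta(batchWriter, index, finalizedBatchMeta)
	parentBatchMeta = finalizedBatchMeta // carry forward in memory instead of re-reading the DB
}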
