Skip to content
This repository has been archived by the owner on Aug 16, 2024. It is now read-only.

fix: use correct rollback tails for empty frames without history #158

Closed
wants to merge 9 commits into the base branch from the contributor's branch
Closed
40 changes: 17 additions & 23 deletions src/tests/simple_tests/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,41 +2,35 @@
mod tests {
use crate::tests::simple_tests::{asm_tests::run_asm_based_test, Options};

#[test_log::test]
fn test_pubdata_and_storage_writes() {
fn test_snapshot_every_cycle(dir: &str, additional_contracts: &[i32]) {
run_asm_based_test(
"src/tests/simple_tests/testdata/log/storage/storage_writes",
&[],
&format!("src/tests/simple_tests/testdata/{}", dir),
additional_contracts,
Options {
// Do only 1 cycle per VM snapshot to really test all the boundary conditions.
cycles_per_vm_snapshot: 1,
..Default::default()
},
);
)
}

#[test_log::test]
fn test_pubdata_and_storage_writes() {
test_snapshot_every_cycle("log/storage/storage_writes", &[]);
}

#[test_log::test]
fn test_storage_reads() {
run_asm_based_test(
"src/tests/simple_tests/testdata/log/storage/storage_reads",
&[],
Options {
// Do only 1 cycle per VM snapshot to really test all the boundary conditions.
cycles_per_vm_snapshot: 1,
..Default::default()
},
)
test_snapshot_every_cycle("log/storage/storage_reads", &[]);
}

#[test_log::test]
fn test_storage_write_after_panic() {
test_snapshot_every_cycle("log/storage/storage_write_after_panic", &[]);
}

#[test_log::test]
fn test_storage_pubdata_refunds() {
run_asm_based_test(
"src/tests/simple_tests/testdata/log/storage/storage_pubdata_refunds",
&[],
Options {
// Do only 1 cycle per VM snapshot to really test all the boundary conditions.
cycles_per_vm_snapshot: 1,
..Default::default()
},
);
test_snapshot_every_cycle("log/storage/storage_pubdata_refunds", &[]);
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
.text
.file "Test_26"
.rodata.cst32
.p2align 5
.text
.globl __entry
__entry:
.main:
; r3 = 1000 — passed as the first operand of near_call below
; (presumably the ergs budget for the callee — TODO confirm against the VM spec)
add 1000, r0, r3
; call @test_panic; @expected_panic is the exception-handler label taken on panic
near_call r3, @test_panic, @expected_panic
; reached only if the near_call returned normally, i.e. nothing panicked — fail the test
revert("Near call not panicked")

test_panic:
add 1000, r0, r3
; nested call; @expected_panic2 is its exception handler
near_call r3, @test_panic2, @expected_panic2

expected_panic:
; check that we can access storage after panic
log.swrite r0, r0, r0
ret.ok r0

test_panic2:
ret.ok r0

expected_panic2:
; propagate a panic up to the caller's exception handler
ret.panic r0
2 changes: 1 addition & 1 deletion src/witness/callstack_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ pub struct CallstackWithAuxData {
pub full_history: Vec<CallstackActionHistoryEntry>,
pub log_queue_access_snapshots: Vec<(u32, RenumeratedQueryIndex)>,
pub log_access_history: Vec<(u32, QueryMarker)>,
pub child_into_parent: HashMap<usize, usize>,
pub child_into_parent: HashMap<usize, usize>, // ! TODO deadcode
pub flat_new_frames_history: Vec<(u32, CallStackEntry)>,
}

Expand Down
34 changes: 15 additions & 19 deletions src/witness/oracle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -245,6 +245,7 @@ pub fn create_artifacts_from_tracer<
let forward = callstack_with_aux_data.current_entry.forward_queue.clone();
let rollbacks = callstack_with_aux_data.current_entry.rollback_queue.clone();

// TODO deadcode?
let mut query_id_into_cycle_index = BTreeMap::new();

for (cycle, marker) in callstack_with_aux_data.log_access_history.iter() {
Expand All @@ -266,6 +267,8 @@ pub fn create_artifacts_from_tracer<
let mut original_log_queue_states = vec![];
let mut chain_of_states = vec![];
let mut original_log_queue_simulator = None;

// TODO deadcode?
let mut marker_into_queue_position_renumeration_index: HashMap<QueryMarker, usize> =
HashMap::new();

Expand All @@ -276,6 +279,8 @@ pub fn create_artifacts_from_tracer<
let mut sponges_data: HashMap<u32, LogAccessSpongesInfo<GoldilocksField>> = HashMap::new();

let mut global_beginnings_of_frames: BTreeMap<usize, u32> = BTreeMap::new();

// TODO deadcode?
let mut actions_in_each_frame: BTreeMap<usize, Vec<(u32, QueryMarker, usize)>> =
BTreeMap::new();

Expand Down Expand Up @@ -534,31 +539,22 @@ pub fn create_artifacts_from_tracer<
.last()
.map(|el| el.2 .1)
.unwrap_or([GoldilocksField::ZERO; QUEUE_STATE_WIDTH]);
let mut frame_rollback_tails = BTreeMap::new();

// we want to save rollback tails for every frame
let mut frame_rollback_tails = BTreeMap::new();
let mut rollback_queue_initial_tails_for_new_frames = vec![];
let max_frame_idx = callstack_with_aux_data.monotonic_frame_counter;

for frame_index in 0..max_frame_idx {
if frame_index == 0 {
let tail = global_end_of_storage_log;
frame_rollback_tails.insert(frame_index, tail);
let frame_beginning_cycle = global_beginnings_of_frames[&frame_index];
rollback_queue_initial_tails_for_new_frames.push((frame_beginning_cycle, tail));
continue;
}

let rollback_tail_marker = ExtendedLogQuery::FrameRollbackTailMarker(frame_index);
// wherever we have this marker we should look at the tail of the item right before it
let pos = log_position_mapping[&rollback_tail_marker];
let tail = if pos == -1 {
// empty
global_end_of_storage_log
// log_position_mapping should contain a pointer to the last state change in the chain_of_states before the rollback marker
// pointer is -1 if there were no such changes
let pointer = log_position_mapping[&rollback_tail_marker];
let tail = if pointer != -1 {
// take tail "after" item at pointer
chain_of_states[pointer as usize].2 .1
} else {
let pointer = pos as usize;
let element = chain_of_states[pointer].2 .1;

element
// We do not have any logs in history before the rollback of this frame
[GoldilocksField::ZERO; QUEUE_STATE_WIDTH]
};

frame_rollback_tails.insert(frame_index, tail);
Expand Down
Loading