diff --git a/Cargo.lock b/Cargo.lock index 0eb1bf7c6..6b9a8bc91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,9 +15,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] @@ -82,9 +82,9 @@ dependencies = [ [[package]] name = "async-io" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab006897723d9352f63e2b13047177c3982d8d79709d713ce7747a8f19fd1b0" +checksum = "83e21f3a490c72b3b0cf44962180e60045de2925d8dff97918f7ee43c8f637c7" dependencies = [ "autocfg", "concurrent-queue", @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -312,9 +312,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.19" +version = "3.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68d43934757334b5c0519ff882e1ab9647ac0258b47c24c4f490d78e42697fd5" +checksum = "23b71c3ce99b7611011217b366d923f1d0a7e07a92bb2dbf1e84508c673ca3bd" dependencies = [ "atty", "bitflags", @@ -375,9 +375,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc948ebb96241bb40ab73effeb80d9f93afaad49359d159a5e61be51619fe813" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -404,9 +404,9 @@ dependencies = [ [[package]] name = 
"cs_serde_bytes" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b5b4802671350eaced846f9fa82233c214b5d358ea85289b81c3aed58a0c6c" +checksum = "5fc673ddabf48214550526b068dc28065a75f05e21e452880095247c635b1d91" dependencies = [ "serde", ] @@ -550,6 +550,7 @@ version = "9.0.0-alpha.1" dependencies = [ "anyhow", "fil_actors_runtime", + "frc42_dispatch", "fvm_ipld_blockstore", "fvm_ipld_encoding", "fvm_shared", @@ -591,6 +592,25 @@ dependencies = [ "serde", ] +[[package]] +name = "fil_actor_datacap" +version = "9.0.0-alpha.1" +dependencies = [ + "cid", + "fil_actors_runtime", + "frc42_dispatch", + "frc46_token", + "fvm_actor_utils", + "fvm_ipld_blockstore", + "fvm_ipld_encoding", + "fvm_ipld_hamt", + "fvm_shared", + "lazy_static", + "num-derive", + "num-traits", + "serde", +] + [[package]] name = "fil_actor_init" version = "9.0.0-alpha.1" @@ -618,12 +638,14 @@ dependencies = [ "fil_actor_reward", "fil_actor_verifreg", "fil_actors_runtime", + "frc46_token", "fvm_ipld_amt", "fvm_ipld_bitfield", "fvm_ipld_blockstore", "fvm_ipld_encoding", "fvm_ipld_hamt", "fvm_shared", + "integer-encoding", "itertools", "libipld-core", "log", @@ -670,6 +692,7 @@ dependencies = [ "anyhow", "cid", "fil_actors_runtime", + "frc42_dispatch", "fvm_ipld_blockstore", "fvm_ipld_encoding", "fvm_ipld_hamt", @@ -758,11 +781,14 @@ dependencies = [ "anyhow", "cid", "fil_actors_runtime", + "frc42_dispatch", + "frc46_token", "fvm_ipld_blockstore", "fvm_ipld_encoding", "fvm_ipld_hamt", "fvm_shared", "lazy_static", + "log", "num-derive", "num-traits", "serde", @@ -779,6 +805,7 @@ dependencies = [ "cid", "derive_builder", "fvm_ipld_amt", + "fvm_ipld_bitfield", "fvm_ipld_blockstore", "fvm_ipld_encoding", "fvm_ipld_hamt", @@ -813,6 +840,7 @@ dependencies = [ "fil_actor_account", "fil_actor_bundler", "fil_actor_cron", + "fil_actor_datacap", "fil_actor_init", "fil_actor_market", "fil_actor_miner", @@ -835,6 +863,7 @@ dependencies = [ "cid", 
"fil_actor_account", "fil_actor_cron", + "fil_actor_datacap", "fil_actor_init", "fil_actor_market", "fil_actor_miner", @@ -869,6 +898,67 @@ dependencies = [ "serde", ] +[[package]] +name = "frc42_dispatch" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a14895265f48c04193451b8a6810d58026f0ec603c47a7e52d94a3a3b5433bf" +dependencies = [ + "frc42_hasher", + "frc42_macros", + "fvm_ipld_encoding", + "fvm_sdk", + "fvm_shared", + "thiserror", +] + +[[package]] +name = "frc42_hasher" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13d43157d57ad21c9188be8edd0fad9ec720aa9735b256b41a188119917b3c77" +dependencies = [ + "fvm_sdk", + "fvm_shared", + "thiserror", +] + +[[package]] +name = "frc42_macros" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c343356c3652593452cc1cfe95535a915261342e2a4c13bb8388ca29b259a400" +dependencies = [ + "blake2b_simd", + "frc42_hasher", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frc46_token" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39f30577fceeca364f7a9e3041161bdc239d2d662683aec2160b065557d66e78" +dependencies = [ + "anyhow", + "cid", + "frc42_dispatch", + "fvm_actor_utils", + "fvm_ipld_amt", + "fvm_ipld_blockstore", + "fvm_ipld_encoding", + "fvm_ipld_hamt", + "fvm_sdk", + "fvm_shared", + "integer-encoding", + "num-traits", + "serde", + "serde_tuple", + "thiserror", +] + [[package]] name = "futures" version = "0.3.24" @@ -973,6 +1063,23 @@ dependencies = [ "slab", ] +[[package]] +name = "fvm_actor_utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b2605197066de1548fd18d5c367cd425be4dab83d4c0f5f9f730d0eec6124c" +dependencies = [ + "anyhow", + "cid", + "frc42_dispatch", + "fvm_ipld_blockstore", + "fvm_ipld_encoding", + "fvm_sdk", + "fvm_shared", + "num-traits", + 
"thiserror", +] + [[package]] name = "fvm_ipld_amt" version = "0.4.2" @@ -1426,9 +1533,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e" +checksum = "2f7254b99e31cad77da24b08ebf628882739a608578bb1bcdfc1f9c21260d7c0" [[package]] name = "os_str_bytes" @@ -1696,9 +1803,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899bf02746a2c92bf1053d9327dadb252b01af1f81f90cdb902411f518bc7215" +checksum = "cf9db03534dff993187064c4e0c05a5708d2a9728ace9a8959b77bedf415dac5" dependencies = [ "cfg-if", "cpufeatures", @@ -1707,9 +1814,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a31480366ec990f395a61b7c08122d99bd40544fdb5abcfc1b06bb29994312c" +checksum = "eaedf34ed289ea47c2b741bb72e5357a209512d67bcd4bda44359e5bf0470f56" dependencies = [ "digest", "keccak", @@ -1726,9 +1833,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10c98bba371b9b22a71a9414e420f92ddeb2369239af08200816169d5e2dd7aa" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -1804,6 +1911,7 @@ dependencies = [ "cid", "fil_actor_account", "fil_actor_cron", + "fil_actor_datacap", "fil_actor_init", "fil_actor_market", "fil_actor_miner", @@ -1815,6 +1923,7 @@ dependencies = [ "fil_actor_verifreg", "fil_actors_runtime", "fil_builtin_actors_state", + "frc46_token", "fvm_ipld_bitfield", "fvm_ipld_blockstore", "fvm_ipld_encoding", @@ -1843,18 +1952,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" 
[[package]] name = "thiserror" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0a539a918745651435ac7db7a18761589a94cd7e94cd56999f828bf73c8a57" +checksum = "8c1b05ca9d106ba7d2e31a9dab4a64e7be2cce415321966ea3132c49a656e252" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c251e90f708e16c49a16f4917dc2131e75222b72edfa9cb7f7c58ae56aae0c09" +checksum = "e8f2591983642de85c921015f3f070c665a197ed69e417af436115e3a1407487" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 076a589f1..b0a7af1c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,16 +14,17 @@ publish = false [target.'cfg(target_arch = "wasm32")'.dependencies] fil_actor_account = { version = "9.0.0-alpha.1", path = "./actors/account", features = ["fil-actor"] } -fil_actor_verifreg = { version = "9.0.0-alpha.1", path = "./actors/verifreg", features = ["fil-actor"] } fil_actor_cron = { version = "9.0.0-alpha.1", path = "./actors/cron", features = ["fil-actor"] } +fil_actor_datacap = { version = "9.0.0-alpha.1", path = "./actors/datacap", features = ["fil-actor"] } +fil_actor_init = { version = "9.0.0-alpha.1", path = "./actors/init", features = ["fil-actor"] } fil_actor_market = { version = "9.0.0-alpha.1", path = "./actors/market", features = ["fil-actor"] } +fil_actor_miner = { version = "9.0.0-alpha.1", path = "./actors/miner", features = ["fil-actor"] } fil_actor_multisig = { version = "9.0.0-alpha.1", path = "./actors/multisig", features = ["fil-actor"] } fil_actor_paych = { version = "9.0.0-alpha.1", path = "./actors/paych", features = ["fil-actor"] } fil_actor_power = { version = "9.0.0-alpha.1", path = "./actors/power", features = ["fil-actor"] } -fil_actor_miner = { version = "9.0.0-alpha.1", path = "./actors/miner", features = ["fil-actor"] } fil_actor_reward = { 
version = "9.0.0-alpha.1", path = "./actors/reward", features = ["fil-actor"] } fil_actor_system = { version = "9.0.0-alpha.1", path = "./actors/system", features = ["fil-actor"] } -fil_actor_init = { version = "9.0.0-alpha.1", path = "./actors/init", features = ["fil-actor"] } +fil_actor_verifreg = { version = "9.0.0-alpha.1", path = "./actors/verifreg", features = ["fil-actor"] } [build-dependencies] fil_actor_bundler = "4.0.0" diff --git a/actors/account/Cargo.toml b/actors/account/Cargo.toml index 77748a8e0..1d80d174a 100644 --- a/actors/account/Cargo.toml +++ b/actors/account/Cargo.toml @@ -14,6 +14,7 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime" } +frc42_dispatch = "1.0.0" fvm_shared = { version = "2.0.0-alpha.2", default-features = false } serde = { version = "1.0.136", features = ["derive"] } num-traits = "0.2.14" diff --git a/actors/account/src/lib.rs b/actors/account/src/lib.rs index a985084f4..b601527d9 100644 --- a/actors/account/src/lib.rs +++ b/actors/account/src/lib.rs @@ -11,12 +11,13 @@ use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; -use crate::types::AuthenticateMessageParams; use fil_actors_runtime::builtin::singletons::SYSTEM_ACTOR_ADDR; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{actor_error, ActorError}; use fil_actors_runtime::{cbor, ActorDowncast}; +use crate::types::AuthenticateMessageParams; + pub use self::state::State; mod state; @@ -26,8 +27,6 @@ pub mod types; #[cfg(feature = "fil-actor")] fil_actors_runtime::wasm_trampoline!(Actor); -// * Updated to specs-actors commit: 845089a6d2580e46055c24415a6c32ee688e5186 (v3.0.0) - /// Account actor methods available #[derive(FromPrimitive)] #[repr(u64)] @@ -35,6 +34,7 @@ pub enum Method { Constructor = METHOD_CONSTRUCTOR, PubkeyAddress = 2, AuthenticateMessage = 3, + UniversalReceiverHook = 
frc42_dispatch::method_hash!("Receive"), } /// Account Actor @@ -102,6 +102,19 @@ impl Actor { Ok(()) } + + // Always succeeds, accepting any transfers. + pub fn universal_receiver_hook( + rt: &mut RT, + _params: &RawBytes, + ) -> Result<(), ActorError> + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + Ok(()) + } } impl ActorCode for Actor { @@ -127,6 +140,10 @@ impl ActorCode for Actor { Self::authenticate_message(rt, cbor::deserialize_params(params)?)?; Ok(RawBytes::default()) } + Some(Method::UniversalReceiverHook) => { + Self::universal_receiver_hook(rt, params)?; + Ok(RawBytes::default()) + } None => Err(actor_error!(unhandled_message; "Invalid method")), } } diff --git a/actors/account/tests/account_actor_test.rs b/actors/account/tests/account_actor_test.rs index 384613bdf..a437ca7c5 100644 --- a/actors/account/tests/account_actor_test.rs +++ b/actors/account/tests/account_actor_test.rs @@ -2,80 +2,91 @@ // SPDX-License-Identifier: Apache-2.0, MIT use anyhow::anyhow; -use fil_actor_account::types::AuthenticateMessageParams; -use fil_actor_account::{testing::check_state_invariants, Actor as AccountActor, State}; -use fil_actors_runtime::builtin::SYSTEM_ACTOR_ADDR; -use fil_actors_runtime::test_utils::*; use fvm_ipld_encoding::RawBytes; use fvm_shared::address::Address; use fvm_shared::crypto::signature::Signature; use fvm_shared::error::ExitCode; +use fvm_shared::MethodNum; -fn check_state(rt: &MockRuntime) { - let test_address = Address::new_id(1000); - let (_, acc) = check_state_invariants(&rt.get_state(), &test_address); - acc.assert_empty(); -} +use fil_actor_account::types::AuthenticateMessageParams; +use fil_actor_account::{testing::check_state_invariants, Actor as AccountActor, Method, State}; +use fil_actors_runtime::builtin::SYSTEM_ACTOR_ADDR; +use fil_actors_runtime::test_utils::*; + +#[test] +fn construction() { + fn construct(addr: Address, exit_code: ExitCode) { + let mut rt = MockRuntime { + receiver: 
Address::new_id(100), + caller: SYSTEM_ACTOR_ADDR, + caller_type: *SYSTEM_ACTOR_CODE_ID, + ..Default::default() + }; + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + + if exit_code.is_success() { + rt.call::( + Method::Constructor as MethodNum, + &RawBytes::serialize(addr).unwrap(), + ) + .unwrap(); -macro_rules! account_constructor_tests { - ($($name:ident: $value:expr,)*) => { - $( - #[test] - fn $name() { - let (addr, exit_code) = $value; - - let mut rt = MockRuntime { - receiver: fvm_shared::address::Address::new_id(100), - caller: SYSTEM_ACTOR_ADDR.clone(), - caller_type: SYSTEM_ACTOR_CODE_ID.clone(), - ..Default::default() - }; - rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); - - if exit_code.is_success() { - rt.call::(1, &RawBytes::serialize(addr).unwrap()).unwrap(); - - let state: State = rt.get_state(); - assert_eq!(state.address, addr); - rt.expect_validate_caller_any(); - - let pk: Address = rt - .call::(2, &RawBytes::default()) - .unwrap() - .deserialize() - .unwrap(); - assert_eq!(pk, addr); - - check_state(&rt); - } else { - expect_abort( - exit_code, - rt.call::(1,&RawBytes::serialize(addr).unwrap()) - ) - } - rt.verify(); - } - )* + let state: State = rt.get_state(); + assert_eq!(state.address, addr); + rt.expect_validate_caller_any(); + + let pk: Address = rt + .call::(Method::PubkeyAddress as MethodNum, &RawBytes::default()) + .unwrap() + .deserialize() + .unwrap(); + assert_eq!(pk, addr); + check_state(&rt); + } else { + expect_abort(exit_code, rt.call::(1, &RawBytes::serialize(addr).unwrap())) + } + rt.verify(); } -} -account_constructor_tests! 
{ - happy_construct_secp256k1_address: ( + construct( Address::new_secp256k1(&[2; fvm_shared::address::SECP_PUB_LEN]).unwrap(), - ExitCode::OK - ), - happy_construct_bls_address: ( - Address::new_bls(&[1; fvm_shared::address::BLS_PUB_LEN]).unwrap(), - ExitCode::OK - ), - fail_construct_id_address: ( - Address::new_id(1), - ExitCode::USR_ILLEGAL_ARGUMENT - ), - fail_construct_actor_address: ( - Address::new_actor(&[1, 2, 3]), - ExitCode::USR_ILLEGAL_ARGUMENT - ), + ExitCode::OK, + ); + construct(Address::new_bls(&[1; fvm_shared::address::BLS_PUB_LEN]).unwrap(), ExitCode::OK); + construct(Address::new_id(1), ExitCode::USR_ILLEGAL_ARGUMENT); + construct(Address::new_actor(&[1, 2, 3]), ExitCode::USR_ILLEGAL_ARGUMENT); +} + +#[test] +fn token_receiver() { + let mut rt = MockRuntime { + receiver: Address::new_id(100), + caller: SYSTEM_ACTOR_ADDR, + caller_type: *SYSTEM_ACTOR_CODE_ID, + ..Default::default() + }; + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + + let param = Address::new_secp256k1(&[2; fvm_shared::address::SECP_PUB_LEN]).unwrap(); + rt.call::( + Method::Constructor as MethodNum, + &RawBytes::serialize(¶m).unwrap(), + ) + .unwrap(); + + rt.expect_validate_caller_any(); + let ret = rt.call::( + Method::UniversalReceiverHook as MethodNum, + &RawBytes::new(vec![1, 2, 3]), + ); + assert!(ret.is_ok()); + assert_eq!(RawBytes::default(), ret.unwrap()); +} + +fn check_state(rt: &MockRuntime) { + let test_address = Address::new_id(1000); + let (_, acc) = check_state_invariants(&rt.get_state(), &test_address); + acc.assert_empty(); } #[test] diff --git a/actors/datacap/Cargo.toml b/actors/datacap/Cargo.toml new file mode 100644 index 000000000..ee0a6f224 --- /dev/null +++ b/actors/datacap/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "fil_actor_datacap" +description = "Builtin data cap actor for Filecoin" +version = "9.0.0-alpha.1" +license = "MIT OR Apache-2.0" +authors = ["Protocol Labs", "Filecoin Core Devs"] +edition = "2018" +repository = 
"https://github.com/filecoin-project/builtin-actors" +keywords = ["filecoin", "web3", "wasm"] + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime"} + +cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } +frc42_dispatch = "1.0.0" +frc46_token = "1.0.0" +fvm_actor_utils = "0.1.0" +fvm_ipld_blockstore = "0.1.1" +fvm_ipld_encoding = "0.2.2" +fvm_ipld_hamt = "0.5.1" +fvm_shared = { version = "2.0.0-alpha.2", default-features = false } +lazy_static = "1.4.0" +num-derive = "0.3.3" +num-traits = "0.2.14" +serde = { version = "1.0.136", features = ["derive"] } + +[dev-dependencies] +fil_actors_runtime = { path = "../../runtime", features = ["test_utils", "sector-default"] } +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] + diff --git a/actors/datacap/src/lib.rs b/actors/datacap/src/lib.rs new file mode 100644 index 000000000..75bdef7c5 --- /dev/null +++ b/actors/datacap/src/lib.rs @@ -0,0 +1,562 @@ +use std::marker::PhantomData; + +use frc46_token::token::types::{ + BurnFromParams, BurnFromReturn, BurnParams, BurnReturn, DecreaseAllowanceParams, + GetAllowanceParams, IncreaseAllowanceParams, MintReturn, RevokeAllowanceParams, + TransferFromParams, TransferFromReturn, TransferParams, TransferReturn, +}; +use frc46_token::token::{Token, TokenError, TOKEN_PRECISION}; +use fvm_actor_utils::messaging::{Messaging, MessagingError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::RawBytes; +use fvm_shared::address::Address; +use fvm_shared::bigint::BigInt; +use fvm_shared::econ::TokenAmount; +use fvm_shared::error::{ErrorNumber, ExitCode}; +use fvm_shared::receipt::Receipt; +use fvm_shared::{ActorID, MethodNum, METHOD_CONSTRUCTOR, METHOD_SEND}; +use lazy_static::lazy_static; +use num_derive::FromPrimitive; +use num_traits::{FromPrimitive, Zero}; + +use 
fil_actors_runtime::cbor::serialize; +use fil_actors_runtime::runtime::{ActorCode, Runtime}; +use fil_actors_runtime::{ + actor_error, cbor, ActorContext, ActorError, AsActorError, SYSTEM_ACTOR_ADDR, +}; + +pub use self::state::State; +pub use self::types::*; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(Actor); + +mod state; +pub mod testing; +mod types; + +pub const DATACAP_GRANULARITY: u64 = TOKEN_PRECISION as u64; + +lazy_static! { + // > 800 EiB + static ref INFINITE_ALLOWANCE: TokenAmount = TokenAmount::from_atto( + BigInt::from(TOKEN_PRECISION) + * BigInt::from(1_000_000_000_000_000_000_000_i128) + ); +} +/// Static method numbers for builtin-actor private dispatch. +/// The methods are also expected to be exposed via FRC-XXXX standard calling convention, +/// with numbers determined by name. +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + // Non-standard. + Mint = 2, + Destroy = 3, + // Static method numbers for token standard methods, for private use. + Name = 10, + Symbol = 11, + TotalSupply = 12, + BalanceOf = 13, + Transfer = 14, + TransferFrom = 15, + IncreaseAllowance = 16, + DecreaseAllowance = 17, + RevokeAllowance = 18, + Burn = 19, + BurnFrom = 20, +} + +pub struct Actor; + +impl Actor { + /// Constructor for DataCap Actor + pub fn constructor(rt: &mut RT, governor: Address) -> Result<(), ActorError> + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + // Confirm the governor address is an ID. 
+ rt.resolve_address(&governor) + .ok_or_else(|| actor_error!(illegal_argument, "failed to resolve governor address"))?; + + let st = State::new(rt.store(), governor).context("failed to create datacap state")?; + rt.create(&st)?; + Ok(()) + } + + pub fn name(_: &RT, _: ()) -> Result + where + BS: Blockstore, + RT: Runtime, + { + Ok("DataCap".to_string()) + } + + pub fn symbol(_: &RT, _: ()) -> Result + where + BS: Blockstore, + RT: Runtime, + { + Ok("DCAP".to_string()) + } + + pub fn total_supply(rt: &mut RT, _: ()) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let mut st: State = rt.state()?; + let msg = Messenger { rt, dummy: Default::default() }; + let token = as_token(&mut st, &msg); + Ok(token.total_supply()) + } + + pub fn balance_of(rt: &mut RT, address: Address) -> Result + where + BS: Blockstore, + RT: Runtime, + { + // NOTE: mutability and method caller here are awkward for a read-only call + rt.validate_immediate_caller_accept_any()?; + let mut st: State = rt.state()?; + let msg = Messenger { rt, dummy: Default::default() }; + let token = as_token(&mut st, &msg); + token.balance_of(&address).actor_result() + } + + pub fn allowance( + rt: &mut RT, + params: GetAllowanceParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let mut st: State = rt.state()?; + let msg = Messenger { rt, dummy: Default::default() }; + let token = as_token(&mut st, &msg); + token.allowance(¶ms.owner, ¶ms.operator).actor_result() + } + + /// Mints new data cap tokens for an address (a verified client). + /// Simultaneously sets the allowance for any specified operators to effectively infinite. + /// Only the governor can call this method. + /// This method is not part of the fungible token standard. 
+ pub fn mint(rt: &mut RT, params: MintParams) -> Result + where + BS: Blockstore, + RT: Runtime, + { + let mut hook = rt + .transaction(|st: &mut State, rt| { + // Only the governor can mint datacap tokens. + rt.validate_immediate_caller_is(std::iter::once(&st.governor))?; + let operator = st.governor; + + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + // Mint tokens "from" the operator to the beneficiary. + let ret = token + .mint( + &operator, + ¶ms.to, + ¶ms.amount, + RawBytes::default(), + RawBytes::default(), + ) + .actor_result(); + + // Set allowance for any specified operators. + for delegate in ¶ms.operators { + token + .set_allowance(¶ms.to, delegate, &INFINITE_ALLOWANCE) + .actor_result()?; + } + + ret + }) + .context("state transaction failed")?; + + let mut st: State = rt.state()?; + let msg = Messenger { rt, dummy: Default::default() }; + let intermediate = hook.call(&&msg).actor_result()?; + as_token(&mut st, &msg).mint_return(intermediate).actor_result() + } + + /// Destroys data cap tokens for an address (a verified client). + /// Only the governor can call this method. + /// This method is not part of the fungible token standard, and is named distinctly from + /// "burn" to reflect that distinction. + pub fn destroy(rt: &mut RT, params: DestroyParams) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.transaction(|st: &mut State, rt| { + // Only the governor can destroy datacap tokens on behalf of a holder. + rt.validate_immediate_caller_is(std::iter::once(&st.governor))?; + + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + // Burn tokens as if the holder had invoked burn() themselves. + // The governor doesn't need an allowance. + token.burn(¶ms.owner, ¶ms.amount).actor_result() + }) + .context("state transaction failed") + } + + /// Transfers data cap tokens to an address. + /// Data cap tokens are not generally transferable. 
+ /// Succeeds if the to address is the governor, otherwise always fails. + pub fn transfer( + rt: &mut RT, + params: TransferParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let operator = &rt.message().caller(); + let from = operator; + // Resolve to address for comparison with governor address. + let to = rt + .resolve_address(¶ms.to) + .context_code(ExitCode::USR_ILLEGAL_ARGUMENT, "to must be ID address")?; + let to_address = Address::new_id(to); + + let mut hook = rt + .transaction(|st: &mut State, rt| { + let allowed = to_address == st.governor; + if !allowed { + return Err(actor_error!(forbidden, "transfer not allowed")); + } + + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + token + .transfer( + from, + &to_address, + ¶ms.amount, + params.operator_data.clone(), + RawBytes::default(), + ) + .actor_result() + }) + .context("state transaction failed")?; + + let mut st: State = rt.state()?; + let msg = Messenger { rt, dummy: Default::default() }; + let intermediate = hook.call(&&msg).actor_result()?; + as_token(&mut st, &msg).transfer_return(intermediate).actor_result() + } + + /// Transfers data cap tokens between addresses. + /// Data cap tokens are not generally transferable between addresses. + /// Succeeds if the to address is the governor, otherwise always fails. + pub fn transfer_from( + rt: &mut RT, + params: TransferFromParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let operator = rt.message().caller(); + let from = params.from; + // Resolve to address for comparison with governor. 
+ let to = rt + .resolve_address(¶ms.to) + .context_code(ExitCode::USR_ILLEGAL_ARGUMENT, "to must be an ID address")?; + let to_address = Address::new_id(to); + + let mut hook = rt + .transaction(|st: &mut State, rt| { + let allowed = to_address == st.governor; + if !allowed { + return Err(actor_error!(forbidden, "transfer not allowed")); + } + + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + token + .transfer_from( + &operator, + &from, + &to_address, + ¶ms.amount, + params.operator_data.clone(), + RawBytes::default(), + ) + .actor_result() + }) + .context("state transaction failed")?; + + let mut st: State = rt.state()?; + let msg = Messenger { rt, dummy: Default::default() }; + let intermediate = hook.call(&&msg).actor_result()?; + as_token(&mut st, &msg).transfer_from_return(intermediate).actor_result() + } + + pub fn increase_allowance( + rt: &mut RT, + params: IncreaseAllowanceParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let owner = rt.message().caller(); + let operator = params.operator; + + rt.transaction(|st: &mut State, rt| { + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + token.increase_allowance(&owner, &operator, ¶ms.increase).actor_result() + }) + .context("state transaction failed") + } + + pub fn decrease_allowance( + rt: &mut RT, + params: DecreaseAllowanceParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let owner = &rt.message().caller(); + let operator = ¶ms.operator; + + rt.transaction(|st: &mut State, rt| { + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + token.decrease_allowance(owner, operator, ¶ms.decrease).actor_result() + }) + .context("state transaction failed") + } + + pub fn revoke_allowance( + rt: &mut RT, + params: RevokeAllowanceParams, + ) -> Result + where 
+ BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let owner = &rt.message().caller(); + let operator = ¶ms.operator; + + rt.transaction(|st: &mut State, rt| { + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + token.revoke_allowance(owner, operator).actor_result() + }) + .context("state transaction failed") + } + + pub fn burn(rt: &mut RT, params: BurnParams) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let owner = &rt.message().caller(); + + rt.transaction(|st: &mut State, rt| { + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + token.burn(owner, ¶ms.amount).actor_result() + }) + .context("state transaction failed") + } + + pub fn burn_from( + rt: &mut RT, + params: BurnFromParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let operator = &rt.message().caller(); + let owner = ¶ms.owner; + + rt.transaction(|st: &mut State, rt| { + let msg = Messenger { rt, dummy: Default::default() }; + let mut token = as_token(st, &msg); + token.burn_from(operator, owner, ¶ms.amount).actor_result() + }) + .context("state transaction failed") + } +} + +/// Implementation of the token library's messenger trait in terms of the built-in actors' +/// runtime library. +struct Messenger<'a, BS, RT> +where + BS: Blockstore, + RT: Runtime, +{ + rt: &'a mut RT, + // Without this, Rust complains the BS parameter is unused. + // This might be solved better by having BS as an associated type of the Runtime trait. + dummy: PhantomData, +} + +// The trait is implemented for Messenger _reference_ since the mutable ref to rt has been +// moved into it and we can't move the messenger instance since callers need to get at the +// rt that's now in there. 
+impl<'a, BS, RT> Messaging for &Messenger<'a, BS, RT> +where + BS: Blockstore, + RT: Runtime, +{ + fn actor_id(&self) -> ActorID { + // The Runtime unhelpfully wraps receiver in an address, while the Messaging trait + // is closer to the syscall interface. + self.rt.message().receiver().id().unwrap() + } + + fn send( + &self, + to: &Address, + method: MethodNum, + params: &RawBytes, + value: &TokenAmount, + ) -> fvm_actor_utils::messaging::Result { + // The Runtime discards some of the information from the syscall :-( + let fake_gas_used = 0; + let fake_syscall_error_number = ErrorNumber::NotFound; + self.rt + .send(to, method, params.clone(), value.clone()) + .map(|bytes| Receipt { + exit_code: ExitCode::OK, + return_data: bytes, + gas_used: fake_gas_used, + }) + .map_err(|_| MessagingError::Syscall(fake_syscall_error_number)) + } + + fn resolve_id(&self, address: &Address) -> fvm_actor_utils::messaging::Result { + self.rt.resolve_address(address).ok_or(MessagingError::AddressNotInitialized(*address)) + } + + fn initialize_account(&self, address: &Address) -> fvm_actor_utils::messaging::Result { + let fake_syscall_error_number = ErrorNumber::NotFound; + if self.rt.send(address, METHOD_SEND, Default::default(), TokenAmount::zero()).is_err() { + return Err(MessagingError::Syscall(fake_syscall_error_number)); + } + self.resolve_id(address) + } +} + +// Returns a token instance wrapping the token state. 
+fn as_token<'st, BS, RT>( + st: &'st mut State, + msg: &'st Messenger<'st, BS, RT>, +) -> Token<'st, &'st BS, &'st Messenger<'st, BS, RT>> +where + BS: Blockstore, + RT: Runtime, +{ + Token::wrap(msg.rt.store(), msg, DATACAP_GRANULARITY, &mut st.token) +} + +trait AsActorResult { + fn actor_result(self) -> Result; +} + +impl AsActorResult for Result { + fn actor_result(self) -> Result { + self.map_err(|e| ActorError::unchecked(ExitCode::from(&e), e.to_string())) + } +} + +impl ActorCode for Actor { + fn invoke_method( + rt: &mut RT, + method: MethodNum, + params: &RawBytes, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + // I'm trying to find a fixed template for these blocks so we can macro it. + // Current blockers: + // - the serialize method maps () to CBOR null (we want no bytes instead) + // - the serialize method can't do BigInts + match FromPrimitive::from_u64(method) { + Some(Method::Constructor) => { + Self::constructor(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::default()) + } + Some(Method::Mint) => { + let ret = Self::mint(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "mint result") + } + Some(Method::Destroy) => { + let ret = Self::destroy(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "destroy result") + } + Some(Method::Name) => { + let ret = Self::name(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "name result") + } + Some(Method::Symbol) => { + let ret = Self::symbol(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "symbol result") + } + Some(Method::TotalSupply) => { + let ret = Self::total_supply(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "total_supply result") + } + Some(Method::BalanceOf) => { + let ret = Self::balance_of(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "balance_of result") + } + Some(Method::Transfer) => { + let ret = Self::transfer(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "transfer result") + } + 
Some(Method::TransferFrom) => { + let ret = Self::transfer_from(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "transfer_from result") + } + Some(Method::IncreaseAllowance) => { + let ret = Self::increase_allowance(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "increase_allowance result") + } + Some(Method::DecreaseAllowance) => { + let ret = Self::decrease_allowance(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "decrease_allowance result") + } + Some(Method::RevokeAllowance) => { + Self::revoke_allowance(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::default()) + } + Some(Method::Burn) => { + let ret = Self::burn(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "burn result") + } + Some(Method::BurnFrom) => { + let ret = Self::burn_from(rt, cbor::deserialize_params(params)?)?; + serialize(&ret, "burn_from result") + } + None => Err(actor_error!(unhandled_message; "Invalid method")), + } + } +} diff --git a/actors/datacap/src/state.rs b/actors/datacap/src/state.rs new file mode 100644 index 000000000..61a58cf7c --- /dev/null +++ b/actors/datacap/src/state.rs @@ -0,0 +1,37 @@ +use frc46_token::token; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::Cbor; +use fvm_shared::address::Address; +use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; +use fvm_shared::ActorID; + +use fil_actors_runtime::{ActorError, AsActorError}; + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct State { + pub governor: Address, + pub token: token::state::TokenState, +} + +impl State { + pub fn new(store: &BS, governor: Address) -> Result { + let token_state = token::state::TokenState::new(store) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create token state")?; + Ok(State { governor, token: token_state }) + } + + // Visible for testing + pub fn balance( + &self, + bs: &BS, + owner: ActorID, + ) -> Result { + self.token + .get_balance(bs, owner) + 
.context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get balance") + } +} + +impl Cbor for State {} diff --git a/actors/datacap/src/testing.rs b/actors/datacap/src/testing.rs new file mode 100644 index 000000000..8e84f4c41 --- /dev/null +++ b/actors/datacap/src/testing.rs @@ -0,0 +1,23 @@ +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::address::Protocol; + +use fil_actors_runtime::MessageAccumulator; + +use crate::State; + +pub struct StateSummary {} + +/// Checks internal invariants of data cap token actor state. +pub fn check_state_invariants( + state: &State, + store: &BS, +) -> (StateSummary, MessageAccumulator) { + let acc = MessageAccumulator::default(); + acc.require(state.governor.protocol() == Protocol::ID, "registry must be ID address"); + let r = state.token.check_invariants(store); + if let Err(e) = r { + acc.add(e.to_string()); + } + + (StateSummary {}, acc) +} diff --git a/actors/datacap/src/types.rs b/actors/datacap/src/types.rs new file mode 100644 index 000000000..4e9ded1a2 --- /dev/null +++ b/actors/datacap/src/types.rs @@ -0,0 +1,24 @@ +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::Cbor; +use fvm_shared::address::Address; +use fvm_shared::econ::TokenAmount; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct MintParams { + // Recipient of the newly minted tokens. + pub to: Address, + // Amount of tokens to mint. + pub amount: TokenAmount, + // Addresses to be granted effectively-infinite operator allowance for the recipient. + pub operators: Vec
, +} + +impl Cbor for MintParams {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct DestroyParams { + pub owner: Address, + pub amount: TokenAmount, +} + +impl Cbor for DestroyParams {} diff --git a/actors/datacap/tests/datacap_actor_test.rs b/actors/datacap/tests/datacap_actor_test.rs new file mode 100644 index 000000000..2fcd87500 --- /dev/null +++ b/actors/datacap/tests/datacap_actor_test.rs @@ -0,0 +1,95 @@ +use fvm_shared::address::Address; +use lazy_static::lazy_static; + +use fil_actors_runtime::test_utils::MockRuntime; +use fil_actors_runtime::VERIFIED_REGISTRY_ACTOR_ADDR; + +use crate::harness::{new_runtime, Harness}; + +mod harness; + +lazy_static! { + static ref ALICE: Address = Address::new_id(101); + static ref BOB: Address = Address::new_id(102); + static ref CARLA: Address = Address::new_id(103); +} + +mod construction { + use crate::*; + use fil_actors_runtime::VERIFIED_REGISTRY_ACTOR_ADDR; + + #[test] + fn construct_with_verified() { + let mut rt = new_runtime(); + let h = Harness { registry: VERIFIED_REGISTRY_ACTOR_ADDR }; + h.construct_and_verify(&mut rt, &h.registry); + h.check_state(&rt); + } +} + +mod mint { + use fvm_shared::econ::TokenAmount; + use fvm_shared::error::ExitCode; + use fvm_shared::MethodNum; + + use fil_actor_datacap::{Actor, Method, MintParams}; + use fil_actors_runtime::cbor::serialize; + use fil_actors_runtime::test_utils::{expect_abort_contains_message, MARKET_ACTOR_CODE_ID}; + use fil_actors_runtime::{STORAGE_MARKET_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR}; + + use crate::*; + + #[test] + fn mint_balances() { + // The token library has far more extensive tests, this is just a sanity check. 
+ let (mut rt, h) = make_harness(); + + let amt = TokenAmount::from_whole(1); + let ret = h.mint(&mut rt, &*ALICE, &amt, vec![]).unwrap(); + assert_eq!(amt, ret.supply); + assert_eq!(amt, ret.balance); + assert_eq!(amt, h.get_supply(&rt)); + assert_eq!(amt, h.get_balance(&rt, &*ALICE)); + + let ret = h.mint(&mut rt, &*BOB, &amt, vec![]).unwrap(); + assert_eq!(&amt * 2, ret.supply); + assert_eq!(amt, ret.balance); + assert_eq!(&amt * 2, h.get_supply(&rt)); + assert_eq!(amt, h.get_balance(&rt, &*BOB)); + + h.check_state(&rt); + } + + #[test] + fn requires_verifreg_caller() { + let (mut rt, _) = make_harness(); + let amt = TokenAmount::from_whole(1); + let params = MintParams { to: *ALICE, amount: amt, operators: vec![] }; + + rt.expect_validate_caller_addr(vec![VERIFIED_REGISTRY_ACTOR_ADDR]); + rt.set_caller(*MARKET_ACTOR_CODE_ID, STORAGE_MARKET_ACTOR_ADDR); + expect_abort_contains_message( + ExitCode::USR_FORBIDDEN, + "caller address", + rt.call::(Method::Mint as MethodNum, &serialize(¶ms, "params").unwrap()), + ); + } + + #[test] + fn requires_whole_tokens() { + let (mut rt, h) = make_harness(); + let amt = TokenAmount::from_atto(100); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "must be a multiple of 1000000000000000000", + h.mint(&mut rt, &*ALICE, &amt, vec![]), + ); + } +} + +fn make_harness() -> (MockRuntime, Harness) { + let mut rt = new_runtime(); + let h = Harness { registry: VERIFIED_REGISTRY_ACTOR_ADDR }; + h.construct_and_verify(&mut rt, &h.registry); + (rt, h) +} diff --git a/actors/datacap/tests/harness/mod.rs b/actors/datacap/tests/harness/mod.rs new file mode 100644 index 000000000..9038c21fa --- /dev/null +++ b/actors/datacap/tests/harness/mod.rs @@ -0,0 +1,114 @@ +use frc46_token::receiver::types::{FRC46TokenReceived, UniversalReceiverParams, FRC46_TOKEN_TYPE}; +use frc46_token::token::types::MintReturn; +use fvm_ipld_encoding::RawBytes; +use fvm_shared::address::Address; +use fvm_shared::econ::TokenAmount; +use 
fvm_shared::error::ExitCode; +use fvm_shared::MethodNum; +use num_traits::Zero; + +use fil_actor_datacap::testing::check_state_invariants; +use fil_actor_datacap::{Actor as DataCapActor, Method, MintParams, State}; +use fil_actors_runtime::cbor::serialize; +use fil_actors_runtime::runtime::Runtime; +use fil_actors_runtime::test_utils::*; +use fil_actors_runtime::{ + ActorError, DATACAP_TOKEN_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, +}; + +pub fn new_runtime() -> MockRuntime { + MockRuntime { + receiver: DATACAP_TOKEN_ACTOR_ADDR, + caller: SYSTEM_ACTOR_ADDR, + caller_type: *SYSTEM_ACTOR_CODE_ID, + ..Default::default() + } +} + +#[allow(dead_code)] +pub fn new_harness() -> (Harness, MockRuntime) { + let mut rt = new_runtime(); + let h = Harness { registry: VERIFIED_REGISTRY_ACTOR_ADDR }; + h.construct_and_verify(&mut rt, &h.registry); + (h, rt) +} + +pub struct Harness { + pub registry: Address, +} + +impl Harness { + pub fn construct_and_verify(&self, rt: &mut MockRuntime, registry: &Address) { + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + let ret = rt + .call::( + Method::Constructor as MethodNum, + &RawBytes::serialize(registry).unwrap(), + ) + .unwrap(); + + assert_eq!(RawBytes::default(), ret); + rt.verify(); + + let state: State = rt.get_state(); + assert_eq!(self.registry, state.governor); + } + + pub fn mint( + &self, + rt: &mut MockRuntime, + to: &Address, + amount: &TokenAmount, + operators: Vec
, + ) -> Result { + rt.expect_validate_caller_addr(vec![VERIFIED_REGISTRY_ACTOR_ADDR]); + + // Expect the token receiver hook to be called. + let hook_params = UniversalReceiverParams { + type_: FRC46_TOKEN_TYPE, + payload: serialize( + &FRC46TokenReceived { + from: DATACAP_TOKEN_ACTOR_ADDR.id().unwrap(), + to: to.id().unwrap(), + operator: VERIFIED_REGISTRY_ACTOR_ADDR.id().unwrap(), + amount: amount.clone(), + operator_data: Default::default(), + token_data: Default::default(), + }, + "hook payload", + )?, + }; + // UniversalReceiverParams + rt.expect_send( + *to, + frc42_dispatch::method_hash!("Receive"), + serialize(&hook_params, "hook params")?, + TokenAmount::zero(), + RawBytes::default(), + ExitCode::OK, + ); + + let params = MintParams { to: *to, amount: amount.clone(), operators }; + rt.set_caller(*VERIFREG_ACTOR_CODE_ID, VERIFIED_REGISTRY_ACTOR_ADDR); + let ret = + rt.call::(Method::Mint as MethodNum, &serialize(¶ms, "params")?)?; + + rt.verify(); + Ok(ret.deserialize().unwrap()) + } + + // Reads the total supply from state directly. + pub fn get_supply(&self, rt: &MockRuntime) -> TokenAmount { + rt.get_state::().token.supply + } + + // Reads a balance from state directly. 
+ pub fn get_balance(&self, rt: &MockRuntime, address: &Address) -> TokenAmount { + rt.get_state::().token.get_balance(rt.store(), address.id().unwrap()).unwrap() + } + + pub fn check_state(&self, rt: &MockRuntime) { + let (_, acc) = check_state_invariants(&rt.get_state(), rt.store()); + acc.assert_empty(); + } +} diff --git a/actors/market/Cargo.toml b/actors/market/Cargo.toml index 78a138050..c9dc3f2f1 100644 --- a/actors/market/Cargo.toml +++ b/actors/market/Cargo.toml @@ -14,19 +14,22 @@ keywords = ["filecoin", "web3", "wasm"] crate-type = ["cdylib", "lib"] [dependencies] -fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime" } -fvm_ipld_hamt = "0.5.1" -fvm_shared = { version = "2.0.0-alpha.2", default-features = false } -fvm_ipld_bitfield = "0.5.2" -num-traits = "0.2.14" -num-derive = "0.3.3" -serde = { version = "1.0.136", features = ["derive"] } -cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } -log = "0.4.14" +fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime"} + anyhow = "1.0.65" +cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } +frc46_token = "1.0.0" +fvm_ipld_bitfield = "0.5.2" fvm_ipld_blockstore = "0.1.1" fvm_ipld_encoding = "0.2.2" +fvm_ipld_hamt = "0.5.1" +fvm_shared = { version = "2.0.0-alpha.2", default-features = false } +integer-encoding = { version = "3.0.3", default-features = false } libipld-core = { version = "0.13.1", features = ["serde-codec"] } +log = "0.4.14" +num-derive = "0.3.3" +num-traits = "0.2.14" +serde = { version = "1.0.136", features = ["derive"] } [dev-dependencies] fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime", features = ["test_utils", "sector-default"] } @@ -39,4 +42,4 @@ regex = "1" itertools = "0.10" [features] -fil-actor = ["fil_actors_runtime/fil-actor"] +fil-actor = ["fil_actors_runtime/fil-actor"] \ No newline at end of file diff --git a/actors/market/src/deal.rs 
b/actors/market/src/deal.rs index 661461876..f1f186aa3 100644 --- a/actors/market/src/deal.rs +++ b/actors/market/src/deal.rs @@ -1,8 +1,8 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT +use crate::ext::verifreg::AllocationID; use cid::{Cid, Version}; -use fil_actors_runtime::DealWeight; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::{BytesSer, Cbor}; use fvm_shared::address::Address; @@ -119,10 +119,6 @@ impl DealProposal { pub fn duration(&self) -> ChainEpoch { self.end_epoch - self.start_epoch } - /// Computes weight for a deal proposal, which is a function of its size and duration. - pub fn weight(&self) -> DealWeight { - DealWeight::from(self.duration()) * self.piece_size.0 - } pub fn total_storage_fee(&self) -> TokenAmount { self.storage_price_per_epoch.clone() * self.duration() as u64 } @@ -151,4 +147,6 @@ pub struct DealState { pub last_updated_epoch: ChainEpoch, // -1 if deal never slashed pub slash_epoch: ChainEpoch, + // ID of the verified registry allocation/claim for this deal's data (0 if none). 
+ pub verified_claim: AllocationID, } diff --git a/actors/market/src/ext.rs b/actors/market/src/ext.rs index 1ecd1ad2d..ff4082682 100644 --- a/actors/market/src/ext.rs +++ b/actors/market/src/ext.rs @@ -36,22 +36,50 @@ pub mod miner { pub mod verifreg { use super::*; + use cid::Cid; + use fil_actors_runtime::BatchReturn; + use fvm_shared::clock::ChainEpoch; + use fvm_shared::piece::PaddedPieceSize; - // based on fil_actor_verifreg - pub const USE_BYTES_METHOD: u64 = 5; - pub const RESTORE_BYTES_METHOD: u64 = 6; + pub type AllocationID = u64; + pub type ClaimID = u64; - pub type UseBytesParams = BytesParams; - pub type RestoreBytesParams = BytesParams; + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct AllocationRequest { + pub provider: Address, + pub data: Cid, + pub size: PaddedPieceSize, + pub term_min: ChainEpoch, + pub term_max: ChainEpoch, + pub expiration: ChainEpoch, + } #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] - pub struct BytesParams { - /// Address of verified client. - pub address: Address, - /// Number of bytes to use. - #[serde(with = "bigint_ser")] - pub deal_size: StoragePower, + pub struct ClaimExtensionRequest { + pub provider: Address, + pub claim: ClaimID, + pub term_max: ChainEpoch, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct AllocationRequests { + pub allocations: Vec, + pub extensions: Vec, } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct AllocationsResponse { + // Result for each allocation request. + pub allocation_results: BatchReturn, + // Result for each extension request. + pub extension_results: BatchReturn, + // IDs of new allocations created. 
+ pub new_allocations: Vec, + } +} + +pub mod datacap { + pub const TRANSFER_FROM_METHOD: u64 = 15; } pub mod reward { diff --git a/actors/market/src/lib.rs b/actors/market/src/lib.rs index 45d1edf68..66944cf6e 100644 --- a/actors/market/src/lib.rs +++ b/actors/market/src/lib.rs @@ -1,13 +1,16 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use cid::multihash::{Code, MultihashDigest, MultihashGeneric}; -use cid::Cid; +use std::cmp::min; use std::collections::{BTreeMap, BTreeSet}; +use cid::multihash::{Code, MultihashDigest, MultihashGeneric}; +use cid::Cid; +use frc46_token::token::types::{TransferFromParams, TransferFromReturn}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::{Cbor, RawBytes}; +use fvm_ipld_hamt::BytesKey; use fvm_shared::address::Address; use fvm_shared::bigint::BigInt; use fvm_shared::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED}; @@ -18,20 +21,21 @@ use fvm_shared::piece::PieceInfo; use fvm_shared::reward::ThisEpochRewardReturn; use fvm_shared::sector::{RegisteredSealProof, SectorSize, StoragePower}; use fvm_shared::{ActorID, MethodNum, METHOD_CONSTRUCTOR, METHOD_SEND}; +use integer_encoding::VarInt; use log::info; use num_derive::FromPrimitive; use num_traits::{FromPrimitive, Zero}; -use fil_actors_runtime::cbor::serialize_vec; +use fil_actors_runtime::cbor::{deserialize, serialize, serialize_vec}; use fil_actors_runtime::runtime::builtins::Type; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorDowncast, ActorError, BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, - CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, - VERIFIED_REGISTRY_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorDowncast, ActorError, AsActorError, + BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, CRON_ACTOR_ADDR, DATACAP_TOKEN_ACTOR_ADDR, + REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, 
SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; -use crate::ext::verifreg::UseBytesParams; +use crate::ext::verifreg::{AllocationID, AllocationRequest}; pub use self::deal::*; use self::policy::*; @@ -52,26 +56,7 @@ mod types; #[cfg(feature = "fil-actor")] fil_actors_runtime::wasm_trampoline!(Actor); -fn request_miner_control_addrs( - rt: &mut RT, - miner_id: ActorID, -) -> Result<(Address, Address, Vec
), ActorError> -where - BS: Blockstore, - RT: Runtime, -{ - let ret = rt.send( - &Address::new_id(miner_id), - ext::miner::CONTROL_ADDRESSES_METHOD, - RawBytes::default(), - TokenAmount::zero(), - )?; - let addrs: ext::miner::GetControlAddressesReturnParams = ret.deserialize()?; - - Ok((addrs.owner, addrs.worker, addrs.control_addresses)) -} - -// * Updated to specs-actors commit: e195950ba98adb8ce362030356bf4a3809b7ec77 (v2.3.2) +pub const NO_ALLOCATION_ID: u64 = 0; /// Market actor methods available #[derive(FromPrimitive)] @@ -143,9 +128,7 @@ impl Actor { ) })?; - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state()?; Ok(()) })?; @@ -200,9 +183,7 @@ impl Actor { ) })?; - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state()?; Ok(ex) })?; @@ -266,16 +247,23 @@ impl Actor { let baseline_power = request_current_baseline_power(rt)?; let (network_raw_power, _) = request_current_network_power(rt)?; - // Drop invalid deals + struct ValidDeal { + proposal: DealProposal, + cid: Cid, + allocation: AllocationID, + } + + // Deals that passed validation. + let mut valid_deals: Vec = Vec::with_capacity(params.deals.len()); + // CIDs of valid proposals. let mut proposal_cid_lookup = BTreeSet::new(); - let mut valid_proposal_cids = Vec::new(); - let mut valid_deals = Vec::with_capacity(params.deals.len()); let mut total_client_lockup: BTreeMap = BTreeMap::new(); let mut total_provider_lockup = TokenAmount::zero(); let mut valid_input_bf = BitField::default(); - let mut state: State = rt.state::()?; + let curr_epoch = rt.curr_epoch(); + let mut state: State = rt.state::()?; let store = rt.store(); let mut msm = state.mutator(store); msm.with_pending_proposals(Permission::ReadOnly) @@ -348,7 +336,6 @@ impl Actor { // drop duplicate deals // Normalise provider and client addresses in the proposal stored on chain. 
// Must happen after signature verification and before taking cid. - deal.proposal.provider = Address::new_id(provider_id); deal.proposal.client = Address::new_id(client_id); let pcid = rt_deal_cid(rt, &deal.proposal).map_err( @@ -370,40 +357,64 @@ impl Actor { continue; } - // check VerifiedClient allowed cap and deduct PieceSize from cap - // drop deals with a DealSize that cannot be fully covered by VerifiedClient's available DataCap - if deal.proposal.verified_deal { - if let Err(e) = rt.send( - &VERIFIED_REGISTRY_ACTOR_ADDR, - crate::ext::verifreg::USE_BYTES_METHOD as u64, - RawBytes::serialize(UseBytesParams { - address: Address::new_id(client_id), - deal_size: BigInt::from(deal.proposal.piece_size.0), - })?, - TokenAmount::zero(), - ) { - info!("invalid deal {}: failed to acquire datacap exitcode: {}", di, e); - continue; + // For verified deals, transfer datacap tokens from the client + // to the verified registry actor along with a specification for the allocation. + // Drop deal if the transfer fails. + // This could be done in a batch, but one-at-a-time allows dropping of only + // some deals if the client's balance is insufficient, rather than dropping them all. + // An alternative could first fetch the available balance/allowance, and then make + // a batch transfer for an amount known to be available. 
+ // https://github.com/filecoin-project/builtin-actors/issues/662 + let allocation_id = if deal.proposal.verified_deal { + let params = datacap_transfer_request( + &Address::new_id(client_id), + vec![alloc_request_for_deal(&deal, rt.policy(), curr_epoch)], + )?; + let alloc_ids = rt + .send( + &DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::TRANSFER_FROM_METHOD as u64, + serialize(¶ms, "transfer parameters")?, + TokenAmount::zero(), + ) + .and_then(|ret| datacap_transfer_response(&ret)); + match alloc_ids { + Ok(ids) => { + // Note: when changing this to do anything other than expect complete success, + // inspect the BatchReturn values to determine which deals succeeded and which failed. + if ids.len() != 1 { + return Err(actor_error!( + unspecified, + "expected 1 allocation ID, got {:?}", + ids + )); + } + ids[0] + } + Err(e) => { + info!( + "invalid deal {}: failed to allocate datacap for verified deal: {}", + di, e + ); + continue; + } } - } + } else { + NO_ALLOCATION_ID + }; total_provider_lockup = provider_lockup; total_client_lockup.insert(client_id, client_lockup); proposal_cid_lookup.insert(pcid); - valid_proposal_cids.push(pcid); - valid_deals.push(deal); + valid_deals.push(ValidDeal { + proposal: deal.proposal, + cid: pcid, + allocation: allocation_id, + }); valid_input_bf.set(di as u64) } let valid_deal_count = valid_input_bf.len(); - if valid_deals.len() != valid_proposal_cids.len() { - return Err(actor_error!( - illegal_state, - "{} valid deals but {} valid proposal cids", - valid_deals.len(), - valid_proposal_cids.len() - )); - } if valid_deal_count != valid_deals.len() as u64 { return Err(actor_error!( illegal_state, @@ -430,38 +441,51 @@ impl Actor { })?; // All storage dealProposals will be added in an atomic transaction; this operation will be unrolled if any of them fails. // This should only fail on programmer error because all expected invalid conditions should be filtered in the first set of checks. 
- for (vid, valid_deal) in valid_deals.iter().enumerate() { + for valid_deal in valid_deals.iter() { msm.lock_client_and_provider_balances(&valid_deal.proposal)?; - let id = msm.generate_storage_deal_id(); - - let pcid = valid_proposal_cids[vid]; - - msm.pending_deals.as_mut().unwrap().put(pcid.to_bytes().into()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set pending deal") - })?; - msm.deal_proposals.as_mut().unwrap().set(id, valid_deal.proposal.clone()).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set deal"), - )?; + // Store the proposal CID in pending deals set. + msm.pending_deals + .as_mut() + .unwrap() + .put(valid_deal.cid.to_bytes().into()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set pending deal")?; + // Allocate a deal ID and store the proposal in the proposals AMT. + let deal_id = msm.generate_storage_deal_id(); + msm.deal_proposals + .as_mut() + .unwrap() + .set(deal_id, valid_deal.proposal.clone()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal")?; + // Store verified allocation (if any) in the pending allocation IDs map. + // It will be removed when the deal is activated or expires. + if valid_deal.allocation != NO_ALLOCATION_ID { + msm.pending_deal_allocation_ids + .as_mut() + .unwrap() + .set(deal_id_key(deal_id), valid_deal.allocation) + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to set deal allocation", + )?; + } - // We randomize the first epoch for when the deal will be processed so an attacker isn't able to + // Randomize the first epoch for when the deal will be processed so an attacker isn't able to // schedule too many deals for the same tick. 
let process_epoch = - gen_rand_next_epoch(rt.policy(), valid_deal.proposal.start_epoch, id); + gen_rand_next_epoch(rt.policy(), valid_deal.proposal.start_epoch, deal_id); - msm.deals_by_epoch.as_mut().unwrap().put(process_epoch, id).map_err(|e| { + msm.deals_by_epoch.as_mut().unwrap().put(process_epoch, deal_id).map_err(|e| { e.downcast_default( ExitCode::USR_ILLEGAL_STATE, "failed to set deal ops by epoch", ) })?; - new_deal_ids.push(id); + new_deal_ids.push(deal_id); } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state()?; Ok(()) })?; @@ -493,7 +517,7 @@ impl Actor { .sector_type .sector_size() .map_err(|e| actor_error!(illegal_argument, "sector size unknown: {}", e))?; - validate_and_compute_deal_weight( + validate_and_return_deal_space( &proposals, §or.deal_ids, &miner_addr, @@ -501,12 +525,7 @@ impl Actor { curr_epoch, Some(sector_size), ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to validate deal proposals for activation", - ) - })?; + .context("failed to validate deal proposals for activation")?; let commd = if sector.deal_ids.is_empty() { None @@ -519,9 +538,7 @@ impl Actor { Ok(VerifyDealsForActivationReturn { sectors: sectors_data }) } - /// Activate a set of deals, returning the combined deal weights. - /// The weight is defined as the sum, over all deals in the set, of the product of deal size - /// and duration. + /// Activate a set of deals, returning the combined deal space and extra info for verified deals. 
fn activate_deals( rt: &mut RT, params: ActivateDealsParams, @@ -534,13 +551,13 @@ impl Actor { let miner_addr = rt.message().caller(); let curr_epoch = rt.curr_epoch(); - let deal_weights = { + let deal_spaces = { let st: State = rt.state()?; let proposals = DealArray::load(&st.proposals, rt.store()).map_err(|e| { e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals") })?; - validate_and_compute_deal_weight( + validate_and_return_deal_space( &proposals, ¶ms.deal_ids, &miner_addr, @@ -548,19 +565,15 @@ impl Actor { curr_epoch, None, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to validate deal proposals for activation", - ) - })? + .context("failed to validate deal proposals for activation")? }; // Update deal states + let mut verified_infos = Vec::new(); rt.transaction(|st: &mut State, rt| { let mut msm = st.mutator(rt.store()); msm.with_deal_states(Permission::Write) - .with_pending_proposals(Permission::ReadOnly) + .with_pending_proposals(Permission::Write) .with_deal_proposals(Permission::ReadOnly) .build() .map_err(|e| { @@ -570,16 +583,18 @@ impl Actor { for deal_id in params.deal_ids { // This construction could be replaced with a single "update deal state" // state method, possibly batched over all deal ids at once. 
- let s = msm.deal_states.as_ref().unwrap().get(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get state for deal_id ({})", deal_id), - ) - })?; + let s = msm + .deal_states + .as_ref() + .unwrap() + .get(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to check state for deal ({})", deal_id) + })?; if s.is_some() { return Err(actor_error!( illegal_argument, - "deal {} already included in another sector", + "deal {} already activated", deal_id )); } @@ -589,22 +604,22 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load deal proposal {}", deal_id) })? - .ok_or_else(|| actor_error!(not_found, "no such deal_id: {}", deal_id))?; + .ok_or_else(|| actor_error!(not_found, "no such deal proposal {}", deal_id))?; let propc = rt_deal_cid(rt, proposal)?; - let has = - msm.pending_deals.as_ref().unwrap().has(&propc.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get pending proposal ({})", propc), - ) + // Confirm the deal is in the pending proposals queue. + // It will be removed from this queue later, during cron. + let has = msm + .pending_deals + .as_ref() + .unwrap() + .has(&propc.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get pending proposal ({})", propc) })?; if !has { @@ -615,6 +630,25 @@ impl Actor { )); } + // Extract and remove any verified allocation ID for the pending deal. + let allocation = msm + .pending_deal_allocation_ids + .as_mut() + .unwrap() + .delete(&deal_id_key(deal_id)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to remove allocation id for deal {}", deal_id) + })? 
+ .unwrap_or((BytesKey(vec![]), NO_ALLOCATION_ID)) + .1; + if allocation != NO_ALLOCATION_ID { + verified_infos.push(VerifiedDealInfo { + client: proposal.client.id().unwrap(), + allocation_id: allocation, + data: proposal.piece_cid, + size: proposal.piece_size, + }) + } msm.deal_states .as_mut() .unwrap() @@ -624,24 +658,19 @@ impl Actor { sector_start_epoch: curr_epoch, last_updated_epoch: EPOCH_UNDEFINED, slash_epoch: EPOCH_UNDEFINED, + verified_claim: allocation, }, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to set deal state {}", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set deal state {}", deal_id) })?; } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; - + msm.commit_state()?; Ok(()) })?; - Ok(ActivateDealsResult { weights: deal_weights }) + Ok(ActivateDealsResult { nonverified_deal_space: deal_spaces.deal_space, verified_infos }) } /// Terminate a set of deals in response to their containing sector being terminated. 
@@ -725,9 +754,7 @@ impl Actor { })?; } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state()?; Ok(()) })?; Ok(()) @@ -770,7 +797,6 @@ impl Actor { let mut amount_slashed = TokenAmount::zero(); let curr_epoch = rt.curr_epoch(); - let mut timed_out_verified_deals: Vec = Vec::new(); rt.transaction(|st: &mut State, rt| { let last_cron = st.last_cron; @@ -801,9 +827,7 @@ impl Actor { deal_ids.push(deal_id); Ok(()) }) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set deal state") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal state")?; for deal_id in deal_ids { let deal = msm @@ -811,11 +835,8 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) })? .ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) @@ -829,12 +850,7 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to get deal state", - ) - })? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state")? .cloned(); // deal has been published but not activated yet -> terminate it @@ -854,17 +870,15 @@ impl Actor { if !slashed.is_zero() { amount_slashed += slashed; } - if deal.verified_deal { - timed_out_verified_deals.push(deal); - } // Delete the proposal (but not state, which doesn't exist). 
- let deleted = - msm.deal_proposals.as_mut().unwrap().delete(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete deal proposal {}", deal_id), - ) + let deleted = msm + .deal_proposals + .as_mut() + .unwrap() + .delete(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete deal proposal {}", deal_id) })?; if deleted.is_none() { return Err(actor_error!( @@ -875,15 +889,13 @@ impl Actor { ) )); } + // Delete pending deal CID msm.pending_deals .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete pending proposal {}", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete pending proposal {}", deal_id) })? .ok_or_else(|| { actor_error!( @@ -891,6 +903,17 @@ impl Actor { "failed to delete pending proposal: does not exist" ) })?; + // Delete pending deal allocation id (if present). + msm.pending_deal_allocation_ids + .as_mut() + .unwrap() + .delete(&deal_id_key(deal_id)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!( + "failed to delete pending proposal allocation id for {}", + deal_id + ) + })?; continue; } @@ -1020,35 +1043,10 @@ impl Actor { msm.st.last_cron = rt.curr_epoch(); - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state()?; Ok(()) })?; - for d in timed_out_verified_deals { - let res = rt.send( - &VERIFIED_REGISTRY_ACTOR_ADDR, - ext::verifreg::RESTORE_BYTES_METHOD, - RawBytes::serialize(ext::verifreg::RestoreBytesParams { - address: d.client, - deal_size: BigInt::from(d.piece_size.0), - })?, - TokenAmount::zero(), - ); - if let Err(e) = res { - log::error!( - "failed to send RestoreBytes call to the verifreg actor for timed \ - out verified deal, client: {}, deal_size: {}, provider: {}, got code: {:?}. 
{}", - d.client, - d.piece_size.0, - d.provider, - e.exit_code(), - e.msg() - ); - } - } - if !amount_slashed.is_zero() { rt.send(&BURNT_FUNDS_ACTOR_ADDR, METHOD_SEND, RawBytes::default(), amount_slashed)?; } @@ -1084,64 +1082,104 @@ where }) } -pub fn validate_and_compute_deal_weight( +pub fn validate_and_return_deal_space( proposals: &DealArray, deal_ids: &[DealID], miner_addr: &Address, sector_expiry: ChainEpoch, sector_activation: ChainEpoch, sector_size: Option, -) -> anyhow::Result +) -> Result where BS: Blockstore, { let mut seen_deal_ids = BTreeSet::new(); - let mut total_deal_size = 0; - let mut total_deal_space_time = BigInt::zero(); - let mut total_verified_space_time = BigInt::zero(); + let mut deal_space = BigInt::zero(); + let mut verified_deal_space = BigInt::zero(); for deal_id in deal_ids { if !seen_deal_ids.insert(deal_id) { return Err(actor_error!( illegal_argument, "deal id {} present multiple times", deal_id - ) - .into()); + )); } let proposal = proposals - .get(*deal_id)? + .get(*deal_id) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deal")? 
.ok_or_else(|| actor_error!(not_found, "no such deal {}", deal_id))?; validate_deal_can_activate(proposal, miner_addr, sector_expiry, sector_activation) - .map_err(|e| e.wrap(&format!("cannot activate deal {}", deal_id)))?; + .with_context(|| format!("cannot activate deal {}", deal_id))?; - total_deal_size += proposal.piece_size.0; - let deal_space_time = detail::deal_weight(proposal); if proposal.verified_deal { - total_verified_space_time += deal_space_time; + verified_deal_space += proposal.piece_size.0; } else { - total_deal_space_time += deal_space_time; + deal_space += proposal.piece_size.0; } } if let Some(sector_size) = sector_size { - if total_deal_size > sector_size as u64 { + let total_deal_space = deal_space.clone() + verified_deal_space.clone(); + if total_deal_space > BigInt::from(sector_size as u64) { return Err(actor_error!( illegal_argument, "deals too large to fit in sector {} > {}", - total_deal_size, + total_deal_space, sector_size - ) - .into()); + )); } } - Ok(DealWeights { - deal_space: total_deal_size, - deal_weight: total_deal_space_time, - verified_deal_weight: total_verified_space_time, + Ok(DealSpaces { deal_space, verified_deal_space }) +} + +fn alloc_request_for_deal( + deal: &ClientDealProposal, + policy: &Policy, + curr_epoch: ChainEpoch, +) -> ext::verifreg::AllocationRequest { + let alloc_term_min = deal.proposal.end_epoch - deal.proposal.start_epoch; + let alloc_term_max = min( + alloc_term_min + policy.market_default_allocation_term_buffer, + policy.maximum_verified_allocation_term, + ); + let alloc_expiration = + min(deal.proposal.start_epoch, curr_epoch + policy.maximum_verified_allocation_expiration); + ext::verifreg::AllocationRequest { + provider: deal.proposal.provider, + data: deal.proposal.piece_cid, + size: deal.proposal.piece_size, + term_min: alloc_term_min, + term_max: alloc_term_max, + expiration: alloc_expiration, + } +} + +// Builds TransferFromParams for a transfer of datacap for specified allocations. 
+fn datacap_transfer_request( + client: &Address, + alloc_reqs: Vec, +) -> Result { + let datacap_required: u64 = alloc_reqs.iter().map(|it| it.size.0).sum(); + Ok(TransferFromParams { + from: *client, + to: VERIFIED_REGISTRY_ACTOR_ADDR, + amount: TokenAmount::from_whole(datacap_required), + operator_data: serialize( + &ext::verifreg::AllocationRequests { allocations: alloc_reqs, extensions: vec![] }, + "allocation requests", + )?, }) } +// Parses allocation IDs from a TransferFromReturn +fn datacap_transfer_response(ret: &RawBytes) -> Result, ActorError> { + let ret: TransferFromReturn = deserialize(ret, "transfer from response")?; + let allocs: ext::verifreg::AllocationsResponse = + deserialize(&ret.recipient_data, "allocations response")?; + Ok(allocs.new_allocations) +} + pub fn gen_rand_next_epoch( policy: &Policy, start_epoch: ChainEpoch, @@ -1324,6 +1362,25 @@ pub(crate) fn deal_cid(proposal: &DealProposal) -> Result { Ok(Cid::new_v1(DAG_CBOR, hash)) } +fn request_miner_control_addrs( + rt: &mut RT, + miner_id: ActorID, +) -> Result<(Address, Address, Vec
), ActorError> +where + BS: Blockstore, + RT: Runtime, +{ + let ret = rt.send( + &Address::new_id(miner_id), + ext::miner::CONTROL_ADDRESSES_METHOD, + RawBytes::default(), + TokenAmount::zero(), + )?; + let addrs: ext::miner::GetControlAddressesReturnParams = ret.deserialize()?; + + Ok((addrs.owner, addrs.worker, addrs.control_addresses)) +} + /// Resolves a provider or client address to the canonical form against which a balance should be held, and /// the designated recipient address of withdrawals (which is the same, for simple account parties). fn escrow_address( @@ -1389,6 +1446,11 @@ where Ok((ret.raw_byte_power, ret.quality_adj_power)) } +pub fn deal_id_key(k: DealID) -> BytesKey { + let bz = k.encode_var_vec(); + bz.into() +} + impl ActorCode for Actor { fn invoke_method( rt: &mut RT, diff --git a/actors/market/src/policy.rs b/actors/market/src/policy.rs index 3ca161bdc..913d00d67 100644 --- a/actors/market/src/policy.rs +++ b/actors/market/src/policy.rs @@ -5,7 +5,6 @@ use std::cmp::max; use fil_actors_runtime::network::EPOCHS_IN_DAY; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::DealWeight; use fvm_shared::bigint::{BigInt, Integer}; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; @@ -14,19 +13,9 @@ use fvm_shared::sector::StoragePower; use fvm_shared::TOTAL_FILECOIN; use num_traits::Zero; -use super::deal::DealProposal; - pub mod detail { - use super::*; - /// Maximum length of a deal label. pub const DEAL_MAX_LABEL_SIZE: usize = 256; - - /// Computes the weight for a deal proposal, which is a function of its size and duration. - pub fn deal_weight(proposal: &DealProposal) -> DealWeight { - let deal_duration = DealWeight::from(proposal.duration()); - deal_duration * proposal.piece_size.0 - } } /// Bounds (inclusive) on deal duration. 
diff --git a/actors/market/src/state.rs b/actors/market/src/state.rs index 34ee6acae..2b231a238 100644 --- a/actors/market/src/state.rs +++ b/actors/market/src/state.rs @@ -2,11 +2,13 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::balance_table::BalanceTable; +use crate::ext::verifreg::AllocationID; use anyhow::anyhow; use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ - actor_error, make_empty_map, ActorDowncast, ActorError, Array, Set, SetMultimap, + actor_error, make_empty_map, make_map_with_root_and_bitwidth, ActorDowncast, ActorError, Array, + AsActorError, Map, Set, SetMultimap, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -62,6 +64,9 @@ pub struct State { pub total_provider_locked_collateral: TokenAmount, /// Total storage fee that is locked in escrow -> unlocked when payments are made pub total_client_storage_fee: TokenAmount, + + /// Verified registry allocation IDs for deals that are not yet activated. + pub pending_deal_allocation_ids: Cid, // HAMT[DealID]AllocationID } impl State { @@ -80,10 +85,13 @@ impl State { let empty_balance_table = BalanceTable::new(store) .root() .map_err(|e| anyhow!("Failed to create empty balance table map: {}", e))?; - let empty_deal_ops_hamt = SetMultimap::new(store) .root() .map_err(|e| anyhow!("Failed to create empty multiset: {}", e))?; + let empty_pending_deal_allocation_map = + make_empty_map::<_, AllocationID>(store, HAMT_BIT_WIDTH).flush().map_err(|e| { + anyhow!("Failed to create empty pending deal allocation map: {}", e) + })?; Ok(Self { proposals: empty_proposals_array, states: empty_states_array, @@ -97,6 +105,7 @@ impl State { total_client_locked_collateral: TokenAmount::default(), total_provider_locked_collateral: TokenAmount::default(), total_client_storage_fee: TokenAmount::default(), + pending_deal_allocation_ids: empty_pending_deal_allocation_map, }) } @@ -172,6 +181,7 @@ pub(super) struct MarketStateMutation<'bs, 's, BS> { pub(super) 
pending_permit: Permission, pub(super) pending_deals: Option>, + pub(super) pending_deal_allocation_ids: Option>, pub(super) dpe_permit: Permission, pub(super) deals_by_epoch: Option>, @@ -202,6 +212,7 @@ where escrow_table: None, pending_permit: Permission::Invalid, pending_deals: None, + pending_deal_allocation_ids: None, dpe_permit: Permission::Invalid, deals_by_epoch: None, locked_permit: Permission::Invalid, @@ -236,6 +247,11 @@ where if self.pending_permit != Permission::Invalid { self.pending_deals = Some(Set::from_root(self.store, &self.st.pending_proposals)?); + self.pending_deal_allocation_ids = Some(make_map_with_root_and_bitwidth( + &self.st.pending_deal_allocation_ids, + self.store, + HAMT_BIT_WIDTH, + )?); } if self.dpe_permit != Permission::Invalid { @@ -278,25 +294,28 @@ where self } - pub(super) fn commit_state(&mut self) -> anyhow::Result<()> { + pub(super) fn commit_state(&mut self) -> Result<(), ActorError> { if self.proposal_permit == Permission::Write { if let Some(s) = &mut self.deal_proposals { - self.st.proposals = - s.flush().map_err(|e| e.downcast_wrap("failed to flush deal proposals"))?; + self.st.proposals = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal proposals")?; } } if self.state_permit == Permission::Write { if let Some(s) = &mut self.deal_states { - self.st.states = - s.flush().map_err(|e| e.downcast_wrap("failed to flush deal states"))?; + self.st.states = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal states")?; } } if self.locked_permit == Permission::Write { if let Some(s) = &mut self.locked_table { - self.st.locked_table = - s.root().map_err(|e| e.downcast_wrap("failed to flush locked table"))?; + self.st.locked_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush locked table")?; } if let Some(s) = &mut self.total_client_locked_collateral { self.st.total_client_locked_collateral = s.clone(); @@ -311,22 +330,31 @@ where if 
self.escrow_permit == Permission::Write { if let Some(s) = &mut self.escrow_table { - self.st.escrow_table = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.escrow_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } if self.pending_permit == Permission::Write { if let Some(s) = &mut self.pending_deals { - self.st.pending_proposals = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.pending_proposals = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; + } + if let Some(s) = &mut self.pending_deal_allocation_ids { + self.st.pending_deal_allocation_ids = s.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending deal allocation ids", + )?; } } if self.dpe_permit == Permission::Write { if let Some(s) = &mut self.deals_by_epoch { - self.st.deal_ops_by_epoch = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.deal_ops_by_epoch = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } diff --git a/actors/market/src/testing.rs b/actors/market/src/testing.rs index e154cff2a..ff5699185 100644 --- a/actors/market/src/testing.rs +++ b/actors/market/src/testing.rs @@ -5,6 +5,7 @@ use std::{ }; use cid::Cid; +use fil_actors_runtime::builtin::HAMT_BIT_WIDTH; use fil_actors_runtime::{ make_map_with_root_and_bitwidth, parse_uint_key, MessageAccumulator, SetMultimap, }; @@ -15,8 +16,10 @@ use fvm_shared::{ deal::DealID, econ::TokenAmount, }; +use integer_encoding::VarInt; use num_traits::Zero; +use crate::ext::verifreg::AllocationID; use crate::{ balance_table::BalanceTable, deal_cid, DealArray, DealMetaArray, State, PROPOSALS_AMT_BITWIDTH, }; @@ -138,6 +141,26 @@ pub fn check_state_invariants( ), ); + let mut pending_allocations = BTreeMap::::new(); + match make_map_with_root_and_bitwidth(&state.pending_deal_allocation_ids, 
store, HAMT_BIT_WIDTH) + { + Ok(pending_allocations_hamt) => { + let ret = pending_allocations_hamt.for_each(|key, allocation_id| { + let deal_id: u64 = u64::decode_var(key.0.as_slice()).unwrap().0; + + acc.require( + proposal_stats.get(&deal_id).is_some(), + format!("pending deal allocation {} not found in proposals", deal_id), + ); + + pending_allocations.insert(deal_id, *allocation_id); + Ok(()) + }); + acc.require_no_error(ret, "error iterating pending allocations"); + } + Err(e) => acc.add(format!("error loading pending allocations: {e}")), + }; + // deal states let mut deal_state_count = 0; match DealMetaArray::load(&state.states, store) { @@ -172,6 +195,7 @@ pub fn check_state_invariants( } else { acc.add(format!("no deal proposal for deal state {deal_id}")); } + acc.require(!pending_allocations.contains_key(&deal_id), format!("deal {deal_id} has pending allocation")); deal_state_count += 1; @@ -184,7 +208,6 @@ pub fn check_state_invariants( // pending proposals let mut pending_proposal_count = 0; - match make_map_with_root_and_bitwidth::<_, ()>( &state.pending_proposals, store, diff --git a/actors/market/src/types.rs b/actors/market/src/types.rs index b359c5c4f..39ee64e13 100644 --- a/actors/market/src/types.rs +++ b/actors/market/src/types.rs @@ -1,16 +1,20 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT +use super::ext::verifreg::AllocationID; use cid::Cid; -use fil_actors_runtime::{Array, DealWeight}; +use fil_actors_runtime::Array; use fvm_ipld_bitfield::BitField; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; use fvm_shared::address::Address; -use fvm_shared::bigint::bigint_ser; +use fvm_shared::bigint::{bigint_ser, BigInt}; use fvm_shared::clock::ChainEpoch; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; +use fvm_shared::piece::PaddedPieceSize; +use fvm_shared::ActorID; + use fvm_shared::sector::RegisteredSealProof; use super::deal::{ClientDealProposal, DealProposal, DealState}; 
@@ -95,18 +99,26 @@ pub struct ActivateDealsParams { pub sector_expiry: ChainEpoch, } +#[derive(Serialize_tuple, Deserialize_tuple, Clone)] +pub struct VerifiedDealInfo { + pub client: ActorID, + pub allocation_id: AllocationID, + pub data: Cid, + pub size: PaddedPieceSize, +} + #[derive(Serialize_tuple, Deserialize_tuple)] pub struct ActivateDealsResult { - pub weights: DealWeights, + #[serde(with = "bigint_ser")] + pub nonverified_deal_space: BigInt, + pub verified_infos: Vec, } - #[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Default)] -pub struct DealWeights { - pub deal_space: u64, +pub struct DealSpaces { #[serde(with = "bigint_ser")] - pub deal_weight: DealWeight, + pub deal_space: BigInt, #[serde(with = "bigint_ser")] - pub verified_deal_weight: DealWeight, + pub verified_deal_space: BigInt, } #[derive(Serialize_tuple, Deserialize_tuple)] diff --git a/actors/market/tests/cron_tick_timedout_deals.rs b/actors/market/tests/cron_tick_timedout_deals.rs index a3922805d..55714aab5 100644 --- a/actors/market/tests/cron_tick_timedout_deals.rs +++ b/actors/market/tests/cron_tick_timedout_deals.rs @@ -1,21 +1,17 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use fil_actor_market::ext::verifreg::RestoreBytesParams; use fil_actor_market::{ - ext, Actor as MarketActor, ClientDealProposal, Method, PublishStorageDealsParams, + Actor as MarketActor, ClientDealProposal, Method, PublishStorageDealsParams, }; use fil_actors_runtime::network::EPOCHS_IN_DAY; use fil_actors_runtime::test_utils::*; -use fil_actors_runtime::{ - BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, VERIFIED_REGISTRY_ACTOR_ADDR, -}; +use fil_actors_runtime::{BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE}; use fvm_ipld_encoding::RawBytes; use fvm_shared::clock::ChainEpoch; use fvm_shared::crypto::signature::Signature; use fvm_shared::econ::TokenAmount; use fvm_shared::error::ExitCode; -use fvm_shared::sector::StoragePower; use fvm_shared::METHOD_SEND; use 
fil_actor_market::ext::account::{AuthenticateMessageParams, AUTHENTICATE_MESSAGE_METHOD}; @@ -140,7 +136,7 @@ fn publishing_timed_out_deal_again_should_work_after_cron_tick_as_it_should_no_l } #[test] -fn timed_out_and_verified_deals_are_slashed_deleted_and_sent_to_the_registry_actor() { +fn timed_out_and_verified_deals_are_slashed_deleted() { let mut rt = setup(); let mut deal1 = generate_deal_and_add_funds( &mut rt, @@ -174,39 +170,13 @@ fn timed_out_and_verified_deals_are_slashed_deleted_and_sent_to_the_registry_act &mut rt, &MinerAddresses::default(), &[deal1.clone(), deal2.clone(), deal3.clone()], + 1, ); // do a cron tick for it -> all should time out and get slashed // ONLY deal1 and deal2 should be sent to the Registry actor rt.set_epoch(process_epoch(START_EPOCH, *deal_ids.last().unwrap())); - // expected sends to the registry actor - let param1 = RestoreBytesParams { - address: deal1.client, - deal_size: StoragePower::from(deal1.piece_size.0), - }; - let param2 = RestoreBytesParams { - address: deal2.client, - deal_size: StoragePower::from(deal2.piece_size.0), - }; - - rt.expect_send( - VERIFIED_REGISTRY_ACTOR_ADDR, - ext::verifreg::RESTORE_BYTES_METHOD as u64, - RawBytes::serialize(param1).unwrap(), - TokenAmount::zero(), - RawBytes::default(), - ExitCode::OK, - ); - rt.expect_send( - VERIFIED_REGISTRY_ACTOR_ADDR, - ext::verifreg::RESTORE_BYTES_METHOD as u64, - RawBytes::serialize(param2).unwrap(), - TokenAmount::zero(), - RawBytes::default(), - ExitCode::OK, - ); - let expected_burn = 3 * &deal1.provider_collateral; rt.expect_send( BURNT_FUNDS_ACTOR_ADDR, diff --git a/actors/market/tests/harness.rs b/actors/market/tests/harness.rs index 9e644bc2c..2bf48c600 100644 --- a/actors/market/tests/harness.rs +++ b/actors/market/tests/harness.rs @@ -1,30 +1,35 @@ #![allow(dead_code)] use cid::Cid; +use frc46_token::token::types::{TransferFromParams, TransferFromReturn}; use num_traits::{FromPrimitive, Zero}; use regex::Regex; +use std::cmp::min; use 
std::{cell::RefCell, collections::HashMap}; use fil_actor_market::ext::account::{AuthenticateMessageParams, AUTHENTICATE_MESSAGE_METHOD}; +use fil_actor_market::ext::verifreg::{AllocationID, AllocationRequest, AllocationsResponse}; use fil_actor_market::{ - balance_table::BalanceTable, ext, ext::miner::GetControlAddressesReturnParams, + balance_table::BalanceTable, deal_id_key, ext, ext::miner::GetControlAddressesReturnParams, gen_rand_next_epoch, testing::check_state_invariants, ActivateDealsParams, ActivateDealsResult, Actor as MarketActor, ClientDealProposal, DealArray, DealMetaArray, DealProposal, DealState, Label, Method, OnMinerSectorsTerminateParams, PublishStorageDealsParams, PublishStorageDealsReturn, SectorDeals, State, VerifyDealsForActivationParams, - VerifyDealsForActivationReturn, WithdrawBalanceParams, WithdrawBalanceReturn, + VerifyDealsForActivationReturn, WithdrawBalanceParams, WithdrawBalanceReturn, NO_ALLOCATION_ID, PROPOSALS_AMT_BITWIDTH, }; use fil_actor_power::{CurrentTotalPowerReturn, Method as PowerMethod}; use fil_actor_reward::Method as RewardMethod; -use fil_actor_verifreg::UseBytesParams; +use fil_actors_runtime::builtin::HAMT_BIT_WIDTH; +use fil_actors_runtime::cbor::serialize; use fil_actors_runtime::{ + make_map_with_root_and_bitwidth, network::EPOCHS_IN_DAY, runtime::{builtins::Type, Policy, Runtime}, test_utils::*, - ActorError, SetMultimap, BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, CRON_ACTOR_ADDR, - REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, - VERIFIED_REGISTRY_ACTOR_ADDR, + ActorError, BatchReturn, SetMultimap, BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, + CRON_ACTOR_ADDR, DATACAP_TOKEN_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, + STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; use fvm_ipld_encoding::{to_vec, RawBytes}; use fvm_shared::bigint::BigInt; @@ -296,33 +301,35 @@ pub fn activate_deals_raw( pub fn get_deal_proposal(rt: 
&mut MockRuntime, deal_id: DealID) -> DealProposal { let st: State = rt.get_state(); - let deals = DealArray::load(&st.proposals, &rt.store).unwrap(); - let d = deals.get(deal_id).unwrap(); d.unwrap().clone() } -pub fn get_locked_balance(rt: &mut MockRuntime, addr: Address) -> TokenAmount { +pub fn get_pending_deal_allocation(rt: &mut MockRuntime, deal_id: DealID) -> AllocationID { let st: State = rt.get_state(); + let pending_allocations = + make_map_with_root_and_bitwidth(&st.pending_deal_allocation_ids, &rt.store, HAMT_BIT_WIDTH) + .unwrap(); - let lt = BalanceTable::from_root(&rt.store, &st.locked_table).unwrap(); + *pending_allocations.get(&deal_id_key(deal_id)).unwrap().unwrap_or(&NO_ALLOCATION_ID) +} +pub fn get_locked_balance(rt: &mut MockRuntime, addr: Address) -> TokenAmount { + let st: State = rt.get_state(); + let lt = BalanceTable::from_root(&rt.store, &st.locked_table).unwrap(); lt.get(&addr).unwrap() } pub fn get_deal_state(rt: &mut MockRuntime, deal_id: DealID) -> DealState { let st: State = rt.get_state(); - let states = DealMetaArray::load(&st.states, &rt.store).unwrap(); - let s = states.get(deal_id).unwrap(); *s.unwrap() } pub fn update_last_updated(rt: &mut MockRuntime, deal_id: DealID, new_last_updated: ChainEpoch) { let st: State = rt.get_state(); - let mut states = DealMetaArray::load(&st.states, &rt.store).unwrap(); let s = *states.get(deal_id).unwrap().unwrap(); @@ -333,7 +340,6 @@ pub fn update_last_updated(rt: &mut MockRuntime, deal_id: DealID, new_last_updat pub fn delete_deal_proposal(rt: &mut MockRuntime, deal_id: DealID) { let mut st: State = rt.get_state(); - let mut deals = DealArray::load(&st.proposals, &rt.store).unwrap(); deals.delete(deal_id).unwrap(); @@ -436,6 +442,7 @@ pub fn publish_deals( rt: &mut MockRuntime, addrs: &MinerAddresses, publish_deals: &[DealProposal], + next_allocation_id: AllocationID, ) -> Vec { rt.expect_validate_caller_type((*CALLER_TYPES_SIGNABLE).to_vec()); @@ -457,6 +464,7 @@ pub fn publish_deals( let 
mut params: PublishStorageDealsParams = PublishStorageDealsParams { deals: vec![] }; + let mut alloc_id = next_allocation_id; for deal in publish_deals { // create a client proposal with a valid signature let buf = RawBytes::serialize(deal.clone()).expect("failed to marshal deal proposal"); @@ -481,20 +489,49 @@ pub fn publish_deals( ); if deal.verified_deal { - let param = RawBytes::serialize(UseBytesParams { - address: deal.client, - deal_size: BigInt::from(deal.piece_size.0), - }) - .unwrap(); - + // Expect transfer of data cap to the verified registry, with spec for the allocation. + let curr_epoch = rt.epoch; + let alloc_req = ext::verifreg::AllocationRequests { + allocations: vec![AllocationRequest { + provider: deal.provider, + data: deal.piece_cid, + size: deal.piece_size, + term_min: deal.end_epoch - deal.start_epoch, + term_max: (deal.end_epoch - deal.start_epoch) + 90 * EPOCHS_IN_DAY, + expiration: min(deal.start_epoch, curr_epoch + 60 * EPOCHS_IN_DAY), + }], + extensions: vec![], + }; + let datacap_amount = TokenAmount::from_whole(deal.piece_size.0 as i64); + let params = TransferFromParams { + from: deal.client, + to: VERIFIED_REGISTRY_ACTOR_ADDR, + amount: datacap_amount.clone(), + operator_data: serialize(&alloc_req, "allocation requests").unwrap(), + }; + let alloc_ids = AllocationsResponse { + allocation_results: BatchReturn::ok(1), + extension_results: BatchReturn::empty(), + new_allocations: vec![alloc_id], + }; rt.expect_send( - VERIFIED_REGISTRY_ACTOR_ADDR, - ext::verifreg::USE_BYTES_METHOD as u64, - param, + DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::TRANSFER_FROM_METHOD as u64, + serialize(¶ms, "transfer from params").unwrap(), TokenAmount::zero(), - RawBytes::default(), + serialize( + &TransferFromReturn { + from_balance: TokenAmount::zero(), + to_balance: datacap_amount, + allowance: TokenAmount::zero(), + recipient_data: serialize(&alloc_ids, "allocation response").unwrap(), + }, + "transfer from return", + ) + .unwrap(), ExitCode::OK, ); + 
alloc_id += 1 } } @@ -511,11 +548,15 @@ pub fn publish_deals( assert_eq!(ret.ids.len(), publish_deals.len()); // assert state after publishing the deals + alloc_id = next_allocation_id; for (i, deal_id) in ret.ids.iter().enumerate() { let expected = &publish_deals[i]; let p = get_deal_proposal(rt, *deal_id); - assert_eq!(expected, &p); + if p.verified_deal { + assert_eq!(get_pending_deal_allocation(rt, *deal_id), alloc_id); + alloc_id += 1; + } } ret.ids @@ -660,7 +701,8 @@ pub fn assert_deals_not_terminated(rt: &mut MockRuntime, deal_ids: &[DealID]) { pub fn assert_deal_deleted(rt: &mut MockRuntime, deal_id: DealID, p: DealProposal) { use cid::multihash::Code; use cid::multihash::MultihashDigest; - use fvm_ipld_hamt::{BytesKey, Hamt}; + use fil_actors_runtime::Map; + use fvm_ipld_hamt::BytesKey; let st: State = rt.get_state(); @@ -677,12 +719,11 @@ pub fn assert_deal_deleted(rt: &mut MockRuntime, deal_id: DealID, p: DealProposa let mh_code = Code::Blake2b256; let p_cid = Cid::new_v1(fvm_ipld_encoding::DAG_CBOR, mh_code.digest(&to_vec(&p).unwrap())); // Check that the deal_id is not in st.pending_proposals. 
- let pending_deals: Hamt<&fvm_ipld_blockstore::MemoryBlockstore, DealProposal> = - fil_actors_runtime::make_map_with_root_and_bitwidth( - &st.pending_proposals, - &*rt.store, - PROPOSALS_AMT_BITWIDTH, - ) + let pending_deals: Map = + fil_actors_runtime::make_map_with_root_and_bitwidth::< + fvm_ipld_blockstore::MemoryBlockstore, + DealProposal, + >(&st.pending_proposals, &*rt.store, PROPOSALS_AMT_BITWIDTH) .unwrap(); assert!(!pending_deals.contains_key(&BytesKey(p_cid.to_bytes())).unwrap()); } @@ -768,7 +809,7 @@ pub fn publish_and_activate_deal( ) -> DealID { let deal = generate_deal_and_add_funds(rt, client, addrs, start_epoch, end_epoch); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, addrs.worker); - let deal_ids = publish_deals(rt, addrs, &[deal]); + let deal_ids = publish_deals(rt, addrs, &[deal], NO_ALLOCATION_ID); // unverified deal activate_deals(rt, sector_expiry, addrs.provider, current_epoch, &deal_ids); deal_ids[0] } @@ -782,7 +823,7 @@ pub fn generate_and_publish_deal( ) -> DealID { let deal = generate_deal_and_add_funds(rt, client, addrs, start_epoch, end_epoch); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, addrs.worker); - let deal_ids = publish_deals(rt, addrs, &[deal]); + let deal_ids = publish_deals(rt, addrs, &[deal], NO_ALLOCATION_ID); // unverified deal deal_ids[0] } @@ -792,11 +833,12 @@ pub fn generate_and_publish_verified_deal( addrs: &MinerAddresses, start_epoch: ChainEpoch, end_epoch: ChainEpoch, + next_allocation_id: AllocationID, ) -> DealID { let mut deal = generate_deal_and_add_funds(rt, client, addrs, start_epoch, end_epoch); deal.verified_deal = true; rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, addrs.worker); - let deal_ids = publish_deals(rt, addrs, &[deal]); + let deal_ids = publish_deals(rt, addrs, &[deal], next_allocation_id); deal_ids[0] } @@ -834,7 +876,7 @@ pub fn generate_and_publish_deal_for_piece( // publish rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, addrs.worker); - let deal_ids = publish_deals(rt, addrs, &[deal]); + let deal_ids = 
publish_deals(rt, addrs, &[deal], NO_ALLOCATION_ID); // unverified deal deal_ids[0] } diff --git a/actors/market/tests/market_actor_test.rs b/actors/market/tests/market_actor_test.rs index 98cff123b..6fcad4c43 100644 --- a/actors/market/tests/market_actor_test.rs +++ b/actors/market/tests/market_actor_test.rs @@ -4,23 +4,23 @@ use fil_actor_market::balance_table::BALANCE_TABLE_BITWIDTH; use fil_actor_market::policy::detail::DEAL_MAX_LABEL_SIZE; use fil_actor_market::{ - ext, ActivateDealsParams, Actor as MarketActor, ClientDealProposal, DealMetaArray, Label, - Method, PublishStorageDealsParams, PublishStorageDealsReturn, State, WithdrawBalanceParams, - PROPOSALS_AMT_BITWIDTH, STATES_AMT_BITWIDTH, + deal_id_key, ext, ActivateDealsParams, Actor as MarketActor, ClientDealProposal, DealArray, + DealMetaArray, Label, Method, PublishStorageDealsParams, PublishStorageDealsReturn, State, + WithdrawBalanceParams, NO_ALLOCATION_ID, PROPOSALS_AMT_BITWIDTH, STATES_AMT_BITWIDTH, }; -use fil_actor_verifreg::UseBytesParams; -use fil_actors_runtime::cbor::deserialize; +use fil_actors_runtime::cbor::{deserialize, serialize}; use fil_actors_runtime::network::EPOCHS_IN_DAY; use fil_actors_runtime::runtime::{builtins::Type, Policy, Runtime}; use fil_actors_runtime::test_utils::*; use fil_actors_runtime::{ - make_empty_map, ActorError, SetMultimap, BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, - SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, + make_empty_map, make_map_with_root_and_bitwidth, ActorError, BatchReturn, Map, SetMultimap, + BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, DATACAP_TOKEN_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + VERIFIED_REGISTRY_ACTOR_ADDR, }; +use frc46_token::token::types::{TransferFromParams, TransferFromReturn}; use fvm_ipld_amt::Amt; use fvm_ipld_encoding::{to_vec, RawBytes}; use fvm_shared::address::Address; -use fvm_shared::bigint::BigInt; use fvm_shared::clock::{ChainEpoch, EPOCH_UNDEFINED}; use fvm_shared::crypto::signature::Signature; use 
fvm_shared::deal::DealID; @@ -33,6 +33,7 @@ use regex::Regex; use std::ops::Add; use fil_actor_market::ext::account::{AuthenticateMessageParams, AUTHENTICATE_MESSAGE_METHOD}; +use fil_actor_market::ext::verifreg::{AllocationID, AllocationRequest, AllocationsResponse}; use num_traits::{FromPrimitive, Zero}; mod harness; @@ -684,28 +685,95 @@ fn simple_deal() { let mut rt = setup(); rt.set_epoch(publish_epoch); + let next_allocation_id = 1; // Publish from miner worker. - let deal1 = generate_deal_and_add_funds( + let mut deal1 = generate_deal_and_add_funds( &mut rt, CLIENT_ADDR, &MinerAddresses::default(), start_epoch, end_epoch, ); + deal1.verified_deal = false; rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - publish_deals(&mut rt, &MinerAddresses::default(), &[deal1]); + let deal1_id = + publish_deals(&mut rt, &MinerAddresses::default(), &[deal1], next_allocation_id)[0]; // Publish from miner control address. - let deal2 = generate_deal_and_add_funds( + let mut deal2 = generate_deal_and_add_funds( &mut rt, CLIENT_ADDR, &MinerAddresses::default(), start_epoch + 1, end_epoch + 1, ); + deal2.verified_deal = true; rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, CONTROL_ADDR); - publish_deals(&mut rt, &MinerAddresses::default(), &[deal2]); + let deal2_id = + publish_deals(&mut rt, &MinerAddresses::default(), &[deal2], next_allocation_id)[0]; + + // activate the deal + activate_deals(&mut rt, end_epoch + 1, PROVIDER_ADDR, publish_epoch, &[deal1_id, deal2_id]); + let deal1st = get_deal_state(&mut rt, deal1_id); + assert_eq!(publish_epoch, deal1st.sector_start_epoch); + assert_eq!(NO_ALLOCATION_ID, deal1st.verified_claim); + + let deal2st = get_deal_state(&mut rt, deal2_id); + assert_eq!(publish_epoch, deal2st.sector_start_epoch); + assert_eq!(next_allocation_id, deal2st.verified_claim); + + check_state(&rt); +} + +#[test] +fn deal_expires() { + let start_epoch = 100; + let end_epoch = start_epoch + 200 * EPOCHS_IN_DAY; + let publish_epoch = ChainEpoch::from(1); + + let mut 
rt = setup(); + rt.set_epoch(publish_epoch); + let next_allocation_id = 1; + + // Publish from miner worker. + let mut deal = generate_deal_and_add_funds( + &mut rt, + CLIENT_ADDR, + &MinerAddresses::default(), + start_epoch, + end_epoch, + ); + deal.verified_deal = true; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); + let deal_id = + publish_deals(&mut rt, &MinerAddresses::default(), &[deal.clone()], next_allocation_id)[0]; + + rt.set_epoch(start_epoch + EPOCHS_IN_DAY + 1); + rt.expect_send( + BURNT_FUNDS_ACTOR_ADDR, + METHOD_SEND, + RawBytes::default(), + deal.provider_collateral, + RawBytes::default(), + ExitCode::OK, + ); + cron_tick(&mut rt); + + // No deal state for unactivated deal + let st: State = rt.get_state(); + let states = DealMetaArray::load(&st.states, &rt.store).unwrap(); + assert!(states.get(deal_id).unwrap().is_none()); + + // The proposal is gone + assert!(DealArray::load(&st.proposals, &rt.store).unwrap().get(deal_id).unwrap().is_none()); + + // Pending allocation ID is gone + let pending_allocs: Map<_, AllocationID> = + make_map_with_root_and_bitwidth(&st.pending_deal_allocation_ids, &rt.store, HAMT_BIT_WIDTH) + .unwrap(); + assert!(pending_allocs.get(&deal_id_key(deal_id)).unwrap().is_none()); + check_state(&rt); } @@ -794,19 +862,45 @@ fn provider_and_client_addresses_are_resolved_before_persisting_state_and_sent_t ExitCode::OK, ); - // request is sent to the VerifReg actor using the resolved address - let param = RawBytes::serialize(UseBytesParams { - address: client_resolved, - deal_size: BigInt::from(deal.piece_size.0), - }) - .unwrap(); - + // Data cap transfer is requested using the resolved address (not that it matters). 
+ let alloc_req = ext::verifreg::AllocationRequests { + allocations: vec![AllocationRequest { + provider: provider_resolved, + data: deal.piece_cid, + size: deal.piece_size, + term_min: deal.end_epoch - deal.start_epoch, + term_max: (deal.end_epoch - deal.start_epoch) + 90 * EPOCHS_IN_DAY, + expiration: deal.start_epoch, + }], + extensions: vec![], + }; + let datacap_amount = TokenAmount::from_whole(deal.piece_size.0 as i64); + let transfer_params = TransferFromParams { + from: client_resolved, + to: VERIFIED_REGISTRY_ACTOR_ADDR, + amount: datacap_amount.clone(), + operator_data: serialize(&alloc_req, "allocation requests").unwrap(), + }; + let transfer_return = TransferFromReturn { + from_balance: TokenAmount::zero(), + to_balance: datacap_amount, + allowance: TokenAmount::zero(), + recipient_data: serialize( + &AllocationsResponse { + allocation_results: BatchReturn::ok(1), + extension_results: BatchReturn::empty(), + new_allocations: vec![1], + }, + "allocations response", + ) + .unwrap(), + }; rt.expect_send( - VERIFIED_REGISTRY_ACTOR_ADDR, - ext::verifreg::USE_BYTES_METHOD as u64, - param, + DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::TRANSFER_FROM_METHOD as u64, + serialize(&transfer_params, "transfer from params").unwrap(), TokenAmount::zero(), - RawBytes::default(), + serialize(&transfer_return, "transfer from return").unwrap(), ExitCode::OK, ); @@ -898,7 +992,7 @@ fn publish_a_deal_with_enough_collateral_when_circulating_supply_is_superior_to_ // publish the deal successfully rt.set_epoch(publish_epoch); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - publish_deals(&mut rt, &MinerAddresses::default(), &[deal]); + publish_deals(&mut rt, &MinerAddresses::default(), &[deal], 1); check_state(&rt); } @@ -945,6 +1039,7 @@ fn publish_multiple_deals_for_different_clients_and_ensure_balances_are_correct( &mut rt, &MinerAddresses::default(), &[deal1.clone(), deal2.clone(), deal3.clone()], + 1, ); // assert locked balance for all clients and provider @@ -984,7 
+1079,7 @@ fn publish_multiple_deals_for_different_clients_and_ensure_balances_are_correct( 100 + 200 * EPOCHS_IN_DAY, ); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - publish_deals(&mut rt, &MinerAddresses::default(), &[deal4.clone(), deal5.clone()]); + publish_deals(&mut rt, &MinerAddresses::default(), &[deal4.clone(), deal5.clone()], 1); // assert locked balances for clients and provider let provider_locked_expected = @@ -1027,7 +1122,7 @@ fn publish_multiple_deals_for_different_clients_and_ensure_balances_are_correct( // publish both the deals for the second provider rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - publish_deals(&mut rt, &addrs, &[deal6.clone(), deal7.clone()]); + publish_deals(&mut rt, &addrs, &[deal6.clone(), deal7.clone()], 1); // assertions let st: State = rt.get_state(); @@ -1578,7 +1673,7 @@ fn market_actor_deals() { // First attempt at publishing the deal should work rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - publish_deals(&mut rt, &miner_addresses, &[deal_proposal.clone()]); + publish_deals(&mut rt, &miner_addresses, &[deal_proposal.clone()], 1); // Second attempt at publishing the same deal should fail publish_deals_expect_abort( @@ -1591,7 +1686,7 @@ fn market_actor_deals() { // Same deal with a different label should work deal_proposal.label = Label::String("Cthulhu".to_owned()); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - publish_deals(&mut rt, &miner_addresses, &[deal_proposal]); + publish_deals(&mut rt, &miner_addresses, &[deal_proposal], 1); check_state(&rt); } @@ -1617,7 +1712,7 @@ fn max_deal_label_size() { // DealLabel at max size should work. 
deal_proposal.label = Label::String("s".repeat(DEAL_MAX_LABEL_SIZE)); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - publish_deals(&mut rt, &miner_addresses, &[deal_proposal.clone()]); + publish_deals(&mut rt, &miner_addresses, &[deal_proposal.clone()], 1); // over max should fail deal_proposal.label = Label::String("s".repeat(DEAL_MAX_LABEL_SIZE + 1)); diff --git a/actors/market/tests/on_miner_sectors_terminate.rs b/actors/market/tests/on_miner_sectors_terminate.rs index 030a36f0a..31546e082 100644 --- a/actors/market/tests/on_miner_sectors_terminate.rs +++ b/actors/market/tests/on_miner_sectors_terminate.rs @@ -163,7 +163,7 @@ fn terminate_valid_deals_along_with_expired_and_cleaned_up_deal() { ); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - let deal_ids = publish_deals(&mut rt, &MinerAddresses::default(), &[deal1, deal2.clone()]); + let deal_ids = publish_deals(&mut rt, &MinerAddresses::default(), &[deal1, deal2.clone()], 1); activate_deals(&mut rt, sector_expiry, PROVIDER_ADDR, current_epoch, &deal_ids); let new_epoch = end_epoch - 1; diff --git a/actors/market/tests/verify_deals_for_activation_test.rs b/actors/market/tests/verify_deals_for_activation_test.rs index 425b89eff..ffaeae91f 100644 --- a/actors/market/tests/verify_deals_for_activation_test.rs +++ b/actors/market/tests/verify_deals_for_activation_test.rs @@ -3,7 +3,6 @@ mod harness; -use fil_actor_market::policy::detail::deal_weight; use fil_actor_market::{Actor as MarketActor, Method, SectorDeals, VerifyDealsForActivationParams}; use fil_actors_runtime::runtime::builtins::Type; use fil_actors_runtime::test_utils::{ @@ -33,7 +32,7 @@ const MINER_ADDRESSES: MinerAddresses = MinerAddresses { }; #[test] -fn verify_deal_and_activate_to_get_deal_weight_for_unverified_deal_proposal() { +fn verify_deal_and_activate_to_get_deal_space_for_unverified_deal_proposal() { let mut rt = setup(); let deal_id = generate_and_publish_deal(&mut rt, CLIENT_ADDR, &MINER_ADDRESSES, START_EPOCH, END_EPOCH); @@ 
-52,21 +51,23 @@ fn verify_deal_and_activate_to_get_deal_weight_for_unverified_deal_proposal() { let a_response = activate_deals(&mut rt, SECTOR_EXPIRY, PROVIDER_ADDR, CURR_EPOCH, &[deal_id]); assert_eq!(1, v_response.sectors.len()); assert_eq!(Some(make_piece_cid("1".as_bytes())), v_response.sectors[0].commd); - assert_eq!(BigInt::zero(), a_response.weights.verified_deal_weight); - assert_eq!(deal_weight(&deal_proposal), a_response.weights.deal_weight); + assert!(a_response.verified_infos.is_empty()); + assert_eq!(BigInt::from(deal_proposal.piece_size.0), a_response.nonverified_deal_space); check_state(&rt); } #[test] -fn verify_deal_and_activate_to_get_deal_weight_for_verified_deal_proposal() { +fn verify_deal_and_activate_to_get_deal_space_for_verified_deal_proposal() { let mut rt = setup(); + let next_allocation_id = 1; let deal_id = generate_and_publish_verified_deal( &mut rt, CLIENT_ADDR, &MINER_ADDRESSES, START_EPOCH, END_EPOCH, + next_allocation_id, ); let deal_proposal = get_deal_proposal(&mut rt, deal_id); @@ -85,8 +86,13 @@ fn verify_deal_and_activate_to_get_deal_weight_for_verified_deal_proposal() { assert_eq!(1, response.sectors.len()); assert_eq!(Some(make_piece_cid("1".as_bytes())), response.sectors[0].commd); - assert_eq!(deal_weight(&deal_proposal), a_response.weights.verified_deal_weight); - assert_eq!(BigInt::zero(), a_response.weights.deal_weight); + assert_eq!(1, a_response.verified_infos.len()); + assert_eq!(deal_proposal.piece_size, a_response.verified_infos[0].size); + assert_eq!(deal_proposal.client.id().unwrap(), a_response.verified_infos[0].client); + assert_eq!(deal_proposal.piece_cid, a_response.verified_infos[0].data); + assert_eq!(next_allocation_id, a_response.verified_infos[0].allocation_id); + + assert_eq!(BigInt::zero(), a_response.nonverified_deal_space); check_state(&rt); } @@ -118,7 +124,7 @@ fn verification_and_weights_for_verified_and_unverified_deals() { ]; rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, WORKER_ADDR); - let deal_ids = 
publish_deals(&mut rt, &MINER_ADDRESSES, &deals.clone()); + let deal_ids = publish_deals(&mut rt, &MINER_ADDRESSES, &deals.clone(), 1); let response = verify_deals_for_activation( &mut rt, @@ -138,14 +144,17 @@ fn verification_and_weights_for_verified_and_unverified_deals() { }, ); - let verified_weight = deal_weight(&verified_deal_1) + deal_weight(&verified_deal_2); - let unverified_weight = deal_weight(&unverified_deal_1) + deal_weight(&unverified_deal_2); + let verified_space = BigInt::from(verified_deal_1.piece_size.0 + verified_deal_2.piece_size.0); + let unverified_space = + BigInt::from(unverified_deal_1.piece_size.0 + unverified_deal_2.piece_size.0); let a_response = activate_deals(&mut rt, SECTOR_EXPIRY, PROVIDER_ADDR, CURR_EPOCH, &deal_ids); assert_eq!(1, response.sectors.len()); - assert_eq!(verified_weight, a_response.weights.verified_deal_weight); - assert_eq!(unverified_weight, a_response.weights.deal_weight); + let returned_verified_space: BigInt = + a_response.verified_infos.iter().map(|info| BigInt::from(info.size.0)).sum(); + assert_eq!(verified_space, returned_verified_space); + assert_eq!(unverified_space, a_response.nonverified_deal_space); check_state(&rt); } diff --git a/actors/miner/src/ext.rs b/actors/miner/src/ext.rs index e4aa1ec3e..b151019fa 100644 --- a/actors/miner/src/ext.rs +++ b/actors/miner/src/ext.rs @@ -1,13 +1,16 @@ use cid::Cid; -use fil_actors_runtime::DealWeight; +use fil_actors_runtime::BatchReturn; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::RawBytes; -use fvm_shared::bigint::bigint_ser; +use fvm_shared::bigint::{bigint_ser, BigInt}; use fvm_shared::clock::ChainEpoch; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; +use fvm_shared::piece::PaddedPieceSize; +use fvm_shared::sector::SectorNumber; use fvm_shared::sector::{RegisteredSealProof, StoragePower}; use fvm_shared::smooth::FilterEstimate; +use fvm_shared::ActorID; pub mod account { pub const PUBKEY_ADDRESS_METHOD: u64 = 2; @@ -35,20 +38,39 
@@ pub mod market { pub sector_expiry: ChainEpoch, } + #[derive(Serialize_tuple, Deserialize_tuple, Clone)] + pub struct VerifiedDealInfo { + pub client: ActorID, + pub allocation_id: u64, + pub data: Cid, + pub size: PaddedPieceSize, + } + + impl Default for VerifiedDealInfo { + fn default() -> VerifiedDealInfo { + VerifiedDealInfo { + size: PaddedPieceSize(0), + client: 0, + allocation_id: 0, + data: Default::default(), + } + } + } + #[derive(Serialize_tuple, Deserialize_tuple)] pub struct ActivateDealsResult { - pub weights: DealWeights, + #[serde(with = "bigint_ser")] + pub nonverified_deal_space: BigInt, + pub verified_infos: Vec, } #[derive(Serialize_tuple, Deserialize_tuple, Clone, Default)] - pub struct DealWeights { - pub deal_space: u64, + pub struct DealSpaces { #[serde(with = "bigint_ser")] - pub deal_weight: DealWeight, + pub deal_space: BigInt, #[serde(with = "bigint_ser")] - pub verified_deal_weight: DealWeight, + pub verified_deal_space: BigInt, } - #[derive(Serialize_tuple)] pub struct ComputeDataCommitmentParamsRef<'a> { pub inputs: &'a [SectorDataSpec], @@ -132,3 +154,66 @@ pub mod power { pub mod reward { pub const THIS_EPOCH_REWARD_METHOD: u64 = 3; } + +pub mod verifreg { + use super::*; + + pub const GET_CLAIMS_METHOD: u64 = 10; + pub const CLAIM_ALLOCATIONS_METHOD: u64 = 9; + + pub type ClaimID = u64; + pub type AllocationID = u64; + + #[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] + pub struct Claim { + // The provider storing the data (from allocation). + pub provider: ActorID, + // The client which allocated the DataCap (from allocation). + pub client: ActorID, + // Identifier of the data committed (from allocation). + pub data: Cid, + // The (padded) size of data (from allocation). 
+ pub size: PaddedPieceSize, + // The min period which the provider must commit to storing data + pub term_min: ChainEpoch, + // The max period for which provider can earn QA-power for the data + pub term_max: ChainEpoch, + // The epoch at which the (first range of the) piece was committed. + pub term_start: ChainEpoch, + // ID of the provider's sector in which the data is committed. + pub sector: SectorNumber, + } + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + pub struct GetClaimsParams { + pub provider: ActorID, + pub claim_ids: Vec, + } + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + + pub struct GetClaimsReturn { + pub batch_info: BatchReturn, + pub claims: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct SectorAllocationClaim { + pub client: ActorID, + pub allocation_id: AllocationID, + pub data: Cid, + pub size: PaddedPieceSize, + pub sector: SectorNumber, + pub sector_expiry: ChainEpoch, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct ClaimAllocationsParams { + pub sectors: Vec, + pub all_or_nothing: bool, + } + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct ClaimAllocationsReturn { + pub batch_info: BatchReturn, + #[serde(with = "bigint_ser")] + pub claimed_space: BigInt, + } +} diff --git a/actors/miner/src/lib.rs b/actors/miner/src/lib.rs index 69493feab..e2220568e 100644 --- a/actors/miner/src/lib.rs +++ b/actors/miner/src/lib.rs @@ -7,21 +7,8 @@ use std::iter; use std::ops::Neg; use anyhow::{anyhow, Error}; -pub use bitfield_queue::*; use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; -use cid::multihash::Code; use cid::Cid; -pub use commd::*; -pub use deadline_assignment::*; -pub use deadline_info::*; -pub use deadline_state::*; -pub use deadlines::*; -pub use expiration_queue::*; -use fil_actors_runtime::runtime::{ActorCode, DomainSeparationTag, Policy, Runtime}; -use fil_actors_runtime::{ - 
actor_error, cbor, ActorDowncast, ActorError, BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, - INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, -}; use fvm_ipld_bitfield::{BitField, Validate}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::{from_slice, BytesDe, Cbor, CborStore, RawBytes}; @@ -30,14 +17,6 @@ use fvm_shared::bigint::{BigInt, Integer}; use fvm_shared::clock::ChainEpoch; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; -// The following errors are particular cases of illegal state. -// They're not expected to ever happen, but if they do, distinguished codes can help us -// diagnose the problem. - -use crate::Code::Blake2b256; -pub use beneficiary::*; -use fil_actors_runtime::cbor::{deserialize, serialize, serialize_vec}; -use fil_actors_runtime::runtime::builtins::Type; use fvm_shared::error::*; use fvm_shared::randomness::*; use fvm_shared::reward::ThisEpochRewardReturn; @@ -45,9 +24,27 @@ use fvm_shared::sector::*; use fvm_shared::smooth::FilterEstimate; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR, METHOD_SEND}; use log::{error, info, warn}; -pub use monies::*; +use multihash::Code::Blake2b256; use num_derive::FromPrimitive; use num_traits::{FromPrimitive, Zero}; + +pub use beneficiary::*; +pub use bitfield_queue::*; +pub use commd::*; +pub use deadline_assignment::*; +pub use deadline_info::*; +pub use deadline_state::*; +pub use deadlines::*; +pub use expiration_queue::*; +use fil_actors_runtime::cbor::{deserialize, serialize, serialize_vec}; +use fil_actors_runtime::runtime::builtins::Type; +use fil_actors_runtime::runtime::{ActorCode, DomainSeparationTag, Policy, Runtime}; +use fil_actors_runtime::{ + actor_error, cbor, ActorContext, ActorDowncast, ActorError, BURNT_FUNDS_ACTOR_ADDR, + CALLER_TYPES_SIGNABLE, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, + STORAGE_POWER_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, +}; +pub use monies::*; pub use 
partition_state::*; pub use policy::*; pub use sector_map::*; @@ -57,6 +54,10 @@ pub use termination::*; pub use types::*; pub use vesting_state::*; +// The following errors are particular cases of illegal state. +// They're not expected to ever happen, but if they do, distinguished codes can help us +// diagnose the problem. + #[cfg(feature = "fil-actor")] fil_actors_runtime::wasm_trampoline!(Actor); @@ -121,6 +122,7 @@ pub enum Method { ProveReplicaUpdates2 = 29, ChangeBeneficiary = 30, GetBeneficiary = 31, + ExtendSectorExpiration2 = 32, } pub const ERR_BALANCE_INVARIANTS_BROKEN: ExitCode = ExitCode::new(1000); @@ -949,7 +951,7 @@ impl Actor { struct UpdateAndSectorInfo<'a> { update: &'a ReplicaUpdateInner, sector_info: SectorOnChainInfo, - deal_weights: ext::market::DealWeights, + deal_spaces: ext::market::DealSpaces, } let mut sectors_deals = Vec::::new(); @@ -1050,34 +1052,25 @@ impl Actor { continue; } - let res = rt.send( - &STORAGE_MARKET_ACTOR_ADDR, - ext::market::ACTIVATE_DEALS_METHOD, - RawBytes::serialize(ext::market::ActivateDealsParams { - deal_ids: update.deals.clone(), - sector_expiry: sector_info.expiration, - })?, - TokenAmount::zero(), - ); - let weights = if let Ok(res) = res { - // Erroring in this case as it means something went really wrong - let activate_ret: ext::market::ActivateDealsResult = res.deserialize()?; - activate_ret.weights - } else { - info!( - "failed to activate deals on sector {0}, skipping sector {0}", - update.sector_number, - ); - continue; + let deal_spaces = match activate_deals_and_claim_allocations( + rt, + update.deals.clone(), + sector_info.expiration, + sector_info.sector_number, + )? 
{ + Some(deal_spaces) => deal_spaces, + None => { + info!( + "failed to activate deals on sector {}, skipping from replica update set", + update.sector_number + ); + continue; + } }; let expiration = sector_info.expiration; let seal_proof = sector_info.seal_proof; - validated_updates.push(UpdateAndSectorInfo { - update, - sector_info, - deal_weights: weights, - }); + validated_updates.push(UpdateAndSectorInfo { update, sector_info, deal_spaces }); sectors_deals.push(ext::market::SectorDeals { sector_type: seal_proof, @@ -1109,7 +1102,7 @@ impl Actor { struct UpdateWithDetails<'a> { update: &'a ReplicaUpdateInner, sector_info: &'a SectorOnChainInfo, - deal_weights: &'a ext::market::DealWeights, + deal_spaces: &'a ext::market::DealSpaces, full_unsealed_cid: Cid, } @@ -1136,7 +1129,7 @@ impl Actor { decls_by_deadline.entry(dl).or_default().push(UpdateWithDetails { update: with_sector_info.update, sector_info: &with_sector_info.sector_info, - deal_weights: &with_sector_info.deal_weights, + deal_spaces: &with_sector_info.deal_spaces, full_unsealed_cid: computed_commd, }); } @@ -1205,6 +1198,7 @@ impl Actor { let mut new_sector_info = with_details.sector_info.clone(); + new_sector_info.simple_qa_power = true; new_sector_info.sealed_cid = with_details.update.new_sealed_cid; new_sector_info.sector_key_cid = match new_sector_info.sector_key_cid { None => Some(with_details.sector_info.sealed_cid), @@ -1215,12 +1209,12 @@ impl Actor { new_sector_info.deal_ids = with_details.update.deals.clone(); new_sector_info.activation = rt.curr_epoch(); - new_sector_info.deal_weight = with_details.deal_weights.deal_weight.clone(); - new_sector_info.verified_deal_weight = with_details.deal_weights.verified_deal_weight.clone(); + let duration = new_sector_info.expiration - new_sector_info.activation; - // compute initial pledge - let duration = with_details.sector_info.expiration - rt.curr_epoch(); + new_sector_info.deal_weight = with_details.deal_spaces.deal_space.clone() * duration; + 
new_sector_info.verified_deal_weight = with_details.deal_spaces.verified_deal_space.clone() * duration; + // compute initial pledge let qa_pow = qa_power_for_weight( info.sector_size, duration, @@ -1771,7 +1765,13 @@ impl Actor { // This could make sector maximum lifetime validation more lenient if the maximum sector limit isn't hit first. let max_activation = curr_epoch + max_prove_commit_duration(rt.policy(), precommit.seal_proof).unwrap_or_default(); - validate_expiration(rt, max_activation, precommit.expiration, precommit.seal_proof)?; + validate_expiration( + rt.policy(), + curr_epoch, + max_activation, + precommit.expiration, + precommit.seal_proof, + )?; sectors_deals.push(ext::market::SectorDeals { sector_type: precommit.seal_proof, @@ -2105,84 +2105,61 @@ impl Actor { /// Changes the expiration epoch for a sector to a new, later one. /// The sector must not be terminated or faulty. /// The sector's power is recomputed for the new expiration. + /// This method is legacy and should be replaced with calls to extend_sector_expiration2 fn extend_sector_expiration( rt: &mut RT, - mut params: ExtendSectorExpirationParams, + params: ExtendSectorExpirationParams, ) -> Result<(), ActorError> where BS: Blockstore, RT: Runtime, { - { - let policy = rt.policy(); - if params.extensions.len() as u64 > policy.declarations_max { - return Err(actor_error!( - illegal_argument, - "too many declarations {}, max {}", - params.extensions.len(), - policy.declarations_max - )); - } - } - - // limit the number of sectors declared at once - // https://github.com/filecoin-project/specs-actors/issues/416 - let mut sector_count: u64 = 0; - - for decl in &mut params.extensions { - let policy = rt.policy(); - if decl.deadline >= policy.wpost_period_deadlines { - return Err(actor_error!( - illegal_argument, - "deadline {} not in range 0..{}", - decl.deadline, - policy.wpost_period_deadlines - )); - } - - let sectors = match decl.sectors.validate() { - Ok(sectors) => sectors, - Err(e) => { 
- return Err(actor_error!( - illegal_argument, - "failed to validate sectors for deadline {}, partition {}: {}", - decl.deadline, - decl.partition, - e - )); - } - }; - - match sector_count.checked_add(sectors.len()) { - Some(sum) => sector_count = sum, - None => { - return Err(actor_error!(illegal_argument, "sector bitfield integer overflow")); - } - } - } + let extend_expiration_inner = + validate_legacy_extension_declarations(¶ms.extensions, rt.policy())?; + Self::extend_sector_expiration_inner( + rt, + extend_expiration_inner, + ExtensionKind::ExtendCommittmentLegacy, + ) + } - { - let policy = rt.policy(); - if sector_count > policy.addressed_sectors_max { - return Err(actor_error!( - illegal_argument, - "too many sectors for declaration {}, max {}", - sector_count, - policy.addressed_sectors_max - )); - } - } + // Up to date version of extend_sector_expiration that correctly handles simple qap sectors + // with FIL+ claims. Extension is only allowed if all claim max terms extend past new expiration + // or claims are dropped. Power only changes when claims are dropped. 
+ fn extend_sector_expiration2( + rt: &mut RT, + params: ExtendSectorExpiration2Params, + ) -> Result<(), ActorError> + where + BS: Blockstore, + RT: Runtime, + { + let extend_expiration_inner = validate_extension_declarations(rt, params.extensions)?; + Self::extend_sector_expiration_inner( + rt, + extend_expiration_inner, + ExtensionKind::ExtendCommittment, + ) + } + fn extend_sector_expiration_inner( + rt: &mut RT, + inner: ExtendExpirationsInner, + kind: ExtensionKind, + ) -> Result<(), ActorError> + where + BS: Blockstore, + RT: Runtime, + { let curr_epoch = rt.curr_epoch(); + /* Loop over sectors and do extension */ let (power_delta, pledge_delta) = rt.transaction(|state: &mut State, rt| { let info = get_miner_info(rt.store(), state)?; rt.validate_immediate_caller_is( info.control_addresses.iter().chain(&[info.worker, info.owner]), )?; - let store = rt.store(); - let mut deadlines = state.load_deadlines(rt.store()).map_err(|e| e.wrap("failed to load deadlines"))?; @@ -2192,8 +2169,7 @@ impl Actor { .take(rt.policy().wpost_period_deadlines as usize) .collect(); let mut deadlines_to_load = Vec::::new(); - - for decl in params.extensions { + for decl in &inner.extensions { // the deadline indices are already checked. 
let decls = &mut decls_by_deadline[decl.deadline as usize]; if decls.is_empty() { @@ -2212,14 +2188,14 @@ impl Actor { for deadline_idx in deadlines_to_load { let policy = rt.policy(); let mut deadline = - deadlines.load_deadline(policy, store, deadline_idx).map_err(|e| { + deadlines.load_deadline(policy, rt.store(), deadline_idx).map_err(|e| { e.downcast_default( ExitCode::USR_ILLEGAL_STATE, format!("failed to load deadline {}", deadline_idx), ) })?; - let mut partitions = deadline.partitions_amt(store).map_err(|e| { + let mut partitions = deadline.partitions_amt(rt.store()).map_err(|e| { e.downcast_default( ExitCode::USR_ILLEGAL_STATE, format!("failed to load partitions for deadline {}", deadline_idx), @@ -2249,64 +2225,30 @@ impl Actor { let old_sectors = sectors .load_sector(&decl.sectors) .map_err(|e| e.wrap("failed to load sectors"))?; - let new_sectors: Vec = old_sectors .iter() - .map(|sector| { - if !can_extend_seal_proof_type(sector.seal_proof) { - return Err(actor_error!( - forbidden, - "cannot extend expiration for sector {} with unsupported \ - seal type {:?}", - sector.sector_number, - sector.seal_proof - )); - } - - // This can happen if the sector should have already expired, but hasn't - // because the end of its deadline hasn't passed yet. 
- if sector.expiration < rt.curr_epoch() { - return Err(actor_error!( - forbidden, - "cannot extend expiration for expired sector {} at {}", - sector.sector_number, - sector.expiration - )); - } - - if decl.new_expiration < sector.expiration { - return Err(actor_error!( - illegal_argument, - "cannot reduce sector {} expiration to {} from {}", - sector.sector_number, + .map(|sector| match kind { + ExtensionKind::ExtendCommittmentLegacy => { + extend_sector_committment_legacy( + rt.policy(), + curr_epoch, decl.new_expiration, - sector.expiration - )); + sector, + ) } - - validate_expiration( - rt, - sector.activation, - decl.new_expiration, - sector.seal_proof, - )?; - - // Remove "spent" deal weights - let new_deal_weight = (§or.deal_weight - * (sector.expiration - curr_epoch)) - .div_floor(&BigInt::from(sector.expiration - sector.activation)); - - let new_verified_deal_weight = (§or.verified_deal_weight - * (sector.expiration - curr_epoch)) - .div_floor(&BigInt::from(sector.expiration - sector.activation)); - - let mut sector = sector.clone(); - sector.expiration = decl.new_expiration; - - sector.deal_weight = new_deal_weight; - sector.verified_deal_weight = new_verified_deal_weight; - - Ok(sector) + ExtensionKind::ExtendCommittment => match &inner.claims { + None => Err(actor_error!( + unspecified, + "extend2 always specifies (potentially empty) claim mapping" + )), + Some(claim_space_by_sector) => extend_sector_committment( + rt.policy(), + curr_epoch, + decl.new_expiration, + sector, + claim_space_by_sector, + ), + }, }) .collect::>()?; @@ -2320,7 +2262,13 @@ impl Actor { // Remove old sectors from partition and assign new sectors. 
let (partition_power_delta, partition_pledge_delta) = partition - .replace_sectors(store, &old_sectors, &new_sectors, info.sector_size, quant) + .replace_sectors( + rt.store(), + &old_sectors, + &new_sectors, + info.sector_size, + quant, + ) .map_err(|e| { e.downcast_default( ExitCode::USR_ILLEGAL_STATE, @@ -2361,7 +2309,7 @@ impl Actor { // Record partitions in deadline expiration queue for epoch in epochs_to_reschedule { let p_idxs = partitions_by_new_epoch.get(&epoch).unwrap(); - deadline.add_expiration_partitions(store, epoch, p_idxs, quant).map_err( + deadline.add_expiration_partitions(rt.store(), epoch, p_idxs, quant).map_err( |e| { e.downcast_default( ExitCode::USR_ILLEGAL_STATE, @@ -2375,18 +2323,20 @@ impl Actor { )?; } - deadlines.update_deadline(policy, store, deadline_idx, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {}", deadline_idx), - ) - })?; + deadlines.update_deadline(policy, rt.store(), deadline_idx, &deadline).map_err( + |e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to save deadline {}", deadline_idx), + ) + }, + )?; } state.sectors = sectors.amt.flush().map_err(|e| { e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors") })?; - state.save_deadlines(store, deadlines).map_err(|e| { + state.save_deadlines(rt.store(), deadlines).map_err(|e| { e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") })?; @@ -3591,6 +3541,290 @@ pub struct ReplicaUpdateInner { pub replica_proof: Vec, } +enum ExtensionKind { + ExtendCommittmentLegacy, // handle only legacy sectors + ExtendCommittment, // handle both Simple QAP and legacy sectors + // TODO: when landing https://github.com/filecoin-project/builtin-actors/pull/518 + // ExtendProofValidity +} + +// ExtendSectorExpiration param +struct ExtendExpirationsInner { + extensions: Vec, + // Map from sector being extended to (check, maintain) + // `check` is the space of active 
claims, checked to ensure all claims are checked + // `maintain` is the space of claims to maintain + // maintain <= check with equality in the case no claims are dropped + claims: Option>, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct ValidatedExpirationExtension { + pub deadline: u64, + pub partition: u64, + pub sectors: BitField, + pub new_expiration: ChainEpoch, +} + +#[allow(clippy::too_many_arguments)] // validate mut prevents implementing From +impl From for ValidatedExpirationExtension { + fn from(e2: ExpirationExtension2) -> Self { + let mut sectors = BitField::new(); + for sc in e2.sectors_with_claims { + sectors.set(sc.sector_number) + } + sectors |= &e2.sectors; + + Self { + deadline: e2.deadline, + partition: e2.partition, + sectors, + new_expiration: e2.new_expiration, + } + } +} + +fn validate_legacy_extension_declarations( + extensions: &[ExpirationExtension], + policy: &Policy, +) -> Result { + let vec_validated = extensions + .iter() + .map(|decl| { + if decl.deadline >= policy.wpost_period_deadlines { + return Err(actor_error!( + illegal_argument, + "deadline {} not in range 0..{}", + decl.deadline, + policy.wpost_period_deadlines + )); + } + + Ok(ValidatedExpirationExtension { + deadline: decl.deadline, + partition: decl.partition, + sectors: decl.sectors.clone(), + new_expiration: decl.new_expiration, + }) + }) + .collect::>()?; + + Ok(ExtendExpirationsInner { extensions: vec_validated, claims: None }) +} + +fn validate_extension_declarations( + rt: &mut RT, + extensions: Vec, +) -> Result +where + BS: Blockstore, + RT: Runtime, +{ + let mut claim_space_by_sector = BTreeMap::::new(); + + for decl in &extensions { + let policy = rt.policy(); + if decl.deadline >= policy.wpost_period_deadlines { + return Err(actor_error!( + illegal_argument, + "deadline {} not in range 0..{}", + decl.deadline, + policy.wpost_period_deadlines + )); + } + + for sc in &decl.sectors_with_claims { + let mut drop_claims = sc.drop_claims.clone(); + let mut 
all_claim_ids = sc.maintain_claims.clone(); + all_claim_ids.append(&mut drop_claims); + let claims = get_claims(rt, &all_claim_ids) + .with_context(|| format!("failed to get claims for sector {}", sc.sector_number))?; + let first_drop = sc.maintain_claims.len(); + + for (i, claim) in claims.iter().enumerate() { + // check provider and sector matches + if claim.provider != rt.message().receiver().id().unwrap() { + return Err(actor_error!(illegal_argument, "failed to validate declaration sector={}, claim={}, expected claim provider to be {} but found {} ", sc.sector_number, all_claim_ids[i], rt.message().receiver().id().unwrap(), claim.provider)); + } + if claim.sector != sc.sector_number { + return Err(actor_error!(illegal_argument, "failed to validate declaration sector={}, claim={} expected claim sector number to be {} but found {} ", sc.sector_number, all_claim_ids[i], sc.sector_number, claim.sector)); + } + + // If we are not dropping check expiration does not exceed term max + let mut maintain_delta: u64 = 0; + if i < first_drop { + if decl.new_expiration > claim.term_start + claim.term_max { + return Err(actor_error!(forbidden, "failed to validate declaration sector={}, claim={} claim only allows extension to {} but declared new expiration is {}", sc.sector_number, sc.maintain_claims[i], claim.term_start + claim.term_max, decl.new_expiration)); + } + maintain_delta = claim.size.0 + } + + claim_space_by_sector + .entry(sc.sector_number) + .and_modify(|(check, maintain)| { + *check += claim.size.0; + *maintain += maintain_delta; + }) + .or_insert((claim.size.0, maintain_delta)); + } + } + } + Ok(ExtendExpirationsInner { + extensions: extensions.into_iter().map(|e2| e2.into()).collect(), + claims: Some(claim_space_by_sector), + }) +} + +fn extend_sector_committment( + policy: &Policy, + curr_epoch: ChainEpoch, + new_expiration: ChainEpoch, + sector: &SectorOnChainInfo, + claim_space_by_sector: &BTreeMap, +) -> Result { + validate_extended_expiration(policy, 
curr_epoch, new_expiration, sector)?; + + // all simple_qa_power sectors with VerifiedDealWeight > 0 MUST check all claims + if sector.simple_qa_power { + extend_simple_qap_sector(policy, new_expiration, curr_epoch, sector, claim_space_by_sector) + } else { + extend_non_simple_qap_sector(new_expiration, curr_epoch, sector) + } +} + +fn extend_sector_committment_legacy( + policy: &Policy, + curr_epoch: ChainEpoch, + new_expiration: ChainEpoch, + sector: &SectorOnChainInfo, +) -> Result { + validate_extended_expiration(policy, curr_epoch, new_expiration, sector)?; + + // it is an error to do legacy sector expiration on simple-qa power sectors with deal weight + if sector.simple_qa_power + && (sector.verified_deal_weight > BigInt::zero() || sector.deal_weight > BigInt::zero()) + { + return Err(actor_error!( + forbidden, + "cannot use legacy sector extension for simple qa power with deal weight {}", + sector.sector_number + )); + } + extend_non_simple_qap_sector(new_expiration, curr_epoch, sector) +} + +fn validate_extended_expiration( + policy: &Policy, + curr_epoch: ChainEpoch, + new_expiration: ChainEpoch, + sector: &SectorOnChainInfo, +) -> Result<(), ActorError> { + if !can_extend_seal_proof_type(sector.seal_proof) { + return Err(actor_error!( + forbidden, + "cannot extend expiration for sector {} with unsupported \ + seal type {:?}", + sector.sector_number, + sector.seal_proof + )); + } + // This can happen if the sector should have already expired, but hasn't + // because the end of its deadline hasn't passed yet. 
+ if sector.expiration < curr_epoch { + return Err(actor_error!( + forbidden, + "cannot extend expiration for expired sector {} at {}", + sector.sector_number, + sector.expiration + )); + } + + if new_expiration < sector.expiration { + return Err(actor_error!( + illegal_argument, + "cannot reduce sector {} expiration to {} from {}", + sector.sector_number, + new_expiration, + sector.expiration + )); + } + + validate_expiration(policy, curr_epoch, sector.activation, new_expiration, sector.seal_proof)?; + Ok(()) +} + +fn extend_simple_qap_sector( + policy: &Policy, + new_expiration: ChainEpoch, + curr_epoch: ChainEpoch, + sector: &SectorOnChainInfo, + claim_space_by_sector: &BTreeMap, +) -> Result { + let mut new_sector = sector.clone(); + if sector.verified_deal_weight > BigInt::zero() { + let old_duration = sector.expiration - sector.activation; + let deal_space = §or.deal_weight / old_duration; + let old_verified_deal_space = §or.verified_deal_weight / old_duration; + let (expected_verified_deal_space, new_verified_deal_space) = + match claim_space_by_sector.get(§or.sector_number) { + None => { + return Err(actor_error!( + illegal_argument, + "claim missing from declaration for sector {} with non-zero verified deal weight {}", + sector.sector_number, + §or.verified_deal_weight + )) + } + Some(space) => space, + }; + // claims must be completely accounted for + if BigInt::from(*expected_verified_deal_space as i64) != old_verified_deal_space { + return Err(actor_error!(illegal_argument, "declared verified deal space in claims ({}) does not match verified deal space ({}) for sector {}", expected_verified_deal_space, old_verified_deal_space, sector.sector_number)); + } + // claim dropping is restricted to extensions at the end of a sector's life + + let dropping_claims = expected_verified_deal_space != new_verified_deal_space; + if dropping_claims && sector.expiration - curr_epoch >= policy.end_of_life_claim_drop_period + { + return Err(actor_error!( + forbidden, + 
"attempt to drop sectors with {} epochs < end of life claim drop period {} remaining", + sector.expiration - curr_epoch, + policy.end_of_life_claim_drop_period + )); + } + + new_sector.expiration = new_expiration; + // update deal weights to account for new duration + new_sector.deal_weight = deal_space * (new_sector.expiration - new_sector.activation); + new_sector.verified_deal_weight = BigInt::from(*new_verified_deal_space) + * (new_sector.expiration - new_sector.activation); + } else { + new_sector.expiration = new_expiration + } + Ok(new_sector) +} + +fn extend_non_simple_qap_sector( + new_expiration: ChainEpoch, + curr_epoch: ChainEpoch, + sector: &SectorOnChainInfo, +) -> Result { + let mut new_sector = sector.clone(); + // Remove "spent" deal weights for non simple_qa_power sectors with deal weight > 0 + let new_deal_weight = (§or.deal_weight * (sector.expiration - curr_epoch)) + .div_floor(&BigInt::from(sector.expiration - sector.activation)); + + let new_verified_deal_weight = (§or.verified_deal_weight + * (sector.expiration - curr_epoch)) + .div_floor(&BigInt::from(sector.expiration - sector.activation)); + + new_sector.expiration = new_expiration; + new_sector.deal_weight = new_deal_weight; + new_sector.verified_deal_weight = new_verified_deal_weight; + Ok(new_sector) +} + // TODO: We're using the current power+epoch reward. Technically, we // should use the power/reward at the time of termination. // https://github.com/filecoin-project/specs-actors/v6/pull/648 @@ -3859,19 +4093,13 @@ where Ok(()) } -/// Check expiry is exactly *the epoch before* the start of a proving period. -fn validate_expiration( - rt: &RT, +fn validate_expiration( + policy: &Policy, + curr_epoch: ChainEpoch, activation: ChainEpoch, expiration: ChainEpoch, seal_proof: RegisteredSealProof, -) -> Result<(), ActorError> -where - BS: Blockstore, - RT: Runtime, -{ - let policy = rt.policy(); - +) -> Result<(), ActorError> { // Expiration must be after activation. 
Check this explicitly to avoid an underflow below. if expiration <= activation { return Err(actor_error!( @@ -3895,13 +4123,13 @@ where } // expiration cannot exceed MaxSectorExpirationExtension from now - if expiration > rt.curr_epoch() + policy.max_sector_expiration_extension { + if expiration > curr_epoch + policy.max_sector_expiration_extension { return Err(actor_error!( illegal_argument, "invalid expiration {}, cannot be more than {} past current epoch {}", expiration, policy.max_sector_expiration_extension, - rt.curr_epoch() + curr_epoch )); } @@ -3911,13 +4139,13 @@ where })?; if expiration - activation > max_lifetime { return Err(actor_error!( - illegal_argument, - "invalid expiration {}, total sector lifetime ({}) cannot exceed {} after activation {}", - expiration, - expiration - activation, - max_lifetime, - activation - )); + illegal_argument, + "invalid expiration {}, total sector lifetime ({}) cannot exceed {} after activation {}", + expiration, + expiration - activation, + max_lifetime, + activation + )); } Ok(()) @@ -4286,6 +4514,31 @@ where Ok(()) } +fn get_claims( + rt: &mut RT, + ids: &Vec, +) -> Result, ActorError> +where + BS: Blockstore, + RT: Runtime, +{ + let params = ext::verifreg::GetClaimsParams { + provider: rt.message().receiver().id().unwrap(), + claim_ids: ids.clone(), + }; + let ret_raw = rt.send( + &VERIFIED_REGISTRY_ACTOR_ADDR, + ext::verifreg::GET_CLAIMS_METHOD as u64, + serialize(¶ms, "get claims parameters")?, + TokenAmount::zero(), + )?; + let claims_ret: ext::verifreg::GetClaimsReturn = deserialize(&ret_raw, "get claims return")?; + if (claims_ret.batch_info.success_count as usize) < ids.len() { + return Err(actor_error!(illegal_argument, "invalid claims")); + } + Ok(claims_ret.claims) +} + /// Assigns proving period offset randomly in the range [0, WPoStProvingPeriod) by hashing /// the actor's address and current epoch. 
fn assign_proving_period_offset( @@ -4562,36 +4815,21 @@ where let mut valid_pre_commits = Vec::default(); for pre_commit in pre_commits { - let deal_weights = if !pre_commit.info.deal_ids.is_empty() { - // Check (and activate) storage deals associated to sector. Abort if checks failed. - let res = rt.send( - &STORAGE_MARKET_ACTOR_ADDR, - ext::market::ACTIVATE_DEALS_METHOD, - RawBytes::serialize(ext::market::ActivateDealsParams { - deal_ids: pre_commit.info.deal_ids.clone(), - sector_expiry: pre_commit.info.expiration, - })?, - TokenAmount::zero(), - ); - match res { - Ok(res) => { - let activate_res: ext::market::ActivateDealsResult = res.deserialize()?; - activate_res.weights - } - Err(e) => { - info!( - "failed to activate deals on sector {}, dropping from prove commit set: {}", - pre_commit.info.sector_number, - e.msg() - ); - continue; - } + match activate_deals_and_claim_allocations( + rt, + pre_commit.clone().info.deal_ids, + pre_commit.info.expiration, + pre_commit.info.sector_number, + )? 
{ + None => { + info!( + "failed to activate deals on sector {}, dropping from prove commit set", + pre_commit.info.sector_number, + ); + continue; } - } else { - ext::market::DealWeights::default() + Some(deal_spaces) => valid_pre_commits.push((pre_commit, deal_spaces)), }; - - valid_pre_commits.push((pre_commit, deal_weights)); } // When all prove commits have failed abort early @@ -4609,7 +4847,7 @@ where let mut new_sectors = Vec::::new(); let mut total_pledge = TokenAmount::zero(); - for (pre_commit, deal_weights) in valid_pre_commits { + for (pre_commit, deal_spaces) in valid_pre_commits { // compute initial pledge let duration = pre_commit.info.expiration - activation; @@ -4622,11 +4860,14 @@ where continue; } + let deal_weight = deal_spaces.deal_space * duration; + let verified_deal_weight = deal_spaces.verified_deal_space * duration; + let power = qa_power_for_weight( info.sector_size, duration, - &deal_weights.deal_weight, - &deal_weights.verified_deal_weight, + &deal_weight, + &verified_deal_weight, ); let day_reward = expected_reward_for_power( @@ -4664,14 +4905,15 @@ where deal_ids: pre_commit.info.deal_ids, expiration: pre_commit.info.expiration, activation, - deal_weight: deal_weights.deal_weight, - verified_deal_weight: deal_weights.verified_deal_weight, + deal_weight, + verified_deal_weight, initial_pledge, expected_day_reward: day_reward, expected_storage_pledge: storage_pledge, replaced_sector_age: ChainEpoch::zero(), replaced_day_reward: TokenAmount::zero(), sector_key_cid: None, + simple_qa_power: true, }; new_sector_numbers.push(new_sector_info.sector_number); @@ -4736,6 +4978,79 @@ where Ok(()) } +// activate deals with builtin market and claim allocations with verified registry actor +// returns an error in case of a fatal programmer error +// returns Ok(None) in case deal activation or verified allocation claim fails +fn activate_deals_and_claim_allocations( + rt: &mut RT, + deal_ids: Vec, + sector_expiry: ChainEpoch, + sector_number: 
SectorNumber, +) -> Result, ActorError> +where + BS: Blockstore, + RT: Runtime, +{ + if deal_ids.is_empty() { + return Ok(Some(ext::market::DealSpaces::default())); + } + // Check (and activate) storage deals associated to sector. Abort if checks failed. + let activate_raw = rt.send( + &STORAGE_MARKET_ACTOR_ADDR, + ext::market::ACTIVATE_DEALS_METHOD, + RawBytes::serialize(ext::market::ActivateDealsParams { deal_ids, sector_expiry })?, + TokenAmount::zero(), + ); + let activate_res: ext::market::ActivateDealsResult = match activate_raw { + Ok(res) => res.deserialize()?, + Err(e) => { + info!("error activating deals on sector {}: {}", sector_number, e.msg()); + return Ok(None); + } + }; + + // If deal activation includes verified deals claim allocations + if activate_res.verified_infos.is_empty() { + return Ok(Some(ext::market::DealSpaces { + deal_space: activate_res.nonverified_deal_space, + ..Default::default() + })); + } + let sector_claims = activate_res + .verified_infos + .iter() + .map(|info| ext::verifreg::SectorAllocationClaim { + client: info.client, + allocation_id: info.allocation_id, + data: info.data, + size: info.size, + sector: sector_number, + sector_expiry, + }) + .collect(); + + let claim_raw = rt.send( + &VERIFIED_REGISTRY_ACTOR_ADDR, + ext::verifreg::CLAIM_ALLOCATIONS_METHOD, + RawBytes::serialize(ext::verifreg::ClaimAllocationsParams { + sectors: sector_claims, + all_or_nothing: true, + })?, + TokenAmount::zero(), + ); + let claim_res: ext::verifreg::ClaimAllocationsReturn = match claim_raw { + Ok(res) => res.deserialize()?, + Err(e) => { + info!("error claiming allocation on sector {}: {}", sector_number, e.msg()); + return Ok(None); + } + }; + Ok(Some(ext::market::DealSpaces { + deal_space: activate_res.nonverified_deal_space, + verified_deal_space: claim_res.claimed_space, + })) +} + // XXX: probably better to push this one level down into state fn balance_invariants_broken(e: Error) -> ActorError { ActorError::unchecked( @@ -4879,6 +5194,10 
@@ impl ActorCode for Actor { let res = Self::get_beneficiary(rt)?; Ok(RawBytes::serialize(res)?) } + Some(Method::ExtendSectorExpiration2) => { + Self::extend_sector_expiration2(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::default()) + } None => Err(actor_error!(unhandled_message, "Invalid method")), } } diff --git a/actors/miner/src/types.rs b/actors/miner/src/types.rs index a649c66de..eceb55006 100644 --- a/actors/miner/src/types.rs +++ b/actors/miner/src/types.rs @@ -1,10 +1,7 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use super::beneficiary::*; -use crate::commd::CompactCommD; use cid::Cid; -use fil_actors_runtime::DealWeight; use fvm_ipld_bitfield::BitField; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::{serde_bytes, BytesDe, Cbor}; @@ -20,6 +17,13 @@ use fvm_shared::sector::{ }; use fvm_shared::smooth::FilterEstimate; +use fil_actors_runtime::DealWeight; + +use crate::commd::CompactCommD; +use crate::ext::verifreg::ClaimID; + +use super::beneficiary::*; + pub type CronEvent = i64; pub const CRON_EVENT_WORKER_KEY_CHANGE: CronEvent = 0; @@ -146,6 +150,33 @@ pub struct ExpirationExtension { pub new_expiration: ChainEpoch, } +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ExtendSectorExpiration2Params { + pub extensions: Vec, +} + +impl Cbor for ExtendSectorExpiration2Params {} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct SectorClaim { + pub sector_number: SectorNumber, + pub maintain_claims: Vec, + pub drop_claims: Vec, +} + +impl Cbor for SectorClaim {} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ExpirationExtension2 { + pub deadline: u64, + pub partition: u64, + pub sectors: BitField, // IDs of sectors without FIL+ claims + pub sectors_with_claims: Vec, + pub new_expiration: ChainEpoch, +} + +impl Cbor for ExpirationExtension2 {} + #[derive(Serialize_tuple, Deserialize_tuple)] pub struct 
TerminateSectorsParams { pub terminations: Vec, @@ -331,6 +362,8 @@ pub struct SectorOnChainInfo { pub replaced_day_reward: TokenAmount, /// The original SealedSectorCID, only gets set on the first ReplicaUpdate pub sector_key_cid: Option, + // Flag for QA power mechanism introduced in fip 0045 + pub simple_qa_power: bool, } #[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize_tuple, Deserialize_tuple)] diff --git a/actors/miner/tests/aggregate_prove_commit.rs b/actors/miner/tests/aggregate_prove_commit.rs index d4fce2cac..adc72970e 100644 --- a/actors/miner/tests/aggregate_prove_commit.rs +++ b/actors/miner/tests/aggregate_prove_commit.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use std::iter::FromIterator; +use fil_actor_market::DealSpaces; use fil_actor_miner::{ initial_pledge_for_power, qa_power_for_weight, PowerPair, QUALITY_BASE_MULTIPLIER, VERIFIED_DEAL_WEIGHT_MULTIPLIER, @@ -10,7 +10,6 @@ use fvm_ipld_bitfield::BitField; use fvm_shared::{bigint::BigInt, clock::ChainEpoch, econ::TokenAmount}; mod util; -use fil_actor_market::DealWeights; use fil_actors_runtime::test_utils::make_piece_cid; use num_traits::Zero; use util::*; @@ -35,14 +34,14 @@ fn valid_precommits_then_aggregate_provecommit() { let prove_commit_epoch = precommit_epoch + rt.policy.pre_commit_challenge_delay + 1; // something on deadline boundary but > 180 days + let verified_deal_space = actor.sector_size as u64; let expiration = dl_info.period_end() + rt.policy.wpost_proving_period * DEFAULT_SECTOR_EXPIRATION; // fill the sector with verified seals - let deal_space = actor.sector_size as u64 * (expiration - prove_commit_epoch) as u64; - let deal_weights = DealWeights { - deal_weight: BigInt::zero(), - deal_space, - verified_deal_weight: BigInt::from(deal_space), + let duration = expiration - prove_commit_epoch; + let deal_spaces = DealSpaces { + deal_space: BigInt::zero(), + verified_deal_space: BigInt::from(verified_deal_space), }; let mut precommits = vec![]; @@ -60,12 +59,13 @@ fn 
valid_precommits_then_aggregate_provecommit() { rt.set_epoch(prove_commit_epoch); rt.set_balance(TokenAmount::from_whole(1000)); - let pcc = ProveCommitConfig { - deal_weights: HashMap::from_iter( - precommits.iter().map(|pc| (pc.info.sector_number, deal_weights.clone())), - ), - ..Default::default() - }; + let mut pcc = ProveCommitConfig::empty(); + for pc in &precommits { + pcc.add_verified_deals( + pc.info.sector_number, + vec![test_verified_deal(verified_deal_space)], + ); + } actor .prove_commit_aggregate_sector( @@ -90,11 +90,13 @@ fn valid_precommits_then_aggregate_provecommit() { // The sector is exactly full with verified deals, so expect fully verified power. let expected_power = BigInt::from(actor.sector_size as i64) * (VERIFIED_DEAL_WEIGHT_MULTIPLIER.clone() / QUALITY_BASE_MULTIPLIER.clone()); + let deal_weight = deal_spaces.deal_space * duration; + let verified_deal_weight = deal_spaces.verified_deal_space * duration; let qa_power = qa_power_for_weight( actor.sector_size, expiration - rt.epoch, - &deal_weights.deal_weight, - &deal_weights.verified_deal_weight, + &deal_weight, + &verified_deal_weight, ); assert_eq!(expected_power, qa_power); let expected_initial_pledge = initial_pledge_for_power( @@ -111,8 +113,8 @@ fn valid_precommits_then_aggregate_provecommit() { for sector_no in sector_nos_bf.iter() { let sector = actor.get_sector(&rt, sector_no); // expect deal weights to be transferred to on chain info - assert_eq!(deal_weights.deal_weight, sector.deal_weight); - assert_eq!(deal_weights.verified_deal_weight, sector.verified_deal_weight); + assert_eq!(deal_weight, sector.deal_weight); + assert_eq!(verified_deal_weight, sector.verified_deal_weight); // expect activation epoch to be current epoch assert_eq!(rt.epoch, sector.activation); diff --git a/actors/miner/tests/extend_sector_expiration_test.rs b/actors/miner/tests/extend_sector_expiration_test.rs index 5585ed528..e51888134 100644 --- a/actors/miner/tests/extend_sector_expiration_test.rs +++ 
b/actors/miner/tests/extend_sector_expiration_test.rs @@ -1,13 +1,24 @@ +use fil_actor_market::VerifiedDealInfo; +use fil_actor_miner::ext::verifreg::Claim as FILPlusClaim; use fil_actor_miner::{ power_for_sector, seal_proof_sector_maximum_lifetime, ExpirationExtension, - ExtendSectorExpirationParams, PoStPartition, SectorOnChainInfo, State, + ExpirationExtension2, ExtendSectorExpiration2Params, ExtendSectorExpirationParams, + PoStPartition, SectorClaim, SectorOnChainInfo, State, }; use fil_actors_runtime::{ + actor_error, runtime::{Runtime, RuntimePolicy}, - test_utils::{expect_abort_contains_message, MockRuntime}, + test_utils::{expect_abort_contains_message, make_piece_cid, MockRuntime}, }; use fvm_ipld_bitfield::BitField; -use fvm_shared::{clock::ChainEpoch, error::ExitCode, sector::RegisteredSealProof}; +use fvm_shared::{ + address::Address, + clock::ChainEpoch, + error::ExitCode, + sector::{RegisteredSealProof, SectorNumber}, + ActorID, +}; +use std::collections::HashMap; mod util; use itertools::Itertools; @@ -349,3 +360,308 @@ fn supports_extensions_off_deadline_boundary() { h.check_state(&rt); } + +fn commit_sector_verified_deals( + verified_deals: &Vec, + h: &mut ActorHarness, + rt: &mut MockRuntime, +) -> SectorOnChainInfo { + h.construct_and_verify(rt); + assert!(!verified_deals.is_empty()); + + let mut pcc = ProveCommitConfig::empty(); + pcc.add_verified_deals(h.next_sector_no, verified_deals.clone()); + + let sector_info = &h.commit_and_prove_sectors_with_cfgs( + rt, + 1, + DEFAULT_SECTOR_EXPIRATION as u64, + vec![vec![42]], + true, + pcc, + )[0]; + + sector_info.clone() +} + +// assert that state tracks an expiration at the provided epoch in the provided deadline and partition for the provided sector +fn check_for_expiration( + h: &mut ActorHarness, + rt: &mut MockRuntime, + expiration: ChainEpoch, + sector_number: SectorNumber, + deadline_index: u64, + partition_index: u64, +) { + let new_sector = h.get_sector(rt, sector_number); + 
assert_eq!(expiration, new_sector.expiration); + let state: State = rt.get_state(); + let quant = state.quant_spec_for_deadline(rt.policy(), deadline_index); + + // assert that new expiration exists + let (_, mut partition) = h.get_deadline_and_partition(rt, deadline_index, partition_index); + let expiration_set = partition.pop_expired_sectors(rt.store(), expiration - 1, quant).unwrap(); + assert!(expiration_set.is_empty()); + + let expiration_set = + partition.pop_expired_sectors(rt.store(), quant.quantize_up(expiration), quant).unwrap(); + assert_eq!(expiration_set.len(), 1); + assert!(expiration_set.on_time_sectors.get(sector_number)); + + h.check_state(rt); +} + +fn make_claim( + claim_id: u64, + sector: &SectorOnChainInfo, + client: ActorID, + provider: ActorID, + new_expiration: ChainEpoch, + deal: &VerifiedDealInfo, + term_min: ChainEpoch, +) -> FILPlusClaim { + FILPlusClaim { + provider, + client, + data: make_piece_cid(format!("piece for claim {}", claim_id).as_bytes()), + size: deal.size, + term_min, + term_max: new_expiration - sector.activation, + term_start: sector.activation, + sector: sector.sector_number, + } +} + +#[test] +fn update_expiration_multiple_claims() { + let (mut h, mut rt) = setup(); + // add in verified deal + let verified_deals = vec![ + test_verified_deal(h.sector_size as u64 / 2), + test_verified_deal(h.sector_size as u64 / 2), + ]; + let old_sector = commit_sector_verified_deals(&verified_deals, &mut h, &mut rt); + h.advance_and_submit_posts(&mut rt, &vec![old_sector.clone()]); + + let state: State = rt.get_state(); + + let (deadline_index, partition_index) = + state.find_sector(rt.policy(), rt.store(), old_sector.sector_number).unwrap(); + + let extension = 42 * rt.policy().wpost_proving_period; + let new_expiration = old_sector.expiration + extension; + + let claim_ids = vec![400, 500]; + let client = Address::new_id(3000).id().unwrap(); + + let claim0 = make_claim( + claim_ids[0], + &old_sector, + client, + 
h.receiver.id().unwrap(), + new_expiration, + &verified_deals[0], + rt.policy.minimum_verified_allocation_term, + ); + let claim1 = make_claim( + claim_ids[1], + &old_sector, + client, + h.receiver.id().unwrap(), + new_expiration, + &verified_deals[1], + rt.policy.minimum_verified_allocation_term, + ); + + let mut claims = HashMap::new(); + claims.insert(claim_ids[0], Ok(claim0)); + claims.insert(claim_ids[1], Ok(claim1)); + + let params = ExtendSectorExpiration2Params { + extensions: vec![ExpirationExtension2 { + deadline: deadline_index, + partition: partition_index, + sectors: BitField::new(), + new_expiration, + sectors_with_claims: vec![SectorClaim { + sector_number: old_sector.sector_number, + maintain_claims: claim_ids, + drop_claims: vec![], + }], + }], + }; + + h.extend_sectors2(&mut rt, params, claims).unwrap(); + + // assert sector expiration is set to the new value + check_for_expiration( + &mut h, + &mut rt, + new_expiration, + old_sector.sector_number, + deadline_index, + partition_index, + ); +} + +#[test] +fn update_expiration2_failure_cases() { + let (mut h, mut rt) = setup(); + // add in verified deal + let verified_deals = vec![ + test_verified_deal(h.sector_size as u64 / 2), + test_verified_deal(h.sector_size as u64 / 2), + ]; + let old_sector = commit_sector_verified_deals(&verified_deals, &mut h, &mut rt); + h.advance_and_submit_posts(&mut rt, &vec![old_sector.clone()]); + + let state: State = rt.get_state(); + + let (deadline_index, partition_index) = + state.find_sector(rt.policy(), rt.store(), old_sector.sector_number).unwrap(); + + let extension = 42 * rt.policy().wpost_proving_period; + let new_expiration = old_sector.expiration + extension; + + let claim_ids = vec![400, 500]; + let client = Address::new_id(3000).id().unwrap(); + + let claim0 = make_claim( + claim_ids[0], + &old_sector, + client, + h.receiver.id().unwrap(), + new_expiration, + &verified_deals[0], + rt.policy.minimum_verified_allocation_term, + ); + let mut claim1 = 
make_claim( + claim_ids[1], + &old_sector, + client, + h.receiver.id().unwrap(), + new_expiration, + &verified_deals[1], + rt.policy.minimum_verified_allocation_term, + ); + + /* 1. Claim used by sector not referenced in extension params */ + { + let mut claims = HashMap::new(); + claims.insert(claim_ids[0], Ok(claim0.clone())); + + let params = ExtendSectorExpiration2Params { + extensions: vec![ExpirationExtension2 { + deadline: deadline_index, + partition: partition_index, + sectors: BitField::new(), + new_expiration, + sectors_with_claims: vec![SectorClaim { + sector_number: old_sector.sector_number, + maintain_claims: vec![claim_ids[0]], + drop_claims: vec![], + }], + }], + }; + + let res = h.extend_sectors2(&mut rt, params, claims); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "does not match verified deal space", + res, + ); + // assert sector expiration is same as the old value + check_for_expiration( + &mut h, + &mut rt, + old_sector.expiration, + old_sector.sector_number, + deadline_index, + partition_index, + ); + rt.replace_state(&state); + rt.reset(); + } + + /* Claim not found */ + { + let mut claims = HashMap::new(); + claims.insert(claim_ids[0], Ok(claim0.clone())); + claims.insert(claim_ids[1], Err(actor_error!(not_found, "claim not found"))); + + let params = ExtendSectorExpiration2Params { + extensions: vec![ExpirationExtension2 { + deadline: deadline_index, + partition: partition_index, + sectors: BitField::new(), + new_expiration, + sectors_with_claims: vec![SectorClaim { + sector_number: old_sector.sector_number, + maintain_claims: claim_ids.clone(), + drop_claims: vec![], + }], + }], + }; + + let res = h.extend_sectors2(&mut rt, params, claims); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "failed to get claims for sector", + res, + ); + // assert sector expiration is set to the new value + check_for_expiration( + &mut h, + &mut rt, + old_sector.expiration, + old_sector.sector_number, + 
deadline_index, + partition_index, + ); + rt.replace_state(&state); + rt.reset(); + } + /* Bad claim -- max term too small for expiration */ + { + claim1.term_max -= 1; + let mut claims = HashMap::new(); + claims.insert(claim_ids[0], Ok(claim0)); + claims.insert(claim_ids[1], Ok(claim1)); + + let params = ExtendSectorExpiration2Params { + extensions: vec![ExpirationExtension2 { + deadline: deadline_index, + partition: partition_index, + sectors: BitField::new(), + new_expiration, + sectors_with_claims: vec![SectorClaim { + sector_number: old_sector.sector_number, + maintain_claims: claim_ids, + drop_claims: vec![], + }], + }], + }; + + let res = h.extend_sectors2(&mut rt, params, claims); + expect_abort_contains_message( + ExitCode::USR_FORBIDDEN, + &format!( + "claim only allows extension to {} but declared new expiration is {}", + new_expiration - 1, + new_expiration + ), + res, + ); + // assert sector expiration is same as the old value + check_for_expiration( + &mut h, + &mut rt, + old_sector.expiration, + old_sector.sector_number, + deadline_index, + partition_index, + ); + rt.replace_state(&state); + rt.reset(); + } +} diff --git a/actors/miner/tests/prove_commit.rs b/actors/miner/tests/prove_commit.rs index 814e99e85..aa58535b5 100644 --- a/actors/miner/tests/prove_commit.rs +++ b/actors/miner/tests/prove_commit.rs @@ -1,4 +1,4 @@ -use fil_actor_market::{DealWeights, SectorDealData}; +use fil_actor_market::{DealSpaces, SectorDealData}; use fil_actor_miner::{ initial_pledge_for_power, max_prove_commit_duration, pre_commit_deposit_for_power, qa_power_for_weight, qa_power_max, PowerPair, PreCommitSectorBatchParams, VestSpec, @@ -46,12 +46,10 @@ fn prove_single_sector() { let expiration = dl_info.period_end() + DEFAULT_SECTOR_EXPIRATION * rt.policy.wpost_proving_period; // something on deadline boundary but > 180 days // Fill the sector with verified deals - let sector_weight = - DealWeight::from(h.sector_size as u64) * DealWeight::from(expiration -
prove_commit_epoch); - let deal_weight = DealWeights { - deal_space: h.sector_size as u64, - deal_weight: DealWeight::zero(), - verified_deal_weight: sector_weight, + let verified_deal = test_verified_deal(h.sector_size as u64); + let deal_spaces = DealSpaces { + deal_space: BigInt::zero(), + verified_deal_space: BigInt::from(verified_deal.size.0), }; // Pre-commit with a deal in order to exercise non-zero deal weights. @@ -75,10 +73,8 @@ fn prove_single_sector() { // run prove commit logic rt.set_epoch(prove_commit_epoch); rt.balance.replace(TokenAmount::from_whole(1000)); - let pcc = ProveCommitConfig { - deal_weights: HashMap::from([(sector_no, deal_weight.clone())]), - ..Default::default() - }; + let mut pcc = ProveCommitConfig::empty(); + pcc.add_verified_deals(sector_no, vec![verified_deal]); let sector = h .prove_commit_sector_and_confirm( @@ -104,21 +100,20 @@ fn prove_single_sector() { assert!(st.pre_commit_deposits.is_zero()); // The sector is exactly full with verified deals, so expect fully verified power. 
+ let duration = precommit.info.expiration - prove_commit_epoch; + let deal_weight = deal_spaces.deal_space * duration; + let verified_deal_weight = deal_spaces.verified_deal_space * duration; let expected_power = StoragePower::from(h.sector_size as u64) * (VERIFIED_DEAL_WEIGHT_MULTIPLIER / QUALITY_BASE_MULTIPLIER); - let qa_power = qa_power_for_weight( - h.sector_size, - precommit.info.expiration - rt.epoch, - &deal_weight.deal_weight, - &deal_weight.verified_deal_weight, - ); + let qa_power = + qa_power_for_weight(h.sector_size, duration, &deal_weight, &verified_deal_weight); assert_eq!(expected_power, qa_power); let sector_power = PowerPair { raw: StoragePower::from(h.sector_size as u64), qa: qa_power.clone() }; // expect deal weights to be transferred to on chain info - assert_eq!(deal_weight.deal_weight, sector.deal_weight); - assert_eq!(deal_weight.verified_deal_weight, sector.verified_deal_weight); + assert_eq!(deal_weight, sector.deal_weight); + assert_eq!(verified_deal_weight, sector.verified_deal_weight); // expect initial plege of sector to be set, and be total pledge requirement let expected_initial_pledge = initial_pledge_for_power( @@ -182,18 +177,15 @@ fn prove_sectors_from_batch_pre_commit() { h.make_pre_commit_params(102, precommit_epoch - 1, sector_expiration, vec![2, 3]), // 2 * 16GiB verified deals ]; - let deal_space = 32 << 30; - let deal_weight = DealWeight::zero(); + let deal_space: i64 = 32 << 30; let prove_commit_epoch = precommit_epoch + rt.policy.pre_commit_challenge_delay + 1; let deal_lifespan = sector_expiration - prove_commit_epoch; + let verified_deal1 = test_verified_deal(deal_space as u64); + let verified_deal2 = test_verified_deal(deal_space as u64 / 2); + let verified_deal3 = test_verified_deal(deal_space as u64 / 2); + let deal_weight = DealWeight::zero(); let verified_deal_weight = deal_space * DealWeight::from(deal_lifespan); - let deal_weights = DealWeights { - deal_space, - deal_weight: deal_weight.clone(), - 
verified_deal_weight: verified_deal_weight.clone(), - }; - let conf = PreCommitBatchConfig { sector_deal_data: vec![ SectorDealData { commd: None }, @@ -270,10 +262,8 @@ fn prove_sectors_from_batch_pre_commit() { // Prove the next, with one deal { let precommit = &precommits[1]; - let pcc = ProveCommitConfig { - deal_weights: HashMap::from([(precommit.info.sector_number, deal_weights.clone())]), - ..Default::default() - }; + let mut pcc = ProveCommitConfig::empty(); + pcc.add_verified_deals(precommit.info.sector_number, vec![verified_deal1]); let sector = h .prove_commit_sector_and_confirm( &mut rt, @@ -299,10 +289,8 @@ fn prove_sectors_from_batch_pre_commit() { // Prove the last { let precommit = &precommits[2]; - let pcc = ProveCommitConfig { - deal_weights: HashMap::from([(precommit.info.sector_number, deal_weights)]), - ..Default::default() - }; + let mut pcc = ProveCommitConfig::empty(); + pcc.add_verified_deals(precommit.info.sector_number, vec![verified_deal2, verified_deal3]); let sector = h .prove_commit_sector_and_confirm( &mut rt, diff --git a/actors/miner/tests/util.rs b/actors/miner/tests/util.rs index 996fb0a2c..9b61a80e4 100644 --- a/actors/miner/tests/util.rs +++ b/actors/miner/tests/util.rs @@ -2,12 +2,15 @@ use fil_actor_account::Method as AccountMethod; use fil_actor_market::{ - ActivateDealsParams, ActivateDealsResult, DealWeights, Method as MarketMethod, - OnMinerSectorsTerminateParams, SectorDealData, SectorDeals, VerifyDealsForActivationParams, - VerifyDealsForActivationReturn, + ActivateDealsParams, ActivateDealsResult, DealSpaces, Method as MarketMethod, + OnMinerSectorsTerminateParams, SectorDealData, SectorDeals, VerifiedDealInfo, + VerifyDealsForActivationParams, VerifyDealsForActivationReturn, }; use fil_actor_miner::ext::market::ON_MINER_SECTORS_TERMINATE_METHOD; use fil_actor_miner::ext::power::{UPDATE_CLAIMED_POWER_METHOD, UPDATE_PLEDGE_TOTAL_METHOD}; +use fil_actor_miner::ext::verifreg::{ + ClaimAllocationsParams, 
ClaimAllocationsReturn, SectorAllocationClaim, CLAIM_ALLOCATIONS_METHOD, +}; use fil_actor_miner::{ aggregate_pre_commit_network_fee, aggregate_prove_commit_network_fee, consensus_fault_penalty, initial_pledge_for_power, locked_reward_from_reward, max_prove_commit_duration, @@ -18,8 +21,8 @@ use fil_actor_miner::{ CompactCommD, CompactPartitionsParams, CompactSectorNumbersParams, ConfirmSectorProofsParams, CronEventPayload, Deadline, DeadlineInfo, Deadlines, DeclareFaultsParams, DeclareFaultsRecoveredParams, DeferredCronEventParams, DisputeWindowedPoStParams, - ExpirationQueue, ExpirationSet, ExtendSectorExpirationParams, FaultDeclaration, - GetBeneficiaryReturn, GetControlAddressesReturn, Method, + ExpirationQueue, ExpirationSet, ExtendSectorExpiration2Params, ExtendSectorExpirationParams, + FaultDeclaration, GetBeneficiaryReturn, GetControlAddressesReturn, Method, MinerConstructorParams as ConstructorParams, MinerInfo, Partition, PendingBeneficiaryChange, PoStPartition, PowerPair, PreCommitSectorBatchParams, PreCommitSectorBatchParams2, PreCommitSectorParams, ProveCommitSectorParams, RecoveryDeclaration, @@ -33,12 +36,17 @@ use fil_actor_power::{ CurrentTotalPowerReturn, EnrollCronEventParams, Method as PowerMethod, UpdateClaimedPowerParams, }; use fil_actor_reward::{Method as RewardMethod, ThisEpochRewardReturn}; + +use fil_actor_miner::ext::verifreg::{ + Claim as FILPlusClaim, ClaimID, GetClaimsParams, GetClaimsReturn, +}; + use fil_actors_runtime::runtime::{DomainSeparationTag, Policy, Runtime, RuntimePolicy}; -use fil_actors_runtime::test_utils::*; +use fil_actors_runtime::{test_utils::*, BatchReturn, BatchReturnGen}; use fil_actors_runtime::{ ActorDowncast, ActorError, Array, DealWeight, MessageAccumulator, BURNT_FUNDS_ACTOR_ADDR, CALLER_TYPES_SIGNABLE, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, - STORAGE_POWER_ACTOR_ADDR, + STORAGE_POWER_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; use fvm_ipld_amt::Amt; use fvm_shared::bigint::Zero; @@ 
-58,6 +66,7 @@ use fvm_shared::consensus::ConsensusFault; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; use fvm_shared::error::ExitCode; +use fvm_shared::piece::PaddedPieceSize; use fvm_shared::randomness::Randomness; use fvm_shared::randomness::RANDOMNESS_LENGTH; use fvm_shared::sector::{ @@ -345,6 +354,25 @@ impl ActorHarness { lifetime_periods: u64, deal_ids: Vec>, first: bool, + ) -> Vec { + self.commit_and_prove_sectors_with_cfgs( + rt, + num_sectors, + lifetime_periods, + deal_ids, + first, + ProveCommitConfig::empty(), + ) + } + + pub fn commit_and_prove_sectors_with_cfgs( + &mut self, + rt: &mut MockRuntime, + num_sectors: usize, + lifetime_periods: u64, + deal_ids: Vec>, + first: bool, + prove_cfg: ProveCommitConfig, // must be same length as num_sectors ) -> Vec { let precommit_epoch = rt.epoch; let deadline = self.get_deadline_info(rt); @@ -385,7 +413,7 @@ impl ActorHarness { rt, &pc, self.make_prove_commit_params(pc.info.sector_number), - ProveCommitConfig::empty(), + prove_cfg.clone(), ) .unwrap(); info.push(sector); @@ -1002,24 +1030,24 @@ impl ActorHarness { let mut valid_pcs = Vec::new(); for pc in pcs { if !pc.info.deal_ids.is_empty() { - let params = ActivateDealsParams { + let deal_spaces = cfg.deal_spaces(&pc.info.sector_number); + let activate_params = ActivateDealsParams { deal_ids: pc.info.deal_ids.clone(), sector_expiry: pc.info.expiration, }; - let mut exit = ExitCode::OK; + let mut activate_deals_exit = ExitCode::OK; match cfg.verify_deals_exit.get(&pc.info.sector_number) { Some(exit_code) => { - exit = *exit_code; - } - None => { - valid_pcs.push(pc); + activate_deals_exit = *exit_code; } + None => (), } let ret = ActivateDealsResult { - weights: cfg - .deal_weights + nonverified_deal_space: deal_spaces.deal_space, + verified_infos: cfg + .verified_deal_infos .get(&pc.info.sector_number) .cloned() .unwrap_or_default(), @@ -1028,11 +1056,49 @@ impl ActorHarness { rt.expect_send( STORAGE_MARKET_ACTOR_ADDR, 
MarketMethod::ActivateDeals as u64, - RawBytes::serialize(params).unwrap(), + RawBytes::serialize(activate_params).unwrap(), TokenAmount::zero(), - RawBytes::serialize(ret).unwrap(), - exit, + RawBytes::serialize(&ret).unwrap(), + activate_deals_exit, ); + if ret.verified_infos.is_empty() { + if activate_deals_exit == ExitCode::OK { + valid_pcs.push(pc); + } + } else { + // claim FIL+ allocations + let sector_claims = ret + .verified_infos + .iter() + .map(|info| SectorAllocationClaim { + client: info.client, + allocation_id: info.allocation_id, + data: info.data, + size: info.size, + sector: pc.info.sector_number, + sector_expiry: pc.info.expiration, + }) + .collect(); + + let claim_allocation_params = + ClaimAllocationsParams { sectors: sector_claims, all_or_nothing: true }; + + // TODO handle failures of claim allocations + // use exit code map for claim allocations in config + valid_pcs.push(pc); + let claim_allocs_ret = ClaimAllocationsReturn { + batch_info: BatchReturn::ok(ret.verified_infos.len() as u32), + claimed_space: deal_spaces.verified_deal_space, + }; + rt.expect_send( + VERIFIED_REGISTRY_ACTOR_ADDR, + CLAIM_ALLOCATIONS_METHOD as u64, + RawBytes::serialize(&claim_allocation_params).unwrap(), + TokenAmount::zero(), + RawBytes::serialize(&claim_allocs_ret).unwrap(), + ExitCode::OK, + ); + } } else { + valid_pcs.push(pc); } @@ -1044,16 +1110,17 @@ impl ActorHarness { let mut expected_raw_power = BigInt::from(0); for pc in valid_pcs { - let weights = - cfg.deal_weights.get(&pc.info.sector_number).cloned().unwrap_or_default(); + let spaces = cfg.deal_spaces(&pc.info.sector_number); let duration = pc.info.expiration - rt.epoch; + let deal_weight = spaces.deal_space * duration; + let verified_deal_weight = spaces.verified_deal_space * duration; if duration >= rt.policy.min_sector_expiration { let qa_power_delta = qa_power_for_weight( self.sector_size, duration, - &weights.deal_weight, - &weights.verified_deal_weight, + &deal_weight, + &verified_deal_weight, 
); expected_qa_power += &qa_power_delta; expected_raw_power += self.sector_size as u64; @@ -2296,6 +2363,85 @@ impl ActorHarness { Ok(ret) } + pub fn extend_sectors2( + &self, + rt: &mut MockRuntime, + mut params: ExtendSectorExpiration2Params, + expected_claims: HashMap>, + ) -> Result { + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, self.worker); + rt.expect_validate_caller_addr(self.caller_addrs()); + + // TODO handle qa power changes for dropping claims once claim dropping logic is added + for extension in params.extensions.iter_mut() { + for sc in &extension.sectors_with_claims { + // construct expected return value + let mut claims = Vec::new(); + let mut batch_gen = BatchReturnGen::new(sc.maintain_claims.len()); + for claim_id in &sc.maintain_claims { + match expected_claims.get(&claim_id).unwrap().clone() { + Ok(claim) => { + batch_gen.add_success(); + claims.push(claim); + } + Err(ae) => { + batch_gen.add_fail(ae.exit_code()); + } + } + } + + rt.expect_send( + VERIFIED_REGISTRY_ACTOR_ADDR, + fil_actor_miner::ext::verifreg::GET_CLAIMS_METHOD as u64, + RawBytes::serialize(GetClaimsParams { + provider: self.receiver.id().unwrap(), + claim_ids: sc.maintain_claims.clone(), + }) + .unwrap(), + TokenAmount::zero(), + RawBytes::serialize(GetClaimsReturn { batch_info: batch_gen.gen(), claims }) + .unwrap(), + ExitCode::OK, + ); + } + } + + // Handle non claim bearing sector extensions + let mut qa_delta = BigInt::zero(); + for extension in params.extensions.iter_mut() { + for sector_nr in extension.sectors.validate().unwrap().iter() { + let sector = self.get_sector(&rt, sector_nr); + let mut new_sector = sector.clone(); + new_sector.expiration = extension.new_expiration; + qa_delta += qa_power_for_sector(self.sector_size, &new_sector) + - qa_power_for_sector(self.sector_size, §or); + } + } + + if !qa_delta.is_zero() { + let params = UpdateClaimedPowerParams { + raw_byte_delta: BigInt::zero(), + quality_adjusted_delta: qa_delta, + }; + rt.expect_send( + 
STORAGE_POWER_ACTOR_ADDR, + UPDATE_CLAIMED_POWER_METHOD, + RawBytes::serialize(params).unwrap(), + TokenAmount::zero(), + RawBytes::default(), + ExitCode::OK, + ); + } + + let ret = rt.call::( + Method::ExtendSectorExpiration2 as u64, + &RawBytes::serialize(params).unwrap(), + )?; + + rt.verify(); + Ok(ret) + } + pub fn compact_partitions( &self, rt: &mut MockRuntime, @@ -2397,13 +2543,50 @@ impl PreCommitConfig { #[derive(Default, Clone)] pub struct ProveCommitConfig { pub verify_deals_exit: HashMap, - pub deal_weights: HashMap, + pub claim_allocs_exit: HashMap, + pub deal_space: HashMap, + pub verified_deal_infos: HashMap>, +} + +#[allow(dead_code)] +pub fn test_verified_deal(space: u64) -> VerifiedDealInfo { + // only set size for testing and zero out remaining fields + VerifiedDealInfo { + client: 0, + allocation_id: 0, + data: make_piece_cid("test verified deal".as_bytes()), + size: PaddedPieceSize(space), + } } #[allow(dead_code)] impl ProveCommitConfig { pub fn empty() -> ProveCommitConfig { - ProveCommitConfig { verify_deals_exit: HashMap::new(), deal_weights: HashMap::new() } + ProveCommitConfig { + verify_deals_exit: HashMap::new(), + claim_allocs_exit: HashMap::new(), + deal_space: HashMap::new(), + verified_deal_infos: HashMap::new(), + } + } + + pub fn add_verified_deals(&mut self, sector: SectorNumber, deals: Vec) { + self.verified_deal_infos.insert(sector, deals); + } + + pub fn deal_spaces(&self, sector: &SectorNumber) -> DealSpaces { + let verified_deal_space = match self.verified_deal_infos.get(sector) { + None => BigInt::zero(), + Some(infos) => infos + .iter() + .map(|info| BigInt::from(info.size.0)) + .reduce(|x, a| x + a) + .unwrap_or_default(), + }; + DealSpaces { + deal_space: self.deal_space.get(sector).cloned().unwrap_or_default(), + verified_deal_space, + } } } diff --git a/actors/multisig/Cargo.toml b/actors/multisig/Cargo.toml index 2bffe3474..f7d85d2a8 100644 --- a/actors/multisig/Cargo.toml +++ b/actors/multisig/Cargo.toml @@ -14,18 
+14,20 @@ keywords = ["filecoin", "web3", "wasm"] crate-type = ["cdylib", "lib"] [dependencies] -fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime" } -fvm_shared = { version = "2.0.0-alpha.2", default-features = false } -fvm_ipld_hamt = "0.5.1" -num-traits = "0.2.14" -num-derive = "0.3.3" +fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime"} + +anyhow = "1.0.65" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } +frc42_dispatch = "1.0.0" +fvm_ipld_blockstore = "0.1.1" +fvm_ipld_encoding = "0.2.2" +fvm_ipld_hamt = "0.5.1" +fvm_shared = { version = "2.0.0-alpha.2", default-features = false } indexmap = { version = "1.8.0", features = ["serde-1"] } integer-encoding = { version = "3.0.3", default-features = false } +num-derive = "0.3.3" +num-traits = "0.2.14" serde = { version = "1.0.136", features = ["derive"] } -anyhow = "1.0.65" -fvm_ipld_blockstore = "0.1.1" -fvm_ipld_encoding = "0.2.2" [dev-dependencies] fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime", features = ["test_utils", "sector-default"] } diff --git a/actors/multisig/src/lib.rs b/actors/multisig/src/lib.rs index 9851322e4..a95bfc7c9 100644 --- a/actors/multisig/src/lib.rs +++ b/actors/multisig/src/lib.rs @@ -3,22 +3,22 @@ use std::collections::BTreeSet; -use fil_actors_runtime::cbor::serialize_vec; -use fil_actors_runtime::runtime::{builtins::Type, ActorCode, Primitives, Runtime}; -use fil_actors_runtime::{ - actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_actor_id, ActorContext, - ActorError, AsActorError, Map, INIT_ACTOR_ADDR, -}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::address::Address; - use fvm_shared::econ::TokenAmount; use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, HAMT_BIT_WIDTH, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::{FromPrimitive, Zero}; +use fil_actors_runtime::cbor::serialize_vec; +use 
fil_actors_runtime::runtime::{builtins::Type, ActorCode, Primitives, Runtime}; +use fil_actors_runtime::{ + actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_actor_id, ActorContext, + ActorError, AsActorError, Map, INIT_ACTOR_ADDR, +}; + pub use self::state::*; pub use self::types::*; @@ -42,6 +42,7 @@ pub enum Method { SwapSigner = 7, ChangeNumApprovalsThreshold = 8, LockBalance = 9, + UniversalReceiverHook = frc42_dispatch::method_hash!("Receive"), } /// Multisig Actor @@ -472,6 +473,19 @@ impl Actor { execute_transaction_if_approved(rt, &st, tx_id, &txn) } + + // Always succeeds, accepting any transfers. + pub fn universal_receiver_hook( + rt: &mut RT, + _params: &RawBytes, + ) -> Result<(), ActorError> + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + Ok(()) + } } fn execute_transaction_if_approved( @@ -616,6 +630,10 @@ impl ActorCode for Actor { Self::lock_balance(rt, cbor::deserialize_params(params)?)?; Ok(RawBytes::default()) } + Some(Method::UniversalReceiverHook) => { + Self::universal_receiver_hook(rt, params)?; + Ok(RawBytes::default()) + } None => Err(actor_error!(unhandled_message, "Invalid method")), } } diff --git a/actors/multisig/tests/multisig_actor_test.rs b/actors/multisig/tests/multisig_actor_test.rs index 2c1adb30f..b1ab809b1 100644 --- a/actors/multisig/tests/multisig_actor_test.rs +++ b/actors/multisig/tests/multisig_actor_test.rs @@ -15,7 +15,7 @@ use fvm_shared::bigint::Zero; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; use fvm_shared::error::ExitCode; -use fvm_shared::METHOD_SEND; +use fvm_shared::{MethodNum, METHOD_SEND}; mod util; @@ -2408,3 +2408,22 @@ mod lock_balance_tests { check_state(&rt); } } + +#[test] +fn token_receiver() { + let msig = Address::new_id(1000); + let anne = Address::new_id(101); + let bob = Address::new_id(102); + + let mut rt = construct_runtime(msig); + let h = util::ActorHarness::new(); + h.construct_and_verify(&mut rt, 2, 0, 
0, vec![anne, bob]); + + rt.expect_validate_caller_any(); + let ret = rt.call::( + Method::UniversalReceiverHook as MethodNum, + &RawBytes::new(vec![1, 2, 3]), + ); + assert!(ret.is_ok()); + assert_eq!(RawBytes::default(), ret.unwrap()); +} diff --git a/actors/verifreg/Cargo.toml b/actors/verifreg/Cargo.toml index 7f112b23e..11cf84598 100644 --- a/actors/verifreg/Cargo.toml +++ b/actors/verifreg/Cargo.toml @@ -14,17 +14,21 @@ keywords = ["filecoin", "web3", "wasm"] crate-type = ["cdylib", "lib"] [dependencies] -fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime" } -fvm_shared = { version = "2.0.0-alpha.2", default-features = false } -serde = { version = "1.0.136", features = ["derive"] } -num-traits = "0.2.14" -num-derive = "0.3.3" -cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } -lazy_static = "1.4.0" +fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime"} + anyhow = "1.0.65" -fvm_ipld_hamt = "0.5.1" +cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } +frc42_dispatch = "1.0.0" +frc46_token = "1.0.0" fvm_ipld_blockstore = "0.1.1" fvm_ipld_encoding = "0.2.2" +fvm_ipld_hamt = "0.5.1" +fvm_shared = { version = "2.0.0-alpha.2", default-features = false } +lazy_static = "1.4.0" +log = "0.4.14" +num-derive = "0.3.3" +num-traits = "0.2.14" +serde = { version = "1.0.136", features = ["derive"] } [dev-dependencies] fil_actors_runtime = { version = "9.0.0-alpha.1", path = "../../runtime", features = ["test_utils", "sector-default"] } diff --git a/actors/verifreg/src/expiration.rs b/actors/verifreg/src/expiration.rs new file mode 100644 index 000000000..53b13a05e --- /dev/null +++ b/actors/verifreg/src/expiration.rs @@ -0,0 +1,88 @@ +use crate::{Allocation, Claim}; +use fil_actors_runtime::{ + parse_uint_key, ActorError, AsActorError, BatchReturn, BatchReturnGen, MapMap, +}; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::clock::ChainEpoch; +use 
fvm_shared::error::ExitCode; +use fvm_shared::ActorID; +use log::info; +use serde::de::DeserializeOwned; +use serde::Serialize; + +// Something with an expiration epoch. +pub trait Expires { + fn expiration(&self) -> ChainEpoch; +} + +impl Expires for Allocation { + fn expiration(&self) -> ChainEpoch { + self.expiration + } +} + +impl Expires for Claim { + fn expiration(&self) -> ChainEpoch { + self.term_start + self.term_max + } +} + +// Finds all items in a collection for some owner that have expired. +// Returns those items' keys. +pub fn find_expired( + collection: &mut MapMap, + owner: ActorID, + curr_epoch: ChainEpoch, +) -> Result, ActorError> +where + T: Expires + Serialize + DeserializeOwned + Clone + PartialEq, + BS: Blockstore, +{ + let mut found_ids = Vec::::new(); + collection + .for_each(owner, |key, record| { + if curr_epoch >= record.expiration() { + let id = parse_uint_key(key) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to parse uint key")?; + found_ids.push(id); + } + Ok(()) + }) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to iterate over allocations/claims")?; + Ok(found_ids) +} + +// Checks each candidate item from the collection for expiration. +// Returns a batch return with OK for expired items, and FORBIDDEN for non-expired. +pub fn check_expired( + collection: &mut MapMap, + candidates: &Vec, + owner: ActorID, + curr_epoch: ChainEpoch, +) -> Result +where + T: Expires + Serialize + DeserializeOwned + Clone + PartialEq, + BS: Blockstore, +{ + let mut ret_gen = BatchReturnGen::new(candidates.len()); + for id in candidates { + // Check each specified claim is expired. 
+ let maybe_record = collection.get(owner, *id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "HAMT lookup failure getting allocation/claim", + )?; + + if let Some(record) = maybe_record { + if curr_epoch >= record.expiration() { + ret_gen.add_success(); + } else { + ret_gen.add_fail(ExitCode::USR_FORBIDDEN); + info!("cannot remove allocation/claim {} that has not expired", id); + } + } else { + ret_gen.add_fail(ExitCode::USR_NOT_FOUND); + info!("allocation/claim references id {} that does not belong to {}", id, owner,); + } + } + Ok(ret_gen.gen()) +} diff --git a/actors/verifreg/src/ext.rs b/actors/verifreg/src/ext.rs new file mode 100644 index 000000000..d70d74c46 --- /dev/null +++ b/actors/verifreg/src/ext.rs @@ -0,0 +1,40 @@ +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple}; +use fvm_shared::address::Address; + +pub mod datacap { + use super::*; + use fvm_shared::econ::TokenAmount; + + #[repr(u64)] + pub enum Method { + // Non-standard. + Mint = 2, + Destroy = 3, + // Static method numbers for token standard methods, for private use. + // Name = 10, + // Symbol = 11, + // TotalSupply = 12, + BalanceOf = 13, + Transfer = 14, + // TransferFrom = 15, + // IncreaseAllowance = 16, + // DecreaseAllowance = 17, + // RevokeAllowance = 18, + Burn = 19, + // BurnFrom = 20, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct MintParams { + pub to: Address, + pub amount: TokenAmount, + pub operators: Vec
, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct DestroyParams { + pub owner: Address, + pub amount: TokenAmount, + } +} diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index e8f4d73e1..e3730b317 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -1,32 +1,48 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_actor_id, ActorDowncast, - ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, -}; +use frc46_token::receiver::types::{FRC46TokenReceived, UniversalReceiverParams, FRC46_TOKEN_TYPE}; +use frc46_token::token::types::{BurnParams, BurnReturn, TransferParams}; +use frc46_token::token::TOKEN_PRECISION; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_ipld_hamt::BytesKey; use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser::BigIntDe; +use fvm_shared::bigint::BigInt; +use fvm_shared::clock::ChainEpoch; +use fvm_shared::econ::TokenAmount; use fvm_shared::error::ExitCode; -use fvm_shared::{MethodNum, HAMT_BIT_WIDTH, METHOD_CONSTRUCTOR}; +use fvm_shared::{ActorID, MethodNum, HAMT_BIT_WIDTH, METHOD_CONSTRUCTOR}; +use log::info; use num_derive::FromPrimitive; use num_traits::{FromPrimitive, Signed, Zero}; +use fil_actors_runtime::cbor::{deserialize, serialize}; +use fil_actors_runtime::runtime::builtins::Type; +use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; +use fil_actors_runtime::{ + actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_actor_id, ActorDowncast, + ActorError, BatchReturn, Map, DATACAP_TOKEN_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, + SYSTEM_ACTOR_ADDR, +}; +use fil_actors_runtime::{ActorContext, AsActorError, BatchReturnGen}; + +use crate::ext::datacap::{DestroyParams, MintParams}; + +pub use 
self::state::Allocation; +pub use self::state::Claim; pub use self::state::State; pub use self::types::*; #[cfg(feature = "fil-actor")] fil_actors_runtime::wasm_trampoline!(Actor); -mod state; +pub mod expiration; +pub mod ext; +pub mod state; pub mod testing; -mod types; - -// * Updated to specs-actors commit: 845089a6d2580e46055c24415a6c32ee688e5186 (v3.0.0) +pub mod types; /// Account actor methods available #[derive(FromPrimitive)] @@ -36,9 +52,15 @@ pub enum Method { AddVerifier = 2, RemoveVerifier = 3, AddVerifiedClient = 4, - UseBytes = 5, - RestoreBytes = 6, + // UseBytes = 5, // Deprecated + // RestoreBytes = 6, // Deprecated RemoveVerifiedClientDataCap = 7, + RemoveExpiredAllocations = 8, + ClaimAllocations = 9, + GetClaims = 10, + ExtendClaimTerms = 11, + RemoveExpiredClaims = 12, + UniversalReceiverHook = frc42_dispatch::method_hash!("Receive"), } pub struct Actor; @@ -55,11 +77,10 @@ impl Actor { // root should be an ID address let id_addr = rt .resolve_address(&root_key) - .ok_or_else(|| actor_error!(illegal_argument, "root should be an ID address"))?; + .context_code(ExitCode::USR_ILLEGAL_ARGUMENT, "root should be an ID address")?; - let st = State::new(rt.store(), Address::new_id(id_addr)).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create verifreg state") - })?; + let st = State::new(rt.store(), Address::new_id(id_addr)) + .context("failed to create verifreg state")?; rt.create(&st)?; Ok(()) @@ -70,7 +91,7 @@ impl Actor { BS: Blockstore, RT: Runtime, { - if params.allowance < rt.policy().minimum_verified_deal_size { + if params.allowance < rt.policy().minimum_verified_allocation_size { return Err(actor_error!( illegal_argument, "Allowance {} below minimum deal size for add verifier {}", @@ -86,53 +107,26 @@ impl Actor { let st: State = rt.state()?; rt.validate_immediate_caller_is(std::iter::once(&st.root_key))?; + // Disallow root as a verifier. 
if verifier == st.root_key { return Err(actor_error!(illegal_argument, "Rootkey cannot be added as verifier")); } - rt.transaction(|st: &mut State, rt| { - let mut verifiers = - make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; - let verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &st.verified_clients, - rt.store(), - HAMT_BIT_WIDTH, - ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; - - let found = verified_clients.contains_key(&verifier.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get client state for {}", verifier), - ) - })?; - if found { - return Err(actor_error!( - illegal_argument, - "verified client {} cannot become a verifier", - verifier - )); - } - - verifiers.set(verifier.to_bytes().into(), BigIntDe(params.allowance.clone())).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add verifier"), - )?; - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; - - Ok(()) - })?; + // Disallow existing clients as verifiers. + let token_balance = balance_of(rt, &verifier)?; + if token_balance.is_positive() { + return Err(actor_error!( + illegal_argument, + "verified client {} cannot become a verifier", + verifier + )); + } - Ok(()) + // Store the new verifier and allowance (over-writing). 
+ rt.transaction(|st: &mut State, rt| { + st.put_verifier(rt.store(), &verifier, ¶ms.allowance) + .context("failed to add verifier") + }) } pub fn remove_verifier(rt: &mut RT, verifier_addr: Address) -> Result<(), ActorError> @@ -147,30 +141,8 @@ impl Actor { rt.validate_immediate_caller_is(std::iter::once(&state.root_key))?; rt.transaction(|st: &mut State, rt| { - let mut verifiers = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &st.verifiers, - rt.store(), - HAMT_BIT_WIDTH, - ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; - verifiers - .delete(&verifier.to_bytes()) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to remove verifier") - })? - .ok_or_else(|| { - actor_error!(illegal_argument, "failed to remove verifier: not found") - })?; - - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; - Ok(()) - })?; - - Ok(()) + st.remove_verifier(rt.store(), &verifier).context("failed to remove verifier") + }) } pub fn add_verified_client( @@ -184,10 +156,10 @@ impl Actor { // The caller will be verified by checking table below rt.validate_immediate_caller_accept_any()?; - if params.allowance < rt.policy().minimum_verified_deal_size { + if params.allowance < rt.policy().minimum_verified_allocation_size { return Err(actor_error!( illegal_argument, - "Allowance {} below MinVerifiedDealSize for add verified client {}", + "allowance {} below MinVerifiedDealSize for add verified client {}", params.allowance, params.address )); @@ -198,287 +170,47 @@ impl Actor { let st: State = rt.state()?; if client == st.root_key { - return Err(actor_error!(illegal_argument, "Rootkey cannot be added as verifier")); + return Err(actor_error!(illegal_argument, "root cannot be added as client")); } - rt.transaction(|st: &mut State, rt| { - let mut verifiers = - make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), 
HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; - let mut verified_clients = - make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; - - // Validate caller is one of the verifiers. - let verifier = rt.message().caller(); - let BigIntDe(verifier_cap) = verifiers - .get(&verifier.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get Verifier {}", verifier), - ) - })? - .ok_or_else(|| actor_error!(not_found, format!("no such Verifier {}", verifier)))?; - - // Validate client to be added isn't a verifier - let found = verifiers.contains_key(&client.to_bytes()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier") - })?; - if found { - return Err(actor_error!( - illegal_argument, - "verifier {} cannot be added as a verified client", - client - )); - } - - // Compute new verifier cap and update. 
- if verifier_cap < ¶ms.allowance { - return Err(actor_error!( - illegal_argument, - "Add more DataCap {} for VerifiedClient than allocated {}", - params.allowance, - verifier_cap - )); - } - let new_verifier_cap = verifier_cap - ¶ms.allowance; - - verifiers.set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to update new verifier cap for {}", verifier), - ) - })?; - - let client_cap = verified_clients.get(&client.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to get verified client {}", client), - ) - })?; - // if verified client exists, add allowance to existing cap - // otherwise, create new client with allownace - let client_cap = if let Some(BigIntDe(client_cap)) = client_cap { - client_cap + params.allowance - } else { - params.allowance - }; - - verified_clients.set(client.to_bytes().into(), BigIntDe(client_cap.clone())).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "Failed to add verified client {} with cap {}", - client, client_cap, - ), - ) - }, - )?; - - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; - - Ok(()) - })?; - - Ok(()) - } - - /// Called by StorageMarketActor during PublishStorageDeals. - /// Do not allow partially verified deals (DealSize must be greater than equal to allowed cap). - /// Delete VerifiedClient if remaining DataCap is smaller than minimum VerifiedDealSize. 
- pub fn use_bytes(rt: &mut RT, params: UseBytesParams) -> Result<(), ActorError> - where - BS: Blockstore, - RT: Runtime, - { - rt.validate_immediate_caller_is(std::iter::once(&STORAGE_MARKET_ACTOR_ADDR))?; - - let client = resolve_to_actor_id(rt, ¶ms.address)?; - let client = Address::new_id(client); + // Validate caller is one of the verifiers, i.e. has an allowance (even if zero). + let verifier = rt.message().caller(); + let verifier_cap = st + .get_verifier_cap(rt.store(), &verifier)? + .ok_or_else(|| actor_error!(not_found, "caller {} is not a verifier", verifier))?; - if params.deal_size < rt.policy().minimum_verified_deal_size { + // Disallow existing verifiers as clients. + if st.get_verifier_cap(rt.store(), &client)?.is_some() { return Err(actor_error!( illegal_argument, - "Verified Dealsize {} is below minimum in usedbytes", - params.deal_size + "verifier {} cannot be added as a verified client", + client )); } - rt.transaction(|st: &mut State, rt| { - let mut verified_clients = - make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; - - let BigIntDe(vc_cap) = verified_clients - .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) - })? 
- .ok_or_else(|| actor_error!(not_found, "no such verified client {}", client))?; - if vc_cap.is_negative() { - return Err(actor_error!( - illegal_state, - "negative cap for client {}: {}", - client, - vc_cap - )); - } - - if ¶ms.deal_size > vc_cap { - return Err(actor_error!( - illegal_argument, - "Deal size of {} is greater than verifier_cap {} for verified client {}", - params.deal_size, - vc_cap, - client - )); - }; - - let new_vc_cap = vc_cap - ¶ms.deal_size; - if new_vc_cap < rt.policy().minimum_verified_deal_size { - // Delete entry if remaining DataCap is less than MinVerifiedDealSize. - // Will be restored later if the deal did not get activated with a ProvenSector. - verified_clients - .delete(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to delete verified client {}", client), - ) - })? - .ok_or_else(|| { - actor_error!( - illegal_state, - "Failed to delete verified client {}: not found", - client - ) - })?; - } else { - verified_clients.set(client.to_bytes().into(), BigIntDe(new_vc_cap)).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to update verified client {}", client), - ) - }, - )?; - } - - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; - Ok(()) - })?; - - Ok(()) - } - - /// Called by HandleInitTimeoutDeals from StorageMarketActor when a VerifiedDeal fails to init. - /// Restore allowable cap for the client, creating new entry if the client has been deleted. - pub fn restore_bytes(rt: &mut RT, params: RestoreBytesParams) -> Result<(), ActorError> - where - BS: Blockstore, - RT: Runtime, - { - rt.validate_immediate_caller_is(std::iter::once(&STORAGE_MARKET_ACTOR_ADDR))?; - if params.deal_size < rt.policy().minimum_verified_deal_size { + // Compute new verifier allowance. 
+ if verifier_cap < params.allowance { return Err(actor_error!( illegal_argument, - "Below minimum VerifiedDealSize requested in RestoreBytes: {}", - params.deal_size + "add more DataCap {} for client than allocated {}", + params.allowance, + verifier_cap )); } - let client = resolve_to_actor_id(rt, ¶ms.address)?; - let client = Address::new_id(client); - - let st: State = rt.state()?; - if client == st.root_key { - return Err(actor_error!(illegal_argument, "Cannot restore allowance for Rootkey")); - } - + // Reduce verifier's cap. + let new_verifier_cap = verifier_cap - ¶ms.allowance; rt.transaction(|st: &mut State, rt| { - let mut verified_clients = - make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; - let verifiers = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &st.verifiers, - rt.store(), - HAMT_BIT_WIDTH, - ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers") - })?; - - // validate we are NOT attempting to do this for a verifier - let found = verifiers.contains_key(&client.to_bytes()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier") - })?; - if found { - return Err(actor_error!( - illegal_argument, - "cannot restore allowance for a verifier {}", - client - )); - } - - // Get existing cap - let BigIntDe(vc_cap) = verified_clients - .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) - })? 
- .cloned() - .unwrap_or_default(); - - // Update to new cap - let new_vc_cap = vc_cap + ¶ms.deal_size; - verified_clients.set(client.to_bytes().into(), BigIntDe(new_vc_cap)).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to put verified client {}", client), - ) - })?; - - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; - Ok(()) + st.put_verifier(rt.store(), &verifier, &new_verifier_cap) + .context("failed to update verifier allowance") })?; + // Credit client token allowance. + let operators = vec![STORAGE_MARKET_ACTOR_ADDR]; + mint(rt, &client, ¶ms.allowance, operators).context(format!( + "failed to mint {} data cap to client {}", + ¶ms.allowance, client + ))?; Ok(()) } @@ -507,54 +239,16 @@ impl Actor { )); } - let mut removed_data_cap_amount = DataCap::default(); + // Validate and then remove the proposal. rt.transaction(|st: &mut State, rt| { rt.validate_immediate_caller_is(std::iter::once(&st.root_key))?; - // get current verified clients - let mut verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &st.verified_clients, - rt.store(), - HAMT_BIT_WIDTH, - ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; - - // check that `client` is currently a verified client - let is_verified_client = verified_clients - .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })? - .is_some(); - if !is_verified_client { - return Err(actor_error!(not_found, "{} is not a verified client", client)); - } - - // get existing cap allocated to client - let BigIntDe(previous_data_cap) = verified_clients - .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) - })? 
- .cloned() - .unwrap_or_default(); - - // check that `verifier_1` is currently a verifier if !is_verifier(rt, st, verifier_1)? { - return Err(actor_error!(not_found, "{} is not a verified client", verifier_1)); + return Err(actor_error!(not_found, "{} is not a verifier", verifier_1)); } - // check that `verifier_2` is currently a verifier if !is_verifier(rt, st, verifier_2)? { - return Err(actor_error!(not_found, "{} is not a verified client", verifier_2)); + return Err(actor_error!(not_found, "{} is not a verifier", verifier_2)); } // validate signatures @@ -588,73 +282,577 @@ impl Actor { client, )?; - let new_data_cap = &previous_data_cap - ¶ms.data_cap_amount_to_remove; - if new_data_cap <= Zero::zero() { - // no DataCap remaining, delete verified client - verified_clients.delete(&client.to_bytes()).map_err(|e| { - e.downcast_default( + st.remove_data_cap_proposal_ids = proposal_ids + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush proposal ids")?; + Ok(()) + })?; + + // Burn the client's data cap tokens. + let balance = balance_of(rt, &client).context("failed to fetch balance")?; + let burnt = std::cmp::min(balance, params.data_cap_amount_to_remove); + destroy(rt, &client, &burnt) + .context(format!("failed to destroy {} from allowance for {}", &burnt, &client))?; + + Ok(RemoveDataCapReturn { + verified_client: client, // Changed to the resolved address + data_cap_removed: burnt, + }) + } + + // An allocation may be removed after its expiration epoch has passed (by anyone). + // When removed, the DataCap tokens are transferred back to the client. + // If no allocations are specified, all eligible allocations are removed. + pub fn remove_expired_allocations( + rt: &mut RT, + params: RemoveExpiredAllocationsParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + // Since the allocations are expired, this is safe to be called by anyone. 
+ rt.validate_immediate_caller_accept_any()?; + let curr_epoch = rt.curr_epoch(); + let mut batch_ret = BatchReturn::empty(); + let mut considered = Vec::::new(); + let mut recovered_datacap = DataCap::zero(); + let recovered_datacap = rt + .transaction(|st: &mut State, rt| { + let mut allocs = st.load_allocs(rt.store())?; + + let to_remove: Vec; + if params.allocation_ids.is_empty() { + // Find all expired allocations for the client. + considered = expiration::find_expired(&mut allocs, params.client, curr_epoch)?; + batch_ret = BatchReturn::ok(considered.len() as u32); + to_remove = considered.clone(); + } else { + considered = params.allocation_ids.clone(); + batch_ret = expiration::check_expired( + &mut allocs, + ¶ms.allocation_ids, + params.client, + curr_epoch, + )?; + to_remove = batch_ret.successes(¶ms.allocation_ids); + } + + for id in to_remove { + let existing = allocs.remove(params.client, id).context_code( ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete verified client {}", &client), - ) - })?; - removed_data_cap_amount = previous_data_cap; - } else { - // update DataCap amount after removal - verified_clients - .set(BytesKey::from(client.to_bytes()), BigIntDe(new_data_cap)) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update datacap for verified client {}", &client), - ) - })?; - removed_data_cap_amount = params.data_cap_amount_to_remove.clone(); + format!("failed to remove allocation {}", id), + )?; + // Unwrapping here as both paths to here should ensure the allocation exists. + recovered_datacap += existing.unwrap().size.0; + } + + st.save_allocs(&mut allocs)?; + Ok(recovered_datacap) + }) + .context("state transaction failed")?; + + // Transfer the recovered datacap back to the client. 
+ transfer(rt, params.client, &recovered_datacap).with_context(|| { + format!( + "failed to transfer recovered datacap {} back to client {}", + &recovered_datacap, params.client + ) + })?; + + Ok(RemoveExpiredAllocationsReturn { + considered, + results: batch_ret, + datacap_recovered: recovered_datacap, + }) + } + + // Called by storage provider actor to claim allocations for data provably committed to storage. + // For each allocation claim, the registry checks that the provided piece CID + // and size match that of the allocation. + pub fn claim_allocations( + rt: &mut RT, + params: ClaimAllocationsParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_type(std::iter::once(&Type::Miner))?; + let provider = rt.message().caller().id().unwrap(); + if params.sectors.is_empty() { + return Err(actor_error!(illegal_argument, "claim allocations called with no claims")); + } + let mut datacap_claimed = DataCap::zero(); + let mut ret_gen = BatchReturnGen::new(params.sectors.len()); + let all_or_nothing = params.all_or_nothing; + rt.transaction(|st: &mut State, rt| { + let mut claims = st.load_claims(rt.store())?; + let mut allocs = st.load_allocs(rt.store())?; + + for claim_alloc in params.sectors { + let maybe_alloc = state::get_allocation( + &mut allocs, + claim_alloc.client, + claim_alloc.allocation_id, + )?; + let alloc: &Allocation = match maybe_alloc { + None => { + ret_gen.add_fail(ExitCode::USR_NOT_FOUND); + info!( + "no allocation {} for client {}", + claim_alloc.allocation_id, claim_alloc.client, + ); + continue; + } + Some(a) => a, + }; + + if !can_claim_alloc(&claim_alloc, provider, alloc, rt.curr_epoch()) { + ret_gen.add_fail(ExitCode::USR_FORBIDDEN); + info!( + "invalid sector {:?} for allocation {}", + claim_alloc.sector, claim_alloc.allocation_id, + ); + continue; + } + + let new_claim = Claim { + provider, + client: alloc.client, + data: alloc.data, + size: alloc.size, + term_min: alloc.term_min, + term_max: 
alloc.term_max, + term_start: rt.curr_epoch(), + sector: claim_alloc.sector, + }; + + let inserted = claims + .put_if_absent(provider, claim_alloc.allocation_id, new_claim) + .context_code( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to write claim {}", claim_alloc.allocation_id), + )?; + if !inserted { + ret_gen.add_fail(ExitCode::USR_ILLEGAL_STATE); // should be unreachable since claim and alloc can't exist at once + info!( + "claim for allocation {} could not be inserted as it already exists", + claim_alloc.allocation_id, + ); + continue; + } + + allocs.remove(claim_alloc.client, claim_alloc.allocation_id).context_code( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to remove allocation {}", claim_alloc.allocation_id), + )?; + + datacap_claimed += DataCap::from(claim_alloc.size.0); + ret_gen.add_success(); } + st.save_allocs(&mut allocs)?; + st.save_claims(&mut claims)?; + Ok(()) + }) + .context("state transaction failed")?; + let batch_info = ret_gen.gen(); + if all_or_nothing && !batch_info.all_ok() { + return Err(actor_error!( + illegal_argument, + "all or nothing call contained failures: {}", + batch_info.to_string() + )); + } - st.remove_data_cap_proposal_ids = proposal_ids.flush().map_err(|e| { - actor_error! { - illegal_state, - "failed to flush proposal ids: {}", - e + // Burn the datacap tokens from verified registry's own balance. 
+ burn(rt, &datacap_claimed)?; + + Ok(ClaimAllocationsReturn { batch_info, claimed_space: datacap_claimed }) + } + + // get claims for a provider + pub fn get_claims( + rt: &mut RT, + params: GetClaimsParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + rt.validate_immediate_caller_accept_any()?; + let mut batch_gen = BatchReturnGen::new(params.claim_ids.len()); + let claims = rt + .transaction(|st: &mut State, rt| { + let mut st_claims = st.load_claims(rt.store())?; + let mut ret_claims = Vec::new(); + for id in params.claim_ids { + let maybe_claim = state::get_claim(&mut st_claims, params.provider, id)?; + match maybe_claim { + None => { + batch_gen.add_fail(ExitCode::USR_NOT_FOUND); + info!("no claim {} for provider {}", id, params.provider,); + } + Some(claim) => { + batch_gen.add_success(); + ret_claims.push(claim.clone()); + } + }; } - })?; - st.verified_clients = verified_clients.flush().map_err(|e| { - actor_error! { - illegal_state, - "failed to flush verified clients: {}", - e + Ok(ret_claims) + }) + .context("state transaction failed")?; + Ok(GetClaimsReturn { batch_info: batch_gen.gen(), claims }) + } + + /// Extends the maximum term of some claims up to the largest value they could have been + /// originally allocated. + /// Callable only by the claims' client. + /// Cannot reduce a claim's term. + /// Can extend the term even if the claim has already expired. + /// Note that this method can't extend the term past the original limit, + /// even if the term has previously been extended past that by spending new datacap. + pub fn extend_claim_terms( + rt: &mut RT, + params: ExtendClaimTermsParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + // Permissions are checked per-claim. 
+ rt.validate_immediate_caller_accept_any()?; + let caller_id = rt.message().caller().id().unwrap(); + let term_limit = rt.policy().maximum_verified_allocation_term; + let mut batch_gen = BatchReturnGen::new(params.terms.len()); + rt.transaction(|st: &mut State, rt| { + let mut st_claims = st.load_claims(rt.store())?; + for term in params.terms { + // Confirm the new term limit is allowed. + if term.term_max > term_limit { + batch_gen.add_fail(ExitCode::USR_ILLEGAL_ARGUMENT); + info!( + "term_max {} for claim {} exceeds maximum {}", + term.term_max, term.claim_id, term_limit, + ); + continue; } - })?; + + let maybe_claim = state::get_claim(&mut st_claims, term.provider, term.claim_id)?; + if let Some(claim) = maybe_claim { + // Confirm the caller is the claim's client. + if claim.client != caller_id { + batch_gen.add_fail(ExitCode::USR_FORBIDDEN); + info!( + "client {} for claim {} does not match caller {}", + claim.client, term.claim_id, caller_id, + ); + continue; + } + // Confirm the new term limit is no less than the old one. + if term.term_max < claim.term_max { + batch_gen.add_fail(ExitCode::USR_ILLEGAL_ARGUMENT); + info!( + "term_max {} for claim {} is less than current {}", + term.term_max, term.claim_id, claim.term_max, + ); + continue; + } + + let new_claim = Claim { term_max: term.term_max, ..*claim }; + st_claims.put(term.provider, term.claim_id, new_claim).context_code( + ExitCode::USR_ILLEGAL_STATE, + "HAMT put failure storing new claims", + )?; + batch_gen.add_success(); + } else { + batch_gen.add_fail(ExitCode::USR_NOT_FOUND); + info!("no claim {} for provider {}", term.claim_id, term.provider); + } + } + st.save_claims(&mut st_claims)?; Ok(()) - })?; + }) + .context("state transaction failed")?; + Ok(batch_gen.gen()) + } - Ok(RemoveDataCapReturn { - verified_client: params.verified_client_to_remove, - data_cap_removed: removed_data_cap_amount, + // A claim may be removed after its maximum term has elapsed (by anyone). 
+ // If no claims are specified, all eligible claims are removed. + pub fn remove_expired_claims( + rt: &mut RT, + params: RemoveExpiredClaimsParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + // Since the claims are expired, this is safe to be called by anyone. + rt.validate_immediate_caller_accept_any()?; + let curr_epoch = rt.curr_epoch(); + let mut batch_ret = BatchReturn::empty(); + let mut considered = Vec::::new(); + rt.transaction(|st: &mut State, rt| { + let mut claims = st.load_claims(rt.store())?; + let to_remove: Vec; + if params.claim_ids.is_empty() { + // Find all expired claims for the provider. + considered = expiration::find_expired(&mut claims, params.provider, curr_epoch)?; + batch_ret = BatchReturn::ok(considered.len() as u32); + to_remove = considered.clone(); + } else { + considered = params.claim_ids.clone(); + batch_ret = expiration::check_expired( + &mut claims, + ¶ms.claim_ids, + params.provider, + curr_epoch, + )?; + to_remove = batch_ret.successes(¶ms.claim_ids); + } + + for id in to_remove { + claims.remove(params.provider, id).context_code( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to remove claim {}", id), + )?; + } + + st.save_claims(&mut claims)?; + Ok(()) }) + .context("state transaction failed")?; + + Ok(RemoveExpiredClaimsReturn { considered, results: batch_ret }) + } + + // Receives data cap tokens (only) and creates allocations according to one or more + // allocation requests specified in the transfer's operator data. + // The token amount received must exactly correspond to the sum of the requested allocation sizes. + // This method does not support partial success (yet): all allocations must succeed, + // or the transfer will be rejected. + // Returns the ids of the created allocations. + pub fn universal_receiver_hook( + rt: &mut RT, + params: UniversalReceiverParams, + ) -> Result + where + BS: Blockstore, + RT: Runtime, + { + // Accept only the data cap token. 
+ rt.validate_immediate_caller_is(&[DATACAP_TOKEN_ACTOR_ADDR])?; + + let my_id = rt.message().receiver().id().unwrap(); + let curr_epoch = rt.curr_epoch(); + + // Validate receiver hook payload. + let tokens_received = validate_tokens_received(¶ms, my_id)?; + let token_datacap = tokens_to_datacap(&tokens_received.amount); + let client = tokens_received.from; + + // Extract and validate allocation request from the operator data. + let reqs: AllocationRequests = + deserialize(&tokens_received.operator_data, "allocation requests")?; + let mut allocation_total = DataCap::zero(); + + // Construct new allocation records. + let mut new_allocs = Vec::with_capacity(reqs.allocations.len()); + for req in &reqs.allocations { + validate_new_allocation(req, rt.policy(), curr_epoch)?; + // Require the provider for new allocations to be a miner actor. + // This doesn't matter much, but is more ergonomic to fail rather than lock up datacap. + let provider_id = resolve_miner_id(rt, &req.provider)?; + new_allocs.push(Allocation { + client, + provider: provider_id, + data: req.data, + size: req.size, + term_min: req.term_min, + term_max: req.term_max, + expiration: req.expiration, + }); + allocation_total += DataCap::from(req.size.0); + } + + let st: State = rt.state()?; + let mut claims = st.load_claims(rt.store())?; + let mut updated_claims = Vec::<(ClaimID, Claim)>::new(); + for req in &reqs.extensions { + // Note: we don't check the client address here, by design. + // Any client can spend datacap to extend an existing claim. + let provider_id = rt + .resolve_address(&req.provider) + .with_context_code(ExitCode::USR_ILLEGAL_ARGUMENT, || { + format!("failed to resolve provider address {}", req.provider) + })?; + let claim = state::get_claim(&mut claims, provider_id, req.claim)? 
+ .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no claim {} for provider {}", req.claim, provider_id) + })?; + let policy = rt.policy(); + + validate_claim_extension(req, claim, policy, curr_epoch)?; + // The claim's client is not changed to be the address of the token sender. + // It remains the original allocation client. + updated_claims.push((req.claim, Claim { term_max: req.term_max, ..*claim })); + allocation_total += DataCap::from(claim.size.0); + } + + // Allocation size must match the tokens received exactly (we don't return change). + if allocation_total != token_datacap { + return Err(actor_error!( + illegal_argument, + "total allocation size {} must match data cap amount received {}", + allocation_total, + token_datacap + )); + } + // Partial success isn't supported yet, but these results make space for it in the future. + let allocation_results = BatchReturn::ok(new_allocs.len() as u32); + let extension_results = BatchReturn::ok(updated_claims.len() as u32); + + // Save new allocations and updated claims. + let ids = rt.transaction(|st: &mut State, rt| { + let ids = st.insert_allocations(rt.store(), client, new_allocs.into_iter())?; + st.put_claims(rt.store(), updated_claims.into_iter())?; + Ok(ids) + })?; + + Ok(AllocationsResponse { allocation_results, extension_results, new_allocations: ids }) } } +// Checks whether an address has a verifier entry (which could be zero). 
fn is_verifier(rt: &RT, st: &State, address: Address) -> Result where BS: Blockstore, RT: Runtime, { - let verifiers = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &st.verifiers, - rt.store(), - HAMT_BIT_WIDTH, - ) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers"))?; + let verifiers = + make_map_with_root_and_bitwidth::<_, BigIntDe>(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers")?; // check that the `address` is currently a verified client let found = verifiers .contains_key(&address.to_bytes()) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; Ok(found) } +// Invokes BalanceOf on the data cap token actor, and converts the result to whole units of data cap. +fn balance_of(rt: &mut RT, owner: &Address) -> Result +where + BS: Blockstore, + RT: Runtime, +{ + let params = serialize(owner, "owner address")?; + let ret = rt + .send( + &DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::BalanceOf as u64, + params, + TokenAmount::zero(), + ) + .context(format!("failed to query datacap balance of {}", owner))?; + let x: TokenAmount = deserialize(&ret, "balance result")?; + Ok(tokens_to_datacap(&x)) +} + +// Invokes Mint on a data cap token actor for whole units of data cap. +fn mint( + rt: &mut RT, + to: &Address, + amount: &DataCap, + operators: Vec
, +) -> Result<(), ActorError> +where + BS: Blockstore, + RT: Runtime, +{ + let token_amt = datacap_to_tokens(amount); + let params = MintParams { to: *to, amount: token_amt, operators }; + rt.send( + &DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::Mint as u64, + serialize(¶ms, "mint params")?, + TokenAmount::zero(), + ) + .context(format!("failed to send mint {:?} to datacap", params))?; + Ok(()) +} + +// Invokes Burn on a data cap token actor for whole units of data cap. +fn burn(rt: &mut RT, amount: &DataCap) -> Result +where + BS: Blockstore, + RT: Runtime, +{ + let token_amt = datacap_to_tokens(amount); + let params = BurnParams { amount: token_amt }; + let ret: BurnReturn = rt + .send( + &DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::Burn as u64, + serialize(¶ms, "burn params")?, + TokenAmount::zero(), + ) + .context(format!("failed to send burn {:?} to datacap", params))? + .deserialize()?; + Ok(tokens_to_datacap(&ret.balance)) +} + +// Invokes Destroy on a data cap token actor for whole units of data cap. +fn destroy(rt: &mut RT, owner: &Address, amount: &DataCap) -> Result +where + BS: Blockstore, + RT: Runtime, +{ + let token_amt = datacap_to_tokens(amount); + let params = DestroyParams { owner: *owner, amount: token_amt }; + let ret: BurnReturn = rt + .send( + &DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::Destroy as u64, + serialize(¶ms, "destroy params")?, + TokenAmount::zero(), + ) + .context(format!("failed to send destroy {:?} to datacap", params))? + .deserialize()?; + Ok(tokens_to_datacap(&ret.balance)) +} + +// Invokes transfer on a data cap token actor for whole units of data cap. 
+fn transfer(rt: &mut RT, to: ActorID, amount: &DataCap) -> Result<(), ActorError> +where + BS: Blockstore, + RT: Runtime, +{ + let token_amt = datacap_to_tokens(amount); + let params = TransferParams { + to: Address::new_id(to), + amount: token_amt, + operator_data: Default::default(), + }; + rt.send( + &DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::Transfer as u64, + serialize(¶ms, "transfer params")?, + TokenAmount::zero(), + ) + .context(format!("failed to send transfer to datacap {:?}", params))?; + Ok(()) +} + +fn datacap_to_tokens(amount: &DataCap) -> TokenAmount { + TokenAmount::from_atto(amount.clone()) * TOKEN_PRECISION +} + +fn tokens_to_datacap(amount: &TokenAmount) -> BigInt { + amount.atto() / TOKEN_PRECISION +} + fn use_proposal_id( proposal_ids: &mut Map, verifier: Address, @@ -725,6 +923,192 @@ where ) } +// Deserializes and validates a receiver hook payload, expecting only an FRC-46 transfer. +fn validate_tokens_received( + params: &UniversalReceiverParams, + my_id: u64, +) -> Result { + if params.type_ != FRC46_TOKEN_TYPE { + return Err(actor_error!( + illegal_argument, + "invalid token type {}, expected {} (FRC-46)", + params.type_, + FRC46_TOKEN_TYPE + )); + } + let payload: FRC46TokenReceived = deserialize(¶ms.payload, "receiver hook payload")?; + // Payload to address must match receiving actor. + if payload.to != my_id { + return Err(actor_error!( + illegal_argument, + "token receiver expected to {}, was {}", + my_id, + payload.to + )); + } + Ok(payload) +} + +// Validates an allocation request. +fn validate_new_allocation( + req: &AllocationRequest, + policy: &Policy, + curr_epoch: ChainEpoch, +) -> Result<(), ActorError> { + // Size must be at least the policy minimum. 
+ if DataCap::from(req.size.0) < policy.minimum_verified_allocation_size { + return Err(actor_error!( + illegal_argument, + "allocation size {} below minimum {}", + req.size.0, + policy.minimum_verified_allocation_size + )); + } + // Term must be at least the policy minimum. + if req.term_min < policy.minimum_verified_allocation_term { + return Err(actor_error!( + illegal_argument, + "allocation term min {} below limit {}", + req.term_min, + policy.minimum_verified_allocation_term + )); + } + // Term cannot exceed the policy maximum. + if req.term_max > policy.maximum_verified_allocation_term { + return Err(actor_error!( + illegal_argument, + "allocation term max {} above limit {}", + req.term_max, + policy.maximum_verified_allocation_term + )); + } + // Term range must be non-empty. + if req.term_min > req.term_max { + return Err(actor_error!( + illegal_argument, + "allocation term min {} exceeds term max {}", + req.term_min, + req.term_max + )); + } + + // Allocation must expire in the future. + if req.expiration < curr_epoch { + return Err(actor_error!( + illegal_argument, + "allocation expiration epoch {} has passed current epoch {}", + req.expiration, + curr_epoch + )); + } + // Allocation must expire soon enough. + let max_expiration = curr_epoch + policy.maximum_verified_allocation_expiration; + if req.expiration > max_expiration { + return Err(actor_error!( + illegal_argument, + "allocation expiration {} exceeds maximum {}", + req.expiration, + max_expiration + )); + } + Ok(()) +} + +fn validate_claim_extension( + req: &ClaimExtensionRequest, + claim: &Claim, + policy: &Policy, + curr_epoch: ChainEpoch, +) -> Result<(), ActorError> { + // The new term max is the policy limit after current epoch (not after the old term max). 
+// Checks that an address corresponds to a miner actor.
+fn resolve_miner_id(rt: &mut RT, addr: &Address) -> Result +where + BS: Blockstore, + RT: Runtime, +{ + let id = rt.resolve_address(addr).with_context_code(ExitCode::USR_ILLEGAL_ARGUMENT, || { + format!("failed to resolve provider address {}", addr) + })?; + let code_cid = + rt.get_actor_code_cid(&id).with_context_code(ExitCode::USR_ILLEGAL_ARGUMENT, || { + format!("no code CID for provider {}", addr) + })?; + let provider_type = rt + .resolve_builtin_actor_type(&code_cid) + .with_context_code(ExitCode::USR_ILLEGAL_ARGUMENT, || { + format!("provider code {} must be built-in miner actor", code_cid) + })?; + if provider_type != Type::Miner { + return Err(actor_error!( + illegal_argument, + "allocation provider {} must be a miner actor, was {:?}", + addr, + provider_type + )); + } + Ok(id) +} + +fn can_claim_alloc( + claim_alloc: &SectorAllocationClaim, + provider: ActorID, + alloc: &Allocation, + curr_epoch: ChainEpoch, +) -> bool { + let sector_lifetime = claim_alloc.sector_expiry - curr_epoch; + + provider == alloc.provider + && claim_alloc.client == alloc.client + && claim_alloc.data == alloc.data + && claim_alloc.size == alloc.size + && curr_epoch <= alloc.expiration + && sector_lifetime >= alloc.term_min + && sector_lifetime <= alloc.term_max +} + impl ActorCode for Actor { fn invoke_method( rt: &mut RT, @@ -752,19 +1136,35 @@ impl ActorCode for Actor { Self::add_verified_client(rt, cbor::deserialize_params(params)?)?; Ok(RawBytes::default()) } - Some(Method::UseBytes) => { - Self::use_bytes(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::default()) - } - Some(Method::RestoreBytes) => { - Self::restore_bytes(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::default()) - } Some(Method::RemoveVerifiedClientDataCap) => { let res = Self::remove_verified_client_data_cap(rt, cbor::deserialize_params(params)?)?; Ok(RawBytes::serialize(res)?) 
} + Some(Method::RemoveExpiredAllocations) => { + let res = Self::remove_expired_allocations(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::serialize(res)?) + } + Some(Method::ClaimAllocations) => { + let res = Self::claim_allocations(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::serialize(res)?) + } + Some(Method::ExtendClaimTerms) => { + let res = Self::extend_claim_terms(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::serialize(res)?) + } + Some(Method::GetClaims) => { + let res = Self::get_claims(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::serialize(res)?) + } + Some(Method::RemoveExpiredClaims) => { + let res = Self::remove_expired_claims(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::serialize(res)?) + } + Some(Method::UniversalReceiverHook) => { + let res = Self::universal_receiver_hook(rt, cbor::deserialize_params(params)?)?; + Ok(RawBytes::serialize(res)?) + } None => Err(actor_error!(unhandled_message; "Invalid method")), } } diff --git a/actors/verifreg/src/state.rs b/actors/verifreg/src/state.rs index 5c0e5681f..b8a8c2474 100644 --- a/actors/verifreg/src/state.rs +++ b/actors/verifreg/src/state.rs @@ -2,34 +2,281 @@ // SPDX-License-Identifier: Apache-2.0, MIT use cid::Cid; -use fil_actors_runtime::make_empty_map; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; use fvm_shared::address::Address; -use fvm_shared::HAMT_BIT_WIDTH; +use fvm_shared::bigint::bigint_ser::BigIntDe; +use fvm_shared::clock::ChainEpoch; +use fvm_shared::error::ExitCode; +use fvm_shared::piece::PaddedPieceSize; +use fvm_shared::sector::SectorNumber; +use fvm_shared::{ActorID, HAMT_BIT_WIDTH}; + +use fil_actors_runtime::{ + actor_error, make_empty_map, make_map_with_root_and_bitwidth, ActorError, AsActorError, Map, + MapMap, +}; + +use crate::DataCap; +use crate::{AllocationID, ClaimID}; #[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] pub struct State { pub root_key: Address, 
- pub verifiers: Cid, - pub verified_clients: Cid, + // Maps verifier addresses to data cap minting allowance (in bytes). + pub verifiers: Cid, // HAMT[Address]DataCap pub remove_data_cap_proposal_ids: Cid, + // Maps client IDs to allocations made by that client. + pub allocations: Cid, // HAMT[ActorID]HAMT[AllocationID]Allocation + // Next allocation identifier to use. + // The value 0 is reserved to mean "no allocation". + pub next_allocation_id: u64, + // Maps provider IDs to allocations claimed by that provider. + pub claims: Cid, // HAMT[ActorID]HAMT[ClaimID]Claim } impl State { - pub fn new(store: &BS, root_key: Address) -> anyhow::Result { + pub fn new(store: &BS, root_key: Address) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .map_err(|e| anyhow::anyhow!("Failed to create empty map: {}", e))?; + .map_err(|e| actor_error!(illegal_state, "failed to create empty map: {}", e))?; + + let empty_mapmap = + MapMap::<_, (), ActorID, u64>::new(store, HAMT_BIT_WIDTH, HAMT_BIT_WIDTH) + .flush() + .map_err(|e| { + actor_error!(illegal_state, "failed to create empty multi map: {}", e) + })?; Ok(State { root_key, verifiers: empty_map, - verified_clients: empty_map, remove_data_cap_proposal_ids: empty_map, + allocations: empty_mapmap, + next_allocation_id: 1, + claims: empty_mapmap, }) } + + // Adds a verifier and cap, overwriting any existing cap for that verifier. 
+ pub fn put_verifier( + &mut self, + store: &impl Blockstore, + verifier: &Address, + cap: &DataCap, + ) -> Result<(), ActorError> { + let mut verifiers = + make_map_with_root_and_bitwidth::<_, BigIntDe>(&self.verifiers, store, HAMT_BIT_WIDTH) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers")?; + verifiers + .set(verifier.to_bytes().into(), BigIntDe(cap.clone())) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set verifier")?; + self.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; + Ok(()) + } + + pub fn remove_verifier( + &mut self, + store: &impl Blockstore, + verifier: &Address, + ) -> Result<(), ActorError> { + let mut verifiers = + make_map_with_root_and_bitwidth::<_, BigIntDe>(&self.verifiers, store, HAMT_BIT_WIDTH) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers")?; + + verifiers + .delete(&verifier.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to remove verifier")? 
+ .context_code(ExitCode::USR_ILLEGAL_ARGUMENT, "verifier not found")?; + + self.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; + Ok(()) + } + + pub fn get_verifier_cap( + &self, + store: &impl Blockstore, + verifier: &Address, + ) -> Result, ActorError> { + let verifiers = + make_map_with_root_and_bitwidth::<_, BigIntDe>(&self.verifiers, store, HAMT_BIT_WIDTH) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers")?; + let allowance = verifiers + .get(&verifier.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; + Ok(allowance.map(|a| a.0.clone() as DataCap)) + } + + pub fn load_verifiers<'a, BS: Blockstore>( + &self, + store: &'a BS, + ) -> Result, ActorError> { + make_map_with_root_and_bitwidth::<_, BigIntDe>(&self.verifiers, store, HAMT_BIT_WIDTH) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers") + } + + pub fn load_allocs<'a, BS: Blockstore>( + &self, + store: &'a BS, + ) -> Result, ActorError> { + MapMap::::from_root( + store, + &self.allocations, + HAMT_BIT_WIDTH, + HAMT_BIT_WIDTH, + ) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load allocations table") + } + + pub fn save_allocs<'a, BS: Blockstore>( + &mut self, + allocs: &mut MapMap<'a, BS, Allocation, ActorID, AllocationID>, + ) -> Result<(), ActorError> { + self.allocations = allocs + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush allocations table")?; + Ok(()) + } + + /// Inserts a batch of allocations under a single client address. + /// The allocations are assigned sequential IDs starting from the next available. 
+ pub fn insert_allocations( + &mut self, + store: &BS, + client: ActorID, + new_allocs: I, + ) -> Result, ActorError> + where + I: Iterator, + { + let mut allocs = self.load_allocs(store)?; + // These local variables allow the id-associating map closure to move the allocations + // from the iterator rather than clone, without moving self. + let first_id = self.next_allocation_id; + let mut count = 0; + let count_ref = &mut count; + allocs + .put_many( + client, + new_allocs.map(move |a| { + let id = first_id + *count_ref; + *count_ref += 1; + (id, a) + }), + ) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to put allocations")?; + self.save_allocs(&mut allocs)?; + self.next_allocation_id += count; + let allocated_ids = (first_id..first_id + count).collect(); + Ok(allocated_ids) + } + + pub fn load_claims<'a, BS: Blockstore>( + &self, + store: &'a BS, + ) -> Result, ActorError> { + MapMap::::from_root( + store, + &self.claims, + HAMT_BIT_WIDTH, + HAMT_BIT_WIDTH, + ) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims table") + } + + pub fn save_claims<'a, BS: Blockstore>( + &mut self, + claims: &mut MapMap<'a, BS, Claim, ActorID, ClaimID>, + ) -> Result<(), ActorError> { + self.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims table")?; + Ok(()) + } + + pub fn put_claims(&mut self, store: &BS, claims: I) -> Result<(), ActorError> + where + I: Iterator, + { + let mut st_claims = self.load_claims(store)?; + for (id, claim) in claims { + st_claims + .put(claim.provider, id, claim) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to put claim")?; + } + self.save_claims(&mut st_claims)?; + Ok(()) + } +} +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] +pub struct Claim { + // The provider storing the data (from allocation). + pub provider: ActorID, + // The client which allocated the DataCap (from allocation). 
+ pub client: ActorID, + // Identifier of the data committed (from allocation). + pub data: Cid, + // The (padded) size of data (from allocation). + pub size: PaddedPieceSize, + // The min period after term_start which the provider must commit to storing data + pub term_min: ChainEpoch, + // The max period after term_start for which provider can earn QA-power for the data + pub term_max: ChainEpoch, + // The epoch at which the (first range of the) piece was committed. + pub term_start: ChainEpoch, + // ID of the provider's sector in which the data is committed. + pub sector: SectorNumber, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] +pub struct Allocation { + // The verified client which allocated the DataCap. + pub client: ActorID, + // The provider (miner actor) which may claim the allocation. + pub provider: ActorID, + // Identifier of the data to be committed. + pub data: Cid, + // The (padded) size of data. + pub size: PaddedPieceSize, + // The minimum duration which the provider must commit to storing the piece to avoid + // early-termination penalties (epochs). + pub term_min: ChainEpoch, + // The maximum period for which a provider can earn quality-adjusted power + // for the piece (epochs). + pub term_max: ChainEpoch, + // The latest epoch by which a provider must commit data before the allocation expires. 
+ pub expiration: ChainEpoch, } impl Cbor for State {} + +pub fn get_allocation<'a, BS>( + allocations: &'a mut MapMap, + client: ActorID, + id: AllocationID, +) -> Result, ActorError> +where + BS: Blockstore, +{ + allocations + .get(client, id) + .context_code(ExitCode::USR_ILLEGAL_STATE, "HAMT lookup failure getting allocation") +} + +pub fn get_claim<'a, BS>( + claims: &'a mut MapMap, + provider: ActorID, + id: ClaimID, +) -> Result, ActorError> +where + BS: Blockstore, +{ + claims + .get(provider, id) + .context_code(ExitCode::USR_ILLEGAL_STATE, "HAMT lookup failure getting claim") +} diff --git a/actors/verifreg/src/testing.rs b/actors/verifreg/src/testing.rs index 67a327c3a..5ff1fa438 100644 --- a/actors/verifreg/src/testing.rs +++ b/actors/verifreg/src/testing.rs @@ -2,17 +2,14 @@ use std::collections::HashMap; use fil_actors_runtime::{Map, MessageAccumulator}; use fvm_ipld_blockstore::Blockstore; -use fvm_shared::{ - address::{Address, Protocol}, - bigint::bigint_ser::BigIntDe, -}; +use fvm_shared::address::{Address, Protocol}; +use fvm_shared::bigint::bigint_ser::BigIntDe; use num_traits::Signed; use crate::{DataCap, State}; pub struct StateSummary { pub verifiers: HashMap, - pub clients: HashMap, } /// Checks internal invariants of verified registry state. 
@@ -47,35 +44,5 @@ pub fn check_state_invariants( Err(e) => acc.add(format!("error loading verifiers {e}")), } - // check clients - let mut all_clients = HashMap::new(); - match Map::<_, BigIntDe>::load(&state.verified_clients, store) { - Ok(clients) => { - let ret = clients.for_each(|key, cap| { - let client = Address::from_bytes(key)?; - let cap = &cap.0; - - acc.require( - client.protocol() == Protocol::ID, - format!("client {client} should have ID protocol"), - ); - acc.require(!cap.is_negative(), format!("client {client} cap {cap} is negative")); - all_clients.insert(client, cap.clone()); - Ok(()) - }); - - acc.require_no_error(ret, "error iterating clients"); - } - Err(e) => acc.add(format!("error loading clients {e}")), - } - - // check verifiers and clients are disjoint - // No need to iterate all clients; any overlap must have been one of all verifiers. - all_verifiers.keys().filter(|verifier| all_clients.contains_key(verifier)).for_each( - |verifier| { - acc.add(format!("verifier {verifier} is also a client")); - }, - ); - - (StateSummary { verifiers: all_verifiers, clients: all_clients }, acc) + (StateSummary { verifiers: all_verifiers }, acc) } diff --git a/actors/verifreg/src/types.rs b/actors/verifreg/src/types.rs index 20385cd9e..a888fc029 100644 --- a/actors/verifreg/src/types.rs +++ b/actors/verifreg/src/types.rs @@ -1,12 +1,23 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT +use cid::Cid; +use fil_actors_runtime::BatchReturn; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; use fvm_shared::address::Address; -use fvm_shared::bigint::bigint_ser; +use fvm_shared::bigint::{bigint_ser, BigInt}; +use fvm_shared::clock::ChainEpoch; use fvm_shared::crypto::signature::Signature; +use fvm_shared::piece::PaddedPieceSize; +use fvm_shared::sector::SectorNumber; use fvm_shared::sector::StoragePower; +use fvm_shared::ActorID; + +use crate::Claim; + +pub type AllocationID = u64; +pub type ClaimID = u64; 
#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] pub struct VerifierParams { @@ -25,18 +36,6 @@ pub type AddVerifierClientParams = VerifierParams; /// We can introduce policy changes and replace this in the future. pub type DataCap = StoragePower; -#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] -pub struct BytesParams { - /// Address of verified client. - pub address: Address, - /// Number of bytes to use. - #[serde(with = "bigint_ser")] - pub deal_size: StoragePower, -} - -pub type UseBytesParams = BytesParams; -pub type RestoreBytesParams = BytesParams; - pub const SIGNATURE_DOMAIN_SEPARATION_REMOVE_DATA_CAP: &[u8] = b"fil_removedatacap:"; impl Cbor for RemoveDataCapParams {} @@ -95,3 +94,148 @@ impl AddrPairKey { first } } + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredAllocationsParams { + // Client for which to remove expired allocations. + pub client: ActorID, + // Optional list of allocation IDs to attempt to remove. + // Empty means remove all eligible expired allocations. + pub allocation_ids: Vec, +} +impl Cbor for RemoveExpiredAllocationsParams {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredAllocationsReturn { + // Ids of the allocations that were either specified by the caller or discovered to be expired. + pub considered: Vec, + // Results for each processed allocation. + pub results: BatchReturn, + // The amount of datacap reclaimed for the client. 
+ #[serde(with = "bigint_ser")] + pub datacap_recovered: DataCap, +} +impl Cbor for RemoveExpiredAllocationsReturn {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct SectorAllocationClaim { + pub client: ActorID, + pub allocation_id: AllocationID, + pub data: Cid, + pub size: PaddedPieceSize, + pub sector: SectorNumber, + pub sector_expiry: ChainEpoch, +} +impl Cbor for SectorAllocationClaim {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimAllocationsParams { + pub sectors: Vec, + pub all_or_nothing: bool, +} +impl Cbor for ClaimAllocationsParams {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimAllocationsReturn { + pub batch_info: BatchReturn, + #[serde(with = "bigint_ser")] + pub claimed_space: BigInt, +} + +impl Cbor for ClaimAllocationsReturn {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimTerm { + pub provider: ActorID, + pub claim_id: ClaimID, + pub term_max: ChainEpoch, +} +impl Cbor for ClaimTerm {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ExtendClaimTermsParams { + pub terms: Vec, +} +impl Cbor for ExtendClaimTermsParams {} + +pub type ExtendClaimTermsReturn = BatchReturn; + +// +// Receiver hook payload +// + +// A request to create an allocation with datacap tokens. +// See Allocation state for description of field semantics. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AllocationRequest { + pub provider: Address, + pub data: Cid, + pub size: PaddedPieceSize, + pub term_min: ChainEpoch, + pub term_max: ChainEpoch, + pub expiration: ChainEpoch, +} +impl Cbor for AllocationRequest {} + +// A request to extend the term of an existing claim with datacap tokens. 
+#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimExtensionRequest { + pub provider: Address, + pub claim: ClaimID, + pub term_max: ChainEpoch, +} +impl Cbor for ClaimExtensionRequest {} + +/// Operator-data payload for a datacap token transfer receiver hook specifying an allocation. +/// The implied client is the sender of the datacap. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AllocationRequests { + pub allocations: Vec, + pub extensions: Vec, +} +impl Cbor for AllocationRequests {} + +/// Recipient data payload in response to a datacap token transfer. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AllocationsResponse { + // Result for each allocation request. + pub allocation_results: BatchReturn, + // Result for each extension request. + pub extension_results: BatchReturn, + // IDs of new allocations created. + pub new_allocations: Vec, +} +impl Cbor for AllocationsResponse {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetClaimsParams { + pub provider: ActorID, + pub claim_ids: Vec, +} +impl Cbor for GetClaimsParams {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetClaimsReturn { + pub batch_info: BatchReturn, + pub claims: Vec, +} +impl Cbor for GetClaimsReturn {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredClaimsParams { + // Provider to clean up (need not be the caller) + pub provider: ActorID, + // Optional list of claim IDs to attempt to remove. + // Empty means remove all eligible expired claims. + pub claim_ids: Vec, +} +impl Cbor for RemoveExpiredClaimsParams {} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredClaimsReturn { + // Ids of the claims that were either specified by the caller or discovered to be expired. 
+ pub considered: Vec, + // Results for each processed claim. + pub results: BatchReturn, +} +impl Cbor for RemoveExpiredClaimsReturn {} diff --git a/actors/verifreg/tests/harness/mod.rs b/actors/verifreg/tests/harness/mod.rs index 76d5f1fe5..bd4468797 100644 --- a/actors/verifreg/tests/harness/mod.rs +++ b/actors/verifreg/tests/harness/mod.rs @@ -1,32 +1,54 @@ -use fil_actor_verifreg::testing::check_state_invariants; -use fil_actors_runtime::runtime::Runtime; -use fvm_ipld_blockstore::MemoryBlockstore; +use frc46_token::receiver::types::{FRC46TokenReceived, UniversalReceiverParams, FRC46_TOKEN_TYPE}; +use frc46_token::token::types::{BurnParams, BurnReturn, TransferParams}; +use frc46_token::token::TOKEN_PRECISION; use fvm_ipld_encoding::RawBytes; use fvm_shared::address::Address; -use fvm_shared::bigint::bigint_ser::BigIntDe; -use fvm_shared::{MethodNum, HAMT_BIT_WIDTH}; +use fvm_shared::bigint::bigint_ser::{BigIntDe, BigIntSer}; +use fvm_shared::clock::ChainEpoch; +use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; +use fvm_shared::piece::PaddedPieceSize; +use fvm_shared::sector::SectorNumber; +use fvm_shared::{ActorID, MethodNum, HAMT_BIT_WIDTH}; +use num_traits::{ToPrimitive, Zero}; +use fil_actor_verifreg::testing::check_state_invariants; use fil_actor_verifreg::{ - Actor as VerifregActor, AddVerifierClientParams, AddVerifierParams, DataCap, Method, - RestoreBytesParams, State, UseBytesParams, + ext, Actor as VerifregActor, AddVerifierClientParams, AddVerifierParams, Allocation, + AllocationID, AllocationRequest, AllocationRequests, AllocationsResponse, Claim, + ClaimAllocationsParams, ClaimAllocationsReturn, ClaimExtensionRequest, ClaimID, DataCap, + ExtendClaimTermsParams, ExtendClaimTermsReturn, GetClaimsParams, GetClaimsReturn, Method, + RemoveExpiredAllocationsParams, RemoveExpiredAllocationsReturn, RemoveExpiredClaimsParams, + RemoveExpiredClaimsReturn, SectorAllocationClaim, State, +}; +use fil_actors_runtime::cbor::serialize; +use 
fil_actors_runtime::runtime::builtins::Type; +use fil_actors_runtime::runtime::policy_constants::{ + MAXIMUM_VERIFIED_ALLOCATION_TERM, MINIMUM_VERIFIED_ALLOCATION_TERM, }; +use fil_actors_runtime::runtime::Runtime; use fil_actors_runtime::test_utils::*; use fil_actors_runtime::{ - make_empty_map, make_map_with_root_and_bitwidth, ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, - SYSTEM_ACTOR_ADDR, + make_empty_map, ActorError, AsActorError, BatchReturn, DATACAP_TOKEN_ACTOR_ADDR, + STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; pub const ROOT_ADDR: Address = Address::new_id(101); pub fn new_runtime() -> MockRuntime { MockRuntime { - receiver: ROOT_ADDR, + receiver: VERIFIED_REGISTRY_ACTOR_ADDR, caller: SYSTEM_ACTOR_ADDR, caller_type: *SYSTEM_ACTOR_CODE_ID, ..Default::default() } } +// Sets the miner code/type for an actor ID +pub fn add_miner(rt: &mut MockRuntime, id: ActorID) { + rt.set_address_actor_type(Address::new_id(id), *MINER_ACTOR_CODE_ID); +} + pub fn new_harness() -> (Harness, MockRuntime) { let mut rt = new_runtime(); let h = Harness { root: ROOT_ADDR }; @@ -54,7 +76,6 @@ impl Harness { let empty_map = make_empty_map::<_, ()>(&rt.store, HAMT_BIT_WIDTH).flush().unwrap(); let state: State = rt.get_state(); assert_eq!(self.root, state.root_key); - assert_eq!(empty_map, state.verified_clients); assert_eq!(empty_map, state.verifiers); } @@ -63,9 +84,30 @@ impl Harness { rt: &mut MockRuntime, verifier: &Address, allowance: &DataCap, + ) -> Result<(), ActorError> { + self.add_verifier_with_existing_cap(rt, verifier, allowance, &DataCap::zero()) + } + + pub fn add_verifier_with_existing_cap( + &self, + rt: &mut MockRuntime, + verifier: &Address, + allowance: &DataCap, + cap: &DataCap, // Mocked data cap balance of the prospective verifier ) -> Result<(), ActorError> { rt.expect_validate_caller_addr(vec![self.root]); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, self.root); + let verifier_resolved = 
rt.get_id_address(verifier).unwrap_or(*verifier); + // Expect checking the verifier's token balance. + rt.expect_send( + DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::BalanceOf as MethodNum, + RawBytes::serialize(&verifier_resolved).unwrap(), + TokenAmount::zero(), + serialize(&BigIntSer(&(cap * TOKEN_PRECISION)), "").unwrap(), + ExitCode::OK, + ); + let params = AddVerifierParams { address: *verifier, allowance: allowance.clone() }; let ret = rt.call::( Method::AddVerifier as MethodNum, @@ -107,14 +149,14 @@ impl Harness { } pub fn get_verifier_allowance(&self, rt: &MockRuntime, verifier: &Address) -> DataCap { - let verifiers = load_verifiers(rt); + let verifiers = rt.get_state::().load_verifiers(&rt.store).unwrap(); let BigIntDe(allowance) = verifiers.get(&verifier.to_bytes()).unwrap().unwrap(); allowance.clone() } pub fn assert_verifier_removed(&self, rt: &MockRuntime, verifier: &Address) { let verifier_id_addr = rt.get_id_address(verifier).unwrap(); - let verifiers = load_verifiers(rt); + let verifiers = rt.get_state::().load_verifiers(&rt.store).unwrap(); assert!(!verifiers.contains_key(&verifier_id_addr.to_bytes()).unwrap()) } @@ -124,10 +166,26 @@ impl Harness { verifier: &Address, client: &Address, allowance: &DataCap, - expected_allowance: &DataCap, ) -> Result<(), ActorError> { rt.expect_validate_caller_any(); rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, *verifier); + let client_resolved = rt.get_id_address(client).unwrap_or(*client); + + // Expect tokens to be minted. 
+ let mint_params = ext::datacap::MintParams { + to: client_resolved, + amount: TokenAmount::from_whole(allowance.to_i64().unwrap()), + operators: vec![STORAGE_MARKET_ACTOR_ADDR], + }; + rt.expect_send( + DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::Mint as MethodNum, + RawBytes::serialize(&mint_params).unwrap(), + TokenAmount::zero(), + RawBytes::default(), + ExitCode::OK, + ); + let params = AddVerifierClientParams { address: *client, allowance: allowance.clone() }; let ret = rt.call::( Method::AddVerifiedClient as MethodNum, @@ -136,94 +194,363 @@ impl Harness { assert_eq!(RawBytes::default(), ret); rt.verify(); - // Confirm the verifier was added to state. - self.assert_client_allowance(rt, client, expected_allowance); Ok(()) } - pub fn assert_client_allowance(&self, rt: &MockRuntime, client: &Address, allowance: &DataCap) { - let client_id_addr = rt.get_id_address(client).unwrap(); - assert_eq!(*allowance, self.get_client_allowance(rt, &client_id_addr)); + pub fn check_state(&self, rt: &MockRuntime) { + let (_, acc) = check_state_invariants(&rt.get_state(), rt.store()); + acc.assert_empty(); } - pub fn get_client_allowance(&self, rt: &MockRuntime, client: &Address) -> DataCap { - let clients = load_clients(rt); - let BigIntDe(allowance) = clients.get(&client.to_bytes()).unwrap().unwrap(); - allowance.clone() + // TODO this should be implemented through a call to verifreg but for now it modifies state directly + pub fn create_alloc( + &self, + rt: &mut MockRuntime, + alloc: &Allocation, + ) -> Result { + let mut st: State = rt.get_state(); + let mut allocs = st.load_allocs(rt.store()).unwrap(); + let alloc_id = st.next_allocation_id; + assert!(allocs + .put_if_absent(alloc.client, alloc_id, alloc.clone()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "faild to put")?); + st.next_allocation_id += 1; + st.allocations = allocs.flush().expect("failed flushing allocation table"); + rt.replace_state(&st); + Ok(alloc_id) } - pub fn assert_client_removed(&self, rt: 
&MockRuntime, client: &Address) { - let client_id_addr = rt.get_id_address(client).unwrap(); - let clients = load_clients(rt); - assert!(!clients.contains_key(&client_id_addr.to_bytes()).unwrap()) + pub fn load_alloc( + &self, + rt: &mut MockRuntime, + client: ActorID, + id: AllocationID, + ) -> Option { + let st: State = rt.get_state(); + let mut allocs = st.load_allocs(rt.store()).unwrap(); + allocs.get(client, id).unwrap().cloned() } - pub fn add_verifier_and_client( + // Invokes the ClaimAllocations actor method + pub fn claim_allocations( &self, rt: &mut MockRuntime, - verifier: &Address, - client: &Address, - verifier_allowance: &DataCap, - client_allowance: &DataCap, - ) { - self.add_verifier(rt, verifier, verifier_allowance).unwrap(); - self.add_client(rt, verifier, client, client_allowance, client_allowance).unwrap(); + provider: ActorID, + claim_allocs: Vec, + datacap_burnt: u64, + all_or_nothing: bool, + ) -> Result { + rt.expect_validate_caller_type(vec![Type::Miner]); + rt.set_caller(*MINER_ACTOR_CODE_ID, Address::new_id(provider)); + + rt.expect_send( + DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::Burn as MethodNum, + RawBytes::serialize(&BurnParams { + amount: TokenAmount::from_whole(datacap_burnt.to_i64().unwrap()), + }) + .unwrap(), + TokenAmount::zero(), + RawBytes::serialize(&BurnReturn { balance: TokenAmount::zero() }).unwrap(), + ExitCode::OK, + ); + + let params = ClaimAllocationsParams { sectors: claim_allocs, all_or_nothing }; + let ret = rt + .call::( + Method::ClaimAllocations as MethodNum, + &RawBytes::serialize(params).unwrap(), + )? + .deserialize() + .expect("failed to deserialize claim allocations return"); + rt.verify(); + Ok(ret) } - pub fn use_bytes( + // Invokes the RemoveExpiredAllocations actor method. 
+ pub fn remove_expired_allocations( &self, rt: &mut MockRuntime, - client: &Address, - amount: &DataCap, - ) -> Result<(), ActorError> { - rt.expect_validate_caller_addr(vec![STORAGE_MARKET_ACTOR_ADDR]); - rt.set_caller(*MARKET_ACTOR_CODE_ID, STORAGE_MARKET_ACTOR_ADDR); - let params = UseBytesParams { address: *client, deal_size: amount.clone() }; - let ret = rt.call::( - Method::UseBytes as MethodNum, - &RawBytes::serialize(params).unwrap(), - )?; - assert_eq!(RawBytes::default(), ret); + client: ActorID, + allocation_ids: Vec, + expected_datacap: u64, + ) -> Result { + rt.expect_validate_caller_any(); + + rt.expect_send( + DATACAP_TOKEN_ACTOR_ADDR, + ext::datacap::Method::Transfer as MethodNum, + RawBytes::serialize(&TransferParams { + to: Address::new_id(client), + amount: TokenAmount::from_whole(expected_datacap.to_i64().unwrap()), + operator_data: RawBytes::default(), + }) + .unwrap(), + TokenAmount::zero(), + RawBytes::default(), + ExitCode::OK, + ); + + let params = RemoveExpiredAllocationsParams { client, allocation_ids }; + let ret = rt + .call::( + Method::RemoveExpiredAllocations as MethodNum, + &RawBytes::serialize(params).unwrap(), + )? + .deserialize() + .expect("failed to deserialize remove expired allocations return"); rt.verify(); - Ok(()) + Ok(ret) } - pub fn restore_bytes( + // Invokes the RemoveExpiredClaims actor method. + pub fn remove_expired_claims( &self, rt: &mut MockRuntime, - client: &Address, - amount: &DataCap, + provider: ActorID, + claim_ids: Vec, + ) -> Result { + rt.expect_validate_caller_any(); + + let params = RemoveExpiredClaimsParams { provider, claim_ids }; + let ret = rt + .call::( + Method::RemoveExpiredClaims as MethodNum, + &RawBytes::serialize(params).unwrap(), + )? 
+ .deserialize() + .expect("failed to deserialize remove expired claims return"); + rt.verify(); + Ok(ret) + } + + pub fn load_claim( + &self, + rt: &mut MockRuntime, + provider: ActorID, + id: ClaimID, + ) -> Option { + let st: State = rt.get_state(); + let mut claims = st.load_claims(rt.store()).unwrap(); + claims.get(provider, id).unwrap().cloned() + } + + pub fn receive_tokens( + &self, + rt: &mut MockRuntime, + payload: FRC46TokenReceived, + expected_alloc_results: BatchReturn, + expected_extension_results: BatchReturn, + expected_alloc_ids: Vec, ) -> Result<(), ActorError> { - rt.expect_validate_caller_addr(vec![STORAGE_MARKET_ACTOR_ADDR]); - rt.set_caller(*MARKET_ACTOR_CODE_ID, STORAGE_MARKET_ACTOR_ADDR); - let params = RestoreBytesParams { address: *client, deal_size: amount.clone() }; + rt.set_caller(*DATACAP_TOKEN_ACTOR_CODE_ID, DATACAP_TOKEN_ACTOR_ADDR); + let params = UniversalReceiverParams { + type_: FRC46_TOKEN_TYPE, + payload: serialize(&payload, "payload").unwrap(), + }; + + rt.expect_validate_caller_addr(vec![DATACAP_TOKEN_ACTOR_ADDR]); let ret = rt.call::( - Method::RestoreBytes as MethodNum, - &RawBytes::serialize(params).unwrap(), + Method::UniversalReceiverHook as MethodNum, + &serialize(¶ms, "hook params").unwrap(), )?; - assert_eq!(RawBytes::default(), ret); + assert_eq!( + RawBytes::serialize(AllocationsResponse { + allocation_results: expected_alloc_results, + extension_results: expected_extension_results, + new_allocations: expected_alloc_ids + }) + .unwrap(), + ret + ); rt.verify(); Ok(()) } - pub fn check_state(&self, rt: &MockRuntime) { - let (_, acc) = check_state_invariants(&rt.get_state(), rt.store()); - acc.assert_empty(); + // Creates a claim directly in state. 
+ pub fn create_claim(&self, rt: &mut MockRuntime, claim: &Claim) -> Result { + let mut st: State = rt.get_state(); + let mut claims = st.load_claims(rt.store()).unwrap(); + let id = st.next_allocation_id; + assert!(claims + .put_if_absent(claim.provider, id, claim.clone()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "faild to put")?); + st.next_allocation_id += 1; + st.claims = claims.flush().expect("failed flushing allocation table"); + rt.replace_state(&st); + Ok(id) + } + + pub fn get_claims( + &self, + rt: &mut MockRuntime, + provider: ActorID, + claim_ids: Vec, + ) -> Result { + rt.expect_validate_caller_any(); + let params = GetClaimsParams { claim_ids, provider }; + let ret = rt + .call::( + Method::GetClaims as MethodNum, + &serialize(¶ms, "get claims params").unwrap(), + )? + .deserialize() + .expect("failed to deserialize get claims return"); + rt.verify(); + Ok(ret) + } + + pub fn extend_claim_terms( + &self, + rt: &mut MockRuntime, + params: &ExtendClaimTermsParams, + ) -> Result { + rt.expect_validate_caller_any(); + let ret = rt + .call::( + Method::ExtendClaimTerms as MethodNum, + &serialize(¶ms, "extend claim terms params").unwrap(), + )? + .deserialize() + .expect("failed to deserialize extend claim terms return"); + rt.verify(); + Ok(ret) + } +} + +pub fn make_alloc(data_id: &str, client: ActorID, provider: ActorID, size: u64) -> Allocation { + Allocation { + client, + provider, + data: make_piece_cid(data_id.as_bytes()), + size: PaddedPieceSize(size), + term_min: 1000, + term_max: 2000, + expiration: 100, + } +} + +// Creates an allocation request for fixed data with default terms. 
+pub fn make_alloc_req(rt: &MockRuntime, provider: ActorID, size: u64) -> AllocationRequest { + AllocationRequest { + provider: Address::new_id(provider), + data: make_piece_cid("1234".as_bytes()), + size: PaddedPieceSize(size), + term_min: MINIMUM_VERIFIED_ALLOCATION_TERM, + term_max: MAXIMUM_VERIFIED_ALLOCATION_TERM, + expiration: rt.epoch + 100, + } +} + +pub fn make_extension_req( + provider: ActorID, + claim: ClaimID, + term_max: ChainEpoch, +) -> ClaimExtensionRequest { + ClaimExtensionRequest { provider: Address::new_id(provider), claim, term_max } +} + +// Creates the expected allocation from a request. +pub fn alloc_from_req(client: ActorID, req: &AllocationRequest) -> Allocation { + Allocation { + client, + provider: req.provider.id().unwrap(), + data: req.data, + size: req.size, + term_min: req.term_min, + term_max: req.term_max, + expiration: req.expiration, + } +} + +pub fn make_claim_req( + id: AllocationID, + alloc: &Allocation, + sector_id: SectorNumber, + sector_expiry: ChainEpoch, +) -> SectorAllocationClaim { + SectorAllocationClaim { + client: alloc.client, + allocation_id: id, + data: alloc.data, + size: alloc.size, + sector: sector_id, + sector_expiry, } } -fn load_verifiers(rt: &MockRuntime) -> Map { - let state: State = rt.get_state(); - make_map_with_root_and_bitwidth::<_, BigIntDe>(&state.verifiers, &*rt.store, HAMT_BIT_WIDTH) - .unwrap() +#[allow(clippy::too_many_arguments)] +pub fn make_claim( + data_id: &str, + client: ActorID, + provider: ActorID, + size: u64, + term_min: i64, + term_max: i64, + term_start: i64, + sector: u64, +) -> Claim { + Claim { + provider, + client, + data: make_piece_cid(data_id.as_bytes()), + size: PaddedPieceSize(size), + term_min, + term_max, + term_start, + sector, + } } -fn load_clients(rt: &MockRuntime) -> Map { - let state: State = rt.get_state(); - make_map_with_root_and_bitwidth::<_, BigIntDe>( - &state.verified_clients, - &*rt.store, - HAMT_BIT_WIDTH, - ) - .unwrap() +pub fn claim_from_alloc(alloc: 
&Allocation, term_start: ChainEpoch, sector: SectorNumber) -> Claim { + Claim { + provider: alloc.provider, + client: alloc.client, + data: alloc.data, + size: alloc.size, + term_min: alloc.term_min, + term_max: alloc.term_max, + term_start, + sector, + } +} + +pub fn make_receiver_hook_token_payload( + client: ActorID, + alloc_requests: Vec, + extension_requests: Vec, + datacap_received: u64, +) -> FRC46TokenReceived { + // let total_size: u64 = alloc_requests.iter().map(|r| r.size.0).sum(); + let payload = + AllocationRequests { allocations: alloc_requests, extensions: extension_requests }; + FRC46TokenReceived { + from: client, + to: VERIFIED_REGISTRY_ACTOR_ADDR.id().unwrap(), + operator: client, + amount: TokenAmount::from_whole(datacap_received as i64), + operator_data: serialize(&payload, "operator data").unwrap(), + token_data: Default::default(), + } +} + +pub fn assert_allocation( + rt: &MockRuntime, + client: ActorID, + id: AllocationID, + expected: &Allocation, +) { + let st: State = rt.get_state(); + let store = &rt.store(); + let mut allocs = st.load_allocs(store).unwrap(); + + assert_eq!(expected, allocs.get(client, id).unwrap().unwrap()); +} + +pub fn assert_claim(rt: &MockRuntime, provider: ActorID, id: ClaimID, expected: &Claim) { + let st: State = rt.get_state(); + let store = &rt.store(); + let mut claims = st.load_claims(store).unwrap(); + + assert_eq!(expected, claims.get(provider, id).unwrap().unwrap()); } diff --git a/actors/verifreg/tests/verifreg_actor_test.rs b/actors/verifreg/tests/verifreg_actor_test.rs index 8bbb13581..396a751fe 100644 --- a/actors/verifreg/tests/verifreg_actor_test.rs +++ b/actors/verifreg/tests/verifreg_actor_test.rs @@ -10,14 +10,17 @@ lazy_static! 
{ static ref CLIENT2: Address = Address::new_id(302); static ref CLIENT3: Address = Address::new_id(303); static ref CLIENT4: Address = Address::new_id(304); + static ref PROVIDER: Address = Address::new_id(305); + static ref PROVIDER2: Address = Address::new_id(306); } mod util { - use fil_actors_runtime::test_utils::MockRuntime; use fvm_shared::sector::StoragePower; + use fil_actors_runtime::test_utils::MockRuntime; + pub fn verifier_allowance(rt: &MockRuntime) -> StoragePower { - rt.policy.minimum_verified_deal_size.clone() + 42 + rt.policy.minimum_verified_allocation_size.clone() + 42 } pub fn client_allowance(rt: &MockRuntime) -> StoragePower { @@ -34,9 +37,9 @@ mod construction { use fil_actor_verifreg::{Actor as VerifregActor, Method}; use fil_actors_runtime::test_utils::*; use fil_actors_runtime::SYSTEM_ACTOR_ADDR; + use harness::*; use crate::*; - use harness::*; #[test] fn construct_with_root_id() { @@ -79,13 +82,13 @@ mod verifiers { use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_SEND}; - use fil_actor_verifreg::{Actor as VerifregActor, AddVerifierParams, Method}; + use fil_actor_verifreg::{Actor as VerifregActor, AddVerifierParams, DataCap, Method}; use fil_actors_runtime::test_utils::*; - - use crate::*; use harness::*; use util::*; + use crate::*; + #[test] fn add_verifier_requires_root_caller() { let (h, mut rt) = new_harness(); @@ -106,7 +109,7 @@ mod verifiers { #[test] fn add_verifier_enforces_min_size() { let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone() - 1; + let allowance = rt.policy.minimum_verified_allocation_size.clone() - 1; expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, h.add_verifier(&mut rt, &VERIFIER, &allowance), @@ -129,8 +132,10 @@ mod verifiers { fn add_verifier_rejects_client() { let (h, mut rt) = new_harness(); let allowance = verifier_allowance(&rt); - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance); - 
expect_abort(ExitCode::USR_ILLEGAL_ARGUMENT, h.add_verifier(&mut rt, &CLIENT, &allowance)); + expect_abort( + ExitCode::USR_ILLEGAL_ARGUMENT, + h.add_verifier_with_existing_cap(&mut rt, &VERIFIER, &allowance, &DataCap::from(1)), + ); h.check_state(&rt); } @@ -170,7 +175,6 @@ mod verifiers { let allowance = verifier_allowance(&rt); let pubkey_addr = Address::new_secp256k1(&[0u8; 65]).unwrap(); rt.id_addresses.insert(pubkey_addr, *VERIFIER); - h.add_verifier(&mut rt, &pubkey_addr, &allowance).unwrap(); h.check_state(&rt); } @@ -231,14 +235,15 @@ mod clients { use fvm_shared::econ::TokenAmount; use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_SEND}; + use num_traits::Zero; use fil_actor_verifreg::{Actor as VerifregActor, AddVerifierClientParams, DataCap, Method}; use fil_actors_runtime::test_utils::*; - - use crate::*; use harness::*; use util::*; + use crate::*; + #[test] fn many_verifiers_and_clients() { let (h, mut rt) = new_harness(); @@ -248,17 +253,13 @@ mod clients { h.add_verifier(&mut rt, &VERIFIER, &allowance_verifier).unwrap(); h.add_verifier(&mut rt, &VERIFIER2, &allowance_verifier).unwrap(); - h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance_client, &allowance_client).unwrap(); - h.add_client(&mut rt, &VERIFIER, &CLIENT2, &allowance_client, &allowance_client).unwrap(); + h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance_client).unwrap(); + h.add_client(&mut rt, &VERIFIER, &CLIENT2, &allowance_client).unwrap(); - h.add_client(&mut rt, &VERIFIER2, &CLIENT3, &allowance_client, &allowance_client).unwrap(); - h.add_client(&mut rt, &VERIFIER2, &CLIENT4, &allowance_client, &allowance_client).unwrap(); + h.add_client(&mut rt, &VERIFIER2, &CLIENT3, &allowance_client).unwrap(); + h.add_client(&mut rt, &VERIFIER2, &CLIENT4, &allowance_client).unwrap(); - // all clients should exist and verifiers should have no more allowance left - h.assert_client_allowance(&rt, &CLIENT, &allowance_client); - h.assert_client_allowance(&rt, &CLIENT2, 
&allowance_client); - h.assert_client_allowance(&rt, &CLIENT3, &allowance_client); - h.assert_client_allowance(&rt, &CLIENT4, &allowance_client); + // No more allowance left h.assert_verifier_allowance(&rt, &VERIFIER, &DataCap::from(0)); h.assert_verifier_allowance(&rt, &VERIFIER2, &DataCap::from(0)); h.check_state(&rt); @@ -271,15 +272,13 @@ mod clients { // Verifier only has allowance for one client. h.add_verifier(&mut rt, &VERIFIER, &allowance).unwrap(); - h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance).unwrap(); + h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance).unwrap(); expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, - h.add_client(&mut rt, &VERIFIER, &CLIENT2, &allowance, &allowance), + h.add_client(&mut rt, &VERIFIER, &CLIENT2, &allowance), ); - // One client should exist and verifier should have no more allowance left. - h.assert_client_allowance(&rt, &CLIENT, &allowance); - h.assert_verifier_allowance(&rt, &VERIFIER, &DataCap::from(0)); + h.assert_verifier_allowance(&rt, &VERIFIER, &DataCap::zero()); h.check_state(&rt); } @@ -293,14 +292,12 @@ mod clients { rt.id_addresses.insert(client_pubkey, *CLIENT); h.add_verifier(&mut rt, &VERIFIER, &allowance_verifier).unwrap(); - h.add_client(&mut rt, &VERIFIER, &client_pubkey, &allowance_client, &allowance_client) - .unwrap(); + h.add_client(&mut rt, &VERIFIER, &client_pubkey, &allowance_client).unwrap(); - // Adding another verified client with the same ID address increments + // Adding another client with the same address increments // the data cap which has already been granted. 
h.add_verifier(&mut rt, &VERIFIER, &allowance_verifier).unwrap(); - let expected_allowance = allowance_client.clone() + allowance_client.clone(); - h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance_client, &expected_allowance).unwrap(); + h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance_client).unwrap(); h.check_state(&rt); } @@ -310,8 +307,8 @@ mod clients { let allowance_verifier = verifier_allowance(&rt); h.add_verifier(&mut rt, &VERIFIER, &allowance_verifier).unwrap(); - let allowance = rt.policy.minimum_verified_deal_size.clone(); - h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance).unwrap(); + let allowance = rt.policy.minimum_verified_allocation_size.clone(); + h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance).unwrap(); h.check_state(&rt); } @@ -336,7 +333,7 @@ mod clients { expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, - h.add_client(&mut rt, &VERIFIER, &client, &allowance_client, &allowance_client), + h.add_client(&mut rt, &VERIFIER, &client, &allowance_client), ); h.check_state(&rt); } @@ -347,10 +344,10 @@ mod clients { let allowance_verifier = verifier_allowance(&rt); h.add_verifier(&mut rt, &VERIFIER, &allowance_verifier).unwrap(); - let allowance = rt.policy.minimum_verified_deal_size.clone() - 1; + let allowance = rt.policy.minimum_verified_allocation_size.clone() - 1; expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, - h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance), + h.add_client(&mut rt, &VERIFIER, &CLIENT, &allowance), ); h.check_state(&rt); } @@ -385,7 +382,7 @@ mod clients { let allowance = allowance_verifier.clone() + 1; expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, - h.add_client(&mut rt, &VERIFIER, &h.root, &allowance, &allowance), + h.add_client(&mut rt, &VERIFIER, &h.root, &allowance), ); h.check_state(&rt); } @@ -398,7 +395,7 @@ mod clients { h.add_verifier(&mut rt, &VERIFIER, &allowance_verifier).unwrap(); expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, - h.add_client(&mut rt, &VERIFIER, &h.root, 
&allowance_client, &allowance_client), + h.add_client(&mut rt, &VERIFIER, &h.root, &allowance_client), ); h.check_state(&rt); } @@ -411,325 +408,778 @@ mod clients { h.add_verifier(&mut rt, &VERIFIER, &allowance_verifier).unwrap(); expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, - h.add_client(&mut rt, &VERIFIER, &VERIFIER, &allowance_client, &allowance_client), + h.add_client(&mut rt, &VERIFIER, &VERIFIER, &allowance_client), ); + rt.reset(); h.add_verifier(&mut rt, &VERIFIER2, &allowance_verifier).unwrap(); expect_abort( ExitCode::USR_ILLEGAL_ARGUMENT, - h.add_client(&mut rt, &VERIFIER, &VERIFIER2, &allowance_client, &allowance_client), + h.add_client(&mut rt, &VERIFIER, &VERIFIER2, &allowance_client), ); - h.check_state(&rt); } } -mod datacap { - use fvm_ipld_encoding::RawBytes; - use fvm_shared::address::Address; +mod claims { + use fvm_shared::bigint::BigInt; use fvm_shared::error::ExitCode; - use fvm_shared::MethodNum; - - use fil_actor_verifreg::{Actor as VerifregActor, Method, RestoreBytesParams, UseBytesParams}; - use fil_actors_runtime::test_utils::*; - use fil_actors_runtime::{STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR}; - - use crate::*; + use fvm_shared::ActorID; + use num_traits::Zero; + + use fil_actor_verifreg::Claim; + use fil_actor_verifreg::{AllocationID, ClaimTerm, DataCap, ExtendClaimTermsParams, State}; + use fil_actors_runtime::runtime::policy_constants::{ + MAXIMUM_VERIFIED_ALLOCATION_TERM, MINIMUM_VERIFIED_ALLOCATION_SIZE, + MINIMUM_VERIFIED_ALLOCATION_TERM, + }; + use fil_actors_runtime::runtime::Runtime; + use fil_actors_runtime::test_utils::ACCOUNT_ACTOR_CODE_ID; use harness::*; - use util::*; - #[test] - fn consume_multiple_clients() { - let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone() * 10; - - let ca1 = rt.policy.minimum_verified_deal_size.clone() * 3; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &ca1); - let ca2 = 
rt.policy.minimum_verified_deal_size.clone() * 2; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT2, &allowance, &ca2); // FIXME redundant verifier - let ca3 = rt.policy.minimum_verified_deal_size.clone() + 1; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT3, &allowance, &ca3); - - let deal_size = rt.policy.minimum_verified_deal_size.clone(); - h.use_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &(ca1.clone() - &deal_size)); - - h.use_bytes(&mut rt, &CLIENT2, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT2, &(ca2 - &deal_size)); - - // Client 3 had less than minimum balance remaining. - h.use_bytes(&mut rt, &CLIENT3, &deal_size).unwrap(); - h.assert_client_removed(&rt, &CLIENT3); + use crate::*; - // Client 1 uses more bytes. - h.use_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &(ca1.clone() - &deal_size - &deal_size)); - - // Client 2 uses more bytes, exhausting allocation - h.use_bytes(&mut rt, &CLIENT2, &deal_size).unwrap(); - h.assert_client_removed(&rt, &CLIENT2); - h.check_state(&rt); - } + const CLIENT1: ActorID = 101; + const CLIENT2: ActorID = 102; + const PROVIDER1: ActorID = 301; + const PROVIDER2: ActorID = 302; + const ALLOC_SIZE: u64 = MINIMUM_VERIFIED_ALLOCATION_SIZE as u64; #[test] - fn consume_then_fail_exhausted() { + fn expire_allocs() { let (h, mut rt) = new_harness(); - let ve_allowance = rt.policy.minimum_verified_deal_size.clone() * 10; - let cl_allowance = rt.policy.minimum_verified_deal_size.clone() * 2; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &ve_allowance, &cl_allowance); - // Use some allowance. - let deal_size = rt.policy.minimum_verified_deal_size.clone(); - h.use_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - - // Attempt to use more than remaining. 
- let deal_size = rt.policy.minimum_verified_deal_size.clone() + 2; - expect_abort(ExitCode::USR_ILLEGAL_ARGUMENT, h.use_bytes(&mut rt, &CLIENT, &deal_size)); - h.check_state(&rt) + let mut alloc1 = make_alloc("1", CLIENT1, PROVIDER1, ALLOC_SIZE); + alloc1.expiration = 100; + let mut alloc2 = make_alloc("2", CLIENT1, PROVIDER1, ALLOC_SIZE * 2); + alloc2.expiration = 200; + let total_size = alloc1.size.0 + alloc2.size.0; + + let id1 = h.create_alloc(&mut rt, &alloc1).unwrap(); + let id2 = h.create_alloc(&mut rt, &alloc2).unwrap(); + let state_with_allocs: State = rt.get_state(); + + // Can't remove allocations that aren't expired + let ret = h.remove_expired_allocations(&mut rt, CLIENT1, vec![id1, id2], 0).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::USR_FORBIDDEN, ExitCode::USR_FORBIDDEN], ret.results.codes()); + assert_eq!(DataCap::zero(), ret.datacap_recovered); + + // Can't remove with wrong client ID + rt.set_epoch(200); + let ret = h.remove_expired_allocations(&mut rt, CLIENT2, vec![id1, id2], 0).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::USR_NOT_FOUND, ExitCode::USR_NOT_FOUND], ret.results.codes()); + assert_eq!(DataCap::zero(), ret.datacap_recovered); + + // Remove the first alloc, which expired. + rt.set_epoch(100); + let ret = + h.remove_expired_allocations(&mut rt, CLIENT1, vec![id1, id2], alloc1.size.0).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::OK, ExitCode::USR_FORBIDDEN], ret.results.codes()); + assert_eq!(DataCap::from(alloc1.size.0), ret.datacap_recovered); + + // Remove the second alloc (the first is no longer found). 
+ rt.set_epoch(200); + let ret = + h.remove_expired_allocations(&mut rt, CLIENT1, vec![id1, id2], alloc2.size.0).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::USR_NOT_FOUND, ExitCode::OK], ret.results.codes()); + assert_eq!(DataCap::from(alloc2.size.0), ret.datacap_recovered); + + // Reset state and show we can remove two at once. + rt.replace_state(&state_with_allocs); + let ret = + h.remove_expired_allocations(&mut rt, CLIENT1, vec![id1, id2], total_size).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::OK, ExitCode::OK], ret.results.codes()); + assert_eq!(DataCap::from(total_size), ret.datacap_recovered); + + // Reset state and show that only what was asked for is removed. + rt.replace_state(&state_with_allocs); + let ret = h.remove_expired_allocations(&mut rt, CLIENT1, vec![id1], alloc1.size.0).unwrap(); + assert_eq!(vec![1], ret.considered); + assert_eq!(vec![ExitCode::OK], ret.results.codes()); + assert_eq!(DataCap::from(alloc1.size.0), ret.datacap_recovered); + + // Reset state and show that specifying none removes only expired allocations + rt.set_epoch(0); + rt.replace_state(&state_with_allocs); + let ret = h.remove_expired_allocations(&mut rt, CLIENT1, vec![], 0).unwrap(); + assert_eq!(Vec::::new(), ret.considered); + assert_eq!(Vec::::new(), ret.results.codes()); + assert_eq!(DataCap::zero(), ret.datacap_recovered); + assert!(h.load_alloc(&mut rt, CLIENT1, id1).is_some()); + assert!(h.load_alloc(&mut rt, CLIENT1, id2).is_some()); + + rt.set_epoch(100); + let ret = h.remove_expired_allocations(&mut rt, CLIENT1, vec![], alloc1.size.0).unwrap(); + assert_eq!(vec![1], ret.considered); + assert_eq!(vec![ExitCode::OK], ret.results.codes()); + assert_eq!(DataCap::from(alloc1.size.0), ret.datacap_recovered); + assert!(h.load_alloc(&mut rt, CLIENT1, id1).is_none()); // removed + assert!(h.load_alloc(&mut rt, CLIENT1, id2).is_some()); + + rt.set_epoch(200); + let ret = 
h.remove_expired_allocations(&mut rt, CLIENT1, vec![], alloc2.size.0).unwrap(); + assert_eq!(vec![2], ret.considered); + assert_eq!(vec![ExitCode::OK], ret.results.codes()); + assert_eq!(DataCap::from(alloc2.size.0), ret.datacap_recovered); + assert!(h.load_alloc(&mut rt, CLIENT1, id1).is_none()); // removed + assert!(h.load_alloc(&mut rt, CLIENT1, id2).is_none()); // removed + + // Reset state and show that specifying none removes *all* expired allocations + rt.replace_state(&state_with_allocs); + let ret = h.remove_expired_allocations(&mut rt, CLIENT1, vec![], total_size).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::OK, ExitCode::OK], ret.results.codes()); + assert_eq!(DataCap::from(total_size), ret.datacap_recovered); + assert!(h.load_alloc(&mut rt, CLIENT1, id1).is_none()); // removed + assert!(h.load_alloc(&mut rt, CLIENT1, id2).is_none()); // removed } #[test] - fn consume_resolves_client_address() { + fn claim_allocs() { let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone(); - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance); + let size = 128; + let alloc1 = make_alloc("1", CLIENT1, PROVIDER1, size); + let alloc2 = make_alloc("2", CLIENT1, PROVIDER1, size); + let alloc3 = make_alloc("3", CLIENT1, PROVIDER1, size); + + h.create_alloc(&mut rt, &alloc1).unwrap(); + h.create_alloc(&mut rt, &alloc2).unwrap(); + h.create_alloc(&mut rt, &alloc3).unwrap(); + + let sector = 1000; + let ret = h + .claim_allocations( + &mut rt, + PROVIDER1, + vec![ + make_claim_req(1, &alloc1, sector, 1500), + make_claim_req(2, &alloc2, sector, 1500), + make_claim_req(3, &alloc3, sector, 1500), + ], + size * 3, + false, + ) + .unwrap(); - let client_pubkey = Address::new_secp256k1(&[3u8; 65]).unwrap(); - rt.id_addresses.insert(client_pubkey, *CLIENT); - h.use_bytes(&mut rt, &client_pubkey, &allowance).unwrap(); - h.check_state(&rt) + assert_eq!(ret.batch_info.codes(), 
vec![ExitCode::OK, ExitCode::OK, ExitCode::OK]); + assert_eq!(ret.claimed_space, BigInt::from(3 * size)); + + // check that state is as expected + let st: State = rt.get_state(); + let store = rt.store(); + let mut allocs = st.load_allocs(&store).unwrap(); + // allocs deleted + assert!(allocs.get(CLIENT1, 1).unwrap().is_none()); + assert!(allocs.get(CLIENT1, 2).unwrap().is_none()); + assert!(allocs.get(CLIENT1, 3).unwrap().is_none()); + + // claims inserted + let claim1 = claim_from_alloc(&alloc1, 0, sector); + let claim2 = claim_from_alloc(&alloc2, 0, sector); + let claim3 = claim_from_alloc(&alloc3, 0, sector); + assert_claim(&rt, PROVIDER1, 1, &claim1); + assert_claim(&rt, PROVIDER1, 2, &claim2); + assert_claim(&rt, PROVIDER1, 3, &claim3); + + // get claims + //successfully + let succ_gc = h.get_claims(&mut rt, PROVIDER1, vec![1, 2, 3]).unwrap(); + assert_eq!(3, succ_gc.batch_info.success_count); + assert_eq!(claim2, succ_gc.claims[1]); + + // bad provider + let fail_gc = h.get_claims(&mut rt, PROVIDER1 + 42, vec![1, 2, 3]).unwrap(); + assert_eq!(0, fail_gc.batch_info.success_count); + + // mixed bag + let mix_gc = h.get_claims(&mut rt, PROVIDER1, vec![1, 4, 5]).unwrap(); + assert_eq!(1, mix_gc.batch_info.success_count); + assert_eq!(claim1, succ_gc.claims[0]); } #[test] - fn consume_then_fail_removed() { + fn extend_claims_basic() { let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone(); - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance); - - // Use full allowance. - h.use_bytes(&mut rt, &CLIENT, &allowance).unwrap(); - // Fail to use any more because client was removed. 
- expect_abort(ExitCode::USR_NOT_FOUND, h.use_bytes(&mut rt, &CLIENT, &allowance)); - h.check_state(&rt) + let size = 128; + let sector = 0; + let start = 0; + let min_term = MINIMUM_VERIFIED_ALLOCATION_TERM; + let max_term = min_term + 1000; + + let claim1 = make_claim("1", CLIENT1, PROVIDER1, size, min_term, max_term, start, sector); + let claim2 = make_claim("2", CLIENT1, PROVIDER1, size, min_term, max_term, start, sector); + let claim3 = make_claim("3", CLIENT1, PROVIDER2, size, min_term, max_term, start, sector); + + let id1 = h.create_claim(&mut rt, &claim1).unwrap(); + let id2 = h.create_claim(&mut rt, &claim2).unwrap(); + let id3 = h.create_claim(&mut rt, &claim3).unwrap(); + + // Extend claim terms and verify return value. + let params = ExtendClaimTermsParams { + terms: vec![ + ClaimTerm { provider: PROVIDER1, claim_id: id1, term_max: max_term + 1 }, + ClaimTerm { provider: PROVIDER1, claim_id: id2, term_max: max_term + 2 }, + ClaimTerm { provider: PROVIDER2, claim_id: id3, term_max: max_term + 3 }, + ], + }; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, Address::new_id(CLIENT1)); + let ret = h.extend_claim_terms(&mut rt, ¶ms).unwrap(); + assert_eq!(ret.codes(), vec![ExitCode::OK, ExitCode::OK, ExitCode::OK]); + + // Verify state directly. 
+ assert_claim(&rt, PROVIDER1, id1, &Claim { term_max: max_term + 1, ..claim1 }); + assert_claim(&rt, PROVIDER1, id2, &Claim { term_max: max_term + 2, ..claim2 }); + assert_claim(&rt, PROVIDER2, id3, &Claim { term_max: max_term + 3, ..claim3 }); } #[test] - fn consume_requires_market_actor_caller() { + fn extend_claims_edge_cases() { let (h, mut rt) = new_harness(); - rt.expect_validate_caller_addr(vec![STORAGE_MARKET_ACTOR_ADDR]); - rt.set_caller(*POWER_ACTOR_CODE_ID, STORAGE_POWER_ACTOR_ADDR); - let params = UseBytesParams { - address: *CLIENT, - deal_size: rt.policy.minimum_verified_deal_size.clone(), - }; - expect_abort( - ExitCode::USR_FORBIDDEN, - rt.call::( - Method::UseBytes as MethodNum, - &RawBytes::serialize(params).unwrap(), - ), - ); - h.check_state(&rt) + let size = 128; + let sector = 0; + let start = 0; + let min_term = MINIMUM_VERIFIED_ALLOCATION_TERM; + let max_term = min_term + 1000; + + let claim = make_claim("1", CLIENT1, PROVIDER1, size, min_term, max_term, start, sector); + + // Basic success case with no-op extension + { + let claim_id = h.create_claim(&mut rt, &claim).unwrap(); + let params = ExtendClaimTermsParams { + terms: vec![ClaimTerm { provider: PROVIDER1, claim_id, term_max: max_term }], + }; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, Address::new_id(CLIENT1)); + let ret = h.extend_claim_terms(&mut rt, ¶ms).unwrap(); + assert_eq!(ret.codes(), vec![ExitCode::OK]); + rt.verify() + } + // Mismatched client is forbidden + { + let claim_id = h.create_claim(&mut rt, &claim).unwrap(); + let params = ExtendClaimTermsParams { + terms: vec![ClaimTerm { provider: PROVIDER1, claim_id, term_max: max_term }], + }; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, Address::new_id(CLIENT2)); + let ret = h.extend_claim_terms(&mut rt, ¶ms).unwrap(); + assert_eq!(ret.codes(), vec![ExitCode::USR_FORBIDDEN]); + rt.verify() + } + // Mismatched provider is not found + { + let claim_id = h.create_claim(&mut rt, &claim).unwrap(); + let params = ExtendClaimTermsParams { 
+ terms: vec![ClaimTerm { provider: PROVIDER2, claim_id, term_max: max_term }], + }; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, Address::new_id(CLIENT1)); + let ret = h.extend_claim_terms(&mut rt, ¶ms).unwrap(); + assert_eq!(ret.codes(), vec![ExitCode::USR_NOT_FOUND]); + rt.verify() + } + // Term in excess of limit is denied + { + let claim_id = h.create_claim(&mut rt, &claim).unwrap(); + let params = ExtendClaimTermsParams { + terms: vec![ClaimTerm { + provider: PROVIDER1, + claim_id, + term_max: MAXIMUM_VERIFIED_ALLOCATION_TERM + 1, + }], + }; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, Address::new_id(CLIENT1)); + let ret = h.extend_claim_terms(&mut rt, ¶ms).unwrap(); + assert_eq!(ret.codes(), vec![ExitCode::USR_ILLEGAL_ARGUMENT]); + rt.verify() + } + // Reducing term is denied. + { + let claim_id = h.create_claim(&mut rt, &claim).unwrap(); + let params = ExtendClaimTermsParams { + terms: vec![ClaimTerm { provider: PROVIDER1, claim_id, term_max: max_term - 1 }], + }; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, Address::new_id(CLIENT1)); + let ret = h.extend_claim_terms(&mut rt, ¶ms).unwrap(); + assert_eq!(ret.codes(), vec![ExitCode::USR_ILLEGAL_ARGUMENT]); + rt.verify() + } + // Extending an already-expired claim is ok + { + let claim_id = h.create_claim(&mut rt, &claim).unwrap(); + let params = ExtendClaimTermsParams { + terms: vec![ClaimTerm { + provider: PROVIDER1, + claim_id, + term_max: MAXIMUM_VERIFIED_ALLOCATION_TERM, + }], + }; + rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, Address::new_id(CLIENT1)); + rt.set_epoch(max_term + 1); + let ret = h.extend_claim_terms(&mut rt, ¶ms).unwrap(); + assert_eq!(ret.codes(), vec![ExitCode::OK]); + rt.verify() + } } #[test] - fn consume_requires_minimum_deal_size() { + fn expire_claims() { let (h, mut rt) = new_harness(); - let allowance_verifier = verifier_allowance(&rt); - let allowance_client = client_allowance(&rt); - h.add_verifier_and_client( - &mut rt, - &VERIFIER, - &CLIENT, - &allowance_verifier, - &allowance_client, + let 
term_start = 0; + let term_min = MINIMUM_VERIFIED_ALLOCATION_TERM; + let sector = 0; + + // expires at term_start + term_min + 100 + let claim1 = make_claim( + "1", + CLIENT1, + PROVIDER1, + ALLOC_SIZE, + term_min, + term_min + 100, + term_start, + sector, + ); + // expires at term_start + 200 + term_min (i.e. 100 epochs later) + let claim2 = make_claim( + "2", + CLIENT1, + PROVIDER1, + ALLOC_SIZE * 2, + term_min, + term_min, + term_start + 200, + sector, ); - let deal_size = rt.policy.minimum_verified_deal_size.clone() - 1; - expect_abort(ExitCode::USR_ILLEGAL_ARGUMENT, h.use_bytes(&mut rt, &CLIENT, &deal_size)); - h.check_state(&rt) + let id1 = h.create_claim(&mut rt, &claim1).unwrap(); + let id2 = h.create_claim(&mut rt, &claim2).unwrap(); + let state_with_allocs: State = rt.get_state(); + + // Removal of expired claims shares most of its implementation with removing expired allocations. + // The full test suite is not duplicated here, simple ones to ensure that the expiration + // is correctly computed. 
+ + // None expired yet + rt.set_epoch(term_start + term_min + 99); + let ret = h.remove_expired_claims(&mut rt, PROVIDER1, vec![id1, id2]).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::USR_FORBIDDEN, ExitCode::USR_FORBIDDEN], ret.results.codes()); + + // One expired + rt.set_epoch(term_start + term_min + 100); + let ret = h.remove_expired_claims(&mut rt, PROVIDER1, vec![id1, id2]).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::OK, ExitCode::USR_FORBIDDEN], ret.results.codes()); + + // Both now expired + rt.set_epoch(term_start + term_min + 200); + let ret = h.remove_expired_claims(&mut rt, PROVIDER1, vec![id1, id2]).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::USR_NOT_FOUND, ExitCode::OK], ret.results.codes()); + + // Reset state, and show that specifying none removes only expired allocations + rt.set_epoch(term_start + term_min); + rt.replace_state(&state_with_allocs); + let ret = h.remove_expired_claims(&mut rt, PROVIDER1, vec![]).unwrap(); + assert_eq!(Vec::::new(), ret.considered); + assert_eq!(Vec::::new(), ret.results.codes()); + assert!(h.load_claim(&mut rt, PROVIDER1, id1).is_some()); + assert!(h.load_claim(&mut rt, PROVIDER1, id2).is_some()); + + rt.set_epoch(term_start + term_min + 200); + let ret = h.remove_expired_claims(&mut rt, PROVIDER1, vec![]).unwrap(); + assert_eq!(vec![1, 2], ret.considered); + assert_eq!(vec![ExitCode::OK, ExitCode::OK], ret.results.codes()); + assert!(h.load_claim(&mut rt, PROVIDER1, id1).is_none()); // removed + assert!(h.load_claim(&mut rt, PROVIDER1, id2).is_none()); // removed } +} - #[test] - fn consume_requires_client_exists() { - let (h, mut rt) = new_harness(); - let min_deal_size = rt.policy.minimum_verified_deal_size.clone(); - expect_abort(ExitCode::USR_NOT_FOUND, h.use_bytes(&mut rt, &CLIENT, &min_deal_size)); - h.check_state(&rt) - } +mod datacap { + use frc46_token::receiver::types::{UniversalReceiverParams, 
FRC46_TOKEN_TYPE}; + use fvm_ipld_encoding::RawBytes; + use fvm_shared::address::Address; + use fvm_shared::econ::TokenAmount; + use fvm_shared::error::ExitCode; + use fvm_shared::{ActorID, MethodNum}; + + use fil_actor_verifreg::{Actor as VerifregActor, Claim, Method, State}; + use fil_actors_runtime::cbor::serialize; + use fil_actors_runtime::runtime::policy_constants::{ + MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION, MAXIMUM_VERIFIED_ALLOCATION_TERM, + MINIMUM_VERIFIED_ALLOCATION_SIZE, MINIMUM_VERIFIED_ALLOCATION_TERM, + }; + use fil_actors_runtime::test_utils::*; + use fil_actors_runtime::{ + BatchReturn, DATACAP_TOKEN_ACTOR_ADDR, EPOCHS_IN_YEAR, STORAGE_MARKET_ACTOR_ADDR, + }; + use harness::*; - #[test] - fn consume_requires_deal_size_below_allowance() { - let (h, mut rt) = new_harness(); - let allowance_verifier = verifier_allowance(&rt); - let allowance_client = client_allowance(&rt); - h.add_verifier_and_client( - &mut rt, - &VERIFIER, - &CLIENT, - &allowance_verifier, - &allowance_client, - ); + use crate::*; - let deal_size = allowance_client.clone() + 1; - expect_abort(ExitCode::USR_ILLEGAL_ARGUMENT, h.use_bytes(&mut rt, &CLIENT, &deal_size)); - h.check_state(&rt) - } + const CLIENT1: ActorID = 101; + const CLIENT2: ActorID = 102; + const PROVIDER1: ActorID = 301; + const PROVIDER2: ActorID = 302; + const SIZE: u64 = MINIMUM_VERIFIED_ALLOCATION_SIZE as u64; + const BATCH_EMPTY: BatchReturn = BatchReturn::empty(); #[test] - fn restore_multiple_clients() { + fn receive_tokens_make_allocs() { let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone() * 10; - - let ca1 = rt.policy.minimum_verified_deal_size.clone() * 3; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &ca1); - let ca2 = rt.policy.minimum_verified_deal_size.clone() * 2; - h.add_client(&mut rt, &VERIFIER, &CLIENT2, &ca2, &ca2).unwrap(); - let ca3 = rt.policy.minimum_verified_deal_size.clone() + 1; - h.add_client(&mut rt, &VERIFIER, &CLIENT3, 
&ca3, &ca3).unwrap(); - - let deal_size = rt.policy.minimum_verified_deal_size.clone(); - h.restore_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &(ca1.clone() + &deal_size)); - - h.restore_bytes(&mut rt, &CLIENT2, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT2, &(ca2.clone() + &deal_size)); - - h.restore_bytes(&mut rt, &CLIENT3, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT3, &(ca3.clone() + &deal_size)); - - // Clients 1 and 2 now use bytes. - h.use_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &ca1); - - h.use_bytes(&mut rt, &CLIENT2, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT2, &ca2); - - // Restore bytes back to all clients - h.restore_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &(ca1.clone() + &deal_size)); - - h.restore_bytes(&mut rt, &CLIENT2, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT2, &(ca2.clone() + &deal_size)); - - h.restore_bytes(&mut rt, &CLIENT3, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT3, &(ca3.clone() + &deal_size + &deal_size)); + add_miner(&mut rt, PROVIDER1); + add_miner(&mut rt, PROVIDER2); + + { + let reqs = vec![ + make_alloc_req(&rt, PROVIDER1, SIZE), + make_alloc_req(&rt, PROVIDER2, SIZE * 2), + ]; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs.clone(), vec![], SIZE * 3); + h.receive_tokens(&mut rt, payload, BatchReturn::ok(2), BATCH_EMPTY, vec![1, 2]) + .unwrap(); + + // Verify allocations in state. 
+ assert_allocation(&rt, CLIENT1, 1, &alloc_from_req(CLIENT1, &reqs[0])); + assert_allocation(&rt, CLIENT1, 2, &alloc_from_req(CLIENT1, &reqs[1])); + let st: State = rt.get_state(); + assert_eq!(3, st.next_allocation_id); + } + { + // Make another allocation from a different client + let reqs = vec![make_alloc_req(&rt, PROVIDER1, SIZE)]; + let payload = make_receiver_hook_token_payload(CLIENT2, reqs.clone(), vec![], SIZE); + h.receive_tokens(&mut rt, payload, BatchReturn::ok(1), BATCH_EMPTY, vec![3]).unwrap(); + + // Verify allocations in state. + assert_allocation(&rt, CLIENT2, 3, &alloc_from_req(CLIENT2, &reqs[0])); + let st: State = rt.get_state(); + assert_eq!(4, st.next_allocation_id); + } h.check_state(&rt); } #[test] - fn restore_after_reducing_client_cap() { + fn receive_tokens_extend_claims() { let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone() * 2; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance); - - // Use half allowance. - let deal_size = rt.policy.minimum_verified_deal_size.clone(); - h.use_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &rt.policy.minimum_verified_deal_size); - // Restore it. 
- h.restore_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &allowance); - h.check_state(&rt) + let term_min = MINIMUM_VERIFIED_ALLOCATION_TERM; + let term_max = term_min + 100; + let term_start = 100; + let sector = 1234; + let claim1 = + make_claim("1", CLIENT1, PROVIDER1, SIZE, term_min, term_max, term_start, sector); + let claim2 = + make_claim("2", CLIENT2, PROVIDER2, SIZE * 2, term_min, term_max, term_start, sector); + + let cid1 = h.create_claim(&mut rt, &claim1).unwrap(); + let cid2 = h.create_claim(&mut rt, &claim2).unwrap(); + + let reqs = vec![ + make_extension_req(PROVIDER1, cid1, term_max + 1000), + make_extension_req(PROVIDER2, cid2, term_max + 2000), + ]; + // Client1 extends both claims + let payload = make_receiver_hook_token_payload(CLIENT1, vec![], reqs, SIZE * 3); + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BatchReturn::ok(2), vec![]).unwrap(); + + // Verify claims in state. + assert_claim(&rt, PROVIDER1, cid1, &Claim { term_max: term_max + 1000, ..claim1 }); + assert_claim(&rt, PROVIDER2, cid2, &Claim { term_max: term_max + 2000, ..claim2 }); } #[test] - fn restore_resolves_client_address() { + fn receive_tokens_make_alloc_and_extend_claims() { let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone() * 2; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance); + add_miner(&mut rt, PROVIDER1); + add_miner(&mut rt, PROVIDER2); + + let alloc_reqs = + vec![make_alloc_req(&rt, PROVIDER1, SIZE), make_alloc_req(&rt, PROVIDER2, SIZE * 2)]; + + let term_min = MINIMUM_VERIFIED_ALLOCATION_TERM; + let term_max = term_min + 100; + let term_start = 100; + let sector = 1234; + let claim1 = + make_claim("1", CLIENT1, PROVIDER1, SIZE, term_min, term_max, term_start, sector); + let claim2 = + make_claim("2", CLIENT2, PROVIDER2, SIZE * 2, term_min, term_max, term_start, sector); + let cid1 = h.create_claim(&mut rt, &claim1).unwrap(); + let cid2 = 
h.create_claim(&mut rt, &claim2).unwrap(); + + let ext_reqs = vec![ + make_extension_req(PROVIDER1, cid1, term_max + 1000), + make_extension_req(PROVIDER2, cid2, term_max + 2000), + ]; + + // CLIENT1 makes two new allocations and extends two existing claims. + let payload = + make_receiver_hook_token_payload(CLIENT1, alloc_reqs.clone(), ext_reqs, SIZE * 6); + h.receive_tokens(&mut rt, payload, BatchReturn::ok(2), BatchReturn::ok(2), vec![3, 4]) + .unwrap(); - // Use half allowance. - let deal_size = rt.policy.minimum_verified_deal_size.clone(); - h.use_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &rt.policy.minimum_verified_deal_size); + // Verify state. + assert_allocation(&rt, CLIENT1, 3, &alloc_from_req(CLIENT1, &alloc_reqs[0])); + assert_allocation(&rt, CLIENT1, 4, &alloc_from_req(CLIENT1, &alloc_reqs[1])); + assert_claim(&rt, PROVIDER1, cid1, &Claim { term_max: term_max + 1000, ..claim1 }); + assert_claim(&rt, PROVIDER2, cid2, &Claim { term_max: term_max + 2000, ..claim2 }); - let client_pubkey = Address::new_secp256k1(&[3u8; 65]).unwrap(); - rt.id_addresses.insert(client_pubkey, *CLIENT); - - // Restore to pubkey address. - h.restore_bytes(&mut rt, &client_pubkey, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &allowance); - h.check_state(&rt) + let st: State = rt.get_state(); + assert_eq!(5, st.next_allocation_id); } #[test] - fn restore_after_removing_client() { + fn receive_requires_datacap_caller() { let (h, mut rt) = new_harness(); - let allowance = rt.policy.minimum_verified_deal_size.clone() + 1; - h.add_verifier_and_client(&mut rt, &VERIFIER, &CLIENT, &allowance, &allowance); - - // Use allowance. 
- let deal_size = rt.policy.minimum_verified_deal_size.clone(); - h.use_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_removed(&rt, &CLIENT); + add_miner(&mut rt, PROVIDER1); + + let params = UniversalReceiverParams { + type_: FRC46_TOKEN_TYPE, + payload: serialize( + &make_receiver_hook_token_payload( + CLIENT1, + vec![make_alloc_req(&rt, PROVIDER1, SIZE)], + vec![], + SIZE, + ), + "payload", + ) + .unwrap(), + }; - // Restore it. Client has only the restored bytes (lost the +1 in original allowance). - h.restore_bytes(&mut rt, &CLIENT, &deal_size).unwrap(); - h.assert_client_allowance(&rt, &CLIENT, &deal_size); - h.check_state(&rt) + rt.set_caller(*MARKET_ACTOR_CODE_ID, STORAGE_MARKET_ACTOR_ADDR); // Wrong caller + rt.expect_validate_caller_addr(vec![DATACAP_TOKEN_ACTOR_ADDR]); + expect_abort_contains_message( + ExitCode::USR_FORBIDDEN, + "caller address", + rt.call::( + Method::UniversalReceiverHook as MethodNum, + &RawBytes::serialize(¶ms).unwrap(), + ), + ); + rt.verify(); + h.check_state(&rt); } #[test] - fn restore_requires_market_actor_caller() { + fn receive_requires_to_self() { let (h, mut rt) = new_harness(); - rt.expect_validate_caller_addr(vec![STORAGE_MARKET_ACTOR_ADDR]); - rt.set_caller(*POWER_ACTOR_CODE_ID, STORAGE_POWER_ACTOR_ADDR); - let params = RestoreBytesParams { - address: *CLIENT, - deal_size: rt.policy.minimum_verified_deal_size.clone(), + add_miner(&mut rt, PROVIDER1); + + let mut payload = make_receiver_hook_token_payload( + CLIENT1, + vec![make_alloc_req(&rt, PROVIDER1, SIZE)], + vec![], + SIZE, + ); + // Set invalid receiver hook "to" address (should be the verified registry itself). 
+ payload.to = PROVIDER1; + let params = UniversalReceiverParams { + type_: FRC46_TOKEN_TYPE, + payload: serialize(&payload, "payload").unwrap(), }; - expect_abort( - ExitCode::USR_FORBIDDEN, + + rt.set_caller(*DATACAP_TOKEN_ACTOR_CODE_ID, DATACAP_TOKEN_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![DATACAP_TOKEN_ACTOR_ADDR]); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "token receiver expected to", rt.call::( - Method::RestoreBytes as MethodNum, - &RawBytes::serialize(params).unwrap(), + Method::UniversalReceiverHook as MethodNum, + &RawBytes::serialize(¶ms).unwrap(), ), ); - h.check_state(&rt) + rt.verify(); + h.check_state(&rt); } #[test] - fn restore_requires_minimum_deal_size() { + fn receive_alloc_requires_miner_actor() { let (h, mut rt) = new_harness(); - let allowance_verifier = verifier_allowance(&rt); - let allowance_client = client_allowance(&rt); - h.add_verifier_and_client( - &mut rt, - &VERIFIER, - &CLIENT, - &allowance_verifier, - &allowance_client, - ); + let provider1 = Address::new_id(PROVIDER1); + rt.set_address_actor_type(provider1, *ACCOUNT_ACTOR_CODE_ID); - let deal_size = rt.policy.minimum_verified_deal_size.clone() - 1; - expect_abort(ExitCode::USR_ILLEGAL_ARGUMENT, h.restore_bytes(&mut rt, &CLIENT, &deal_size)); - h.check_state(&rt) + let reqs = vec![make_alloc_req(&rt, PROVIDER1, SIZE)]; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs, vec![], SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + format!("allocation provider {} must be a miner actor", provider1).as_str(), + h.receive_tokens(&mut rt, payload, BatchReturn::ok(1), BATCH_EMPTY, vec![1]), + ); + h.check_state(&rt); } #[test] - fn restore_rejects_root() { + fn receive_invalid_alloc_reqs() { let (h, mut rt) = new_harness(); - let deal_size = rt.policy.minimum_verified_deal_size.clone(); - expect_abort( - ExitCode::USR_ILLEGAL_ARGUMENT, - h.restore_bytes(&mut rt, &ROOT_ADDR, &deal_size), - ); - h.check_state(&rt) + 
add_miner(&mut rt, PROVIDER1); + add_miner(&mut rt, PROVIDER2); + + // Alloc too small + { + let reqs = vec![make_alloc_req(&rt, PROVIDER1, SIZE - 1)]; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs, vec![], SIZE - 1); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "allocation size 1048575 below minimum 1048576", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + } + // Min term too short + { + let mut reqs = vec![make_alloc_req(&rt, PROVIDER1, SIZE)]; + reqs[0].term_min = MINIMUM_VERIFIED_ALLOCATION_TERM - 1; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs, vec![], SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "allocation term min 518399 below limit 518400", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + } + // Max term too long + { + let mut reqs = vec![make_alloc_req(&rt, PROVIDER1, SIZE)]; + reqs[0].term_max = MAXIMUM_VERIFIED_ALLOCATION_TERM + 1; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs, vec![], SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "allocation term max 5259486 above limit 5259485", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + } + // Term minimum greater than maximum + { + let mut reqs = vec![make_alloc_req(&rt, PROVIDER1, SIZE)]; + reqs[0].term_max = 2 * EPOCHS_IN_YEAR; + reqs[0].term_min = reqs[0].term_max + 1; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs, vec![], SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "allocation term min 2103795 exceeds term max 2103794", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + } + // Allocation expires too late + { + let mut reqs = vec![make_alloc_req(&rt, PROVIDER1, SIZE)]; + reqs[0].expiration = rt.epoch + MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION + 1; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs, 
vec![], SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "allocation expiration 172801 exceeds maximum 172800", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + } + // Tokens received doesn't match sum of allocation sizes + { + let reqs = + vec![make_alloc_req(&rt, PROVIDER1, SIZE), make_alloc_req(&rt, PROVIDER2, SIZE)]; + let payload = make_receiver_hook_token_payload(CLIENT1, reqs, vec![], SIZE * 2 + 1); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "total allocation size 2097152 must match data cap amount received 2097153", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + } + // One bad request fails the lot + { + let reqs = vec![ + make_alloc_req(&rt, PROVIDER1, SIZE), + make_alloc_req(&rt, PROVIDER2, SIZE - 1), + ]; + let mut payload = make_receiver_hook_token_payload(CLIENT1, reqs, vec![], SIZE * 2 - 1); + payload.amount = TokenAmount::from_whole((SIZE * 2 - 1) as i64); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "allocation size 1048575 below minimum 1048576", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + } + h.check_state(&rt); } #[test] - fn restore_rejects_verifier() { + fn receive_invalid_extension_reqs() { let (h, mut rt) = new_harness(); - let allowance = verifier_allowance(&rt); - h.add_verifier(&mut rt, &VERIFIER, &allowance).unwrap(); - let deal_size = rt.policy.minimum_verified_deal_size.clone(); - expect_abort( - ExitCode::USR_ILLEGAL_ARGUMENT, - h.restore_bytes(&mut rt, &VERIFIER, &deal_size), - ); - h.check_state(&rt) + + let term_min = MINIMUM_VERIFIED_ALLOCATION_TERM; + let term_max = term_min + 100; + let term_start = 100; + let sector = 1234; + let claim1 = + make_claim("1", CLIENT1, PROVIDER1, SIZE, term_min, term_max, term_start, sector); + + let cid1 = h.create_claim(&mut rt, &claim1).unwrap(); + let st: State = rt.get_state(); + + // Extension too long + { + 
rt.replace_state(&st); + let epoch = term_start + 1000; + rt.set_epoch(epoch); + let max_allowed_term = epoch - term_start + MAXIMUM_VERIFIED_ALLOCATION_TERM; + let reqs = vec![make_extension_req(PROVIDER1, cid1, max_allowed_term + 1)]; + let payload = make_receiver_hook_token_payload(CLIENT1, vec![], reqs, SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "term_max 5260486 for claim 1 exceeds maximum 5260485 at current epoch 1100", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + // But just on the limit is allowed + let reqs = vec![make_extension_req(PROVIDER1, cid1, max_allowed_term)]; + let payload = make_receiver_hook_token_payload(CLIENT1, vec![], reqs, SIZE); + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BatchReturn::ok(1), vec![]).unwrap(); + } + { + // Claim already expired + rt.replace_state(&st); + let epoch = term_start + term_max + 1; + let new_term = epoch - term_start + MINIMUM_VERIFIED_ALLOCATION_TERM; + rt.set_epoch(epoch); + let reqs = vec![make_extension_req(PROVIDER1, cid1, new_term)]; + let payload = make_receiver_hook_token_payload(CLIENT1, vec![], reqs, SIZE); + expect_abort_contains_message( + ExitCode::USR_FORBIDDEN, + "claim 1 expired at 518600, current epoch 518601", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + // But just at expiration is allowed + let epoch = term_start + term_max; + let new_term = epoch - term_start + MAXIMUM_VERIFIED_ALLOCATION_TERM; // Can get full max term now + rt.set_epoch(epoch); + let reqs = vec![make_extension_req(PROVIDER1, cid1, new_term)]; + let payload = make_receiver_hook_token_payload(CLIENT1, vec![], reqs, SIZE); + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BatchReturn::ok(1), vec![]).unwrap(); + } + { + // Extension is zero + rt.replace_state(&st); + rt.set_epoch(term_start + 100); + let reqs = vec![make_extension_req(PROVIDER1, cid1, term_max)]; + let payload = make_receiver_hook_token_payload(CLIENT1, 
vec![], reqs, SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "term_max 518500 for claim 1 is not larger than existing term max 518500", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + // Extension is negative + let reqs = vec![make_extension_req(PROVIDER1, cid1, term_max - 1)]; + let payload = make_receiver_hook_token_payload(CLIENT1, vec![], reqs, SIZE); + expect_abort_contains_message( + ExitCode::USR_ILLEGAL_ARGUMENT, + "term_max 518499 for claim 1 is not larger than existing term max 518500", + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BATCH_EMPTY, vec![]), + ); + // But extension by just 1 epoch is allowed + let reqs = vec![make_extension_req(PROVIDER1, cid1, term_max + 1)]; + let payload = make_receiver_hook_token_payload(CLIENT1, vec![], reqs, SIZE); + h.receive_tokens(&mut rt, payload, BATCH_EMPTY, BatchReturn::ok(1), vec![]).unwrap(); + } } } diff --git a/build.rs b/build.rs index 7c78734d9..056a0beb2 100644 --- a/build.rs +++ b/build.rs @@ -25,6 +25,7 @@ const ACTORS: &[(&Package, &ID)] = &[ ("multisig", "multisig"), ("reward", "reward"), ("verifreg", "verifiedregistry"), + ("datacap", "datacap"), ]; const NETWORK_ENV: &str = "BUILD_FIL_NETWORK"; diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 1ddc5b1c6..bb5d2bf8b 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -31,6 +31,7 @@ fvm_sdk = { version = "2.0.0-alpha.3", optional = true } blake2b_simd = "1.0" fvm_ipld_blockstore = "0.1.1" fvm_ipld_encoding = "0.2.2" +fvm_ipld_bitfield = "0.5.2" multihash = { version = "0.16.1", default-features = false } rand = "0.8.5" serde_repr = "0.1.8" diff --git a/runtime/src/builtin/singletons.rs b/runtime/src/builtin/singletons.rs index 8c896e976..458996b87 100644 --- a/runtime/src/builtin/singletons.rs +++ b/runtime/src/builtin/singletons.rs @@ -24,6 +24,7 @@ define_singletons! 
{ STORAGE_POWER_ACTOR = 4, STORAGE_MARKET_ACTOR = 5, VERIFIED_REGISTRY_ACTOR = 6, + DATACAP_TOKEN_ACTOR = 7, CHAOS_ACTOR = 98, BURNT_FUNDS_ACTOR = 99, } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 7615a090b..152d59baa 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -6,6 +6,7 @@ use cid::Cid; use fvm_ipld_amt::Amt; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_hamt::{BytesKey, Error as HamtError, Hamt}; +use fvm_shared::address::Address; use fvm_shared::bigint::BigInt; pub use fvm_shared::BLOCKS_PER_EPOCH as EXPECTED_LEADERS_PER_EPOCH; use serde::de::DeserializeOwned; @@ -104,3 +105,19 @@ pub fn parse_uint_key(s: &[u8]) -> Result { let (v, _) = unsigned_varint::decode::u64(s)?; Ok(v) } + +pub trait Keyer { + fn key(&self) -> BytesKey; +} + +impl Keyer for Address { + fn key(&self) -> BytesKey { + self.to_bytes().into() + } +} + +impl Keyer for u64 { + fn key(&self) -> BytesKey { + u64_key(*self) + } +} diff --git a/runtime/src/runtime/builtins.rs b/runtime/src/runtime/builtins.rs index 6a0059268..abc97cd00 100644 --- a/runtime/src/runtime/builtins.rs +++ b/runtime/src/runtime/builtins.rs @@ -19,6 +19,7 @@ pub enum Type { Multisig = 9, Reward = 10, VerifiedRegistry = 11, + DataCap = 12, } impl Type { @@ -35,6 +36,7 @@ impl Type { Type::Multisig => "multisig", Type::Reward => "reward", Type::VerifiedRegistry => "verifiedregistry", + Type::DataCap => "datacap", } } } diff --git a/runtime/src/runtime/policy.rs b/runtime/src/runtime/policy.rs index 73b29f1c8..5559433b0 100644 --- a/runtime/src/runtime/policy.rs +++ b/runtime/src/runtime/policy.rs @@ -138,7 +138,16 @@ pub struct Policy { // --- verifreg policy /// Minimum verified deal size #[serde(with = "bigint_ser")] - pub minimum_verified_deal_size: StoragePower, + pub minimum_verified_allocation_size: StoragePower, + /// Minimum term for a verified data allocation (epochs) + pub minimum_verified_allocation_term: i64, + /// Maximum term for a verified data allocaion (epochs) + pub 
maximum_verified_allocation_term: i64, + /// Maximum time a verified allocation can be active without being claimed (epochs). + /// Supports recovery of erroneous allocations and prevents indefinite squatting on datacap. + pub maximum_verified_allocation_expiration: i64, + // Period of time at the end of a sector's life during which claims can be dropped + pub end_of_life_claim_drop_period: ChainEpoch, // --- market policy --- /// The number of blocks between payouts for deals @@ -152,6 +161,10 @@ pub struct Policy { /// supply that must be covered by provider collateral pub prov_collateral_percent_supply_denom: i64, + /// The default duration after a verified deal's nominal term to set for the corresponding + /// allocation's maximum term. + pub market_default_allocation_term_buffer: i64, + // --- power --- /// Minimum miner consensus power #[serde(with = "bigint_ser")] @@ -221,16 +234,22 @@ impl Default for Policy { RegisteredSealProof::StackedDRG64GiBV1P1, ]), - minimum_verified_deal_size: StoragePower::from_i32( - policy_constants::MINIMUM_VERIFIED_DEAL_SIZE, + minimum_verified_allocation_size: StoragePower::from_i32( + policy_constants::MINIMUM_VERIFIED_ALLOCATION_SIZE, ) .unwrap(), - + minimum_verified_allocation_term: policy_constants::MINIMUM_VERIFIED_ALLOCATION_TERM, + maximum_verified_allocation_term: policy_constants::MAXIMUM_VERIFIED_ALLOCATION_TERM, + maximum_verified_allocation_expiration: + policy_constants::MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION, + end_of_life_claim_drop_period: policy_constants::END_OF_LIFE_CLAIM_DROP_PERIOD, deal_updates_interval: policy_constants::DEAL_UPDATES_INTERVAL, prov_collateral_percent_supply_num: policy_constants::PROV_COLLATERAL_PERCENT_SUPPLY_NUM, prov_collateral_percent_supply_denom: policy_constants::PROV_COLLATERAL_PERCENT_SUPPLY_DENOM, + market_default_allocation_term_buffer: + policy_constants::MARKET_DEFAULT_ALLOCATION_TERM_BUFFER, minimum_consensus_power: 
StoragePower::from(policy_constants::MINIMUM_CONSENSUS_POWER), } @@ -238,10 +257,11 @@ impl Default for Policy { } pub mod policy_constants { - use crate::builtin::*; use fvm_shared::clock::ChainEpoch; use fvm_shared::clock::EPOCH_DURATION_SECONDS; + use crate::builtin::*; + /// Maximum amount of sectors that can be aggregated. pub const MAX_AGGREGATED_SECTORS: u64 = 819; /// Minimum amount of sectors that can be aggregated. @@ -363,9 +383,13 @@ pub mod policy_constants { pub const CHAIN_FINALITY: ChainEpoch = 900; #[cfg(not(feature = "small-deals"))] - pub const MINIMUM_VERIFIED_DEAL_SIZE: i32 = 1 << 20; + pub const MINIMUM_VERIFIED_ALLOCATION_SIZE: i32 = 1 << 20; #[cfg(feature = "small-deals")] - pub const MINIMUM_VERIFIED_DEAL_SIZE: i32 = 256; + pub const MINIMUM_VERIFIED_ALLOCATION_SIZE: i32 = 256; + pub const MINIMUM_VERIFIED_ALLOCATION_TERM: i64 = 180 * EPOCHS_IN_DAY; + pub const MAXIMUM_VERIFIED_ALLOCATION_TERM: i64 = 5 * EPOCHS_IN_YEAR; + pub const MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION: i64 = 60 * EPOCHS_IN_DAY; + pub const END_OF_LIFE_CLAIM_DROP_PERIOD: ChainEpoch = 30 * EPOCHS_IN_DAY; /// DealUpdatesInterval is the number of blocks between payouts for deals pub const DEAL_UPDATES_INTERVAL: i64 = EPOCHS_IN_DAY; @@ -381,6 +405,8 @@ pub mod policy_constants { /// supply that must be covered by provider collateral pub const PROV_COLLATERAL_PERCENT_SUPPLY_DENOM: i64 = 100; + pub const MARKET_DEFAULT_ALLOCATION_TERM_BUFFER: i64 = 90 * EPOCHS_IN_DAY; + #[cfg(feature = "min-power-2k")] pub const MINIMUM_CONSENSUS_POWER: i64 = 2 << 10; #[cfg(feature = "min-power-2g")] diff --git a/runtime/src/test_utils.rs b/runtime/src/test_utils.rs index ab96c13d3..d83912ce1 100644 --- a/runtime/src/test_utils.rs +++ b/runtime/src/test_utils.rs @@ -53,6 +53,7 @@ lazy_static::lazy_static! 
{ pub static ref MULTISIG_ACTOR_CODE_ID: Cid = make_builtin(b"fil/test/multisig"); pub static ref REWARD_ACTOR_CODE_ID: Cid = make_builtin(b"fil/test/reward"); pub static ref VERIFREG_ACTOR_CODE_ID: Cid = make_builtin(b"fil/test/verifiedregistry"); + pub static ref DATACAP_TOKEN_ACTOR_CODE_ID: Cid = make_builtin(b"fil/test/datacap"); pub static ref ACTOR_TYPES: BTreeMap = { let mut map = BTreeMap::new(); map.insert(*SYSTEM_ACTOR_CODE_ID, Type::System); @@ -66,6 +67,7 @@ lazy_static::lazy_static! { map.insert(*MULTISIG_ACTOR_CODE_ID, Type::Multisig); map.insert(*REWARD_ACTOR_CODE_ID, Type::Reward); map.insert(*VERIFREG_ACTOR_CODE_ID, Type::VerifiedRegistry); + map.insert(*DATACAP_TOKEN_ACTOR_CODE_ID, Type::DataCap); map }; pub static ref ACTOR_CODES: BTreeMap = [ @@ -80,6 +82,7 @@ lazy_static::lazy_static! { (Type::Multisig, *MULTISIG_ACTOR_CODE_ID), (Type::Reward, *REWARD_ACTOR_CODE_ID), (Type::VerifiedRegistry, *VERIFREG_ACTOR_CODE_ID), + (Type::DataCap, *DATACAP_TOKEN_ACTOR_CODE_ID), ] .into_iter() .collect(); @@ -895,7 +898,7 @@ impl Runtime> for MockRuntime { assert!( !self.expectations.borrow_mut().expect_sends.is_empty(), - "unexpected expectedMessage to: {:?} method: {:?}, value: {:?}, params: {:?}", + "unexpected message to: {:?} method: {:?}, value: {:?}, params: {:?}", to, method, value, @@ -909,9 +912,9 @@ impl Runtime> for MockRuntime { && expected_msg.method == method && expected_msg.params == params && expected_msg.value == value, - "expectedMessage being sent does not match expectation.\n\ - Message - to: {:?}, method: {:?}, value: {:?}, params: {:?}\n\ - Expected - to: {:?}, method: {:?}, value: {:?}, params: {:?}", + "message sent does not match expectation.\n\ + message - to: {:?}, method: {:?}, value: {:?}, params: {:?}\n\ + expected - to: {:?}, method: {:?}, value: {:?}, params: {:?}", to, method, value, diff --git a/runtime/src/util/batch_return.rs b/runtime/src/util/batch_return.rs new file mode 100644 index 000000000..1170669f7 --- /dev/null 
+++ b/runtime/src/util/batch_return.rs @@ -0,0 +1,123 @@ +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::Cbor; +use fvm_shared::error::ExitCode; +use std::fmt; + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] +pub struct FailCode { + pub idx: u32, + pub code: ExitCode, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, PartialEq, Eq, Debug)] +pub struct BatchReturn { + // Total successes in batch + pub success_count: u32, + // Failure code and index for each failure in batch + pub fail_codes: Vec, +} + +impl BatchReturn { + pub const fn empty() -> Self { + Self { success_count: 0, fail_codes: Vec::new() } + } + + pub const fn ok(n: u32) -> Self { + Self { success_count: n, fail_codes: Vec::new() } + } + + pub fn size(&self) -> usize { + self.success_count as usize + self.fail_codes.len() + } + + pub fn all_ok(&self) -> bool { + self.fail_codes.is_empty() + } + + // Returns a vector of exit codes for each item (including successes). + pub fn codes(&self) -> Vec { + let mut ret = Vec::new(); + + for fail in &self.fail_codes { + for _ in ret.len()..fail.idx as usize { + ret.push(ExitCode::OK) + } + ret.push(fail.code) + } + for _ in ret.len()..self.size() { + ret.push(ExitCode::OK) + } + ret + } + + // Returns a subset of items corresponding to the successful indices. + // Panics if `items` is not the same length as this batch return. 
+ pub fn successes(&self, items: &[T]) -> Vec { + if items.len() != self.size() { + panic!("items length {} does not match batch size {}", items.len(), self.size()); + } + let mut ret = Vec::new(); + let mut fail_idx = 0; + for (idx, item) in items.iter().enumerate() { + if fail_idx < self.fail_codes.len() && idx == self.fail_codes[fail_idx].idx as usize { + fail_idx += 1; + } else { + ret.push(*item) + } + } + ret + } +} + +impl fmt::Display for BatchReturn { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let succ_str = format!("Batch successes {} / {}", self.success_count, self.size()); + if self.all_ok() { + return f.write_str(&succ_str); + } + let mut ret = format!("{}, Batch failing: [", succ_str); + let mut strs = Vec::new(); + for fail in &self.fail_codes { + strs.push(format!("code={} at idx={}", fail.code, fail.idx)) + } + let fail_str = strs.join(", "); + ret.push_str(&fail_str); + ret.push(']'); + f.write_str(&ret) + } +} + +impl Cbor for BatchReturn {} + +pub struct BatchReturnGen { + success_count: usize, + fail_codes: Vec, + + // gen will only work if it has processed all of the expected batch + expect_count: usize, +} + +impl BatchReturnGen { + pub fn new(expect_count: usize) -> Self { + BatchReturnGen { success_count: 0, fail_codes: Vec::new(), expect_count } + } + + pub fn add_success(&mut self) -> &mut Self { + self.success_count += 1; + self + } + + pub fn add_fail(&mut self, code: ExitCode) -> &mut Self { + self.fail_codes + .push(FailCode { idx: (self.success_count + self.fail_codes.len()) as u32, code }); + self + } + + pub fn gen(&self) -> BatchReturn { + assert_eq!(self.expect_count, self.success_count + self.fail_codes.len(), "programmer error, mismatched batch size {} and processed count {} batch return must include success/fail for all inputs", self.expect_count, self.success_count + self.fail_codes.len()); + BatchReturn { + success_count: self.success_count as u32, + fail_codes: self.fail_codes.clone(), + } + } +} diff --git 
a/runtime/src/util/mapmap.rs b/runtime/src/util/mapmap.rs new file mode 100644 index 000000000..5689be059 --- /dev/null +++ b/runtime/src/util/mapmap.rs @@ -0,0 +1,159 @@ +use crate::{make_empty_map, make_map_with_root_and_bitwidth, Keyer, Map}; +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_hamt::{BytesKey, Error}; +use serde::de::DeserializeOwned; +use serde::Serialize; +use serde::__private::PhantomData; +use std::collections::btree_map::Entry::{Occupied, Vacant}; +use std::collections::BTreeMap; + +// MapMap stores multiple values per key in a Hamt of Hamts +// Every element stored has a primary and secondary key +pub struct MapMap<'a, BS, V, K1, K2> { + outer: Map<'a, BS, Cid>, + inner_bitwidth: u32, + // cache all inner maps loaded since last load/flush + // get/put/remove operations load the inner map into the cache first and modify in memory + // flush writes all inner maps in the cache to the outer map before flushing the outer map + cache: BTreeMap, Map<'a, BS, V>>, + key_types: PhantomData<(K1, K2)>, +} +impl<'a, BS, V, K1, K2> MapMap<'a, BS, V, K1, K2> +where + BS: Blockstore, + V: Serialize + DeserializeOwned + Clone + std::cmp::PartialEq, + K1: Keyer + std::fmt::Debug + std::fmt::Display, + K2: Keyer + std::fmt::Debug + std::fmt::Display, +{ + pub fn new(bs: &'a BS, outer_bitwidth: u32, inner_bitwidth: u32) -> Self { + MapMap { + outer: make_empty_map(bs, outer_bitwidth), + inner_bitwidth, + cache: BTreeMap::, Map>::new(), + key_types: PhantomData, + } + } + + pub fn from_root( + bs: &'a BS, + cid: &Cid, + outer_bitwidth: u32, + inner_bitwidth: u32, + ) -> Result { + Ok(MapMap { + outer: make_map_with_root_and_bitwidth(cid, bs, outer_bitwidth)?, + inner_bitwidth, + cache: BTreeMap::, Map>::new(), + key_types: PhantomData, + }) + } + + pub fn flush(&mut self) -> Result { + for (k, in_map) in self.cache.iter_mut() { + if in_map.is_empty() { + self.outer.delete(&BytesKey(k.to_vec()))?; + } else { + let new_in_root = in_map.flush()?; + 
self.outer.set(BytesKey(k.to_vec()), new_in_root)?; + } + } + self.outer.flush() + } + + // load inner map while memoizing + // 1. ensure inner map is loaded into cache + // 2. return (inner map is empty, inner map) + fn load_inner_map(&mut self, k: K1) -> Result<(bool, &mut Map<'a, BS, V>), Error> { + let in_map_thunk = || -> Result<(bool, Map), Error> { + // lazy to avoid ipld operations in case of cache hit + match self.outer.get(&k.key())? { + // flush semantics guarantee all written inner maps are non empty + Some(root) => Ok(( + false, + make_map_with_root_and_bitwidth::( + root, + *self.outer.store(), + self.inner_bitwidth, + )?, + )), + None => Ok((true, make_empty_map(*self.outer.store(), self.inner_bitwidth))), + } + }; + let raw_k = k.key().0; + match self.cache.entry(raw_k) { + Occupied(entry) => { + let in_map = entry.into_mut(); + // cached map could be empty + Ok((in_map.is_empty(), in_map)) + } + Vacant(entry) => { + let (empty, in_map) = in_map_thunk()?; + Ok((empty, entry.insert(in_map))) + } + } + } + + pub fn get(&mut self, outside_k: K1, inside_k: K2) -> Result, Error> { + let (is_empty, in_map) = self.load_inner_map(outside_k)?; + if is_empty { + return Ok(None); + } + in_map.get(&inside_k.key()) + } + + // Runs a function over all values for one outer key. + pub fn for_each(&mut self, outside_k: K1, f: F) -> Result<(), Error> + where + F: FnMut(&BytesKey, &V) -> anyhow::Result<()>, + { + let (is_empty, in_map) = self.load_inner_map(outside_k)?; + if is_empty { + return Ok(()); + } + in_map.for_each(f) + } + + // Puts a key value pair in the MapMap, overwriting any existing value. + // Returns the previous value, if any. + pub fn put(&mut self, outside_k: K1, inside_k: K2, value: V) -> Result, Error> { + let in_map = self.load_inner_map(outside_k)?.1; + // defer flushing cached inner map until flush call + in_map.set(inside_k.key(), value) + } + + // Puts a key value pair in the MapMap if it is not already set. 
Returns true + // if key is newly set, false if it was already set. + pub fn put_if_absent(&mut self, outside_k: K1, inside_k: K2, value: V) -> Result { + let in_map = self.load_inner_map(outside_k)?.1; + + // defer flushing cached inner map until flush call + in_map.set_if_absent(inside_k.key(), value) + } + + // Puts many values in the MapMap under a single outside key. + // Overwrites any existing values. + pub fn put_many(&mut self, outside_k: K1, values: I) -> Result<(), Error> + where + I: Iterator, + { + let in_map = self.load_inner_map(outside_k)?.1; + for (k, v) in values { + in_map.set(k.key(), v)?; + } + // defer flushing cached inner map until flush call + Ok(()) + } + + /// Removes a key from the MapMap, returning the value at the key if the key + /// was previously set. + pub fn remove(&mut self, outside_k: K1, inside_k: K2) -> Result, Error> { + let (is_empty, in_map) = self.load_inner_map(outside_k)?; + if is_empty { + return Ok(None); + } + in_map + .delete(&inside_k.key()) + .map(|o: Option<(BytesKey, V)>| -> Option { o.map(|p: (BytesKey, V)| -> V { p.1 }) }) + } +} diff --git a/runtime/src/util/mod.rs b/runtime/src/util/mod.rs index d4928b181..2dadacd35 100644 --- a/runtime/src/util/mod.rs +++ b/runtime/src/util/mod.rs @@ -1,15 +1,21 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT +pub use self::batch_return::BatchReturn; +pub use self::batch_return::BatchReturnGen; +pub use self::batch_return::FailCode; pub use self::downcast::*; +pub use self::mapmap::MapMap; pub use self::message_accumulator::MessageAccumulator; pub use self::multimap::*; pub use self::set::Set; pub use self::set_multimap::SetMultimap; +mod batch_return; pub mod cbor; pub mod chaos; mod downcast; +mod mapmap; mod message_accumulator; mod multimap; mod set; diff --git a/state/Cargo.toml b/state/Cargo.toml index 17494aea7..5e0cc2041 100644 --- a/state/Cargo.toml +++ b/state/Cargo.toml @@ -16,6 +16,7 @@ crate-type = ["cdylib", "lib"] 
[dependencies] fil_actor_account = { version = "9.0.0-alpha.1", path = "../actors/account"} fil_actor_verifreg = { version = "9.0.0-alpha.1", path = "../actors/verifreg"} +fil_actor_datacap = { version = "9.0.0-alpha.1", path = "../actors/datacap"} fil_actor_cron = { version = "9.0.0-alpha.1", path = "../actors/cron"} fil_actor_market = { version = "9.0.0-alpha.1", path = "../actors/market"} fil_actor_multisig = { version = "9.0.0-alpha.1", path = "../actors/multisig"} diff --git a/state/src/check.rs b/state/src/check.rs index 0649e083a..349a0c67c 100644 --- a/state/src/check.rs +++ b/state/src/check.rs @@ -6,6 +6,7 @@ use bimap::BiBTreeMap; use cid::Cid; use fil_actor_account::State as AccountState; use fil_actor_cron::State as CronState; +use fil_actor_datacap::State as DataCapState; use fil_actor_init::State as InitState; use fil_actor_market::State as MarketState; use fil_actor_miner::CronEventPayload; @@ -38,6 +39,7 @@ use fvm_ipld_encoding::tuple::*; use fil_actor_account::testing as account; use fil_actor_cron::testing as cron; +use fil_actor_datacap::testing as datacap; use fil_actor_init::testing as init; use fil_actor_market::testing as market; use fil_actor_miner::testing as miner; @@ -125,6 +127,7 @@ pub fn check_state_invariants<'a, BS: Blockstore + Debug>( let mut multisig_summaries = Vec::::new(); let mut reward_summary: Option = None; let mut verifreg_summary: Option = None; + let mut datacap_summary: Option = None; tree.for_each(|key, actor| { let acc = acc.with_prefix(format!("{key} ")); @@ -204,6 +207,12 @@ pub fn check_state_invariants<'a, BS: Blockstore + Debug>( acc.with_prefix("verifreg: ").add_all(&msgs); verifreg_summary = Some(summary); } + Some(Type::DataCap) => { + let state = get_state!(tree, actor, DataCapState); + let (summary, msgs) = datacap::check_state_invariants(&state, tree.store); + acc.with_prefix("datacap: ").add_all(&msgs); + datacap_summary = Some(summary); + } None => { bail!("unexpected actor code CID {} for address {}", 
actor.code, key); } diff --git a/test_vm/Cargo.toml b/test_vm/Cargo.toml index ca2713f55..f0197e918 100644 --- a/test_vm/Cargo.toml +++ b/test_vm/Cargo.toml @@ -24,26 +24,29 @@ fil_actor_power = { version = "9.0.0-alpha.1", path = "../actors/power" } fil_actor_market = { version = "9.0.0-alpha.1", path = "../actors/market" } fil_actor_verifreg = { version = "9.0.0-alpha.1", path = "../actors/verifreg" } fil_actor_miner = { version = "9.0.0-alpha.1", path = "../actors/miner" } -lazy_static = "1.4.0" -fvm_shared = { version = "2.0.0-alpha.2", default-features = false } -fvm_ipld_encoding = { version = "0.2.2", default-features = false } -fvm_ipld_blockstore = { version = "0.1.1", default-features = false } +fil_actor_datacap = { version = "9.0.0-alpha.1", path = "../actors/datacap" } + +anyhow = "1.0.65" +bimap = { version = "0.6.2" } +blake2b_simd = "1.0" +cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } +frc46_token = "1.0.0" fvm_ipld_bitfield = "0.5.2" +fvm_ipld_blockstore = { version = "0.1.1", default-features = false } +fvm_ipld_encoding = { version = "0.2.2", default-features = false } fvm_ipld_hamt = "0.5.1" -num-traits = "0.2.14" -num-derive = "0.3.3" +fvm_shared = { version = "2.0.0-alpha.2", default-features = false } +indexmap = { version = "1.8.0", features = ["serde-1"] } +integer-encoding = { version = "3.0.3", default-features = false } +lazy_static = "1.4.0" log = "0.4.14" +num-derive = "0.3.3" +num-traits = "0.2.14" rand = "0.8.5" rand_chacha = "0.3.1" -indexmap = { version = "1.8.0", features = ["serde-1"] } -cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } +regex = "1" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" -anyhow = "1.0.65" -bimap = { version = "0.6.2" } -blake2b_simd = "1.0" -integer-encoding = { version = "3.0.3", default-features = false } -regex = "1" [dev-dependencies] cid = { version = "0.8.3", default-features = false, features = 
["serde-codec"] } diff --git a/test_vm/src/lib.rs b/test_vm/src/lib.rs index f9dfb8453..5a3a44166 100644 --- a/test_vm/src/lib.rs +++ b/test_vm/src/lib.rs @@ -4,6 +4,7 @@ use cid::multihash::Code; use cid::Cid; use fil_actor_account::{Actor as AccountActor, State as AccountState}; use fil_actor_cron::{Actor as CronActor, Entry as CronEntry, State as CronState}; +use fil_actor_datacap::{Actor as DataCapActor, State as DataCapState}; use fil_actor_init::{Actor as InitActor, ExecReturn, State as InitState}; use fil_actor_market::{Actor as MarketActor, Method as MarketMethod, State as MarketState}; use fil_actor_miner::{Actor as MinerActor, State as MinerState}; @@ -20,12 +21,12 @@ use fil_actors_runtime::runtime::{ Verifier, EMPTY_ARR_CID, }; use fil_actors_runtime::test_utils::*; -use fil_actors_runtime::MessageAccumulator; use fil_actors_runtime::{ ActorError, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, FIRST_NON_SINGLETON_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; +use fil_actors_runtime::{MessageAccumulator, DATACAP_TOKEN_ACTOR_ADDR}; use fil_builtin_actors_state::check::check_state_invariants; use fil_builtin_actors_state::check::Tree; use fvm_ipld_blockstore::MemoryBlockstore; @@ -224,6 +225,14 @@ impl<'bs> VM<'bs> { actor(*VERIFREG_ACTOR_CODE_ID, verifreg_head, 0, TokenAmount::zero()), ); + // datacap + let datacap_head = + v.put_store(&DataCapState::new(&v.store, VERIFIED_REGISTRY_ACTOR_ADDR).unwrap()); + v.set_actor( + DATACAP_TOKEN_ACTOR_ADDR, + actor(*DATACAP_TOKEN_ACTOR_CODE_ID, datacap_head, 0, TokenAmount::zero()), + ); + // burnt funds let burnt_funds_head = v.put_store(&AccountState { address: BURNT_FUNDS_ACTOR_ADDR }); v.set_actor( @@ -363,6 +372,18 @@ impl<'bs> VM<'bs> { self.store.get_cbor::(&a.head).unwrap() } + pub fn mutate_state(&self, addr: Address, f: F) + where + C: Cbor, + F: FnOnce(&mut C), + { + let mut a = self.get_actor(addr).unwrap(); + 
let mut st = self.store.get_cbor::(&a.head).unwrap().unwrap(); + f(&mut st); + a.head = self.store.put_cbor(&st, Code::Blake2b256).unwrap(); + self.set_actor(addr, a); + } + pub fn get_epoch(&self) -> ChainEpoch { self.curr_epoch } @@ -590,8 +611,8 @@ impl<'invocation, 'bs> InvocationCtx<'invocation, 'bs> { fn gather_trace(&mut self, invoke_result: Result) -> InvocationTrace { let (ret, code) = match invoke_result { - Ok(rb) => (Some(rb), None), - Err(ae) => (None, Some(ae.exit_code())), + Ok(rb) => (Some(rb), ExitCode::OK), + Err(ae) => (None, ae.exit_code()), }; let mut msg = self.msg.clone(); msg.to = match self.resolve_target(&self.msg.to) { @@ -653,6 +674,8 @@ impl<'invocation, 'bs> InvocationCtx<'invocation, 'bs> { Type::Power => PowerActor::invoke_method(self, self.msg.method, ¶ms), Type::PaymentChannel => PaychActor::invoke_method(self, self.msg.method, ¶ms), Type::VerifiedRegistry => VerifregActor::invoke_method(self, self.msg.method, ¶ms), + // Type::EVM => panic!("no EVM"), + Type::DataCap => DataCapActor::invoke_method(self, self.msg.method, ¶ms), }; if res.is_err() { self.v.rollback(prior_root) @@ -1027,7 +1050,7 @@ pub fn actor(code: Cid, head: Cid, seq: u64, bal: TokenAmount) -> Actor { #[derive(Clone)] pub struct InvocationTrace { pub msg: InternalMessage, - pub code: Option, + pub code: ExitCode, pub ret: Option, pub subinvocations: Vec, } @@ -1051,48 +1074,37 @@ impl ExpectInvocation { let id = format!("[{}:{}]", invoc.msg.to, invoc.msg.method); self.quick_match(invoc, String::new()); if let Some(c) = self.code { - assert_ne!( - None, - invoc.code, - "{} unexpected code: expected:{}was:{}", - id, - c, - ExitCode::OK - ); assert_eq!( - c, - invoc.code.unwrap(), - "{} unexpected code expected:{}was:{}", - id, - c, - invoc.code.unwrap() + c, invoc.code, + "{} unexpected code expected: {}, was: {}", + id, c, invoc.code ); } if let Some(f) = self.from { assert_eq!( f, invoc.msg.from, - "{} unexpected from addr: expected:{}was:{} ", + "{} unexpected 
from addr: expected: {}, was: {} ", id, f, invoc.msg.from ); } if let Some(v) = &self.value { assert_eq!( v, &invoc.msg.value, - "{} unexpected value: expected:{}was:{} ", + "{} unexpected value: expected: {}, was: {} ", id, v, invoc.msg.value ); } if let Some(p) = &self.params { assert_eq!( p, &invoc.msg.params, - "{} unexpected params: expected:{:x?}was:{:x?}", + "{} unexpected params: expected: {:x?}, was: {:x?}", id, p, invoc.msg.params ); } if let Some(r) = &self.ret { - assert_ne!(None, invoc.ret, "{} unexpected ret: expected:{:x?}was:None", id, r); + assert_ne!(None, invoc.ret, "{} unexpected ret: expected: {:x?}, was: None", id, r); let ret = &invoc.ret.clone().unwrap(); - assert_eq!(r, ret, "{} unexpected ret: expected:{:x?}was:{:x?}", id, r, ret); + assert_eq!(r, ret, "{} unexpected ret: expected: {:x?}, was: {:x?}", id, r, ret); } if let Some(expect_subinvocs) = &self.subinvocs { let subinvocs = &invoc.subinvocations; @@ -1133,12 +1145,12 @@ impl ExpectInvocation { let id = format!("[{}:{}]", invoc.msg.to, invoc.msg.method); assert_eq!( self.to, invoc.msg.to, - "{} unexpected to addr: expected:{} was:{} \n{}", + "{} unexpected to addr: expected: {}, was: {} \n{}", id, self.to, invoc.msg.to, extra_msg ); assert_eq!( self.method, invoc.msg.method, - "{} unexpected method: expected:{}was:{} \n{}", + "{} unexpected method: expected: {}, was: {} \n{}", id, self.method, invoc.msg.from, extra_msg ); } diff --git a/test_vm/src/util.rs b/test_vm/src/util.rs index cd9ffe3f3..ff636be28 100644 --- a/test_vm/src/util.rs +++ b/test_vm/src/util.rs @@ -1,10 +1,13 @@ use crate::*; use fil_actor_account::Method as AccountMethod; use fil_actor_cron::Method as CronMethod; +use fil_actor_datacap::Method as DataCapMethod; use fil_actor_market::{ ClientDealProposal, DealProposal, Label, Method as MarketMethod, PublishStorageDealsParams, PublishStorageDealsReturn, }; + +use fil_actor_market::ext::verifreg::{AllocationRequest, AllocationRequests}; use fil_actor_miner::{ 
aggregate_pre_commit_network_fee, max_prove_commit_duration, new_deadline_info_from_offset_and_epoch, CompactCommD, Deadline, DeadlineInfo, @@ -20,6 +23,11 @@ use fil_actor_power::{ }; use fil_actor_reward::Method as RewardMethod; use fil_actor_verifreg::{Method as VerifregMethod, VerifierParams}; +use fil_actors_runtime::runtime::policy_constants::{ + MARKET_DEFAULT_ALLOCATION_TERM_BUFFER, MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION, +}; +use frc46_token::receiver::types::{FRC46TokenReceived, UniversalReceiverParams, FRC46_TOKEN_TYPE}; +use frc46_token::token::types::TransferFromParams; use fvm_ipld_bitfield::BitField; use fvm_ipld_encoding::{BytesDe, Cbor, RawBytes}; use fvm_shared::address::{Address, BLS_PUB_LEN}; @@ -610,17 +618,22 @@ pub fn add_verifier(v: &VM, verifier: Address, data_cap: StoragePower) { MultisigMethod::Propose as u64, proposal, ); - let verifreg_invoc = ExpectInvocation { - to: VERIFIED_REGISTRY_ACTOR_ADDR, - method: VerifregMethod::AddVerifier as u64, - params: Some(serialize(&add_verifier_params, "verifreg add verifier params").unwrap()), - subinvocs: Some(vec![]), - ..Default::default() - }; ExpectInvocation { to: TEST_VERIFREG_ROOT_ADDR, method: MultisigMethod::Propose as u64, - subinvocs: Some(vec![verifreg_invoc]), + subinvocs: Some(vec![ExpectInvocation { + to: VERIFIED_REGISTRY_ACTOR_ADDR, + method: VerifregMethod::AddVerifier as u64, + params: Some(serialize(&add_verifier_params, "verifreg add verifier params").unwrap()), + subinvocs: Some(vec![ExpectInvocation { + to: DATACAP_TOKEN_ACTOR_ADDR, + method: DataCapMethod::BalanceOf as u64, + params: Some(serialize(&verifier, "balance of params").unwrap()), + code: Some(ExitCode::OK), + ..Default::default() + }]), + ..Default::default() + }]), ..Default::default() } .matches(v.take_invocations().last().unwrap()); @@ -629,7 +642,7 @@ pub fn add_verifier(v: &VM, verifier: Address, data_cap: StoragePower) { #[allow(clippy::too_many_arguments)] pub fn publish_deal( v: &VM, - provider: Address, + 
worker: Address, deal_client: Address, miner_id: Address, deal_label: String, @@ -664,7 +677,7 @@ pub fn publish_deal( }; let ret: PublishStorageDealsReturn = apply_ok( v, - provider, + worker, STORAGE_MARKET_ACTOR_ADDR, TokenAmount::zero(), MarketMethod::PublishStorageDeals as u64, @@ -696,9 +709,56 @@ pub fn publish_deal( }, ]; if verified_deal { + let deal_term = deal.end_epoch - deal.start_epoch; + let token_amount = TokenAmount::from_whole(deal.piece_size.0 as i64); + let alloc_expiration = + min(deal.start_epoch, v.curr_epoch + MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION); + + let alloc_reqs = AllocationRequests { + allocations: vec![AllocationRequest { + provider: miner_id, + data: deal.piece_cid, + size: deal.piece_size, + term_min: deal_term, + term_max: deal_term + MARKET_DEFAULT_ALLOCATION_TERM_BUFFER, + expiration: alloc_expiration, + }], + extensions: vec![], + }; expect_publish_invocs.push(ExpectInvocation { - to: VERIFIED_REGISTRY_ACTOR_ADDR, - method: VerifregMethod::UseBytes as u64, + to: DATACAP_TOKEN_ACTOR_ADDR, + method: DataCapMethod::TransferFrom as u64, + params: Some( + RawBytes::serialize(&TransferFromParams { + from: deal_client, + to: VERIFIED_REGISTRY_ACTOR_ADDR, + amount: token_amount.clone(), + operator_data: RawBytes::serialize(&alloc_reqs).unwrap(), + }) + .unwrap(), + ), + code: Some(ExitCode::OK), + subinvocs: Some(vec![ExpectInvocation { + to: VERIFIED_REGISTRY_ACTOR_ADDR, + method: VerifregMethod::UniversalReceiverHook as u64, + params: Some( + RawBytes::serialize(&UniversalReceiverParams { + type_: FRC46_TOKEN_TYPE, + payload: RawBytes::serialize(&FRC46TokenReceived { + from: deal_client.id().unwrap(), + to: VERIFIED_REGISTRY_ACTOR_ADDR.id().unwrap(), + operator: STORAGE_MARKET_ACTOR_ADDR.id().unwrap(), + amount: token_amount, + operator_data: RawBytes::serialize(&alloc_reqs).unwrap(), + token_data: Default::default(), + }) + .unwrap(), + }) + .unwrap(), + ), + code: Some(ExitCode::OK), + ..Default::default() + }]), 
..Default::default() }) } diff --git a/test_vm/tests/extend_sectors_test.rs b/test_vm/tests/extend_sectors_test.rs index a86af038e..c14bac0b7 100644 --- a/test_vm/tests/extend_sectors_test.rs +++ b/test_vm/tests/extend_sectors_test.rs @@ -2,7 +2,7 @@ use fil_actor_cron::Method as CronMethod; use fil_actor_market::Method as MarketMethod; use fil_actor_miner::{ max_prove_commit_duration, ExpirationExtension, ExtendSectorExpirationParams, - Method as MinerMethod, PowerPair, PreCommitSectorParams, ProveCommitSectorParams, + Method as MinerMethod, PowerPair, PreCommitSectorParams, ProveCommitSectorParams, Sectors, State as MinerState, }; use fil_actor_power::{Method as PowerMethod, UpdateClaimedPowerParams}; @@ -171,6 +171,17 @@ fn extend_sector_with_deals() { sector_info.verified_deal_weight ); // (180 days *2880 epochs per day) * 32 GiB + // Note: we don't need to explicitly set verified weight using the legacy method + // because legacy and simple qa power deal weight calculations line up for fully packed sectors + // We do need to set simple_qa_power to false + sector_info.simple_qa_power = false; + // Manually craft state to match legacy sectors + v.mutate_state(miner_id, |st: &mut MinerState| { + let mut sectors = Sectors::load(&store, &st.sectors).unwrap(); + sectors.store(vec![sector_info.clone()]).unwrap(); + st.sectors = sectors.amt.flush().unwrap(); + }); + let initial_verified_deal_weight = sector_info.verified_deal_weight; let initial_deal_weight = sector_info.deal_weight; diff --git a/test_vm/tests/replica_update_test.rs b/test_vm/tests/replica_update_test.rs index e6933cc44..e900e002c 100644 --- a/test_vm/tests/replica_update_test.rs +++ b/test_vm/tests/replica_update_test.rs @@ -3,7 +3,7 @@ use fil_actor_market::Method as MarketMethod; use fil_actor_miner::{ power_for_sector, DisputeWindowedPoStParams, ExpirationExtension, ExtendSectorExpirationParams, Method as MinerMethod, PowerPair, ProveCommitSectorParams, ProveReplicaUpdatesParams, - 
ProveReplicaUpdatesParams2, ReplicaUpdate, ReplicaUpdate2, SectorOnChainInfo, + ProveReplicaUpdatesParams2, ReplicaUpdate, ReplicaUpdate2, SectorOnChainInfo, Sectors, State as MinerState, TerminateSectorsParams, TerminationDeclaration, SECTORS_AMT_BITWIDTH, }; @@ -701,6 +701,15 @@ fn extend_after_upgrade() { let (v, sector_info, worker, miner_id, deadline_index, partition_index, _) = create_miner_and_upgrade_sector(store, false); let sector_number = sector_info.sector_number; + let mut legacy_sector = sector_info; + legacy_sector.simple_qa_power = false; + + // TODO change to use extend2 + v.mutate_state(miner_id, |st: &mut MinerState| { + let mut sectors = Sectors::load(&store, &st.sectors).unwrap(); + sectors.store(vec![legacy_sector]).unwrap(); + st.sectors = sectors.amt.flush().unwrap(); + }); let extension_params = ExtendSectorExpirationParams { extensions: vec![ExpirationExtension { diff --git a/test_vm/tests/terminate_test.rs b/test_vm/tests/terminate_test.rs index 14561fa0a..e97b755a4 100644 --- a/test_vm/tests/terminate_test.rs +++ b/test_vm/tests/terminate_test.rs @@ -46,7 +46,7 @@ fn terminate_sectors() { let sealed_cid = make_sealed_cid(b"s100"); let seal_proof = RegisteredSealProof::StackedDRG32GiBV1P1; - let (id_addr, robust_addr) = create_miner( + let (miner_id_addr, miner_robust_addr) = create_miner( &mut v, owner, worker, @@ -96,7 +96,7 @@ fn terminate_sectors() { STORAGE_MARKET_ACTOR_ADDR, miner_collateral.clone(), MarketMethod::AddBalance as u64, - id_addr, + miner_id_addr, ); // create 3 deals, some verified and some not @@ -106,7 +106,7 @@ fn terminate_sectors() { &v, worker, verified_client, - id_addr, + miner_id_addr, "deal1".to_string(), PaddedPieceSize(1 << 30), true, @@ -120,7 +120,7 @@ fn terminate_sectors() { &v, worker, verified_client, - id_addr, + miner_id_addr, "deal2".to_string(), PaddedPieceSize(1 << 32), true, @@ -134,7 +134,7 @@ fn terminate_sectors() { &v, worker, unverified_client, - id_addr, + miner_id_addr, 
"deal3".to_string(), PaddedPieceSize(1 << 34), false, @@ -166,7 +166,7 @@ fn terminate_sectors() { apply_ok( &v, worker, - robust_addr, + miner_robust_addr, TokenAmount::zero(), MinerMethod::PreCommitSector as u64, PreCommitSectorParams { @@ -180,14 +180,14 @@ fn terminate_sectors() { }, ); let prove_time = v.get_epoch() + Policy::default().pre_commit_challenge_delay + 1; - let v = advance_by_deadline_to_epoch(v, id_addr, prove_time).0; + let v = advance_by_deadline_to_epoch(v, miner_id_addr, prove_time).0; // prove commit, cron, advance to post time let prove_params = ProveCommitSectorParams { sector_number, proof: vec![] }; apply_ok( &v, worker, - robust_addr, + miner_robust_addr, TokenAmount::zero(), MinerMethod::ProveCommitSector as u64, prove_params, @@ -202,12 +202,12 @@ fn terminate_sectors() { ) .unwrap(); assert_eq!(ExitCode::OK, res.code); - let (dline_info, p_idx, v) = advance_to_proving_deadline(v, id_addr, sector_number); + let (dline_info, p_idx, v) = advance_to_proving_deadline(v, miner_id_addr, sector_number); let d_idx = dline_info.index; - let st = v.get_state::(id_addr).unwrap(); + let st = v.get_state::(miner_id_addr).unwrap(); let sector = st.get_sector(v.store, sector_number).unwrap().unwrap(); let sector_power = power_for_sector(seal_proof.sector_size().unwrap(), §or); - submit_windowed_post(&v, worker, id_addr, dline_info, p_idx, Some(sector_power)); + submit_windowed_post(&v, worker, miner_id_addr, dline_info, p_idx, Some(sector_power)); let v = v.with_epoch(dline_info.last()); v.apply_message( @@ -225,7 +225,7 @@ fn terminate_sectors() { let v = v.with_epoch(start); // get out of proving deadline so we don't post twice let v = advance_by_deadline_to_epoch_while_proving( v, - id_addr, + miner_id_addr, worker, sector_number, start + Policy::default().deal_updates_interval, @@ -244,7 +244,7 @@ fn terminate_sectors() { apply_ok( &v, worker, - robust_addr, + miner_robust_addr, TokenAmount::zero(), MinerMethod::TerminateSectors as u64, 
TerminateSectorsParams { @@ -256,7 +256,7 @@ fn terminate_sectors() { }, ); ExpectInvocation { - to: id_addr, + to: miner_id_addr, method: MinerMethod::TerminateSectors as u64, subinvocs: Some(vec![ ExpectInvocation { @@ -294,7 +294,7 @@ fn terminate_sectors() { } .matches(v.take_invocations().last().unwrap()); - let miner_balances = v.get_miner_balance(id_addr); + let miner_balances = v.get_miner_balance(miner_id_addr); assert!(miner_balances.initial_pledge.is_zero()); assert!(miner_balances.pre_commit_deposit.is_zero()); @@ -319,7 +319,7 @@ fn terminate_sectors() { // advance a market cron processing period to process terminations fully let (v, _) = advance_by_deadline_to_epoch( v, - id_addr, + miner_id_addr, termination_epoch + Policy::default().deal_updates_interval, ); // because of rounding error it's annoying to compute exact withdrawable balance which is 2.9999.. FIL @@ -352,7 +352,7 @@ fn terminate_sectors() { STORAGE_MARKET_ACTOR_ADDR, TokenAmount::zero(), MarketMethod::WithdrawBalance as u64, - WithdrawBalanceParams { provider_or_client: id_addr, amount: miner_collateral }, + WithdrawBalanceParams { provider_or_client: miner_id_addr, amount: miner_collateral }, ); let value_withdrawn = v.take_invocations().last().unwrap().subinvocations[1].msg.value(); diff --git a/test_vm/tests/verifreg_remove_datacap_test.rs b/test_vm/tests/verifreg_remove_datacap_test.rs index 95d10b313..577dba948 100644 --- a/test_vm/tests/verifreg_remove_datacap_test.rs +++ b/test_vm/tests/verifreg_remove_datacap_test.rs @@ -1,20 +1,30 @@ -use fil_actor_verifreg::{ - AddVerifierClientParams, RemoveDataCapParams, RemoveDataCapRequest, RemoveDataCapReturn, - SIGNATURE_DOMAIN_SEPARATION_REMOVE_DATA_CAP, -}; -use fil_actor_verifreg::{AddrPairKey, Method as VerifregMethod}; -use fil_actor_verifreg::{RemoveDataCapProposal, RemoveDataCapProposalID, State as VerifregState}; -use fil_actors_runtime::cbor::serialize; -use fil_actors_runtime::{make_map_with_root_and_bitwidth, 
VERIFIED_REGISTRY_ACTOR_ADDR}; +use std::ops::{Div, Sub}; + use fvm_ipld_blockstore::MemoryBlockstore; use fvm_ipld_encoding::to_vec; use fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::bigint::{BigInt, Zero}; use fvm_shared::crypto::signature::{Signature, SignatureType}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::StoragePower; use fvm_shared::HAMT_BIT_WIDTH; -use std::ops::{Div, Sub}; +use num_traits::ToPrimitive; + +use fil_actor_datacap::{ + DestroyParams, Method as DataCapMethod, MintParams, State as DataCapState, +}; +use fil_actor_verifreg::{ + AddVerifierClientParams, RemoveDataCapParams, RemoveDataCapRequest, RemoveDataCapReturn, + SIGNATURE_DOMAIN_SEPARATION_REMOVE_DATA_CAP, +}; +use fil_actor_verifreg::{AddrPairKey, Method as VerifregMethod}; +use fil_actor_verifreg::{RemoveDataCapProposal, RemoveDataCapProposalID, State as VerifregState}; +use fil_actors_runtime::cbor::serialize; +use fil_actors_runtime::{ + make_map_with_root_and_bitwidth, DATACAP_TOKEN_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, + VERIFIED_REGISTRY_ACTOR_ADDR, +}; use test_vm::util::{add_verifier, apply_ok, create_accounts}; use test_vm::{ExpectInvocation, TEST_VERIFREG_ROOT_ADDR, VM}; @@ -28,7 +38,7 @@ fn remove_datacap_simple_successful_path() { let verifier1_id_addr = v.normalize_address(&verifier1).unwrap(); let verifier2_id_addr = v.normalize_address(&verifier2).unwrap(); let verified_client_id_addr = v.normalize_address(&verified_client).unwrap(); - let verifier_allowance = StoragePower::from(2 * 1048576); + let verifier_allowance = StoragePower::from(2 * 1048576u64); let allowance_to_remove: StoragePower = verifier_allowance.clone().div(2); // register verifier1 and verifier2 @@ -38,22 +48,31 @@ fn remove_datacap_simple_successful_path() { // register the verified client let add_verified_client_params = AddVerifierClientParams { address: verified_client, allowance: verifier_allowance.clone() }; - let 
add_verified_client_params_ser = - serialize(&add_verified_client_params, "add verifier params").unwrap(); + let mint_params = MintParams { + to: verified_client, + amount: TokenAmount::from_whole(verifier_allowance.to_i64().unwrap()), + operators: vec![STORAGE_MARKET_ACTOR_ADDR], + }; apply_ok( &v, verifier1, VERIFIED_REGISTRY_ACTOR_ADDR, TokenAmount::zero(), VerifregMethod::AddVerifiedClient as u64, - add_verified_client_params, + add_verified_client_params.clone(), ); ExpectInvocation { to: VERIFIED_REGISTRY_ACTOR_ADDR, method: VerifregMethod::AddVerifiedClient as u64, - params: Some(add_verified_client_params_ser), - subinvocs: Some(vec![]), + params: Some(serialize(&add_verified_client_params, "add verifier params").unwrap()), + subinvocs: Some(vec![ExpectInvocation { + to: DATACAP_TOKEN_ACTOR_ADDR, + method: DataCapMethod::Mint as u64, + params: Some(serialize(&mint_params, "mint params").unwrap()), + subinvocs: None, + ..Default::default() + }]), ..Default::default() } .matches(v.take_invocations().last().unwrap()); @@ -72,16 +91,9 @@ fn remove_datacap_simple_successful_path() { verifiers.get(&verifier2_id_addr.to_bytes()).unwrap().unwrap(); assert_eq!(verifier_allowance, *verifier2_data_cap); - let mut verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &v_st.verified_clients, - &store, - HAMT_BIT_WIDTH, - ) - .unwrap(); - - let BigIntDe(data_cap) = - verified_clients.get(&verified_client_id_addr.to_bytes()).unwrap().unwrap(); - assert_eq!(*data_cap, verifier_allowance); + let token_st = v.get_state::(DATACAP_TOKEN_ACTOR_ADDR).unwrap(); + let balance = token_st.balance(&store, verified_client_id_addr.id().unwrap()).unwrap(); + assert_eq!(balance, TokenAmount::from_whole(verifier_allowance.to_i64().unwrap())); let mut proposal_ids = make_map_with_root_and_bitwidth::<_, RemoveDataCapProposalID>( &v_st.remove_data_cap_proposal_ids, @@ -134,47 +146,31 @@ fn remove_datacap_simple_successful_path() { }, }; - let mut remove_datacap_params_ser = - 
serialize(&remove_datacap_params, "add verifier params").unwrap(); - let remove_datacap_ret: RemoveDataCapReturn = apply_ok( &v, TEST_VERIFREG_ROOT_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, TokenAmount::zero(), VerifregMethod::RemoveVerifiedClientDataCap as u64, - remove_datacap_params, + remove_datacap_params.clone(), ) .deserialize() .unwrap(); - ExpectInvocation { - to: VERIFIED_REGISTRY_ACTOR_ADDR, - method: VerifregMethod::RemoveVerifiedClientDataCap as u64, - params: Some(remove_datacap_params_ser), - subinvocs: Some(vec![]), - ..Default::default() - } - .matches(v.take_invocations().last().unwrap()); + expect_remove_datacap(&remove_datacap_params).matches(v.take_invocations().last().unwrap()); assert_eq!(verified_client_id_addr, remove_datacap_ret.verified_client); assert_eq!(allowance_to_remove, remove_datacap_ret.data_cap_removed); - v_st = v.get_state::(VERIFIED_REGISTRY_ACTOR_ADDR).unwrap(); - // confirm client's allowance has fallen by half - verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &v_st.verified_clients, - &store, - HAMT_BIT_WIDTH, - ) - .unwrap(); - - let BigIntDe(data_cap) = - verified_clients.get(&verified_client_id_addr.to_bytes()).unwrap().unwrap(); - - assert_eq!(*data_cap, verifier_allowance.sub(allowance_to_remove.clone())); + let token_st = v.get_state::(DATACAP_TOKEN_ACTOR_ADDR).unwrap(); + let balance = token_st.balance(&store, verified_client_id_addr.id().unwrap()).unwrap(); + assert_eq!( + balance, + TokenAmount::from_whole(verifier_allowance.sub(&allowance_to_remove).to_i64().unwrap()) + ); + v_st = v.get_state::(VERIFIED_REGISTRY_ACTOR_ADDR).unwrap(); // confirm proposalIds has changed as expected proposal_ids = make_map_with_root_and_bitwidth(&v_st.remove_data_cap_proposal_ids, &store, HAMT_BIT_WIDTH) @@ -229,44 +225,29 @@ fn remove_datacap_simple_successful_path() { }, }; - remove_datacap_params_ser = serialize(&remove_datacap_params, "add verifier params").unwrap(); - let remove_datacap_ret: RemoveDataCapReturn 
= apply_ok( &v, TEST_VERIFREG_ROOT_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, TokenAmount::zero(), VerifregMethod::RemoveVerifiedClientDataCap as u64, - remove_datacap_params, + remove_datacap_params.clone(), ) .deserialize() .unwrap(); - ExpectInvocation { - to: VERIFIED_REGISTRY_ACTOR_ADDR, - method: VerifregMethod::RemoveVerifiedClientDataCap as u64, - params: Some(remove_datacap_params_ser), - subinvocs: Some(vec![]), - ..Default::default() - } - .matches(v.take_invocations().last().unwrap()); + expect_remove_datacap(&remove_datacap_params).matches(v.take_invocations().last().unwrap()); assert_eq!(verified_client_id_addr, remove_datacap_ret.verified_client); assert_eq!(allowance_to_remove, remove_datacap_ret.data_cap_removed); - // confirm client has been removed entirely - - v_st = v.get_state::<VerifregState>(VERIFIED_REGISTRY_ACTOR_ADDR).unwrap(); - verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( - &v_st.verified_clients, - &store, - HAMT_BIT_WIDTH, - ) - .unwrap(); - - assert!(verified_clients.get(&verified_client_id_addr.to_bytes()).unwrap().is_none()); + // confirm client has no balance + let token_st = v.get_state::<DataCapState>(DATACAP_TOKEN_ACTOR_ADDR).unwrap(); + let balance = token_st.balance(&store, verified_client_id_addr.id().unwrap()).unwrap(); + assert_eq!(balance, TokenAmount::zero()); // confirm proposalIds has changed as expected + v_st = v.get_state::<VerifregState>(VERIFIED_REGISTRY_ACTOR_ADDR).unwrap(); proposal_ids = make_map_with_root_and_bitwidth(&v_st.remove_data_cap_proposal_ids, &store, HAMT_BIT_WIDTH) .unwrap(); @@ -286,3 +267,44 @@ fn remove_datacap_simple_successful_path() { assert_eq!(2u64, verifier2_proposal_id.id); v.assert_state_invariants(); } + +fn expect_remove_datacap(params: &RemoveDataCapParams) -> ExpectInvocation { + ExpectInvocation { + to: VERIFIED_REGISTRY_ACTOR_ADDR, + method: VerifregMethod::RemoveVerifiedClientDataCap as u64, + params: Some(serialize(&params, "remove datacap params").unwrap()), + code: Some(ExitCode::OK), + subinvocs: Some(vec![ + 
ExpectInvocation { + to: DATACAP_TOKEN_ACTOR_ADDR, + method: DataCapMethod::BalanceOf as u64, + params: Some( + serialize(&params.verified_client_to_remove, "balance_of params").unwrap(), + ), + code: Some(ExitCode::OK), + subinvocs: None, + ..Default::default() + }, + ExpectInvocation { + to: DATACAP_TOKEN_ACTOR_ADDR, + method: DataCapMethod::Destroy as u64, + params: Some( + serialize( + &DestroyParams { + owner: params.verified_client_to_remove, + amount: TokenAmount::from_whole( + params.data_cap_amount_to_remove.to_i64().unwrap(), + ), + }, + "destroy params", + ) + .unwrap(), + ), + code: Some(ExitCode::OK), + subinvocs: None, + ..Default::default() + }, + ]), + ..Default::default() + } +}