From edee858b4c7a4fbd4090b1947933bc67ad20d40d Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Mon, 16 Dec 2024 07:56:15 +0200 Subject: [PATCH 01/15] wip --- entities/src/models.rs | 6 +- grpc/src/mapper.rs | 26 +- integration_tests/src/common.rs | 5 +- integrity_verification/src/main.rs | 1 + .../src/asset_streaming_and_discovery.rs | 4 +- nft_ingester/src/api/dapi/asset.rs | 81 ++++- .../src/api/dapi/rpc_asset_convertors.rs | 25 +- nft_ingester/src/api/dapi/rpc_asset_models.rs | 7 +- nft_ingester/src/bin/migrator/main.rs | 14 +- nft_ingester/src/bin/raw_backup/main.rs | 4 +- nft_ingester/src/bin/slot_checker/main.rs | 10 +- .../src/bin/synchronizer_utils/main.rs | 2 +- nft_ingester/src/json_worker.rs | 37 +- .../bubblegum_updates_processor.rs | 12 +- nft_ingester/src/scheduler.rs | 6 +- nft_ingester/tests/gapfiller_tests.rs | 2 +- rocks-db/benches/misc_benchmark.rs | 2 +- rocks-db/src/asset.rs | 42 +-- rocks-db/src/asset_client.rs | 35 +- rocks-db/src/asset_streaming_client.rs | 18 +- rocks-db/src/batch_client.rs | 29 +- rocks-db/src/batch_savers.rs | 2 +- rocks-db/src/bin/column_copier/main.rs | 3 +- rocks-db/src/column.rs | 30 +- rocks-db/src/dump_client.rs | 2 +- rocks-db/src/flatbuf/offchain_data.fbs | 16 + .../src/{ => generated}/asset_generated.rs | 0 rocks-db/src/generated/mod.rs | 12 + .../src/generated/offchain_data_generated.rs | 326 ++++++++++++++++++ rocks-db/src/lib.rs | 21 +- rocks-db/src/mappers.rs | 2 +- rocks-db/src/migrations/mod.rs | 1 + rocks-db/src/migrations/offchain_data.rs | 22 ++ rocks-db/src/migrator.rs | 20 +- rocks-db/src/offchain_data.rs | 125 ++++++- rocks-db/src/transaction.rs | 2 +- rocks-db/src/transaction_client.rs | 5 +- tests/setup/src/lib.rs | 2 - tests/setup/src/pg.rs | 15 +- tests/setup/src/rocks.rs | 13 +- 40 files changed, 815 insertions(+), 172 deletions(-) create mode 100644 rocks-db/src/flatbuf/offchain_data.fbs rename rocks-db/src/{ => generated}/asset_generated.rs (100%) create mode 100644 rocks-db/src/generated/mod.rs create mode 100644 rocks-db/src/generated/offchain_data_generated.rs create mode 100644 rocks-db/src/migrations/offchain_data.rs diff --git a/entities/src/models.rs b/entities/src/models.rs index 170f98f01..29cf07c86 100644 --- a/entities/src/models.rs +++ b/entities/src/models.rs @@ -107,13 +107,13 @@ pub struct Creator { } #[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct OffChainData { +pub struct OffChainDataGrpc { pub url: String, pub metadata: String, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct CompleteAssetDetails { +pub struct AssetCompleteDetailsGrpc { // From AssetStaticDetails pub pubkey: Pubkey, pub specification_asset_class: SpecificationAssetClass, @@ -172,7 +172,7 @@ pub struct CompleteAssetDetails { pub master_edition: Option<MasterEdition>, // OffChainData - pub offchain_data: Option<OffChainData>, + pub offchain_data: Option<OffChainDataGrpc>, // SplMint pub spl_mint: Option<SplMint>, diff --git a/grpc/src/mapper.rs b/grpc/src/mapper.rs index 4ce799624..6f6ce524c 100644 --- a/grpc/src/mapper.rs +++ b/grpc/src/mapper.rs @@ -6,12 +6,12 @@ use crate::gapfiller::{ MasterEdition, OffchainData, OwnerType, RawBlock, RoyaltyTargetType, SpecificationAssetClass, SpecificationVersions, SplMint, TokenStandard, UpdateVersionValue, UseMethod, Uses, }; -use entities::models::{CompleteAssetDetails, OffChainData, UpdateVersion, Updated}; +use entities::models::{AssetCompleteDetailsGrpc, UpdateVersion, Updated}; use solana_sdk::hash::Hash; use solana_sdk::pubkey::Pubkey; -impl From<CompleteAssetDetails> for AssetDetails { - fn from(value: 
CompleteAssetDetails) -> Self { +impl From<AssetCompleteDetailsGrpc> for AssetDetails { + fn from(value: AssetCompleteDetailsGrpc) -> Self { let delegate = value.delegate.value.map(|key| DynamicBytesField { value: key.to_bytes().to_vec(), slot_updated: value.delegate.slot_updated, }); @@ -85,7 +85,7 @@ impl From<CompleteAssetDetails> for AssetDetails { } } -impl TryFrom<AssetDetails> for CompleteAssetDetails { +impl TryFrom<AssetDetails> for AssetCompleteDetailsGrpc { type Error = GrpcError; fn try_from(value: AssetDetails) -> Result<Self, Self::Error> { @@ -136,9 +136,7 @@ impl TryFrom<AssetDetails> for CompleteAssetDetails { .is_burnt .map(Into::into) .ok_or(GrpcError::MissingField("is_burnt".to_string()))?, - was_decompressed: value - .was_decompressed - .map(Into::into), + was_decompressed: value.was_decompressed.map(Into::into), creators: value .creators .map(TryInto::try_into) @@ -203,10 +201,12 @@ impl TryFrom<AssetDetails> for CompleteAssetDetails { .collect::<Result<Vec<_>, _>>()?, edition: value.edition.map(TryInto::try_into).transpose()?, master_edition: value.master_edition.map(TryInto::try_into).transpose()?, - offchain_data: value.offchain_data.map(|e| OffChainData { - url: e.url, - metadata: e.metadata, - }), + offchain_data: value + .offchain_data + .map(|e| entities::models::OffChainDataGrpc { + url: e.url, + metadata: e.metadata, + }), spl_mint: value.spl_mint.map(TryInto::try_into).transpose()?, }) } @@ -258,8 +258,8 @@ impl From for SplMint { } } -impl From<OffChainData> for OffchainData { - fn from(value: OffChainData) -> Self { +impl From<entities::models::OffChainDataGrpc> for OffchainData { + fn from(value: entities::models::OffChainDataGrpc) -> Self { Self { url: value.url, metadata: value.metadata, diff --git a/integration_tests/src/common.rs b/integration_tests/src/common.rs index d2439c2cb..2007dbbd5 100644 --- a/integration_tests/src/common.rs +++ b/integration_tests/src/common.rs @@ -81,7 +81,7 @@ pub struct TestSetup { pub message_parser: MessageParser, pub acc_processor: Arc<AccountsProcessor<Buffer>>, pub tx_processor: BubblegumTxProcessor, - pub synchronizer: Synchronizer>, + pub synchronizer: Synchronizer<Storage, PgClient>, pub das_api: DasApi< MaybeProofChecker, JsonWorker, @@ -109,6 +109,7 @@ impl TestSetup { red_metrics.clone(), MIN_PG_CONNECTIONS, POSTGRE_MIGRATIONS_PATH, + None, ) .await .unwrap(), @@ -185,12 +186,10 @@ impl TestSetup { let synchronizer = Synchronizer::new( storage.clone(), index_storage.clone(), - index_storage.clone(), DUMP_SYNCHRONIZER_BATCH_SIZE, "./dump".to_string(), metrics_state.synchronizer_metrics.clone(), SYNCHRONIZER_PARALLEL_TASKS, - false, ); TestSetup { diff --git a/integrity_verification/src/main.rs b/integrity_verification/src/main.rs index 54dbc6cc3..4d52317cc 100644 --- a/integrity_verification/src/main.rs +++ b/integrity_verification/src/main.rs @@ -82,6 +82,7 @@ async fn main() { &config.database_url.clone().unwrap(), 100, 500, + None, metrics.red_metrics, ) .await diff --git a/interface/src/asset_streaming_and_discovery.rs b/interface/src/asset_streaming_and_discovery.rs index 8e0dabb73..5545f745f 100644 --- a/interface/src/asset_streaming_and_discovery.rs +++ b/interface/src/asset_streaming_and_discovery.rs @@ -1,11 +1,11 @@ use async_trait::async_trait; -use entities::models::{CompleteAssetDetails, RawBlock}; +use entities::models::{AssetCompleteDetailsGrpc, RawBlock}; use futures::stream::Stream; use mockall::automock; use std::pin::Pin; pub type AsyncError = Box<dyn std::error::Error + Send + Sync>; -type AssetResult = Result<CompleteAssetDetails, AsyncError>; +type AssetResult = Result<AssetCompleteDetailsGrpc, AsyncError>; pub type AssetDetailsStream = Pin<Box<dyn Stream<Item = AssetResult> + Send + Sync>>; pub type AssetDetailsStreamNonSync = Pin<Box<dyn Stream<Item = AssetResult> + Send>>; type RawBlocksResult = Result<RawBlock, AsyncError>; diff --git a/nft_ingester/src/api/dapi/asset.rs b/nft_ingester/src/api/dapi/asset.rs index 
dbe836276..c45f69eeb 100644 --- a/nft_ingester/src/api/dapi/asset.rs +++ b/nft_ingester/src/api/dapi/asset.rs @@ -4,10 +4,11 @@ use std::sync::Arc; use entities::api_req_params::{AssetSortDirection, Options}; use entities::enums::SpecificationAssetClass; -use entities::models::{AssetSignatureWithPagination, OffChainData}; +use entities::models::AssetSignatureWithPagination; use interface::asset_sigratures::AssetSignaturesGetter; use interface::json::{JsonDownloadResult, JsonDownloader, JsonPersister}; use rocks_db::errors::StorageError; +use rocks_db::offchain_data::{OffChainData, StorageMutability}; use solana_sdk::pubkey::Pubkey; use tracing::error; @@ -23,6 +24,7 @@ use tokio::sync::Mutex; use tokio::task::{JoinError, JoinSet}; pub const COLLECTION_GROUP_KEY: &str = "collection"; +pub const METADATA_CACHE_TTL: i64 = 86400; // 1 day fn convert_rocks_asset_model( asset_pubkey: &Pubkey, @@ -211,15 +213,13 @@ pub async fn get_by_ids< .push(index); } - let unique_asset_ids: Vec<_> = unique_asset_ids_map.keys().cloned().collect(); - // request prices and symbols only for fungibles when the option is set. This will prolong the request at least an order of magnitude - let asset_selected_maps_fut = - rocks_db.get_asset_selected_maps_async(unique_asset_ids.clone(), owner_address, &options); + let unique_asset_ids: Vec<Pubkey> = unique_asset_ids_map.keys().cloned().collect(); let asset_ids_string = asset_ids .clone() .into_iter() .map(|id| id.to_string()) .collect_vec(); + // request prices and symbols only for fungibles when the option is set. This will prolong the request at least an order of magnitude let (token_prices, token_symbols) = if options.show_fungible { let token_prices_fut = token_price_fetcher.fetch_token_prices(asset_ids_string.as_slice()); let token_symbols_fut = @@ -228,7 +228,7 @@ } else { (Ok(HashMap::new()), Ok(HashMap::new())) }; - let mut asset_selected_maps = asset_selected_maps_fut.await?; + let token_prices = token_prices.unwrap_or_else(|e| { error!("Fetch token prices: {}", e); metrics.inc_token_info_fetch_errors("prices"); HashMap::new() }); let token_symbols = token_symbols.unwrap_or_else(|e| { error!("Fetch token symbols: {}", e); metrics.inc_token_info_fetch_errors("symbols"); HashMap::new() }); + let mut asset_selected_maps = rocks_db + .get_asset_selected_maps_async(unique_asset_ids.clone(), owner_address, &options) + .await?; if let Some(json_downloader) = json_downloader { let mut urls_to_download = Vec::new(); continue; } let offchain_data = asset_selected_maps.offchain_data.get(url); - if offchain_data.is_none() || offchain_data.unwrap().metadata.is_empty() { + let mut download_needed = false; + match offchain_data { + Some(offchain_data) => { + match &offchain_data.metadata { + Some(metadata) => { + if metadata.is_empty() { + download_needed = true; + } + } + None => { + download_needed = true; + } + } + + match &offchain_data.url { + Some(url) => { + if url.is_empty() { + download_needed = true; + } + } + None => { + download_needed = true; + } + } + + let curr_time = chrono::Utc::now().timestamp(); + if offchain_data.storage_mutability.is_mutable() + && curr_time > offchain_data.last_read_at + METADATA_CACHE_TTL + { + download_needed = true; + } + } + None => { + download_needed = true; + } + } + + if download_needed { urls_to_download.push(url.clone()); } + if urls_to_download.len() >= max_json_to_download { break; } } let num_of_tasks = urls_to_download.len(); - if num_of_tasks != 0 { + if num_of_tasks > 0 { let download_results = stream::iter(urls_to_download) 
.map(|url| { let json_downloader = json_downloader.clone(); @@ -273,26 +314,33 @@ pub async fn get_by_ids< .await; for (json_url, res) in download_results.iter() { + let last_read_at = chrono::Utc::now().timestamp(); match res { Ok(JsonDownloadResult::JsonContent(metadata)) => { + let storage_mutability = StorageMutability::from(json_url.as_str()); + asset_selected_maps.offchain_data.insert( json_url.clone(), OffChainData { - url: json_url.clone(), - metadata: metadata.clone(), + url: Some(json_url.clone()), + metadata: Some(metadata.clone()), + storage_mutability, + last_read_at, }, ); } Ok(JsonDownloadResult::MediaUrlAndMimeType { url, mime_type }) => { + let storage_mutability = StorageMutability::from(json_url.as_str()); asset_selected_maps.offchain_data.insert( json_url.clone(), OffChainData { - url: json_url.clone(), - metadata: format!( - "{{\"image\":\"{}\",\"type\":\"{}\"}}", - url, mime_type - ) - .to_string(), + url: Some(json_url.clone()), + metadata: Some( + format!("{{\"image\":\"{}\",\"type\":\"{}\"}}", url, mime_type) + .to_string(), + ), + storage_mutability, + last_read_at, }, ); } @@ -302,7 +350,6 @@ if let Some(json_persister) = json_persister { if !download_results.is_empty() { - let download_results = download_results.clone(); tasks.lock().await.spawn(async move { if let Err(e) = json_persister.persist_response(download_results).await { error!("Could not persist downloaded JSONs: {:?}", e); diff --git a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs index c1b3cda0e..f333f4aee 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs @@ -2,12 +2,13 @@ use std::cmp::Ordering; use std::collections::HashMap; use std::path::Path; -use entities::models::{AssetSignatureWithPagination, OffChainData}; +use entities::models::AssetSignatureWithPagination; use entities::models::{CoreFeesAccountWithSortingID, TokenAccResponse}; use jsonpath_lib::JsonPathError; use mime_guess::Mime; use num_traits::Pow; use rocks_db::errors::StorageError; +use rocks_db::offchain_data::OffChainData; use serde_json::Value; use solana_program::pubkey::Pubkey; use tracing::error; @@ -89,7 +90,8 @@ pub fn get_content( offchain_data: &OffChainData, ) -> Result<Content, StorageError> { let json_uri = asset_dynamic.url.value.clone(); - let metadata: Value = serde_json::from_str(&offchain_data.metadata).unwrap_or(Value::Null); + let metadata = offchain_data.metadata.clone().unwrap_or_default(); + let metadata: Value = serde_json::from_str(&metadata).unwrap_or(Value::Null); let chain_data: Value = serde_json::from_str( asset_dynamic .onchain_data @@ -232,7 +234,8 @@ fn extract_collection_metadata( asset_dynamic: &AssetDynamicDetails, offchain_data: &OffChainData, ) -> MetadataMap { - let metadata: Value = serde_json::from_str(&offchain_data.metadata).unwrap_or(Value::Null); + let metadata = offchain_data.metadata.clone().unwrap_or_default(); + let metadata: Value = serde_json::from_str(&metadata).unwrap_or(Value::Null); let chain_data: Value = serde_json::from_str( asset_dynamic .onchain_data @@ -387,8 +390,16 @@ pub fn asset_to_rpc( let mpl_core_info = match interface { Interface::MplCoreAsset | Interface::MplCoreCollection => Some(MplCoreInfo { - num_minted: full_asset.asset_dynamic.num_minted.as_ref().map(|u| u.value), - current_size: full_asset.asset_dynamic.current_size.as_ref().map(|u| u.value), + num_minted: full_asset + .asset_dynamic + .num_minted + .as_ref() + .map(|u| u.value), + 
current_size: full_asset + .asset_dynamic + .current_size + .as_ref() + .map(|u| u.value), plugins_json_version: full_asset .asset_dynamic .plugins_json_version @@ -407,14 +418,14 @@ pub fn asset_to_rpc( edition_number: edition_info.edition_number, }) } else { - Some(Supply{ + Some(Supply { edition_nonce, print_current_supply: 0, print_max_supply: Some(0), edition_number: None, }) } - }, + } _ => None, }; diff --git a/nft_ingester/src/api/dapi/rpc_asset_models.rs b/nft_ingester/src/api/dapi/rpc_asset_models.rs index efb6fef47..da4c42f45 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_models.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_models.rs @@ -9,9 +9,12 @@ use { use crate::api::dapi::response::InscriptionResponse; use entities::enums::{Interface, OwnershipModel, RoyaltyModel, UseMethod}; -use entities::models::{EditionData, OffChainData, SplMint, TokenAccount}; -use rocks_db::asset::{AssetCollection, AssetLeaf}; +use entities::models::{EditionData, SplMint, TokenAccount}; use rocks_db::inscriptions::{Inscription, InscriptionData}; +use rocks_db::{ + asset::{AssetCollection, AssetLeaf}, + offchain_data::OffChainData, +}; use rocks_db::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails}; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] diff --git a/nft_ingester/src/bin/migrator/main.rs b/nft_ingester/src/bin/migrator/main.rs index c37cacef0..bf75f8578 100644 --- a/nft_ingester/src/bin/migrator/main.rs +++ b/nft_ingester/src/bin/migrator/main.rs @@ -1,13 +1,14 @@ use std::sync::Arc; use entities::enums::TaskStatus; -use entities::models::{OffChainData, Task}; +use entities::models::Task; use metrics_utils::red::RequestErrorDurationMetrics; use metrics_utils::utils::start_metrics; use metrics_utils::{JsonMigratorMetricsConfig, MetricState, MetricStatus, MetricsTrait}; use postgre_client::PgClient; use rocks_db::asset::AssetCompleteDetails; use rocks_db::column::TypedColumn; +use rocks_db::offchain_data::OffChainData; use tokio::sync::broadcast::Receiver; use tokio::sync::{broadcast, Mutex}; use tokio::task::{JoinError, JoinSet}; @@ -18,7 +19,7 @@ use nft_ingester::config::{ }; use nft_ingester::error::IngesterError; use nft_ingester::init::graceful_stop; -use rocks_db::asset_generated::asset as fb; +use rocks_db::generated::asset_generated::asset as fb; use rocks_db::migrator::MigrationState; use rocks_db::Storage; @@ -173,11 +174,10 @@ impl JsonMigrator { match metadata { Ok(metadata) => { - match self - .target_rocks_db - .asset_offchain_data - .put(metadata.url.clone(), metadata) - { + match self.target_rocks_db.asset_offchain_data.put( + metadata.url.clone().expect("Metadata URL cannot be empty"), + metadata, + ) { Ok(_) => { self.metrics .inc_jsons_migrated("json_migrated", MetricStatus::SUCCESS); diff --git a/nft_ingester/src/bin/raw_backup/main.rs b/nft_ingester/src/bin/raw_backup/main.rs index 9acba85b6..6e49885a1 100644 --- a/nft_ingester/src/bin/raw_backup/main.rs +++ b/nft_ingester/src/bin/raw_backup/main.rs @@ -1,11 +1,11 @@ use std::sync::Arc; use clap::{arg, Parser}; -use entities::models::OffChainData; use entities::models::RawBlock; use metrics_utils::red::RequestErrorDurationMetrics; use nft_ingester::config::init_logger; use rocks_db::column::TypedColumn; +use rocks_db::offchain_data::OffChainData; use tempfile::TempDir; use tokio::sync::Mutex; use tokio::task::JoinSet; @@ -37,7 +37,7 @@ pub async fn main() -> Result<(), IngesterError> { let secondary_rocks_dir = TempDir::new().unwrap(); let source_storage = 
Storage::open_secondary( &config.source_db, - secondary_rocks_dir.path().to_str().unwrap(), + &secondary_rocks_dir.path().to_str().unwrap().to_string(), mutexed_tasks.clone(), red_metrics.clone(), MigrationState::Last, diff --git a/nft_ingester/src/bin/slot_checker/main.rs b/nft_ingester/src/bin/slot_checker/main.rs index 871b5c952..776be2db1 100644 --- a/nft_ingester/src/bin/slot_checker/main.rs +++ b/nft_ingester/src/bin/slot_checker/main.rs @@ -8,12 +8,13 @@ use indicatif::{ProgressBar, ProgressStyle}; use metrics_utils::MetricState; use rocks_db::column::TypedColumn; use rocks_db::migrator::MigrationVersions; +use rocks_db::offchain_data::OffChainData; use rocks_db::Storage; use tokio::signal; use tokio::sync::{broadcast, Mutex as AsyncMutex}; use tracing::{error, info, warn}; -use entities::models::{OffChainData, RawBlock}; +use entities::models::RawBlock; use interface::slots_dumper::SlotsDumper; use usecase::bigtable::BigTableClient; use usecase::slots_collector::SlotsCollector; @@ -261,7 +262,12 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> { // Start slot collection let _ = slots_collector - .collect_slots(&BUBBLEGUM_PROGRAM_ID, last_persisted_slot, args.first_slot.unwrap_or_default(), &shutdown_rx) + .collect_slots( + &BUBBLEGUM_PROGRAM_ID, + last_persisted_slot, + args.first_slot.unwrap_or_default(), + &shutdown_rx, + ) .await; // Collection done, stop the spinner diff --git a/nft_ingester/src/bin/synchronizer_utils/main.rs b/nft_ingester/src/bin/synchronizer_utils/main.rs index 66fbf213a..0b4a7867b 100644 --- a/nft_ingester/src/bin/synchronizer_utils/main.rs +++ b/nft_ingester/src/bin/synchronizer_utils/main.rs @@ -2,8 +2,8 @@ use clap::Parser; use itertools::Itertools; use nft_ingester::error::IngesterError; use rocks_db::asset::AssetCompleteDetails; -use rocks_db::asset_generated::asset as fb; use rocks_db::column::TypedColumn; +use rocks_db::generated::asset_generated::asset as fb; use rocks_db::key_encoders::decode_u64x2_pubkey; use rocks_db::migrator::MigrationState; use rocks_db::storage_traits::AssetIndexReader; diff --git a/nft_ingester/src/json_worker.rs b/nft_ingester/src/json_worker.rs index 3bd03e1a6..6ab2fb2f7 100644 --- a/nft_ingester/src/json_worker.rs +++ b/nft_ingester/src/json_worker.rs @@ -2,7 +2,7 @@ use crate::api::dapi::rpc_asset_convertors::parse_files; use crate::config::{setup_config, IngesterConfig, INGESTER_CONFIG_PREFIX}; use async_trait::async_trait; use entities::enums::TaskStatus; -use entities::models::{JsonDownloadTask, OffChainData}; +use entities::models::JsonDownloadTask; use interface::error::JsonDownloaderError; use interface::json::{JsonDownloadResult, JsonDownloader, JsonPersister}; use metrics_utils::red::RequestErrorDurationMetrics; @@ -11,6 +11,7 @@ use postgre_client::tasks::UpdatedTask; use postgre_client::PgClient; use reqwest::ClientBuilder; use rocks_db::asset_previews::UrlToDownload; +use rocks_db::offchain_data::OffChainData; use rocks_db::Storage; use serde_json::Value; use std::collections::HashMap; @@ -274,6 +275,7 @@ impl JsonDownloader for JsonWorker { JsonDownloaderError::ErrorDownloading(format!("Failed to create client: {:?}", e)) })?; + // TODO: maybe IPFS/Arweave stuff might be done here // Detect if the URL is an IPFS link let parsed_url = if url.starts_with("ipfs://") { // Extract the IPFS hash or path @@ -367,7 +369,9 @@ impl JsonPersister for JsonWorker { results: Vec<(String, Result<JsonDownloadResult, JsonDownloaderError>)>, ) -> Result<(), JsonDownloaderError> { let mut pg_updates = Vec::new(); + // TODO: store updates here let mut rocks_updates = 
HashMap::new(); + let curr_time = chrono::Utc::now().timestamp(); for (metadata_url, result) in results.iter() { match result { @@ -375,8 +379,10 @@ impl JsonPersister for JsonWorker { rocks_updates.insert( metadata_url.clone(), OffChainData { - url: metadata_url.clone(), - metadata: json_file.clone(), + storage_mutability: metadata_url.as_str().into(), + url: Some(metadata_url.clone()), + metadata: Some(json_file.clone()), + last_read_at: curr_time, }, ); pg_updates.push(UpdatedTask { @@ -396,12 +402,13 @@ impl JsonPersister for JsonWorker { rocks_updates.insert( metadata_url.clone(), OffChainData { - url: metadata_url.clone(), - metadata: format!( - "{{\"image\":\"{}\",\"type\":\"{}\"}}", - url, mime_type - ) - .to_string(), + url: Some(metadata_url.clone()), + metadata: Some( + format!("{{\"image\":\"{}\",\"type\":\"{}\"}}", url, mime_type) + .to_string(), + ), + last_read_at: curr_time, + storage_mutability: metadata_url.as_str().into(), }, ); self.metrics.inc_tasks("media", MetricStatus::SUCCESS); @@ -417,8 +424,10 @@ impl JsonPersister for JsonWorker { rocks_updates.insert( metadata_url.clone(), OffChainData { - url: metadata_url.clone(), - metadata: "".to_string(), + url: Some(metadata_url.clone()), + metadata: Some("".to_string()), + last_read_at: curr_time, + storage_mutability: metadata_url.as_str().into(), }, ); @@ -473,8 +482,8 @@ impl JsonPersister for JsonWorker { if !rocks_updates.is_empty() { let urls_to_download = rocks_updates .values() - .filter(|data| !data.metadata.is_empty()) - .filter_map(|data| parse_files(&data.metadata)) + .filter(|data| data.metadata.is_some()) + .filter_map(|data| parse_files(data.metadata.clone().unwrap().as_str())) .flat_map(|files| files.into_iter()) .filter_map(|file| file.uri) .map(|uri| (uri, UrlToDownload::default())) @@ -482,7 +491,7 @@ impl JsonPersister for JsonWorker { self.rocks_db .asset_offchain_data - .put_batch(rocks_updates) + .put_batch_flatbuffers(rocks_updates) .await .map_err(|e| JsonDownloaderError::MainStorageError(e.to_string()))?; diff --git a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs index 0f46ef749..02f10df95 100644 --- a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs +++ b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs @@ -15,7 +15,7 @@ use entities::enums::{ SpecificationAssetClass, TokenStandard, UseMethod, }; use entities::models::{ - BatchMintToVerify, BufferedTransaction, OffChainData, SignatureWithSlot, UpdateVersion, Updated, + BatchMintToVerify, BufferedTransaction, SignatureWithSlot, UpdateVersion, Updated, }; use entities::models::{ChainDataV1, Creator, Uses}; use lazy_static::lazy_static; @@ -27,6 +27,7 @@ use rocks_db::asset::AssetOwner; use rocks_db::asset::{ AssetAuthority, AssetCollection, AssetDynamicDetails, AssetLeaf, AssetStaticDetails, }; +use rocks_db::offchain_data::OffChainData; use rocks_db::transaction::{ AssetDynamicUpdate, AssetUpdate, AssetUpdateEvent, InstructionResult, TransactionResult, TreeUpdate, @@ -1143,10 +1144,15 @@ impl BubblegumTxProcessor { if let Some(dynamic_info) = &update.update { if let Some(data) = &dynamic_info.dynamic_data { let url = data.url.value.clone(); + let storage_mutability = url.as_str().into(); + let last_read_at = Utc::now().timestamp(); + if let Some(metadata) = batch_mint.raw_metadata_map.get(&url) { update.offchain_data_update = Some(OffChainData { - url, - metadata: 
metadata.to_string(), + url: Some(url), + metadata: Some(metadata.to_string()), + storage_mutability, + last_read_at, }); } } diff --git a/nft_ingester/src/scheduler.rs b/nft_ingester/src/scheduler.rs index dfbcc11b2..405f5384c 100644 --- a/nft_ingester/src/scheduler.rs +++ b/nft_ingester/src/scheduler.rs @@ -2,8 +2,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use entities::models::OffChainData; use rocks_db::asset_previews::UrlToDownload; +use rocks_db::offchain_data::OffChainData; use tracing::log::error; use rocks_db::Storage; @@ -213,7 +213,9 @@ impl Job for InitUrlsToDownloadJob { let urls: HashMap<String, UrlToDownload> = data .into_iter() - .filter_map(|(_, OffChainData { url: _, metadata })| parse_files(&metadata)) + .filter_map(|(_, OffChainData { metadata, .. })| { + metadata.as_deref().and_then(parse_files) + }) .flat_map(|files| files.into_iter().filter_map(|f| f.uri)) .map(|uri| { ( diff --git a/nft_ingester/tests/gapfiller_tests.rs b/nft_ingester/tests/gapfiller_tests.rs index dd677584c..771d24dc5 100644 --- a/nft_ingester/tests/gapfiller_tests.rs +++ b/nft_ingester/tests/gapfiller_tests.rs @@ -5,7 +5,7 @@ use interface::asset_streaming_and_discovery::{ }; use metrics_utils::red::RequestErrorDurationMetrics; use nft_ingester::gapfiller::{process_asset_details_stream, process_raw_blocks_stream}; -use rocks_db::asset_generated::asset as fb; +use rocks_db::generated::asset_generated::asset as fb; use rocks_db::{asset::AssetCompleteDetails, column::TypedColumn, migrator::MigrationState}; use solana_sdk::pubkey::Pubkey; use solana_transaction_status::UiConfirmedBlock; diff --git a/rocks-db/benches/misc_benchmark.rs b/rocks-db/benches/misc_benchmark.rs index 841749cd4..82d04f67c 100644 --- a/rocks-db/benches/misc_benchmark.rs +++ b/rocks-db/benches/misc_benchmark.rs @@ -1,4 +1,4 @@ -use rocks_db::asset_generated::asset as fb; +use rocks_db::generated::asset_generated::asset as fb; use bincode::{deserialize, serialize}; use criterion::{criterion_group, criterion_main, Criterion}; diff --git a/rocks-db/src/asset.rs b/rocks-db/src/asset.rs index 44f8ba3a3..7f8d90f79 100644 --- a/rocks-db/src/asset.rs +++ b/rocks-db/src/asset.rs @@ -1,11 +1,11 @@ use std::collections::HashMap; use crate::inscriptions::{Inscription, InscriptionData}; +use crate::offchain_data::OffChainData; use bincode::{deserialize, serialize}; use entities::enums::{ChainMutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass}; use entities::models::{ - AssetIndex, EditionData, OffChainData, SplMint, TokenAccount, UpdateVersion, Updated, - UrlWithStatus, + AssetIndex, EditionData, SplMint, TokenAccount, UpdateVersion, Updated, UrlWithStatus, }; use flatbuffers::{FlatBufferBuilder, WIPOffset}; use rocksdb::MergeOperands; @@ -15,10 +15,10 @@ use solana_sdk::{hash::Hash, pubkey::Pubkey}; use std::cmp::{max, Ordering}; use tracing::{error, warn}; -use crate::asset_generated::asset as fb; +use crate::generated::asset_generated::asset as fb; use crate::key_encoders::{decode_pubkey, decode_u64_pubkey, encode_pubkey, encode_u64_pubkey}; -use crate::Result; use crate::TypedColumn; +use crate::{Result, ToFlatbuffersConverter}; const MAX_OTHER_OWNERS: usize = 10; @@ -59,18 +59,17 @@ impl From for AssetCompleteDetails { } } -impl AssetCompleteDetails { - pub fn convert_to_fb_bytes(&self) -> Vec<u8> { +impl<'a> ToFlatbuffersConverter<'a> for AssetCompleteDetails { + type Target = fb::AssetCompleteDetails<'a>; + + fn convert_to_fb_bytes(&self) -> Vec<u8> { let mut builder = FlatBufferBuilder::new(); let 
asset_complete_details = self.convert_to_fb(&mut builder); builder.finish_minimal(asset_complete_details); builder.finished_data().to_vec() } - pub fn convert_to_fb<'a>( - &self, - builder: &mut FlatBufferBuilder<'a>, - ) -> WIPOffset<fb::AssetCompleteDetails<'a>> { + fn convert_to_fb(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Target> { let pk = Some(builder.create_vector(&self.pubkey.to_bytes())); let static_details = self .static_details @@ -3832,9 +3831,10 @@ mod tests { let asset; unsafe { - asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( - data_bytes.as_slice(), - ); + asset = + crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( + data_bytes.as_slice(), + ); } let asset_mapped = AssetCompleteDetails::from(asset); println!("STATIC: {:#?}", asset.static_details().is_some()); @@ -3878,9 +3878,10 @@ mod tests { let asset; unsafe { - asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( - merge_result.as_slice(), - ); + asset = + crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( + merge_result.as_slice(), + ); } assert!(asset.other_known_owners().is_none()); let asset_mapped = AssetCompleteDetails::from(asset); @@ -3920,9 +3921,10 @@ mod tests { let asset; unsafe { - asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( - merge_result.as_slice(), - ); + asset = + crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( + merge_result.as_slice(), + ); } assert!(asset.other_known_owners().is_none()); let asset_mapped = AssetCompleteDetails::from(asset); @@ -4073,7 +4075,7 @@ mod tests { let perm_name = perm.iter().map(|(k, _)| k).join(", "); let asset; unsafe { - asset = crate::asset_generated::asset::root_as_asset_complete_details_unchecked( + asset = crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( merge_result.as_slice(), ); } diff --git a/rocks-db/src/asset_client.rs b/rocks-db/src/asset_client.rs index 991db2b04..d2ccd4b00 100644 --- a/rocks-db/src/asset_client.rs +++ b/rocks-db/src/asset_client.rs @@ -6,9 +6,9 @@ use crate::asset::{ AssetCollection, AssetCompleteDetails, AssetSelectedMaps, AssetsUpdateIdx, FungibleAssetsUpdateIdx, SlotAssetIdx, SlotAssetIdxKey, }; -use crate::asset_generated::asset as fb; use crate::column::{Column, TypedColumn}; use crate::errors::StorageError; +use crate::generated::asset_generated::asset as fb; use crate::key_encoders::encode_u64x2_pubkey; use crate::{Result, Storage, BATCH_GET_ACTION, ROCKS_COMPONENT}; use entities::api_req_params::Options; @@ -17,6 +17,16 @@ use entities::models::{EditionData, PubkeyWithSlot}; use futures_util::FutureExt; use std::collections::HashMap; +#[macro_export] +macro_rules! to_map { + ($res:expr) => {{ + $res.map_err(|e| StorageError::Common(e.to_string()))? + .into_iter() + .filter_map(|asset| asset.map(|a| (a.pubkey, a))) + .collect::<HashMap<_, _>>() + }}; +} + impl Storage { fn get_next_fungible_asset_update_seq(&self) -> Result<u64> { if self.fungible_assets_update_last_seq.load(Ordering::Relaxed) == 0 { @@ -131,27 +141,13 @@ impl Storage { Ok(()) } -} -#[macro_export] -macro_rules! to_map { - ($res:expr) => {{ - $res.map_err(|e| StorageError::Common(e.to_string()))? 
- .into_iter() - .filter_map(|asset| asset.map(|a| (a.pubkey, a))) - .collect::<HashMap<_, _>>() -}}; -} - -impl Storage { pub async fn get_asset_selected_maps_async( &self, asset_ids: Vec<Pubkey>, owner_address: &Option<Pubkey>, options: &Options, ) -> Result<AssetSelectedMaps> { - let assets_with_collections_and_urls_fut = - self.get_assets_with_collections_and_urls(asset_ids.clone()); let assets_leaf_fut = self.asset_leaf_data.batch_get(asset_ids.clone()); let token_accounts_fut = if let Some(owner_address) = owner_address { self.get_raw_token_accounts(Some(*owner_address), None, None, None, None, None, true) .boxed() } else { async { Ok(Vec::new()) }.boxed() }; - let (mut assets_data, assets_collection_pks, mut urls) = - assets_with_collections_and_urls_fut.await?; + let (mut assets_data, assets_collection_pks, mut urls) = self + .get_assets_with_collections_and_urls(asset_ids.clone()) + .await?; let mut mpl_core_collections = HashMap::new(); // todo: consider async/future here, but not likely as the very next call depends on urls from this one if !assets_collection_pks.is_empty() { @@ -221,8 +218,8 @@ impl Storage { .into_iter() .filter_map(|asset| { asset - .filter(|a| !a.metadata.is_empty()) - .map(|a| (a.url.clone(), a)) + .filter(|a| a.url.is_some() && !a.url.clone().unwrap().is_empty()) + .map(|a| (a.url.clone().unwrap(), a)) }) .collect::<HashMap<_, _>>(); diff --git a/rocks-db/src/asset_streaming_client.rs b/rocks-db/src/asset_streaming_client.rs index 4e33020a4..f86589923 100644 --- a/rocks-db/src/asset_streaming_client.rs +++ b/rocks-db/src/asset_streaming_client.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use async_trait::async_trait; use entities::enums::TokenMetadataEdition; -use entities::models::{CompleteAssetDetails, OffChainData, SplMint, Updated}; +use entities::models::{AssetCompleteDetailsGrpc, SplMint, Updated}; use interface::asset_streaming_and_discovery::{ AssetDetailsStream, AssetDetailsStreamer, AsyncError, }; @@ -12,8 +12,9 @@ use solana_sdk::pubkey::Pubkey; use tokio_stream::wrappers::ReceiverStream; use crate::asset::{AssetCompleteDetails, SlotAssetIdxKey}; -use crate::asset_generated::asset as fb; use crate::cl_items::{ClItem, ClItemKey, ClLeaf, ClLeafKey}; +use crate::generated::asset_generated::asset as fb; +use crate::offchain_data::OffChainData; use crate::{ asset::{AssetLeaf, SlotAssetIdx}, column::TypedColumn, @@ -51,7 +52,7 @@ async fn process_asset_details_range( start_slot: u64, end_slot: u64, metrics: Arc<RequestErrorDurationMetrics>, - tx: tokio::sync::mpsc::Sender<Result<CompleteAssetDetails, AsyncError>>, + tx: tokio::sync::mpsc::Sender<Result<AssetCompleteDetailsGrpc, AsyncError>>, ) -> Result<(), AsyncError> { let slot_asset_idx = Storage::column::<SlotAssetIdx>(backend.clone(), metrics.clone()); let iterator = slot_asset_idx.iter(SlotAssetIdxKey::new( @@ -89,7 +90,7 @@ async fn get_complete_asset_details( backend: Arc<DB>, pubkey: Pubkey, metrics: Arc<RequestErrorDurationMetrics>, -) -> crate::Result<CompleteAssetDetails> { +) -> crate::Result<AssetCompleteDetailsGrpc> { let data = backend.get_pinned_cf( &backend.cf_handle(AssetCompleteDetails::NAME).unwrap(), AssetCompleteDetails::encode_key(pubkey), @@ -198,9 +199,13 @@ (Some(edition), master_edition) } }; + let url = dynamic_data.url.clone(); let spl_mint = Storage::column::<SplMint>(backend.clone(), metrics.clone()).get(pubkey)?; - Ok(CompleteAssetDetails { + let off_chain_data_grpc = Storage::column::<OffChainData>(backend.clone(), metrics.clone()) + .get(url.clone().value)? 
+ .map(Into::into); + Ok(AssetCompleteDetailsGrpc { pubkey: static_data.pubkey, specification_asset_class: static_data.specification_asset_class, royalty_target_type: static_data.royalty_target_type, @@ -276,8 +281,7 @@ async fn get_complete_asset_details( .collect(), edition, master_edition, - offchain_data: Storage::column::<OffChainData>(backend.clone(), metrics.clone()) - .get(url.value)?, + offchain_data: off_chain_data_grpc, spl_mint, }) } diff --git a/rocks-db/src/batch_client.rs b/rocks-db/src/batch_client.rs index 8dd36a676..cd90332c8 100644 --- a/rocks-db/src/batch_client.rs +++ b/rocks-db/src/batch_client.rs @@ -4,22 +4,24 @@ use crate::asset::{ AssetCollection, AssetCompleteDetails, AssetLeaf, AssetsUpdateIdx, FungibleAssetsUpdateIdx, MplCoreCollectionAuthority, SlotAssetIdx, SlotAssetIdxKey, SourcedAssetLeaf, }; -use crate::asset_generated::asset as fb; use crate::cl_items::{ClItem, ClItemKey, ClLeaf, ClLeafKey, SourcedClItem}; use crate::column::TypedColumn; use crate::errors::StorageError; +use crate::generated::asset_generated::asset as fb; use crate::key_encoders::{decode_u64x2_pubkey, encode_u64x2_pubkey}; +use crate::offchain_data::OffChainData; use crate::storage_traits::{ AssetIndexReader, AssetSlotStorage, AssetUpdateIndexStorage, AssetUpdatedKey, }; use crate::{ AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Result, Storage, - BATCH_GET_ACTION, BATCH_ITERATION_ACTION, ITERATOR_TOP_ACTION, ROCKS_COMPONENT, + ToFlatbuffersConverter, BATCH_GET_ACTION, BATCH_ITERATION_ACTION, ITERATOR_TOP_ACTION, + ROCKS_COMPONENT, }; use async_trait::async_trait; use entities::enums::{SpecificationAssetClass, TokenMetadataEdition}; use entities::models::{ - AssetIndex, CompleteAssetDetails, FungibleAssetIndex, UpdateVersion, Updated, + AssetCompleteDetailsGrpc, AssetIndex, FungibleAssetIndex, UpdateVersion, Updated, }; use serde_json::json; use solana_sdk::pubkey::Pubkey; @@ -382,10 +384,13 @@ impl AssetIndexReader for Storage { .await? 
.into_iter() .flatten() - .map(|offchain_data| { + .filter(|off_chain_data| { + off_chain_data.url.is_some() && off_chain_data.metadata.is_some() + }) + .map(|off_chain_data| { ( - offchain_data.url.clone(), - !offchain_data.metadata.is_empty(), + off_chain_data.url.unwrap().clone(), + !off_chain_data.metadata.unwrap().is_empty(), ) }) .collect::<HashMap<_, _>>(); @@ -436,7 +441,7 @@ impl AssetSlotStorage for Storage { } impl Storage { - pub async fn insert_gaped_data(&self, data: CompleteAssetDetails) -> Result<()> { + pub async fn insert_gaped_data(&self, data: AssetCompleteDetailsGrpc) -> Result<()> { let write_version = if let Some(write_v) = data.authority.update_version { match write_v { UpdateVersion::WriteVersion(v) => Some(v), @@ -579,11 +584,13 @@ impl Storage { &TokenMetadataEdition::MasterEdition(master_edition), )?; } - if let Some(offchain_data) = data.offchain_data { - self.asset_offchain_data.merge_with_batch_cbor( + if let Some(off_chain_data) = data.offchain_data { + let url = off_chain_data.url.clone(); + let off_chain_data = OffChainData::from(off_chain_data); + self.asset_offchain_data.merge_with_batch_flatbuffers( &mut batch, - offchain_data.url.clone(), - &offchain_data, + url, + &off_chain_data, )?; } if let Some(spl_mint) = data.spl_mint { diff --git a/rocks-db/src/batch_savers.rs b/rocks-db/src/batch_savers.rs index 5ef144352..a4d22c196 100644 --- a/rocks-db/src/batch_savers.rs +++ b/rocks-db/src/batch_savers.rs @@ -1,6 +1,6 @@ use crate::asset::{AssetCollection, AssetCompleteDetails, MetadataMintMap}; -use crate::asset_generated::asset as fb; use crate::column::TypedColumn; +use crate::generated::asset_generated::asset as fb; use crate::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use crate::Result; use crate::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Storage}; diff --git a/rocks-db/src/bin/column_copier/main.rs b/rocks-db/src/bin/column_copier/main.rs index 14b1eaef6..81ad93a2f 100644 --- a/rocks-db/src/bin/column_copier/main.rs +++ b/rocks-db/src/bin/column_copier/main.rs @@ -1,7 +1,8 @@ -use entities::models::{OffChainData, RawBlock}; +use entities::models::RawBlock; use metrics_utils::red::RequestErrorDurationMetrics; use rocks_db::column::TypedColumn; use rocks_db::migrator::MigrationState; +use rocks_db::offchain_data::OffChainData; use rocks_db::Storage; use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; diff --git a/rocks-db/src/column.rs b/rocks-db/src/column.rs index 0a9a1bc8c..f63db7097 100644 --- a/rocks-db/src/column.rs +++ b/rocks-db/src/column.rs @@ -7,7 +7,7 @@ use metrics_utils::red::RequestErrorDurationMetrics; use rocksdb::{BoundColumnFamily, DBIteratorWithThreadMode, DB}; use serde::{de::DeserializeOwned, Serialize}; -use crate::{Result, StorageError, BATCH_GET_ACTION, ROCKS_COMPONENT}; +use crate::{Result, StorageError, ToFlatbuffersConverter, BATCH_GET_ACTION, ROCKS_COMPONENT}; pub trait TypedColumn { type KeyType: Sync + Clone + Send + Debug; type ValueType: Sync + Serialize + DeserializeOwned + Send; @@ -130,6 +130,21 @@ where serialize(v).map_err(|e| StorageError::Common(e.to_string())) }) } + + pub fn merge_with_batch_flatbuffers( + &self, + batch: &mut rocksdb::WriteBatchWithTransaction<false>, + key: C::KeyType, + value: &C::ValueType, + ) -> Result<()> + where + C::ValueType: for<'a> ToFlatbuffersConverter<'a>, + { + self.merge_with_batch_generic(batch, key, value, |v| { + Ok(ToFlatbuffersConverter::convert_to_fb_bytes(v)) + }) + } + pub(crate) fn merge_with_batch_raw( &self, batch: &mut 
rocksdb::WriteBatchWithTransaction<false>, @@ -266,6 +281,19 @@ where .await } + pub async fn put_batch_flatbuffers( + &self, + values: HashMap<C::KeyType, C::ValueType>, + ) -> Result<()> + where + C::ValueType: for<'a> ToFlatbuffersConverter<'a>, + { + self.put_batch_generic(values, |v| { + Ok(ToFlatbuffersConverter::convert_to_fb_bytes(v)) + }) + .await + } + pub async fn get_cbor_encoded(&self, key: C::KeyType) -> Result<Option<C::ValueType>> { let mut result = Ok(None); diff --git a/rocks-db/src/dump_client.rs b/rocks-db/src/dump_client.rs index eb2f7f4e6..a2e851406 100644 --- a/rocks-db/src/dump_client.rs +++ b/rocks-db/src/dump_client.rs @@ -1,6 +1,6 @@ use crate::asset::MplCoreCollectionAuthority; -use crate::asset_generated::asset as fb; use crate::column::TypedColumn; +use crate::generated::asset_generated::asset as fb; use crate::key_encoders::encode_u64x2_pubkey; use crate::storage_traits::AssetUpdatedKey; use crate::{column::Column, storage_traits::Dumper, Storage}; diff --git a/rocks-db/src/flatbuf/offchain_data.fbs b/rocks-db/src/flatbuf/offchain_data.fbs new file mode 100644 index 000000000..ddf749b26 --- /dev/null +++ b/rocks-db/src/flatbuf/offchain_data.fbs @@ -0,0 +1,16 @@ +// offchain_data.fbs +namespace OffChainData; + +enum StorageMutability : byte { + Immutable = 0, + Mutable +} + +table OffChainData { + storage_mutability: StorageMutability; + url: string; + metadata: string; + last_read_at: long = 0; +} + +root_type OffChainData; diff --git a/rocks-db/src/asset_generated.rs b/rocks-db/src/generated/asset_generated.rs similarity index 100% rename from rocks-db/src/asset_generated.rs rename to rocks-db/src/generated/asset_generated.rs diff --git a/rocks-db/src/generated/mod.rs b/rocks-db/src/generated/mod.rs new file mode 100644 index 000000000..17b5f8aa4 --- /dev/null +++ b/rocks-db/src/generated/mod.rs @@ -0,0 +1,12 @@ +#[allow( + clippy::missing_safety_doc, + unused_imports, + clippy::extra_unused_lifetimes +)] +pub mod asset_generated; +#[allow( + clippy::missing_safety_doc, + unused_imports, + clippy::extra_unused_lifetimes +)] +pub mod offchain_data_generated; diff --git a/rocks-db/src/generated/offchain_data_generated.rs b/rocks-db/src/generated/offchain_data_generated.rs new file mode 100644 index 000000000..7e2410cca --- /dev/null +++ b/rocks-db/src/generated/offchain_data_generated.rs @@ -0,0 +1,326 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +// @generated + +use core::mem; +use core::cmp::Ordering; + +extern crate flatbuffers; +use self::flatbuffers::{EndianScalar, Follow}; + +#[allow(unused_imports, dead_code)] +pub mod off_chain_data { + + use core::mem; + use core::cmp::Ordering; + + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; + +#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] +pub const ENUM_MIN_STORAGE_MUTABILITY: i8 = 0; +#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] +pub const ENUM_MAX_STORAGE_MUTABILITY: i8 = 1; +#[deprecated(since = "2.0.0", note = "Use associated constants instead. 
This will no longer be generated in 2021.")] +#[allow(non_camel_case_types)] +pub const ENUM_VALUES_STORAGE_MUTABILITY: [StorageMutability; 2] = [ + StorageMutability::Immutable, + StorageMutability::Mutable, +]; + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +#[repr(transparent)] +pub struct StorageMutability(pub i8); +#[allow(non_upper_case_globals)] +impl StorageMutability { + pub const Immutable: Self = Self(0); + pub const Mutable: Self = Self(1); + + pub const ENUM_MIN: i8 = 0; + pub const ENUM_MAX: i8 = 1; + pub const ENUM_VALUES: &'static [Self] = &[ + Self::Immutable, + Self::Mutable, + ]; + /// Returns the variant's name or "" if unknown. + pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::Immutable => Some("Immutable"), + Self::Mutable => Some("Mutable"), + _ => None, + } + } +} +impl core::fmt::Debug for StorageMutability { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } +} +impl<'a> flatbuffers::Follow<'a> for StorageMutability { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = flatbuffers::read_scalar_at::(buf, loc); + Self(b) + } +} + +impl flatbuffers::Push for StorageMutability { + type Output = StorageMutability; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + flatbuffers::emplace_scalar::(dst, self.0); + } +} + +impl flatbuffers::EndianScalar for StorageMutability { + type Scalar = i8; + #[inline] + fn to_little_endian(self) -> i8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: i8) -> Self { + let b = i8::from_le(v); + Self(b) + } +} + +impl<'a> flatbuffers::Verifiable for StorageMutability { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, pos: usize + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + i8::run_verifier(v, pos) + } +} + +impl flatbuffers::SimpleToVerifyInSlice for StorageMutability {} +pub enum OffChainDataOffset {} +#[derive(Copy, Clone, PartialEq)] + +pub struct OffChainData<'a> { + pub _tab: flatbuffers::Table<'a>, +} + +impl<'a> flatbuffers::Follow<'a> for OffChainData<'a> { + type Inner = OffChainData<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { _tab: flatbuffers::Table::new(buf, loc) } + } +} + +impl<'a> OffChainData<'a> { + pub const VT_STORAGE_MUTABILITY: flatbuffers::VOffsetT = 4; + pub const VT_URL: flatbuffers::VOffsetT = 6; + pub const VT_METADATA: flatbuffers::VOffsetT = 8; + pub const VT_LAST_READ_AT: flatbuffers::VOffsetT = 10; + + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + OffChainData { _tab: table } + } + #[allow(unused_mut)] + pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args OffChainDataArgs<'args> + ) -> flatbuffers::WIPOffset> { + let mut builder = OffChainDataBuilder::new(_fbb); + builder.add_last_read_at(args.last_read_at); + if let Some(x) = args.metadata { builder.add_metadata(x); } + if let Some(x) = args.url { builder.add_url(x); } + builder.add_storage_mutability(args.storage_mutability); + builder.finish() + } + + + #[inline] + pub fn storage_mutability(&self) -> StorageMutability { + // Safety: + // Created from valid Table for this 
object + // which contains a valid value in this slot + unsafe { self._tab.get::<StorageMutability>(OffChainData::VT_STORAGE_MUTABILITY, Some(StorageMutability::Immutable)).unwrap()} + } + #[inline] + pub fn url(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<&str>>(OffChainData::VT_URL, None)} + } + #[inline] + pub fn metadata(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::<flatbuffers::ForwardsUOffset<&str>>(OffChainData::VT_METADATA, None)} + } + #[inline] + pub fn last_read_at(&self) -> i64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::<i64>(OffChainData::VT_LAST_READ_AT, Some(0)).unwrap()} + } +} + +impl flatbuffers::Verifiable for OffChainData<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, pos: usize + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::<StorageMutability>("storage_mutability", Self::VT_STORAGE_MUTABILITY, false)? + .visit_field::<flatbuffers::ForwardsUOffset<&str>>("url", Self::VT_URL, false)? + .visit_field::<flatbuffers::ForwardsUOffset<&str>>("metadata", Self::VT_METADATA, false)? + .visit_field::<i64>("last_read_at", Self::VT_LAST_READ_AT, false)? + .finish(); + Ok(()) + } +} +pub struct OffChainDataArgs<'a> { + pub storage_mutability: StorageMutability, + pub url: Option<flatbuffers::WIPOffset<&'a str>>, + pub metadata: Option<flatbuffers::WIPOffset<&'a str>>, + pub last_read_at: i64, +} +impl<'a> Default for OffChainDataArgs<'a> { + #[inline] + fn default() -> Self { + OffChainDataArgs { + storage_mutability: StorageMutability::Immutable, + url: None, + metadata: None, + last_read_at: 0, + } + } +} + +pub struct OffChainDataBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset<flatbuffers::TableUnfinishedWIPOffset>, +} +impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> OffChainDataBuilder<'a, 'b, A> { + #[inline] + pub fn add_storage_mutability(&mut self, storage_mutability: StorageMutability) { + self.fbb_.push_slot::<StorageMutability>(OffChainData::VT_STORAGE_MUTABILITY, storage_mutability, StorageMutability::Immutable); + } + #[inline] + pub fn add_url(&mut self, url: flatbuffers::WIPOffset<&'b str>) { + self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(OffChainData::VT_URL, url); + } + #[inline] + pub fn add_metadata(&mut self, metadata: flatbuffers::WIPOffset<&'b str>) { + self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>(OffChainData::VT_METADATA, metadata); + } + #[inline] + pub fn add_last_read_at(&mut self, last_read_at: i64) { + self.fbb_.push_slot::<i64>(OffChainData::VT_LAST_READ_AT, last_read_at, 0); + } + #[inline] + pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> OffChainDataBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + OffChainDataBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset<OffChainData<'a>> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } +} + +impl core::fmt::Debug for OffChainData<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("OffChainData"); + ds.field("storage_mutability", &self.storage_mutability()); + ds.field("url", &self.url()); + ds.field("metadata", &self.metadata()); + ds.field("last_read_at", &self.last_read_at()); + ds.finish() + } +} +#[inline] +/// Verifies that a buffer of bytes contains a `OffChainData` +/// and returns it. 
+/// Note that verification is still experimental and may not +/// catch every error, or be maximally performant. For the +/// previous, unchecked, behavior use +/// `root_as_off_chain_data_unchecked`. +pub fn root_as_off_chain_data(buf: &[u8]) -> Result<OffChainData, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root::<OffChainData>(buf) +} +#[inline] +/// Verifies that a buffer of bytes contains a size prefixed +/// `OffChainData` and returns it. +/// Note that verification is still experimental and may not +/// catch every error, or be maximally performant. For the +/// previous, unchecked, behavior use +/// `size_prefixed_root_as_off_chain_data_unchecked`. +pub fn size_prefixed_root_as_off_chain_data(buf: &[u8]) -> Result<OffChainData, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root::<OffChainData>(buf) +} +#[inline] +/// Verifies, with the given options, that a buffer of bytes +/// contains a `OffChainData` and returns it. +/// Note that verification is still experimental and may not +/// catch every error, or be maximally performant. For the +/// previous, unchecked, behavior use +/// `root_as_off_chain_data_unchecked`. +pub fn root_as_off_chain_data_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], +) -> Result<OffChainData<'b>, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::<OffChainData<'b>>(opts, buf) +} +#[inline] +/// Verifies, with the given verifier options, that a buffer of +/// bytes contains a size prefixed `OffChainData` and returns +/// it. Note that verification is still experimental and may not +/// catch every error, or be maximally performant. For the +/// previous, unchecked, behavior use +/// `root_as_off_chain_data_unchecked`. +pub fn size_prefixed_root_as_off_chain_data_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], +) -> Result<OffChainData<'b>, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::<OffChainData<'b>>(opts, buf) +} +#[inline] +/// Assumes, without verification, that a buffer of bytes contains a OffChainData and returns it. +/// # Safety +/// Callers must trust the given bytes do indeed contain a valid `OffChainData`. +pub unsafe fn root_as_off_chain_data_unchecked(buf: &[u8]) -> OffChainData { + flatbuffers::root_unchecked::<OffChainData>(buf) +} +#[inline] +/// Assumes, without verification, that a buffer of bytes contains a size prefixed OffChainData and returns it. +/// # Safety +/// Callers must trust the given bytes do indeed contain a valid size prefixed `OffChainData`. 
+pub unsafe fn size_prefixed_root_as_off_chain_data_unchecked(buf: &[u8]) -> OffChainData { + flatbuffers::size_prefixed_root_unchecked::<OffChainData>(buf) +} +#[inline] +pub fn finish_off_chain_data_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + root: flatbuffers::WIPOffset<OffChainData<'a>>) { + fbb.finish(root, None); +} + +#[inline] +pub fn finish_size_prefixed_off_chain_data_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>(fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, root: flatbuffers::WIPOffset<OffChainData<'a>>) { + fbb.finish_size_prefixed(root, None); +} +} // pub mod OffChainData

diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index 969e977d7..5ffe5d4b2 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -1,7 +1,9 @@ use asset_previews::{AssetPreviews, UrlToDownload}; use entities::schedule::ScheduledJob; +use flatbuffers::{FlatBufferBuilder, WIPOffset}; use inflector::Inflector; use leaf_signatures::LeafSignature; +use offchain_data::{OffChainData, OffChainDataDeprecated}; use std::path::Path; use std::sync::atomic::AtomicU64; use std::{marker::PhantomData, sync::Arc}; @@ -20,8 +22,7 @@ pub use asset::{ use column::{Column, TypedColumn}; use entities::enums::TokenMetadataEdition; use entities::models::{ - AssetSignature, BatchMintToVerify, FailedBatchMint, OffChainData, RawBlock, SplMint, - TokenAccount, + AssetSignature, BatchMintToVerify, FailedBatchMint, RawBlock, SplMint, TokenAccount, }; use metrics_utils::red::RequestErrorDurationMetrics; use tokio::sync::Mutex; @@ -78,12 +79,7 @@ pub mod transaction_client; pub mod tree_seq; // import the flatbuffers runtime library extern crate flatbuffers; -#[allow( - clippy::missing_safety_doc, - unused_imports, - clippy::extra_unused_lifetimes -)] -pub mod asset_generated; +pub mod generated; pub mod mappers; pub type Result<T> = std::result::Result<T, StorageError>; @@ -189,6 +185,7 @@ pub struct Storage { pub asset_owner_data: Column, pub asset_collection_data: Column, pub asset_collection_data_deprecated: Column, + pub asset_offchain_data_deprecated: Column<OffChainDataDeprecated>, // Deprecated, remove end pub metadata_mint_map: Column<MetadataMintMap>, pub asset_leaf_data: Column<AssetLeaf>, @@ -244,6 +241,7 @@ impl Storage { let asset_leaf_data = Self::column(db.clone(), red_metrics.clone()); let asset_collection_data = Self::column(db.clone(), red_metrics.clone()); let asset_collection_data_deprecated = Self::column(db.clone(), red_metrics.clone()); + let asset_offchain_data_deprecated = Self::column(db.clone(), red_metrics.clone()); let asset_offchain_data = Self::column(db.clone(), red_metrics.clone()); let cl_items = Self::column(db.clone(), red_metrics.clone()); @@ -275,6 +273,7 @@ impl Storage { let spl_mints = Self::column(db.clone(), red_metrics.clone()); Self { + asset_offchain_data_deprecated, asset_data, mpl_core_collection_authorities, @@ -827,3 +826,9 @@ impl Storage { } } } + +pub trait ToFlatbuffersConverter<'a> { + type Target: 'a; + fn convert_to_fb(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Target>; + fn convert_to_fb_bytes(&self) -> Vec<u8>; +} diff --git a/rocks-db/src/mappers.rs b/rocks-db/src/mappers.rs index 7404a7cf1..15edbf9f1 100644 --- a/rocks-db/src/mappers.rs +++ b/rocks-db/src/mappers.rs @@ -2,7 +2,7 @@ use std::cmp::Ordering; use solana_sdk::pubkey::Pubkey; -use crate::asset_generated::asset as fb; +use crate::generated::asset_generated::asset as fb; use entities::enums::*; use entities::models::*; diff --git a/rocks-db/src/migrations/mod.rs b/rocks-db/src/migrations/mod.rs index ad9630703..086037ca7 100644 --- 
a/rocks-db/src/migrations/mod.rs +++ b/rocks-db/src/migrations/mod.rs @@ -1,4 +1,5 @@ pub mod clean_update_authorities; pub mod collection_authority; pub mod external_plugins; +pub mod offchain_data; pub mod spl2022; diff --git a/rocks-db/src/migrations/offchain_data.rs b/rocks-db/src/migrations/offchain_data.rs new file mode 100644 index 000000000..2fee0df82 --- /dev/null +++ b/rocks-db/src/migrations/offchain_data.rs @@ -0,0 +1,22 @@ +use crate::migrator::{RocksMigration, SerializationType}; +use crate::offchain_data::{OffChainData, OffChainDataDeprecated, StorageMutability}; + +impl From<OffChainDataDeprecated> for OffChainData { + fn from(value: OffChainDataDeprecated) -> Self { + let immutability = StorageMutability::from(value.url.as_str()); + Self { + storage_mutability: immutability, + url: Some(value.url), + metadata: Some(value.metadata), + last_read_at: 0, + } + } +} + +pub(crate) struct OffChainDataMigration; +impl RocksMigration for OffChainDataMigration { + const VERSION: u64 = 4; + const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; + type NewDataType = OffChainData; + type OldDataType = OffChainDataDeprecated; +} diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index b6cff3815..5d11ae371 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -28,6 +28,7 @@ pub enum MigrationState { pub enum SerializationType { Bincode, Cbor, + Flatbuffers, } pub trait RocksMigration { @@ -67,11 +68,11 @@ impl Storage { migration_version_manager: Arc, ) -> Result<()> { // TODO: how do I fix this for a brand new DB? - // let applied_migrations = migration_version_manager - // .get_all_applied_migrations() - // .map_err(StorageError::Common)?; - // let migration_applier = - // MigrationApplier::new(db_path, migration_storage_path, applied_migrations); + let applied_migrations = migration_version_manager + .get_all_applied_migrations() + .map_err(StorageError::Common)?; + let migration_applier = + MigrationApplier::new(db_path, migration_storage_path, applied_migrations); // // apply all migrations // migration_applier @@ -93,6 +94,11 @@ impl Storage { // crate::migrations::spl2022::DynamicDataToken2022MintExtentionsMigration, // ) // .await?; + + migration_applier + .apply_migration(crate::migrations::offchain_data::OffChainDataMigration) + .await?; + Ok(()) } @@ -346,6 +352,9 @@ impl<'a> MigrationApplier<'a> { StorageError::Common(e.to_string()) }) } + SerializationType::Flatbuffers => { + todo!() + } } } @@ -363,6 +372,7 @@ impl<'a> MigrationApplier<'a> { match M::SERIALIZATION_TYPE { SerializationType::Bincode => column.put_batch(std::mem::take(batch)).await, SerializationType::Cbor => column.put_batch_cbor(std::mem::take(batch)).await, + SerializationType::Flatbuffers => todo!(), } } } diff --git a/rocks-db/src/offchain_data.rs b/rocks-db/src/offchain_data.rs index 876ebf678..4b040b8eb 100644 --- a/rocks-db/src/offchain_data.rs +++ b/rocks-db/src/offchain_data.rs @@ -1,11 +1,114 @@ use crate::column::TypedColumn; +use crate::generated::offchain_data_generated::off_chain_data as fb; use crate::key_encoders::{decode_string, encode_string}; -use crate::Result; -use entities::models::OffChainData; +use crate::{Result, ToFlatbuffersConverter}; +use entities::models::OffChainDataGrpc; +use flatbuffers::{FlatBufferBuilder, WIPOffset}; +use serde::{Deserialize, Serialize}; -impl TypedColumn for OffChainData { +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +pub enum StorageMutability { + #[default] + Immutable, + Mutable, +} + +impl StorageMutability { 
diff --git a/rocks-db/src/offchain_data.rs b/rocks-db/src/offchain_data.rs index 876ebf678..4b040b8eb 100644 --- a/rocks-db/src/offchain_data.rs +++ b/rocks-db/src/offchain_data.rs @@ -1,11 +1,114 @@ use crate::column::TypedColumn; +use crate::generated::offchain_data_generated::off_chain_data as fb; use crate::key_encoders::{decode_string, encode_string}; -use crate::Result; -use entities::models::OffChainData; +use crate::{Result, ToFlatbuffersConverter}; +use entities::models::OffChainDataGrpc; +use flatbuffers::{FlatBufferBuilder, WIPOffset}; +use serde::{Deserialize, Serialize}; -impl TypedColumn for OffChainData { +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +pub enum StorageMutability { + #[default] + Immutable, + Mutable, +} + +impl StorageMutability { + pub fn is_mutable(&self) -> bool { + match self { + StorageMutability::Immutable => false, + StorageMutability::Mutable => true, + } + } +} + +impl From<&str> for StorageMutability { + fn from(storage_mutability: &str) -> Self { + if storage_mutability.starts_with("ipfs") || storage_mutability.starts_with("arweave") { + return StorageMutability::Immutable; + } else { + return StorageMutability::Mutable; + } + } +} + +impl From<OffChainData> for OffChainDataGrpc { + fn from(off_chain_data: OffChainData) -> Self { + OffChainDataGrpc { + url: off_chain_data.url.unwrap_or_default(), + metadata: off_chain_data.metadata.unwrap_or_default(), + } + } +} + +impl From<OffChainDataGrpc> for OffChainData { + fn from(off_chain_data: OffChainDataGrpc) -> Self { + let storage_mutability = StorageMutability::from(off_chain_data.url.as_str()); + OffChainData { + storage_mutability, + url: Some(off_chain_data.url), + metadata: Some(off_chain_data.metadata), + last_read_at: 0, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +pub struct OffChainData { + pub storage_mutability: StorageMutability, + pub url: Option<String>, + pub metadata: Option<String>, + pub last_read_at: i64, +} + +impl<'a> ToFlatbuffersConverter<'a> for OffChainData { + type Target = fb::OffChainData<'a>; + + fn convert_to_fb_bytes(&self) -> Vec<u8> { + let mut builder = FlatBufferBuilder::new(); + let off_chain_data = self.convert_to_fb(&mut builder); + builder.finish_minimal(off_chain_data); + builder.finished_data().to_vec() + } + + fn convert_to_fb(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset<Self::Target> { + let storage_mutability = self.storage_mutability.clone().into(); + let url = self.url.as_ref().map(|url| builder.create_string(&url)); + let metadata = self + .metadata + .as_ref() + .map(|metadata| builder.create_string(&metadata)); + + fb::OffChainData::create( + builder, + &fb::OffChainDataArgs { + storage_mutability, + url, + metadata, + last_read_at: self.last_read_at, + }, + ) + } +} + +impl From<StorageMutability> for fb::StorageMutability { + fn from(storage_mutability: StorageMutability) -> Self { + match storage_mutability { + StorageMutability::Immutable => fb::StorageMutability::Immutable, + StorageMutability::Mutable => fb::StorageMutability::Mutable, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +pub struct OffChainDataDeprecated { + pub url: String, + pub metadata: String, +} + +impl TypedColumn for OffChainDataDeprecated { + type KeyType = String; - type ValueType = Self; // The value type is the Asset struct itself + type ValueType = Self; const NAME: &'static str = "OFFCHAIN_DATA"; // Name of the column family fn encode_key(key: String) -> Vec<u8> { @@ -16,3 +119,17 @@ impl TypedColumn for OffChainData { decode_string(bytes) } } + +impl TypedColumn for OffChainData { + type KeyType = String; + type ValueType = Self; + const NAME: &'static str = "OFFCHAIN_DATA_V2"; // Name of the column family + + fn encode_key(key: String) -> Vec<u8> { + encode_string(key) + } + + fn decode_key(bytes: Vec<u8>) -> Result<Self::KeyType> { + decode_string(bytes) + } +}
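Note the split between the two trait methods: `convert_to_fb` borrows an external builder, so callers can amortize a single `FlatBufferBuilder` allocation across many records, while `convert_to_fb_bytes` is the one-shot convenience. A sketch of the reuse pattern, using only the API shown above:

    use flatbuffers::FlatBufferBuilder;
    use rocks_db::offchain_data::OffChainData;
    use rocks_db::ToFlatbuffersConverter;

    fn serialize_many(records: &[OffChainData]) -> Vec<Vec<u8>> {
        let mut builder = FlatBufferBuilder::new();
        let mut out = Vec::with_capacity(records.len());
        for record in records {
            let root = record.convert_to_fb(&mut builder);
            builder.finish_minimal(root);
            // Copy out the finished buffer, then reset the builder so its
            // internal allocation is reused for the next record.
            out.push(builder.finished_data().to_vec());
            builder.reset();
        }
        out
    }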
diff --git a/rocks-db/src/transaction.rs b/rocks-db/src/transaction.rs index 6f98ac427..a5ea091e5 100644 --- a/rocks-db/src/transaction.rs +++ b/rocks-db/src/transaction.rs @@ -1,5 +1,4 @@ use async_trait::async_trait; -use entities::models::OffChainData; use entities::models::{BatchMintToVerify, BufferedTransaction, SignatureWithSlot}; use interface::error::StorageError; use solana_sdk::pubkey::Pubkey; @@ -8,6 +7,7 @@ use spl_account_compression::state::PathNode; use crate::{ asset::{AssetCollection, AssetLeaf}, + offchain_data::OffChainData, AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, }; diff --git a/rocks-db/src/transaction_client.rs b/rocks-db/src/transaction_client.rs index d56ae5c68..33775fc4e 100644 --- a/rocks-db/src/transaction_client.rs +++ b/rocks-db/src/transaction_client.rs @@ -6,6 +6,7 @@ use solana_sdk::pubkey::Pubkey; use crate::asset::{AssetCompleteDetails, SourcedAssetLeaf}; use crate::column::TypedColumn; use crate::parameters::Parameter; +use crate::ToFlatbuffersConverter; use crate::{ parameters, signature_client::SignatureIdx, @@ -136,9 +137,9 @@ impl Storage { } if let Some(ref offchain_data) = update.offchain_data_update { - if let Err(e) = self.asset_offchain_data.merge_with_batch( + if let Err(e) = self.asset_offchain_data.merge_with_batch_flatbuffers( batch, - offchain_data.url.clone(), + offchain_data.url.clone().expect("Url should not be empty"), offchain_data, ) { tracing::error!("Failed to merge offchain data: {}", e); diff --git a/tests/setup/src/lib.rs b/tests/setup/src/lib.rs index 54213e45f..0a752e3d0 100644 --- a/tests/setup/src/lib.rs +++ b/tests/setup/src/lib.rs @@ -72,12 +72,10 @@ impl<'a> TestEnvironment<'a> { let syncronizer = nft_ingester::index_syncronizer::Synchronizer::new( env.rocks_env.storage.clone(), env.pg_env.client.clone(), - env.pg_env.client.clone(), BATCH_SIZE, "".to_string(), metrics_state.synchronizer_metrics.clone(), 1, - false, ); let (_, rx) = tokio::sync::broadcast::channel::<()>(1); let synchronizer = Arc::new(syncronizer); diff --git a/tests/setup/src/pg.rs b/tests/setup/src/pg.rs index ec31bfcd2..4655e1b7f 100644 --- a/tests/setup/src/pg.rs +++ b/tests/setup/src/pg.rs @@ -73,8 +73,11 @@ impl<'a> TestEnvironment<'a> { let node = cli.run(image); let (pool, db_name) = setup_database(&node).await; - let client = - PgClient::new_with_pool(pool.clone(), Arc::new(RequestErrorDurationMetrics::new())); + let client = PgClient::new_with_pool( + pool.clone(), + None, + Arc::new(RequestErrorDurationMetrics::new()), + ); TestEnvironment { client: Arc::new(client), @@ -88,8 +91,11 @@ impl<'a> TestEnvironment<'a> { let node = cli.run(image); let (pool, db_name) = setup_database(&node).await; - let client = - PgClient::new_with_pool(pool.clone(), Arc::new(RequestErrorDurationMetrics::new())); + let client = PgClient::new_with_pool( + pool.clone(), + None, + Arc::new(RequestErrorDurationMetrics::new()), + ); TestEnvironment { client: Arc::new(client), @@ -170,6 +176,7 @@ pub async fn setup_database<T: Image>(node: &Container<'_, T>) -> (Pool<Postgres>, String) { Asset supply: Some(Updated::new(slot, None, 1)), seq: None, is_burnt: Updated::new(slot, None, false), - was_decompressed: Updated::new(slot, None, false), + was_decompressed: Some(Updated::new(slot, None, false)), onchain_data: None, creators: Updated::new(slot, None, vec![generate_test_creator()]), royalty_amount: Updated::new(slot, None, 0), From c82abdaa69a7310d079ae14a0cb23ac1721cee72 Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Tue, 17 Dec 2024 07:16:27 +0200 Subject: [PATCH 02/15] reorganize folders in rocksdb --- nft_ingester/src/api/dapi/asset.rs | 4 +- nft_ingester/src/api/dapi/change_logs.rs | 4 +- .../src/api/dapi/rpc_asset_convertors.rs | 7 +-- nft_ingester/src/api/dapi/rpc_asset_models.rs | 10 ++-- nft_ingester/src/backfiller.rs | 4 +- .../src/batch_mint/batch_mint_persister.rs | 2 +- .../src/batch_mint/batch_mint_processor.rs | 2 +- nft_ingester/src/bin/explorer/main.rs | 2 +- nft_ingester/src/bin/ingester/main.rs | 2 +- nft_ingester/src/bin/migrator/main.rs | 4 +-
nft_ingester/src/bin/raw_backup/main.rs | 2 +- nft_ingester/src/bin/slot_checker/main.rs | 2 +- .../src/bin/synchronizer_utils/main.rs | 2 +- nft_ingester/src/json_worker.rs | 4 +- nft_ingester/src/price_fetcher.rs | 2 +- .../account_based/mpl_core_processor.rs | 6 ++- .../account_based/mplx_updates_processor.rs | 4 +- .../account_based/token_updates_processor.rs | 5 +- .../bubblegum_updates_processor.rs | 7 ++- nft_ingester/src/scheduler.rs | 4 +- nft_ingester/tests/api_tests.rs | 4 +- nft_ingester/tests/asset_previews_tests.rs | 2 +- nft_ingester/tests/batch_mint_test.rs | 2 +- nft_ingester/tests/clean_forks_test.rs | 2 +- nft_ingester/tests/gapfiller_tests.rs | 10 ++-- nft_ingester/tests/process_accounts.rs | 4 +- nft_ingester/tests/scheduler_tests.rs | 2 +- rocks-db/benches/misc_benchmark.rs | 5 +- rocks-db/src/batch_savers.rs | 3 +- rocks-db/src/bin/column_copier/main.rs | 2 +- rocks-db/src/bin/column_remover/main.rs | 26 +++++---- rocks-db/src/bin/fork_detector/main.rs | 2 +- rocks-db/src/bin/leaf_checker/main.rs | 2 +- rocks-db/src/{ => clients}/asset_client.rs | 2 + .../{ => clients}/asset_streaming_client.rs | 2 +- rocks-db/src/{ => clients}/batch_client.rs | 2 +- rocks-db/src/{ => clients}/dump_client.rs | 0 rocks-db/src/clients/mod.rs | 7 +++ .../raw_blocks_streaming_client.rs | 0 .../src/{ => clients}/signature_client.rs | 0 .../src/{ => clients}/transaction_client.rs | 0 rocks-db/src/{ => columns}/asset.rs | 12 ++--- rocks-db/src/{ => columns}/asset_previews.rs | 0 .../src/{ => columns}/asset_signatures.rs | 0 rocks-db/src/{ => columns}/batch_mint.rs | 0 rocks-db/src/{ => columns}/bubblegum_slots.rs | 0 rocks-db/src/{ => columns}/cl_items.rs | 0 rocks-db/src/{ => columns}/editions.rs | 0 rocks-db/src/{ => columns}/inscriptions.rs | 0 rocks-db/src/{ => columns}/leaf_signatures.rs | 0 rocks-db/src/columns/mod.rs | 16 ++++++ rocks-db/src/{ => columns}/offchain_data.rs | 0 rocks-db/src/{ => columns}/parameters.rs | 0 rocks-db/src/{ => columns}/raw_block.rs | 0 rocks-db/src/{ => columns}/token_accounts.rs | 0 rocks-db/src/{ => columns}/token_prices.rs | 0 rocks-db/src/fork_cleaner.rs | 4 +- rocks-db/src/lib.rs | 53 +++++++------------ rocks-db/src/migrations/offchain_data.rs | 4 +- rocks-db/src/migrator.rs | 2 +- rocks-db/src/transaction.rs | 2 +- .../tests/batch_client_integration_tests.rs | 5 +- rocks-db/tests/migration_tests.rs | 2 +- rocks-db/tests/parameters_tests.rs | 2 +- rocks-db/tests/urls_to_download_test.rs | 2 +- tests/setup/src/lib.rs | 5 +- tests/setup/src/rocks.rs | 8 +-- 67 files changed, 146 insertions(+), 129 deletions(-) rename rocks-db/src/{ => clients}/asset_client.rs (99%) rename rocks-db/src/{ => clients}/asset_streaming_client.rs (99%) rename rocks-db/src/{ => clients}/batch_client.rs (99%) rename rocks-db/src/{ => clients}/dump_client.rs (100%) create mode 100644 rocks-db/src/clients/mod.rs rename rocks-db/src/{ => clients}/raw_blocks_streaming_client.rs (100%) rename rocks-db/src/{ => clients}/signature_client.rs (100%) rename rocks-db/src/{ => clients}/transaction_client.rs (100%) rename rocks-db/src/{ => columns}/asset.rs (99%) rename rocks-db/src/{ => columns}/asset_previews.rs (100%) rename rocks-db/src/{ => columns}/asset_signatures.rs (100%) rename rocks-db/src/{ => columns}/batch_mint.rs (100%) rename rocks-db/src/{ => columns}/bubblegum_slots.rs (100%) rename rocks-db/src/{ => columns}/cl_items.rs (100%) rename rocks-db/src/{ => columns}/editions.rs (100%) rename rocks-db/src/{ => columns}/inscriptions.rs (100%) rename rocks-db/src/{ => 
columns}/leaf_signatures.rs (100%) create mode 100644 rocks-db/src/columns/mod.rs rename rocks-db/src/{ => columns}/offchain_data.rs (100%) rename rocks-db/src/{ => columns}/parameters.rs (100%) rename rocks-db/src/{ => columns}/raw_block.rs (100%) rename rocks-db/src/{ => columns}/token_accounts.rs (100%) rename rocks-db/src/{ => columns}/token_prices.rs (100%) diff --git a/nft_ingester/src/api/dapi/asset.rs b/nft_ingester/src/api/dapi/asset.rs index c45f69eeb..019e454be 100644 --- a/nft_ingester/src/api/dapi/asset.rs +++ b/nft_ingester/src/api/dapi/asset.rs @@ -7,8 +7,8 @@ use entities::enums::SpecificationAssetClass; use entities::models::AssetSignatureWithPagination; use interface::asset_sigratures::AssetSignaturesGetter; use interface::json::{JsonDownloadResult, JsonDownloader, JsonPersister}; +use rocks_db::columns::offchain_data::{OffChainData, StorageMutability}; use rocks_db::errors::StorageError; -use rocks_db::offchain_data::{OffChainData, StorageMutability}; use solana_sdk::pubkey::Pubkey; use tracing::error; @@ -18,7 +18,7 @@ use interface::price_fetcher::TokenPriceFetcher; use interface::processing_possibility::ProcessingPossibilityChecker; use itertools::Itertools; use metrics_utils::ApiMetricsConfig; -use rocks_db::asset::{AssetLeaf, AssetSelectedMaps}; +use rocks_db::columns::asset::{AssetLeaf, AssetSelectedMaps}; use rocks_db::Storage; use tokio::sync::Mutex; use tokio::task::{JoinError, JoinSet}; diff --git a/nft_ingester/src/api/dapi/change_logs.rs b/nft_ingester/src/api/dapi/change_logs.rs index 09de59b52..bbe661017 100644 --- a/nft_ingester/src/api/dapi/change_logs.rs +++ b/nft_ingester/src/api/dapi/change_logs.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, str::FromStr}; use interface::proofs::ProofChecker; use metrics_utils::ApiMetricsConfig; -use rocks_db::cl_items::{ClItemKey, ClLeafKey}; +use rocks_db::columns::cl_items::{ClItemKey, ClLeafKey}; use rocks_db::errors::StorageError; use solana_sdk::pubkey::Pubkey; use tracing::{debug, warn}; @@ -11,7 +11,7 @@ use tracing::{debug, warn}; use crate::api::dapi::model; use crate::api::dapi::rpc_asset_models::AssetProof; use interface::processing_possibility::ProcessingPossibilityChecker; -use rocks_db::asset_streaming_client::get_required_nodes_for_proof; +use rocks_db::clients::asset_streaming_client::get_required_nodes_for_proof; use rocks_db::Storage; use spl_concurrent_merkle_tree::node::empty_node; diff --git a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs index f333f4aee..e55b5c13e 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs @@ -7,8 +7,8 @@ use entities::models::{CoreFeesAccountWithSortingID, TokenAccResponse}; use jsonpath_lib::JsonPathError; use mime_guess::Mime; use num_traits::Pow; +use rocks_db::columns::offchain_data::OffChainData; use rocks_db::errors::StorageError; -use rocks_db::offchain_data::OffChainData; use serde_json::Value; use solana_program::pubkey::Pubkey; use tracing::error; @@ -29,8 +29,9 @@ use crate::api::dapi::response::InscriptionResponse; use crate::api::dapi::rpc_asset_models::{PriceInfo, TokenInfo}; use entities::api_req_params::Pagination; use entities::enums::{Interface, SpecificationVersions}; -use rocks_db::asset::AssetCollection; -use rocks_db::{AssetAuthority, AssetDynamicDetails, AssetStaticDetails}; +use rocks_db::columns::asset::{ + AssetAuthority, AssetCollection, AssetDynamicDetails, AssetStaticDetails, +}; use 
usecase::response_prettier::filter_non_null_fields; pub fn to_uri(uri: String) -> Option<String> { diff --git a/nft_ingester/src/api/dapi/rpc_asset_models.rs b/nft_ingester/src/api/dapi/rpc_asset_models.rs index da4c42f45..b129fa0c3 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_models.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_models.rs @@ -10,12 +10,14 @@ use { use crate::api::dapi::response::InscriptionResponse; use entities::enums::{Interface, OwnershipModel, RoyaltyModel, UseMethod}; use entities::models::{EditionData, SplMint, TokenAccount}; -use rocks_db::inscriptions::{Inscription, InscriptionData}; -use rocks_db::{ - asset::{AssetCollection, AssetLeaf}, offchain_data::OffChainData, }; -use rocks_db::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails}; +use rocks_db::columns::{ + asset::{ + AssetAuthority, AssetCollection, AssetDynamicDetails, AssetLeaf, AssetOwner, + AssetStaticDetails, + }, + inscriptions::{Inscription, InscriptionData}, offchain_data::OffChainData, }; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] pub struct AssetProof { diff --git a/nft_ingester/src/backfiller.rs b/nft_ingester/src/backfiller.rs index ca90c5f31..c4a59acec 100644 --- a/nft_ingester/src/backfiller.rs +++ b/nft_ingester/src/backfiller.rs @@ -9,8 +9,8 @@ use interface::signature_persistence::{BlockConsumer, BlockProducer}; use interface::slots_dumper::{SlotGetter, SlotsDumper}; use metrics_utils::BackfillerMetricsConfig; use plerkle_serialization::serializer::seralize_encoded_transaction_with_status; -use rocks_db::bubblegum_slots::ForceReingestableSlots; use rocks_db::column::TypedColumn; +use rocks_db::columns::bubblegum_slots::ForceReingestableSlots; use rocks_db::transaction::{TransactionProcessor, TransactionResultPersister}; use rocks_db::{SlotStorage, Storage}; use solana_program::pubkey::Pubkey; @@ -385,7 +385,7 @@ where C: BlockConsumer, { let start_slot = db - .get_parameter::<u64>(rocks_db::parameters::Parameter::LastBackfilledSlot) + .get_parameter::<u64>(rocks_db::columns::parameters::Parameter::LastBackfilledSlot) .await?; slot_db .db diff --git a/nft_ingester/src/batch_mint/batch_mint_persister.rs b/nft_ingester/src/batch_mint/batch_mint_persister.rs index 05fff186c..e845fc8f5 100644 --- a/nft_ingester/src/batch_mint/batch_mint_persister.rs +++ b/nft_ingester/src/batch_mint/batch_mint_persister.rs @@ -11,7 +11,7 @@ use entities::enums::PersistingBatchMintState; use entities::{enums::FailedBatchMintState, models::BatchMintToVerify}; use interface::{batch_mint::BatchMintDownloader, error::UsecaseError}; use metrics_utils::{BatchMintPersisterMetricsConfig, MetricStatus}; -use rocks_db::batch_mint::BatchMintWithStaker; +use rocks_db::columns::batch_mint::BatchMintWithStaker; use tokio::{sync::broadcast::Receiver, task::JoinError, time::Instant}; use tracing::{error, info}; diff --git a/nft_ingester/src/batch_mint/batch_mint_processor.rs b/nft_ingester/src/batch_mint/batch_mint_processor.rs index 54a56bd47..2030b5744 100644 --- a/nft_ingester/src/batch_mint/batch_mint_processor.rs +++ b/nft_ingester/src/batch_mint/batch_mint_processor.rs @@ -10,7 +10,7 @@ use interface::error::UsecaseError; use metrics_utils::BatchMintProcessorMetricsConfig; use postgre_client::model::BatchMintState; use postgre_client::PgClient; -use rocks_db::batch_mint::BatchMintWithStaker; +use rocks_db::columns::batch_mint::BatchMintWithStaker; use rocks_db::Storage; use solana_program::pubkey::Pubkey; use std::path::PathBuf; diff --git a/nft_ingester/src/bin/explorer/main.rs b/nft_ingester/src/bin/explorer/main.rs index
8c90e5e59..8f71b2bb5 100644 --- a/nft_ingester/src/bin/explorer/main.rs +++ b/nft_ingester/src/bin/explorer/main.rs @@ -9,7 +9,7 @@ use itertools::Itertools; use metrics_utils::ApiMetricsConfig; use prometheus_client::registry::Registry; use rocks_db::Storage; -use rocks_db::{asset, migrator::MigrationState}; +use rocks_db::{columns::asset, migrator::MigrationState}; use rocksdb::{ColumnFamilyDescriptor, Options, DB}; use serde::Deserialize; use std::net::SocketAddr; diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index 69f870032..18b317ecd 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -442,7 +442,7 @@ pub async fn main() -> Result<(), IngesterError> { if backfiller_config.should_reingest { warn!("'Reingest' flag is set, deleting last backfilled slot."); primary_rocks_storage - .delete_parameter::<u64>(rocks_db::parameters::Parameter::LastBackfilledSlot) + .delete_parameter::<u64>(rocks_db::columns::parameters::Parameter::LastBackfilledSlot) .await?; } diff --git a/nft_ingester/src/bin/migrator/main.rs b/nft_ingester/src/bin/migrator/main.rs index bf75f8578..de74d8840 100644 --- a/nft_ingester/src/bin/migrator/main.rs +++ b/nft_ingester/src/bin/migrator/main.rs @@ -6,9 +6,9 @@ use metrics_utils::red::RequestErrorDurationMetrics; use metrics_utils::utils::start_metrics; use metrics_utils::{JsonMigratorMetricsConfig, MetricState, MetricStatus, MetricsTrait}; use postgre_client::PgClient; -use rocks_db::asset::AssetCompleteDetails; use rocks_db::column::TypedColumn; -use rocks_db::offchain_data::OffChainData; +use rocks_db::columns::asset::AssetCompleteDetails; +use rocks_db::columns::offchain_data::OffChainData; use tokio::sync::broadcast::Receiver; use tokio::sync::{broadcast, Mutex}; use tokio::task::{JoinError, JoinSet}; diff --git a/nft_ingester/src/bin/raw_backup/main.rs b/nft_ingester/src/bin/raw_backup/main.rs index 6e49885a1..982562f90 100644 --- a/nft_ingester/src/bin/raw_backup/main.rs +++ b/nft_ingester/src/bin/raw_backup/main.rs @@ -5,7 +5,7 @@ use entities::models::RawBlock; use metrics_utils::red::RequestErrorDurationMetrics; use nft_ingester::config::init_logger; use rocks_db::column::TypedColumn; -use rocks_db::offchain_data::OffChainData; +use rocks_db::columns::offchain_data::OffChainData; use tempfile::TempDir; use tokio::sync::Mutex; use tokio::task::JoinSet; diff --git a/nft_ingester/src/bin/slot_checker/main.rs b/nft_ingester/src/bin/slot_checker/main.rs index 776be2db1..52a0547c6 100644 --- a/nft_ingester/src/bin/slot_checker/main.rs +++ b/nft_ingester/src/bin/slot_checker/main.rs @@ -7,8 +7,8 @@ use clap::Parser; use indicatif::{ProgressBar, ProgressStyle}; use metrics_utils::MetricState; use rocks_db::column::TypedColumn; +use rocks_db::columns::offchain_data::OffChainData; use rocks_db::migrator::MigrationVersions; -use rocks_db::offchain_data::OffChainData; use rocks_db::Storage; use tokio::signal; use tokio::sync::{broadcast, Mutex as AsyncMutex}; diff --git a/nft_ingester/src/bin/synchronizer_utils/main.rs b/nft_ingester/src/bin/synchronizer_utils/main.rs index 0b4a7867b..b43d4fcb2 100644 --- a/nft_ingester/src/bin/synchronizer_utils/main.rs +++ b/nft_ingester/src/bin/synchronizer_utils/main.rs @@ -1,8 +1,8 @@ use clap::Parser; use itertools::Itertools; use nft_ingester::error::IngesterError; -use rocks_db::asset::AssetCompleteDetails; use rocks_db::column::TypedColumn; +use rocks_db::columns::asset::AssetCompleteDetails; use rocks_db::generated::asset_generated::asset as fb; use
rocks_db::key_encoders::decode_u64x2_pubkey; use rocks_db::migrator::MigrationState; diff --git a/nft_ingester/src/json_worker.rs b/nft_ingester/src/json_worker.rs index 6ab2fb2f7..47199b150 100644 --- a/nft_ingester/src/json_worker.rs +++ b/nft_ingester/src/json_worker.rs @@ -10,8 +10,8 @@ use metrics_utils::{JsonDownloaderMetricsConfig, MetricStatus}; use postgre_client::tasks::UpdatedTask; use postgre_client::PgClient; use reqwest::ClientBuilder; -use rocks_db::asset_previews::UrlToDownload; -use rocks_db::offchain_data::OffChainData; +use rocks_db::columns::asset_previews::UrlToDownload; +use rocks_db::columns::offchain_data::OffChainData; use rocks_db::Storage; use serde_json::Value; use std::collections::HashMap; diff --git a/nft_ingester/src/price_fetcher.rs b/nft_ingester/src/price_fetcher.rs index 27be27c43..fdbbc0434 100644 --- a/nft_ingester/src/price_fetcher.rs +++ b/nft_ingester/src/price_fetcher.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use coingecko::CoinGeckoClient; use interface::error::UsecaseError; use interface::price_fetcher::PriceFetcher; -use rocks_db::token_prices::TokenPrice; +use rocks_db::columns::token_prices::TokenPrice; use rocks_db::Storage; use std::sync::Arc; use std::time::Duration; diff --git a/nft_ingester/src/processors/account_based/mpl_core_processor.rs b/nft_ingester/src/processors/account_based/mpl_core_processor.rs index 99d913725..20c864282 100644 --- a/nft_ingester/src/processors/account_based/mpl_core_processor.rs +++ b/nft_ingester/src/processors/account_based/mpl_core_processor.rs @@ -7,10 +7,12 @@ use entities::models::{ }; use heck::ToSnakeCase; use metrics_utils::IngesterMetricsConfig; -use rocks_db::asset::{AssetCollection, AssetCompleteDetails}; use rocks_db::batch_savers::{BatchSaveStorage, MetadataModels}; +use rocks_db::columns::asset::{ + AssetAuthority, AssetCollection, AssetCompleteDetails, AssetDynamicDetails, AssetOwner, + AssetStaticDetails, +}; use rocks_db::errors::StorageError; -use rocks_db::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails}; use serde_json::Map; use serde_json::{json, Value}; use solana_program::pubkey::Pubkey; diff --git a/nft_ingester/src/processors/account_based/mplx_updates_processor.rs b/nft_ingester/src/processors/account_based/mplx_updates_processor.rs index 8967bc12c..37e7ec607 100644 --- a/nft_ingester/src/processors/account_based/mplx_updates_processor.rs +++ b/nft_ingester/src/processors/account_based/mplx_updates_processor.rs @@ -12,11 +12,11 @@ use entities::enums::{ use entities::models::{BurntMetadataSlot, MetadataInfo, Updated}; use entities::models::{ChainDataV1, Creator, UpdateVersion, Uses}; use metrics_utils::IngesterMetricsConfig; -use rocks_db::asset::{ +use rocks_db::batch_savers::{BatchSaveStorage, MetadataModels}; +use rocks_db::columns::asset::{ AssetAuthority, AssetCollection, AssetCompleteDetails, AssetDynamicDetails, AssetStaticDetails, MetadataMintMap, }; -use rocks_db::batch_savers::{BatchSaveStorage, MetadataModels}; use rocks_db::errors::StorageError; use usecase::save_metrics::result_to_metrics; diff --git a/nft_ingester/src/processors/account_based/token_updates_processor.rs b/nft_ingester/src/processors/account_based/token_updates_processor.rs index 7f4af0257..9d46ea704 100644 --- a/nft_ingester/src/processors/account_based/token_updates_processor.rs +++ b/nft_ingester/src/processors/account_based/token_updates_processor.rs @@ -1,10 +1,11 @@ use entities::enums::{OwnerType, SpecificationAssetClass}; use entities::models::{Mint, TokenAccount, 
UpdateVersion, Updated}; use metrics_utils::IngesterMetricsConfig; -use rocks_db::asset::{AssetCompleteDetails, AssetDynamicDetails, AssetOwner}; use rocks_db::batch_savers::BatchSaveStorage; +use rocks_db::columns::asset::{ + AssetCompleteDetails, AssetDynamicDetails, AssetOwner, AssetStaticDetails, +}; use rocks_db::errors::StorageError; -use rocks_db::AssetStaticDetails; use solana_program::pubkey::Pubkey; use std::sync::Arc; use tokio::time::Instant; diff --git a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs index 02f10df95..9840b5f52 100644 --- a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs +++ b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs @@ -23,11 +23,10 @@ use metrics_utils::IngesterMetricsConfig; use mpl_bubblegum::types::LeafSchema; use mpl_bubblegum::InstructionName; use num_traits::FromPrimitive; -use rocks_db::asset::AssetOwner; -use rocks_db::asset::{ - AssetAuthority, AssetCollection, AssetDynamicDetails, AssetLeaf, AssetStaticDetails, +use rocks_db::columns::asset::{ + AssetAuthority, AssetCollection, AssetDynamicDetails, AssetLeaf, AssetOwner, AssetStaticDetails, }; -use rocks_db::offchain_data::OffChainData; +use rocks_db::columns::offchain_data::OffChainData; use rocks_db::transaction::{ AssetDynamicUpdate, AssetUpdate, AssetUpdateEvent, InstructionResult, TransactionResult, TreeUpdate, diff --git a/nft_ingester/src/scheduler.rs b/nft_ingester/src/scheduler.rs index 405f5384c..885f52333 100644 --- a/nft_ingester/src/scheduler.rs +++ b/nft_ingester/src/scheduler.rs @@ -2,8 +2,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use rocks_db::asset_previews::UrlToDownload; -use rocks_db::offchain_data::OffChainData; +use rocks_db::columns::asset_previews::UrlToDownload; +use rocks_db::columns::offchain_data::OffChainData; use tracing::log::error; use rocks_db::Storage; diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index 7eb93ba11..a298cbb27 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -52,9 +52,9 @@ mod tests { config::JsonMiddlewareConfig, json_worker::JsonWorker, processors::account_based::token_updates_processor::TokenAccountsProcessor, }; - use rocks_db::asset::{AssetCompleteDetails, AssetLeaf}; use rocks_db::batch_savers::BatchSaveStorage; - use rocks_db::inscriptions::{Inscription, InscriptionData}; + use rocks_db::columns::asset::{AssetCompleteDetails, AssetLeaf}; + use rocks_db::columns::inscriptions::{Inscription, InscriptionData}; use rocks_db::tree_seq::TreesGaps; use rocks_db::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Storage}; use serde_json::{json, Value}; diff --git a/nft_ingester/tests/asset_previews_tests.rs b/nft_ingester/tests/asset_previews_tests.rs index e397d3ea0..e76d02e16 100644 --- a/nft_ingester/tests/asset_previews_tests.rs +++ b/nft_ingester/tests/asset_previews_tests.rs @@ -4,7 +4,7 @@ mod tests { use itertools::Itertools; use nft_ingester::api::dapi::asset_preview::populate_previews_opt; use nft_ingester::api::dapi::rpc_asset_models::{Asset, Content, File, MetadataMap, Ownership}; - use rocks_db::asset_previews::AssetPreviews; + use rocks_db::columns::asset_previews::AssetPreviews; use setup::rocks::RocksTestEnvironment; use solana_sdk::keccak::{self, HASH_BYTES}; diff --git a/nft_ingester/tests/batch_mint_test.rs 
b/nft_ingester/tests/batch_mint_test.rs index de4ec3122..335fc7669 100644 --- a/nft_ingester/tests/batch_mint_test.rs +++ b/nft_ingester/tests/batch_mint_test.rs @@ -40,7 +40,7 @@ use nft_ingester::processors::transaction_based::bubblegum_updates_processor::Bu use nft_ingester::raydium_price_fetcher::RaydiumTokenPriceFetcher; use plerkle_serialization::serializer::serialize_transaction; use postgre_client::PgClient; -use rocks_db::batch_mint::FailedBatchMintKey; +use rocks_db::columns::batch_mint::FailedBatchMintKey; use rocks_db::Storage; use serde_json::json; use solana_client::nonblocking::rpc_client::RpcClient; diff --git a/nft_ingester/tests/clean_forks_test.rs b/nft_ingester/tests/clean_forks_test.rs index c6929c0bf..cbc9abdbd 100644 --- a/nft_ingester/tests/clean_forks_test.rs +++ b/nft_ingester/tests/clean_forks_test.rs @@ -8,8 +8,8 @@ use mpl_bubblegum::types::{BubblegumEventType, LeafSchema, Version}; use mpl_bubblegum::{InstructionName, LeafSchemaEvent}; use nft_ingester::cleaners::fork_cleaner::ForkCleaner; use nft_ingester::processors::transaction_based::bubblegum_updates_processor::BubblegumTxProcessor; -use rocks_db::cl_items::ClItem; use rocks_db::column::TypedColumn; +use rocks_db::columns::cl_items::ClItem; use rocks_db::transaction::{InstructionResult, TransactionResult, TreeUpdate}; use rocks_db::tree_seq::TreeSeqIdx; use setup::rocks::RocksTestEnvironment; diff --git a/nft_ingester/tests/gapfiller_tests.rs b/nft_ingester/tests/gapfiller_tests.rs index 771d24dc5..67ea441f2 100644 --- a/nft_ingester/tests/gapfiller_tests.rs +++ b/nft_ingester/tests/gapfiller_tests.rs @@ -1,4 +1,4 @@ -use entities::models::{CompleteAssetDetails, Updated}; +use entities::models::{AssetCompleteDetailsGrpc, Updated}; use futures::stream; use interface::asset_streaming_and_discovery::{ AsyncError, MockAssetDetailsConsumer, MockRawBlocksConsumer, @@ -6,7 +6,9 @@ use interface::asset_streaming_and_discovery::{ use metrics_utils::red::RequestErrorDurationMetrics; use nft_ingester::gapfiller::{process_asset_details_stream, process_raw_blocks_stream}; use rocks_db::generated::asset_generated::asset as fb; -use rocks_db::{asset::AssetCompleteDetails, column::TypedColumn, migrator::MigrationState}; +use rocks_db::{ + column::TypedColumn, columns::asset::AssetCompleteDetails, migrator::MigrationState, +}; use solana_sdk::pubkey::Pubkey; use solana_transaction_status::UiConfirmedBlock; use std::sync::Arc; @@ -15,8 +17,8 @@ use tokio::{sync::Mutex, task::JoinSet}; use rocks_db::Storage; -fn create_test_complete_asset_details(pubkey: Pubkey) -> CompleteAssetDetails { - CompleteAssetDetails { +fn create_test_complete_asset_details(pubkey: Pubkey) -> AssetCompleteDetailsGrpc { + AssetCompleteDetailsGrpc { pubkey, supply: Some(Updated::new(1, None, 10)), ..Default::default() diff --git a/nft_ingester/tests/process_accounts.rs b/nft_ingester/tests/process_accounts.rs index c3fa07589..1b0ce051e 100644 --- a/nft_ingester/tests/process_accounts.rs +++ b/nft_ingester/tests/process_accounts.rs @@ -27,10 +27,10 @@ mod tests { use nft_ingester::processors::account_based::mpl_core_processor::MplCoreProcessor; use nft_ingester::processors::account_based::mplx_updates_processor::MplxAccountsProcessor; use nft_ingester::processors::account_based::token_updates_processor::TokenAccountsProcessor; - use rocks_db::asset::AssetCompleteDetails; use rocks_db::batch_savers::BatchSaveStorage; use rocks_db::column::TypedColumn; - use rocks_db::AssetAuthority; + use rocks_db::columns::asset::AssetCompleteDetails; + use 
rocks_db::columns::asset::AssetAuthority; use solana_program::pubkey::Pubkey; use std::collections::HashMap; use std::str::FromStr; diff --git a/nft_ingester/tests/scheduler_tests.rs b/nft_ingester/tests/scheduler_tests.rs index 34ad571cd..186cdd32a 100644 --- a/nft_ingester/tests/scheduler_tests.rs +++ b/nft_ingester/tests/scheduler_tests.rs @@ -2,7 +2,7 @@ use assertables::assert_contains; use assertables::assert_contains_as_result; use entities::models::OffChainData; use nft_ingester::scheduler::Scheduler; -use rocks_db::asset_previews::UrlToDownload; +use rocks_db::columns::asset_previews::UrlToDownload; use setup::await_async_for; use setup::rocks::RocksTestEnvironment; diff --git a/rocks-db/benches/misc_benchmark.rs b/rocks-db/benches/misc_benchmark.rs index 82d04f67c..c02036f4e 100644 --- a/rocks-db/benches/misc_benchmark.rs +++ b/rocks-db/benches/misc_benchmark.rs @@ -2,10 +2,7 @@ use rocks_db::generated::asset_generated::asset as fb; use bincode::{deserialize, serialize}; use criterion::{criterion_group, criterion_main, Criterion}; -use rocks_db::{ - asset::{self, AssetCompleteDetails}, - AssetDynamicDetails, -}; +use rocks_db::columns::asset::{self, AssetCompleteDetails, AssetDynamicDetails}; use setup::rocks::RocksTestEnvironmentSetup; use solana_sdk::pubkey::Pubkey; diff --git a/rocks-db/src/batch_savers.rs b/rocks-db/src/batch_savers.rs index a4d22c196..21746c532 100644 --- a/rocks-db/src/batch_savers.rs +++ b/rocks-db/src/batch_savers.rs @@ -1,5 +1,6 @@ use crate::asset::{AssetCollection, AssetCompleteDetails, MetadataMintMap}; use crate::column::TypedColumn; +use crate::columns::inscriptions::InscriptionData; use crate::generated::asset_generated::asset as fb; use crate::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use crate::Result; @@ -165,7 +166,7 @@ impl BatchSaveStorage { self.storage.inscription_data.merge_with_batch( &mut self.batch, key, - &crate::inscriptions::InscriptionData { + &InscriptionData { pubkey: key, data: inscription_data.inscription_data.clone(), write_version: inscription_data.write_version, diff --git a/rocks-db/src/bin/column_copier/main.rs b/rocks-db/src/bin/column_copier/main.rs index 81ad93a2f..2637edcad 100644 --- a/rocks-db/src/bin/column_copier/main.rs +++ b/rocks-db/src/bin/column_copier/main.rs @@ -1,8 +1,8 @@ use entities::models::RawBlock; use metrics_utils::red::RequestErrorDurationMetrics; use rocks_db::column::TypedColumn; +use rocks_db::columns::offchain_data::OffChainData; use rocks_db::migrator::MigrationState; -use rocks_db::offchain_data::OffChainData; use rocks_db::Storage; use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; diff --git a/rocks-db/src/bin/column_remover/main.rs b/rocks-db/src/bin/column_remover/main.rs index a3ccfa8c1..8b5f4295d 100644 --- a/rocks-db/src/bin/column_remover/main.rs +++ b/rocks-db/src/bin/column_remover/main.rs @@ -1,21 +1,19 @@ use std::sync::Arc; use entities::schedule::ScheduledJob; -use rocks_db::asset::{ - self, AssetAuthorityDeprecated, AssetCollectionDeprecated, AssetDynamicDetailsDeprecated, - AssetOwnerDeprecated, AssetStaticDetailsDeprecated, MetadataMintMap, -}; -use rocks_db::asset_previews::{AssetPreviews, UrlToDownload}; -use rocks_db::batch_mint::BatchMintWithStaker; use rocks_db::column::TypedColumn; -use rocks_db::inscriptions::{Inscription, InscriptionData}; -use rocks_db::leaf_signatures::LeafSignature; -use rocks_db::token_prices::TokenPrice; -use rocks_db::tree_seq::{TreeSeqIdx, TreesGaps}; -use rocks_db::{ - bubblegum_slots, cl_items, parameters,
signature_client, AssetAuthority, AssetDynamicDetails, - AssetOwner, AssetStaticDetails, +use rocks_db::columns::asset::{ + self, AssetAuthority, AssetAuthorityDeprecated, AssetCollectionDeprecated, AssetDynamicDetails, + AssetDynamicDetailsDeprecated, AssetOwner, AssetOwnerDeprecated, AssetStaticDetails, + AssetStaticDetailsDeprecated, MetadataMintMap, }; +use rocks_db::columns::asset_previews::{AssetPreviews, UrlToDownload}; +use rocks_db::columns::batch_mint::BatchMintWithStaker; +use rocks_db::columns::inscriptions::{Inscription, InscriptionData}; +use rocks_db::columns::leaf_signatures::LeafSignature; +use rocks_db::columns::token_prices::TokenPrice; +use rocks_db::columns::{bubblegum_slots, cl_items, parameters}; +use rocks_db::tree_seq::{TreeSeqIdx, TreesGaps}; use tokio::sync::Mutex; use tokio::task::JoinSet; use tracing::info; @@ -29,8 +27,8 @@ use entities::models::{ AssetSignature, BatchMintToVerify, FailedBatchMint, RawBlock, SplMint, TokenAccount, }; use metrics_utils::red::RequestErrorDurationMetrics; +use rocks_db::columns::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use rocks_db::migrator::MigrationState; -use rocks_db::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; use std::{env, option}; #[tokio::main(flavor = "multi_thread")] diff --git a/rocks-db/src/bin/fork_detector/main.rs b/rocks-db/src/bin/fork_detector/main.rs index c63ada197..7a766e530 100644 --- a/rocks-db/src/bin/fork_detector/main.rs +++ b/rocks-db/src/bin/fork_detector/main.rs @@ -1,7 +1,7 @@ use entities::models::AssetSignature; use metrics_utils::red::RequestErrorDurationMetrics; -use rocks_db::cl_items::{ClItemKey, ClLeafKey}; use rocks_db::column::TypedColumn; +use rocks_db::columns::cl_items::{ClItemKey, ClLeafKey}; use rocks_db::migrator::MigrationState; use rocks_db::{SlotStorage, Storage}; use solana_sdk::pubkey::Pubkey; diff --git a/rocks-db/src/bin/leaf_checker/main.rs b/rocks-db/src/bin/leaf_checker/main.rs index 7527e35ab..e919ff4b1 100644 --- a/rocks-db/src/bin/leaf_checker/main.rs +++ b/rocks-db/src/bin/leaf_checker/main.rs @@ -3,7 +3,7 @@ use std::{env, str::FromStr, sync::Arc, time::Instant}; use bincode::deserialize; use metrics_utils::red::RequestErrorDurationMetrics; use rocks_db::{ - cl_items::{ClItemKey, ClLeaf}, + columns::cl_items::{ClItemKey, ClLeaf}, migrator::MigrationState, Storage, }; diff --git a/rocks-db/src/asset_client.rs b/rocks-db/src/clients/asset_client.rs similarity index 99% rename from rocks-db/src/asset_client.rs rename to rocks-db/src/clients/asset_client.rs index d2ccd4b00..a86a8c02b 100644 --- a/rocks-db/src/asset_client.rs +++ b/rocks-db/src/clients/asset_client.rs @@ -382,6 +382,8 @@ impl Storage { &self, assets: HashMap, ) -> Result<()> { + use crate::ToFlatbuffersConverter; + let mut batch = rocksdb::WriteBatchWithTransaction::::default(); for (pubkey, asset) in assets { batch.put_cf( diff --git a/rocks-db/src/asset_streaming_client.rs b/rocks-db/src/clients/asset_streaming_client.rs similarity index 99% rename from rocks-db/src/asset_streaming_client.rs rename to rocks-db/src/clients/asset_streaming_client.rs index f86589923..fe2c35df9 100644 --- a/rocks-db/src/asset_streaming_client.rs +++ b/rocks-db/src/clients/asset_streaming_client.rs @@ -13,8 +13,8 @@ use tokio_stream::wrappers::ReceiverStream; use crate::asset::{AssetCompleteDetails, SlotAssetIdxKey}; use crate::cl_items::{ClItem, ClItemKey, ClLeaf, ClLeafKey}; +use crate::columns::offchain_data::OffChainData; use crate::generated::asset_generated::asset as fb; -use 
crate::offchain_data::OffChainData; use crate::{ asset::{AssetLeaf, SlotAssetIdx}, column::TypedColumn, diff --git a/rocks-db/src/batch_client.rs b/rocks-db/src/clients/batch_client.rs similarity index 99% rename from rocks-db/src/batch_client.rs rename to rocks-db/src/clients/batch_client.rs index cd90332c8..544d12ffe 100644 --- a/rocks-db/src/batch_client.rs +++ b/rocks-db/src/clients/batch_client.rs @@ -6,10 +6,10 @@ use crate::asset::{ }; use crate::cl_items::{ClItem, ClItemKey, ClLeaf, ClLeafKey, SourcedClItem}; use crate::column::TypedColumn; +use crate::columns::offchain_data::OffChainData; use crate::errors::StorageError; use crate::generated::asset_generated::asset as fb; use crate::key_encoders::{decode_u64x2_pubkey, encode_u64x2_pubkey}; -use crate::offchain_data::OffChainData; use crate::storage_traits::{ AssetIndexReader, AssetSlotStorage, AssetUpdateIndexStorage, AssetUpdatedKey, }; diff --git a/rocks-db/src/dump_client.rs b/rocks-db/src/clients/dump_client.rs similarity index 100% rename from rocks-db/src/dump_client.rs rename to rocks-db/src/clients/dump_client.rs diff --git a/rocks-db/src/clients/mod.rs b/rocks-db/src/clients/mod.rs new file mode 100644 index 000000000..b8bc46f35 --- /dev/null +++ b/rocks-db/src/clients/mod.rs @@ -0,0 +1,7 @@ +mod asset_client; +pub mod asset_streaming_client; +mod batch_client; +pub mod dump_client; +pub mod raw_blocks_streaming_client; +pub mod signature_client; +pub mod transaction_client; diff --git a/rocks-db/src/raw_blocks_streaming_client.rs b/rocks-db/src/clients/raw_blocks_streaming_client.rs similarity index 100% rename from rocks-db/src/raw_blocks_streaming_client.rs rename to rocks-db/src/clients/raw_blocks_streaming_client.rs diff --git a/rocks-db/src/signature_client.rs b/rocks-db/src/clients/signature_client.rs similarity index 100% rename from rocks-db/src/signature_client.rs rename to rocks-db/src/clients/signature_client.rs diff --git a/rocks-db/src/transaction_client.rs b/rocks-db/src/clients/transaction_client.rs similarity index 100% rename from rocks-db/src/transaction_client.rs rename to rocks-db/src/clients/transaction_client.rs diff --git a/rocks-db/src/asset.rs b/rocks-db/src/columns/asset.rs similarity index 99% rename from rocks-db/src/asset.rs rename to rocks-db/src/columns/asset.rs index 7f8d90f79..039d9a6e0 100644 --- a/rocks-db/src/asset.rs +++ b/rocks-db/src/columns/asset.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; -use crate::inscriptions::{Inscription, InscriptionData}; -use crate::offchain_data::OffChainData; +use crate::columns::inscriptions::{Inscription, InscriptionData}; +use crate::columns::offchain_data::OffChainData; use bincode::{deserialize, serialize}; use entities::enums::{ChainMutability, OwnerType, RoyaltyTargetType, SpecificationAssetClass}; use entities::models::{ @@ -3832,7 +3832,7 @@ mod tests { let asset; unsafe { asset = - create::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( + crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( data_bytes.as_slice(), ); } @@ -3879,7 +3879,7 @@ mod tests { let asset; unsafe { asset = - create::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( + crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( merge_result.as_slice(), ); } @@ -3922,7 +3922,7 @@ mod tests { let asset; unsafe { asset = - create::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( + 
crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( merge_result.as_slice(), ); } @@ -4075,7 +4075,7 @@ mod tests { let perm_name = perm.iter().map(|(k, _)| k).join(", "); let asset; unsafe { - asset = create::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( + asset = crate::generated::asset_generated::asset::root_as_asset_complete_details_unchecked( merge_result.as_slice(), ); } diff --git a/rocks-db/src/asset_previews.rs b/rocks-db/src/columns/asset_previews.rs similarity index 100% rename from rocks-db/src/asset_previews.rs rename to rocks-db/src/columns/asset_previews.rs diff --git a/rocks-db/src/asset_signatures.rs b/rocks-db/src/columns/asset_signatures.rs similarity index 100% rename from rocks-db/src/asset_signatures.rs rename to rocks-db/src/columns/asset_signatures.rs diff --git a/rocks-db/src/batch_mint.rs b/rocks-db/src/columns/batch_mint.rs similarity index 100% rename from rocks-db/src/batch_mint.rs rename to rocks-db/src/columns/batch_mint.rs diff --git a/rocks-db/src/bubblegum_slots.rs b/rocks-db/src/columns/bubblegum_slots.rs similarity index 100% rename from rocks-db/src/bubblegum_slots.rs rename to rocks-db/src/columns/bubblegum_slots.rs diff --git a/rocks-db/src/cl_items.rs b/rocks-db/src/columns/cl_items.rs similarity index 100% rename from rocks-db/src/cl_items.rs rename to rocks-db/src/columns/cl_items.rs diff --git a/rocks-db/src/editions.rs b/rocks-db/src/columns/editions.rs similarity index 100% rename from rocks-db/src/editions.rs rename to rocks-db/src/columns/editions.rs diff --git a/rocks-db/src/inscriptions.rs b/rocks-db/src/columns/inscriptions.rs similarity index 100% rename from rocks-db/src/inscriptions.rs rename to rocks-db/src/columns/inscriptions.rs diff --git a/rocks-db/src/leaf_signatures.rs b/rocks-db/src/columns/leaf_signatures.rs similarity index 100% rename from rocks-db/src/leaf_signatures.rs rename to rocks-db/src/columns/leaf_signatures.rs diff --git a/rocks-db/src/columns/mod.rs b/rocks-db/src/columns/mod.rs new file mode 100644 index 000000000..49219f791 --- /dev/null +++ b/rocks-db/src/columns/mod.rs @@ -0,0 +1,16 @@ +pub mod asset; +pub mod asset_previews; +pub mod asset_signatures; + +pub mod batch_mint; +pub mod bubblegum_slots; +pub mod cl_items; +pub mod editions; + +pub mod inscriptions; +pub mod leaf_signatures; +pub mod offchain_data; +pub mod parameters; +pub mod raw_block; +pub mod token_accounts; +pub mod token_prices; diff --git a/rocks-db/src/offchain_data.rs b/rocks-db/src/columns/offchain_data.rs similarity index 100% rename from rocks-db/src/offchain_data.rs rename to rocks-db/src/columns/offchain_data.rs diff --git a/rocks-db/src/parameters.rs b/rocks-db/src/columns/parameters.rs similarity index 100% rename from rocks-db/src/parameters.rs rename to rocks-db/src/columns/parameters.rs diff --git a/rocks-db/src/raw_block.rs b/rocks-db/src/columns/raw_block.rs similarity index 100% rename from rocks-db/src/raw_block.rs rename to rocks-db/src/columns/raw_block.rs diff --git a/rocks-db/src/token_accounts.rs b/rocks-db/src/columns/token_accounts.rs similarity index 100% rename from rocks-db/src/token_accounts.rs rename to rocks-db/src/columns/token_accounts.rs diff --git a/rocks-db/src/token_prices.rs b/rocks-db/src/columns/token_prices.rs similarity index 100% rename from rocks-db/src/token_prices.rs rename to rocks-db/src/columns/token_prices.rs diff --git a/rocks-db/src/fork_cleaner.rs b/rocks-db/src/fork_cleaner.rs index 8c5e7f3e4..c48f8e8bc 100644 --- 
a/rocks-db/src/fork_cleaner.rs +++ b/rocks-db/src/fork_cleaner.rs @@ -1,6 +1,6 @@ use crate::{ - cl_items::ClItemKey, column::TypedColumn, leaf_signatures::LeafSignature, SlotStorage, Storage, - DROP_ACTION, FULL_ITERATION_ACTION, ITERATOR_TOP_ACTION, RAW_BLOCKS_CBOR_ENDPOINT, + cl_items::ClItemKey, column::TypedColumn, columns::leaf_signatures::LeafSignature, SlotStorage, + Storage, DROP_ACTION, FULL_ITERATION_ACTION, ITERATOR_TOP_ACTION, RAW_BLOCKS_CBOR_ENDPOINT, ROCKS_COMPONENT, }; use async_trait::async_trait; diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index 5ffe5d4b2..52b01c721 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -1,9 +1,19 @@ -use asset_previews::{AssetPreviews, UrlToDownload}; +use clients::signature_client; +use columns::asset::{ + self, AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, AssetsUpdateIdx, +}; +use columns::asset_previews::{AssetPreviews, UrlToDownload}; +use columns::batch_mint::{self, BatchMintWithStaker}; +use columns::inscriptions::{Inscription, InscriptionData}; +use columns::leaf_signatures::LeafSignature; +use columns::offchain_data::{OffChainData, OffChainDataDeprecated}; +use columns::parameters::ParameterColumn; +use columns::token_accounts::{self, TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; +use columns::token_prices::TokenPrice; +use columns::{bubblegum_slots, cl_items, parameters}; use entities::schedule::ScheduledJob; use flatbuffers::{FlatBufferBuilder, WIPOffset}; use inflector::Inflector; -use leaf_signatures::LeafSignature; -use offchain_data::{OffChainData, OffChainDataDeprecated}; use std::path::Path; use std::sync::atomic::AtomicU64; use std::{marker::PhantomData, sync::Arc}; @@ -16,9 +26,7 @@ use asset::{ use rocksdb::{ColumnFamilyDescriptor, IteratorMode, Options, DB}; use crate::migrator::{MigrationState, MigrationVersions, RocksMigration}; -pub use asset::{ - AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, AssetsUpdateIdx, -}; + use column::{Column, TypedColumn}; use entities::enums::TokenMetadataEdition; use entities::models::{ @@ -28,54 +36,29 @@ use metrics_utils::red::RequestErrorDurationMetrics; use tokio::sync::Mutex; use tokio::task::JoinSet; -use crate::batch_mint::BatchMintWithStaker; use crate::errors::StorageError; -use crate::inscriptions::{Inscription, InscriptionData}; -use crate::migrations::clean_update_authorities::CleanCollectionAuthoritiesMigration; use crate::migrations::collection_authority::{ AssetCollectionVersion0, CollectionAuthorityMigration, }; use crate::migrations::external_plugins::AssetDynamicDetailsV0; use crate::migrations::spl2022::TokenAccounts2022ExtentionsMigration; -use crate::parameters::ParameterColumn; -use crate::token_accounts::{TokenAccountMintOwnerIdx, TokenAccountOwnerIdx}; -use crate::token_prices::TokenPrice; use crate::tree_seq::{TreeSeqIdx, TreesGaps}; -pub mod asset; -mod asset_client; -pub mod asset_previews; -pub mod asset_signatures; -pub mod asset_streaming_client; pub mod backup_service; -mod batch_client; -pub mod batch_mint; pub mod batch_savers; -pub mod bubblegum_slots; -pub mod cl_items; +pub mod clients; pub mod column; -pub mod dump_client; -pub mod editions; +pub mod columns; pub mod errors; pub mod fork_cleaner; -pub mod inscriptions; pub mod key_encoders; -pub mod leaf_signatures; pub mod migrations; pub mod migrator; -pub mod offchain_data; -pub mod parameters; pub mod processing_possibility; -pub mod raw_block; -pub mod raw_blocks_streaming_client; pub mod schedule; pub mod sequence_consistent; 
-pub mod signature_client; pub mod storage_traits; -pub mod token_accounts; -pub mod token_prices; pub mod transaction; -pub mod transaction_client; pub mod tree_seq; // import the flatbuffers runtime library extern crate flatbuffers; @@ -679,7 +662,7 @@ impl Storage { TokenMetadataEdition::NAME => { cf_options.set_merge_operator_associative( "merge_fn_token_metadata_edition_keep_existing", - crate::editions::merge_token_metadata_edition, + crate::columns::editions::merge_token_metadata_edition, ); } AssetStaticDetailsDeprecated::NAME => { @@ -703,7 +686,7 @@ impl Storage { } _ => token_accounts::merge_token_accounts, }, - MigrationState::Last => crate::token_accounts::merge_token_accounts, + MigrationState::Last => token_accounts::merge_token_accounts, MigrationState::CreateColumnFamilies => { asset::AssetStaticDetails::merge_keep_existing } diff --git a/rocks-db/src/migrations/offchain_data.rs b/rocks-db/src/migrations/offchain_data.rs index 2fee0df82..80b47ebd2 100644 --- a/rocks-db/src/migrations/offchain_data.rs +++ b/rocks-db/src/migrations/offchain_data.rs @@ -1,5 +1,5 @@ +use crate::columns::offchain_data::{OffChainData, OffChainDataDeprecated, StorageMutability}; use crate::migrator::{RocksMigration, SerializationType}; -use crate::offchain_data::{OffChainData, OffChainDataDeprecated, StorageMutability}; impl From for OffChainData { fn from(value: OffChainDataDeprecated) -> Self { @@ -16,7 +16,7 @@ impl From for OffChainData { pub(crate) struct OffChainDataMigration; impl RocksMigration for OffChainDataMigration { const VERSION: u64 = 4; - const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; + const SERIALIZATION_TYPE: SerializationType = SerializationType::Flatbuffers; type NewDataType = OffChainData; type OldDataType = OffChainDataDeprecated; } diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index 5d11ae371..0e53adea4 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -353,7 +353,7 @@ impl<'a> MigrationApplier<'a> { }) } SerializationType::Flatbuffers => { - todo!() + todo!(); } } } diff --git a/rocks-db/src/transaction.rs b/rocks-db/src/transaction.rs index a5ea091e5..d8bff3f2f 100644 --- a/rocks-db/src/transaction.rs +++ b/rocks-db/src/transaction.rs @@ -7,7 +7,7 @@ use spl_account_compression::state::PathNode; use crate::{ asset::{AssetCollection, AssetLeaf}, - offchain_data::OffChainData, + columns::offchain_data::OffChainData, AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, }; diff --git a/rocks-db/tests/batch_client_integration_tests.rs b/rocks-db/tests/batch_client_integration_tests.rs index 724d375e1..c1fc0bd8a 100644 --- a/rocks-db/tests/batch_client_integration_tests.rs +++ b/rocks-db/tests/batch_client_integration_tests.rs @@ -12,7 +12,10 @@ mod tests { use rocks_db::key_encoders::encode_u64x2_pubkey; use rocks_db::migrator::MigrationState; use rocks_db::storage_traits::{AssetUpdateIndexStorage, AssetUpdatedKey}; - use rocks_db::{AssetDynamicDetails, AssetOwner, Storage}; + use rocks_db::{ + columns::asset::{AssetDynamicDetails, AssetOwner}, + Storage, + }; use tokio::sync::Mutex; use tokio::task::JoinSet; diff --git a/rocks-db/tests/migration_tests.rs b/rocks-db/tests/migration_tests.rs index 4389ea768..604990881 100644 --- a/rocks-db/tests/migration_tests.rs +++ b/rocks-db/tests/migration_tests.rs @@ -2,7 +2,7 @@ mod tests { use bincode::serialize; use metrics_utils::red::RequestErrorDurationMetrics; - use rocks_db::asset::AssetCollection; + use rocks_db::columns::asset::AssetCollection; 
use rocks_db::column::TypedColumn; use rocks_db::migrations::collection_authority::AssetCollectionVersion0; use rocks_db::migrator::MigrationState; diff --git a/rocks-db/tests/parameters_tests.rs b/rocks-db/tests/parameters_tests.rs index 3d3f56116..d51bb9450 100644 --- a/rocks-db/tests/parameters_tests.rs +++ b/rocks-db/tests/parameters_tests.rs @@ -1,7 +1,7 @@ #[cfg(test)] mod tests { use interface::slot_getter::LastProcessedSlotGetter; - use rocks_db::parameters::Parameter; + use rocks_db::columns::parameters::Parameter; use setup::rocks::*; #[tokio::test] diff --git a/rocks-db/tests/urls_to_download_test.rs b/rocks-db/tests/urls_to_download_test.rs index 37de60c17..fdb88a32a 100644 --- a/rocks-db/tests/urls_to_download_test.rs +++ b/rocks-db/tests/urls_to_download_test.rs @@ -1,7 +1,7 @@ #[cfg(test)] mod tests { use interface::assert_urls::{DownloadOutcome, UrlDownloadNotification, UrlsToDownloadStore}; - use rocks_db::asset_previews::{AssetPreviews, UrlToDownload, DL_MAX_ATTEMPTS}; + use rocks_db::columns::asset_previews::{AssetPreviews, UrlToDownload, DL_MAX_ATTEMPTS}; use setup::{await_async_for, rocks::*}; use solana_sdk::keccak; diff --git a/tests/setup/src/lib.rs b/tests/setup/src/lib.rs index 0a752e3d0..9ca2a1f1e 100644 --- a/tests/setup/src/lib.rs +++ b/tests/setup/src/lib.rs @@ -7,8 +7,9 @@ use std::sync::Arc; use crate::rocks::RocksTestEnvironmentSetup; use entities::enums::{AssetType, ASSET_TYPES}; use metrics_utils::MetricsTrait; -use rocks_db::asset::AssetCollection; -use rocks_db::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails}; +use rocks_db::columns::asset::{ + AssetAuthority, AssetCollection, AssetDynamicDetails, AssetOwner, AssetStaticDetails, +}; use solana_sdk::pubkey::Pubkey; use testcontainers::clients::Cli; diff --git a/tests/setup/src/rocks.rs b/tests/setup/src/rocks.rs index f6ecad6eb..aa21a7358 100644 --- a/tests/setup/src/rocks.rs +++ b/tests/setup/src/rocks.rs @@ -2,9 +2,9 @@ use std::sync::Arc; use entities::models::Updated; use rand::{random, Rng}; -use rocks_db::asset::AssetCompleteDetails; use rocks_db::column::TypedColumn; -use rocks_db::offchain_data::OffChainData; +use rocks_db::columns::asset::AssetCompleteDetails; +use rocks_db::columns::offchain_data::OffChainData; use rocks_db::ToFlatbuffersConverter; use solana_sdk::pubkey::Pubkey; use sqlx::types::chrono::Utc; @@ -15,7 +15,9 @@ use metrics_utils::red::RequestErrorDurationMetrics; use rocks_db::errors::StorageError; use rocks_db::migrator::MigrationState; use rocks_db::{ - asset::AssetCollection, AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, + columns::asset::{ + AssetAuthority, AssetCollection, AssetDynamicDetails, AssetOwner, AssetStaticDetails, + }, Storage, }; use tokio::{sync::Mutex, task::JoinSet}; From e6a0df05d7a53ac2090249fb933b632afe96f366 Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Tue, 17 Dec 2024 17:06:18 +0200 Subject: [PATCH 03/15] add ad-hoc migrator --- .../migrations/clean_update_authorities.rs | 16 ++++++++++++ .../src/migrations/collection_authority.rs | 1 + rocks-db/src/migrations/spl2022.rs | 17 +++++++++++- rocks-db/src/migrator.rs | 26 ++++++++++++++----- 4 files changed, 52 insertions(+), 8 deletions(-) diff --git a/rocks-db/src/migrations/clean_update_authorities.rs b/rocks-db/src/migrations/clean_update_authorities.rs index 928e44626..907ee490f 100644 --- a/rocks-db/src/migrations/clean_update_authorities.rs +++ b/rocks-db/src/migrations/clean_update_authorities.rs @@ -1,5 +1,6 @@ use crate::asset::AssetCollection; 
use crate::migrator::{RocksMigration, SerializationType}; +use crate::ToFlatbuffersConverter; use entities::models::{UpdateVersion, Updated}; use serde::{Deserialize, Serialize}; use solana_sdk::pubkey::Pubkey; @@ -23,6 +24,21 @@ impl From for AssetCollection { } } +impl<'a> ToFlatbuffersConverter<'a> for AssetCollection { + type Target = AssetCollection; + + fn convert_to_fb( + &self, + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + ) -> flatbuffers::WIPOffset<Self::Target> { + todo!() + } + + fn convert_to_fb_bytes(&self) -> Vec<u8> { + todo!() + } +} + pub(crate) struct CleanCollectionAuthoritiesMigration; impl RocksMigration for CleanCollectionAuthoritiesMigration { const VERSION: u64 = 2; diff --git a/rocks-db/src/migrations/collection_authority.rs b/rocks-db/src/migrations/collection_authority.rs index 416d52410..591c71fd3 100644 --- a/rocks-db/src/migrations/collection_authority.rs +++ b/rocks-db/src/migrations/collection_authority.rs @@ -1,5 +1,6 @@ use crate::asset::AssetCollection; use crate::migrator::{RocksMigration, SerializationType}; +use crate::ToFlatbuffersConverter; use bincode::deserialize; use entities::models::{UpdateVersion, Updated}; use rocksdb::MergeOperands; diff --git a/rocks-db/src/migrations/spl2022.rs b/rocks-db/src/migrations/spl2022.rs index e73a0e9a6..1a1610757 100644 --- a/rocks-db/src/migrations/spl2022.rs +++ b/rocks-db/src/migrations/spl2022.rs @@ -2,7 +2,7 @@ use crate::asset::{update_field, update_optional_field}; use crate::column::TypedColumn; use crate::key_encoders::{decode_pubkey, encode_pubkey}; use crate::migrator::{RocksMigration, SerializationType}; -use crate::{impl_merge_values, AssetDynamicDetails}; +use crate::{impl_merge_values, AssetDynamicDetails, ToFlatbuffersConverter}; use bincode::{deserialize, serialize}; use entities::enums::ChainMutability; use entities::models::{TokenAccount, Updated}; @@ -43,6 +43,21 @@ impl From for TokenAccount { } } +impl<'a> ToFlatbuffersConverter<'a> for TokenAccount { + type Target = TokenAccount; + + fn convert_to_fb( + &self, + builder: &mut flatbuffers::FlatBufferBuilder<'a>, + ) -> flatbuffers::WIPOffset<Self::Target> { + todo!() + } + + fn convert_to_fb_bytes(&self) -> Vec<u8> { + todo!() + } +} + pub(crate) struct TokenAccounts2022ExtentionsMigration; impl RocksMigration for TokenAccounts2022ExtentionsMigration { const VERSION: u64 = 3; diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index 0e53adea4..0de219ec8 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -2,8 +2,8 @@ use crate::asset::{AssetCollection, AssetCompleteDetails}; use crate::column::{Column, TypedColumn}; use crate::errors::StorageError; use crate::key_encoders::{decode_u64, encode_u64}; -use crate::Storage; use crate::{AssetAuthority, AssetDynamicDetails, AssetOwner, AssetStaticDetails, Result}; +use crate::{Storage, ToFlatbuffersConverter}; use bincode::deserialize; use interface::migration_version_manager::PrimaryStorageMigrationVersionManager; use metrics_utils::red::RequestErrorDurationMetrics; @@ -34,7 +34,14 @@ pub enum SerializationType { pub trait RocksMigration { const VERSION: u64; const SERIALIZATION_TYPE: SerializationType; - type NewDataType: Sync + Serialize + DeserializeOwned + Send + TypedColumn; + type NewDataType: Sync + + Serialize + + DeserializeOwned + + Send + + TypedColumn + // that restriction breaks the backward compatibility for the previous migrations + // however, it's the simplest way to provide the migration to flatbuffers + + ToFlatbuffersConverter<'static>; type OldDataType: Sync + Serialize
+ DeserializeOwned @@ -197,7 +204,8 @@ impl<'a> MigrationApplier<'a> { async fn apply_migration(&self, _: M) -> Result<()> where - <::NewDataType as TypedColumn>::ValueType: 'static + Clone, + for<'b> <::NewDataType as TypedColumn>::ValueType: + 'static + Clone + ToFlatbuffersConverter<'b>, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { if self.applied_migration_versions.contains(&M::VERSION) { @@ -289,7 +297,8 @@ impl<'a> MigrationApplier<'a> { column: &Column, ) -> Result<()> where - <::NewDataType as TypedColumn>::ValueType: 'static + Clone, + for<'b> <::NewDataType as TypedColumn>::ValueType: + 'static + Clone + ToFlatbuffersConverter<'b>, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { let mut batch = HashMap::new(); @@ -353,7 +362,7 @@ impl<'a> MigrationApplier<'a> { }) } SerializationType::Flatbuffers => { - todo!(); + unreachable!("Flatbuffers migration is not supported yet") } } } @@ -366,13 +375,16 @@ impl<'a> MigrationApplier<'a> { column: &Column, ) -> Result<()> where - <::NewDataType as TypedColumn>::ValueType: 'static + Clone, + for<'b> <::NewDataType as TypedColumn>::ValueType: + 'static + Clone + ToFlatbuffersConverter<'b>, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { match M::SERIALIZATION_TYPE { SerializationType::Bincode => column.put_batch(std::mem::take(batch)).await, SerializationType::Cbor => column.put_batch_cbor(std::mem::take(batch)).await, - SerializationType::Flatbuffers => todo!(), + SerializationType::Flatbuffers => { + column.put_batch_flatbuffers(std::mem::take(batch)).await + } } } } From ad3855b8ba3d53ed874419b1cabdd5846306bd86 Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Wed, 18 Dec 2024 13:46:45 +0200 Subject: [PATCH 04/15] Clean up --- grpc/src/mapper.rs | 16 ++++----- nft_ingester/src/api/dapi/asset.rs | 34 ++++++------------- .../src/api/dapi/rpc_asset_convertors.rs | 8 ++--- nft_ingester/src/json_worker.rs | 6 ++-- .../bubblegum_updates_processor.rs | 8 ++--- rocks-db/src/columns/offchain_data.rs | 5 ++- rocks-db/src/lib.rs | 9 +++-- .../migrations/clean_update_authorities.rs | 12 +------ .../src/migrations/collection_authority.rs | 1 + rocks-db/src/migrations/offchain_data.rs | 1 + rocks-db/src/migrations/spl2022.rs | 12 +------ rocks-db/src/migrator.rs | 7 ++-- 12 files changed, 47 insertions(+), 72 deletions(-) diff --git a/grpc/src/mapper.rs b/grpc/src/mapper.rs index 6f6ce524c..552c4ba51 100644 --- a/grpc/src/mapper.rs +++ b/grpc/src/mapper.rs @@ -6,7 +6,7 @@ use crate::gapfiller::{ MasterEdition, OffchainData, OwnerType, RawBlock, RoyaltyTargetType, SpecificationAssetClass, SpecificationVersions, SplMint, TokenStandard, UpdateVersionValue, UseMethod, Uses, }; -use entities::models::{AssetCompleteDetailsGrpc, UpdateVersion, Updated}; +use entities::models::{AssetCompleteDetailsGrpc, OffChainDataGrpc, UpdateVersion, Updated}; use solana_sdk::hash::Hash; use solana_sdk::pubkey::Pubkey; @@ -201,12 +201,10 @@ impl TryFrom for AssetCompleteDetailsGrpc { .collect::, _>>()?, edition: value.edition.map(TryInto::try_into).transpose()?, master_edition: value.master_edition.map(TryInto::try_into).transpose()?, - offchain_data: value - .offchain_data - .map(|e| entities::models::OffChainDataGrpc { - url: e.url, - metadata: e.metadata, - }), + offchain_data: value.offchain_data.map(|e| OffChainDataGrpc { + url: e.url, + metadata: e.metadata, + }), spl_mint: value.spl_mint.map(TryInto::try_into).transpose()?, }) } @@ -258,8 +256,8 @@ impl From for SplMint { } } -impl From for OffchainData { - fn 
from(value: entities::models::OffChainDataGrpc) -> Self { +impl From for OffchainData { + fn from(value: OffChainDataGrpc) -> Self { Self { url: value.url, metadata: value.metadata, diff --git a/nft_ingester/src/api/dapi/asset.rs b/nft_ingester/src/api/dapi/asset.rs index 019e454be..2a4c3a080 100644 --- a/nft_ingester/src/api/dapi/asset.rs +++ b/nft_ingester/src/api/dapi/asset.rs @@ -219,7 +219,6 @@ pub async fn get_by_ids< .into_iter() .map(|id| id.to_string()) .collect_vec(); - // request prices and symbols only for fungibles when the option is set. This will prolong the request at least an order of magnitude let (token_prices, token_symbols) = if options.show_fungible { let token_prices_fut = token_price_fetcher.fetch_token_prices(asset_ids_string.as_slice()); let token_symbols_fut = @@ -240,6 +239,7 @@ pub async fn get_by_ids< HashMap::new() }); + // request prices and symbols only for fungibles when the option is set. This will prolong the request at least an order of magnitude let mut asset_selected_maps = rocks_db .get_asset_selected_maps_async(unique_asset_ids.clone(), owner_address, &options) .await?; @@ -254,6 +254,13 @@ pub async fn get_by_ids< let mut download_needed = false; match offchain_data { Some(offchain_data) => { + let curr_time = chrono::Utc::now().timestamp(); + if offchain_data.storage_mutability.is_mutable() + && curr_time > offchain_data.last_read_at + METADATA_CACHE_TTL + { + download_needed = true; + } + match &offchain_data.metadata { Some(metadata) => { if metadata.is_empty() { @@ -264,24 +271,6 @@ pub async fn get_by_ids< download_needed = true; } } - - match &offchain_data.url { - Some(url) => { - if url.is_empty() { - download_needed = true; - } - } - None => { - download_needed = true; - } - } - - let curr_time = chrono::Utc::now().timestamp(); - if offchain_data.storage_mutability.is_mutable() - && curr_time > offchain_data.last_read_at + METADATA_CACHE_TTL - { - download_needed = true; - } } None => { download_needed = true; @@ -317,20 +306,17 @@ pub async fn get_by_ids< let last_read_at = chrono::Utc::now().timestamp(); match res { Ok(JsonDownloadResult::JsonContent(metadata)) => { - let storage_mutability = StorageMutability::from(json_url.as_str()); - asset_selected_maps.offchain_data.insert( json_url.clone(), OffChainData { url: Some(json_url.clone()), metadata: Some(metadata.clone()), - storage_mutability, + storage_mutability: StorageMutability::from(json_url.as_str()), last_read_at, }, ); } Ok(JsonDownloadResult::MediaUrlAndMimeType { url, mime_type }) => { - let storage_mutability = StorageMutability::from(json_url.as_str()); asset_selected_maps.offchain_data.insert( json_url.clone(), OffChainData { @@ -339,7 +325,7 @@ pub async fn get_by_ids< format!("{{\"image\":\"{}\",\"type\":\"{}\"}}", url, mime_type) .to_string(), ), - storage_mutability, + storage_mutability: StorageMutability::from(json_url.as_str()), last_read_at, }, ); diff --git a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs index e55b5c13e..7b6f82112 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs @@ -91,8 +91,8 @@ pub fn get_content( offchain_data: &OffChainData, ) -> Result { let json_uri = asset_dynamic.url.value.clone(); - let metadata = offchain_data.metadata.clone().unwrap_or_default(); - let metadata: Value = serde_json::from_str(&metadata).unwrap_or(Value::Null); + let metadata = 
serde_json::from_str(&offchain_data.metadata.clone().unwrap_or_default()) + .unwrap_or(Value::Null); let chain_data: Value = serde_json::from_str( asset_dynamic .onchain_data @@ -235,8 +235,8 @@ fn extract_collection_metadata( asset_dynamic: &AssetDynamicDetails, offchain_data: &OffChainData, ) -> MetadataMap { - let metadata = offchain_data.metadata.clone().unwrap_or_default(); - let metadata: Value = serde_json::from_str(&metadata).unwrap_or(Value::Null); + let metadata = serde_json::from_str(&offchain_data.metadata.clone().unwrap_or_default()) + .unwrap_or(Value::Null); let chain_data: Value = serde_json::from_str( asset_dynamic .onchain_data diff --git a/nft_ingester/src/json_worker.rs b/nft_ingester/src/json_worker.rs index 47199b150..5e1b555bc 100644 --- a/nft_ingester/src/json_worker.rs +++ b/nft_ingester/src/json_worker.rs @@ -275,7 +275,6 @@ impl JsonDownloader for JsonWorker { JsonDownloaderError::ErrorDownloading(format!("Failed to create client: {:?}", e)) })?; - // TODO: maybe IPFS/Arweave stuff might be done here // Detect if the URL is an IPFS link let parsed_url = if url.starts_with("ipfs://") { // Extract the IPFS hash or path @@ -369,7 +368,6 @@ impl JsonPersister for JsonWorker { results: Vec<(String, Result)>, ) -> Result<(), JsonDownloaderError> { let mut pg_updates = Vec::new(); - // TODO: store updates here let mut rocks_updates = HashMap::new(); let curr_time = chrono::Utc::now().timestamp(); @@ -482,7 +480,9 @@ impl JsonPersister for JsonWorker { if !rocks_updates.is_empty() { let urls_to_download = rocks_updates .values() - .filter(|data| data.metadata.is_some()) + .filter(|data| { + data.metadata.is_some() && !data.metadata.clone().unwrap().is_empty() + }) .filter_map(|data| parse_files(data.metadata.clone().unwrap().as_str())) .flat_map(|files| files.into_iter()) .filter_map(|file| file.uri) diff --git a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs index 9840b5f52..4445d888f 100644 --- a/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs +++ b/nft_ingester/src/processors/transaction_based/bubblegum_updates_processor.rs @@ -1143,15 +1143,13 @@ impl BubblegumTxProcessor { if let Some(dynamic_info) = &update.update { if let Some(data) = &dynamic_info.dynamic_data { let url = data.url.value.clone(); - let storage_mutability = url.as_str().into(); - let last_read_at = Utc::now().timestamp(); if let Some(metadata) = batch_mint.raw_metadata_map.get(&url) { update.offchain_data_update = Some(OffChainData { - url: Some(url), + url: Some(url.clone()), metadata: Some(metadata.to_string()), - storage_mutability, - last_read_at, + storage_mutability: url.as_str().into(), + last_read_at: Utc::now().timestamp(), }); } } diff --git a/rocks-db/src/columns/offchain_data.rs b/rocks-db/src/columns/offchain_data.rs index 4b040b8eb..7546a1271 100644 --- a/rocks-db/src/columns/offchain_data.rs +++ b/rocks-db/src/columns/offchain_data.rs @@ -24,7 +24,10 @@ impl StorageMutability { impl From<&str> for StorageMutability { fn from(storage_mutability: &str) -> Self { - if storage_mutability.starts_with("ipfs") || storage_mutability.starts_with("arweave") { + if storage_mutability.is_empty() + || storage_mutability.starts_with("ipfs") + || storage_mutability.starts_with("arweave") + { return StorageMutability::Immutable; } else { return StorageMutability::Mutable; diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index 52b01c721..c8a8e1ef0 100644 --- 
a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -810,8 +810,13 @@ impl Storage { } } +#[allow(unused_variables)] pub trait ToFlatbuffersConverter<'a> { type Target: 'a; - fn convert_to_fb(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset; - fn convert_to_fb_bytes(&self) -> Vec; + fn convert_to_fb(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset { + todo!() + } + fn convert_to_fb_bytes(&self) -> Vec { + todo!() + } } diff --git a/rocks-db/src/migrations/clean_update_authorities.rs b/rocks-db/src/migrations/clean_update_authorities.rs index 907ee490f..c40f8e9ac 100644 --- a/rocks-db/src/migrations/clean_update_authorities.rs +++ b/rocks-db/src/migrations/clean_update_authorities.rs @@ -26,22 +26,12 @@ impl From for AssetCollection { impl<'a> ToFlatbuffersConverter<'a> for AssetCollection { type Target = AssetCollection; - - fn convert_to_fb( - &self, - builder: &mut flatbuffers::FlatBufferBuilder<'a>, - ) -> flatbuffers::WIPOffset { - todo!() - } - - fn convert_to_fb_bytes(&self) -> Vec { - todo!() - } } pub(crate) struct CleanCollectionAuthoritiesMigration; impl RocksMigration for CleanCollectionAuthoritiesMigration { const VERSION: u64 = 2; + const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; type NewDataType = AssetCollection; type OldDataType = AssetCollectionBeforeCleanUp; diff --git a/rocks-db/src/migrations/collection_authority.rs b/rocks-db/src/migrations/collection_authority.rs index 591c71fd3..d40538f5a 100644 --- a/rocks-db/src/migrations/collection_authority.rs +++ b/rocks-db/src/migrations/collection_authority.rs @@ -99,6 +99,7 @@ impl AssetCollectionVersion0 { pub(crate) struct CollectionAuthorityMigration; impl RocksMigration for CollectionAuthorityMigration { const VERSION: u64 = 0; + const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; type NewDataType = AssetCollection; type OldDataType = AssetCollectionVersion0; diff --git a/rocks-db/src/migrations/offchain_data.rs b/rocks-db/src/migrations/offchain_data.rs index 80b47ebd2..7be9c56c4 100644 --- a/rocks-db/src/migrations/offchain_data.rs +++ b/rocks-db/src/migrations/offchain_data.rs @@ -16,6 +16,7 @@ impl From for OffChainData { pub(crate) struct OffChainDataMigration; impl RocksMigration for OffChainDataMigration { const VERSION: u64 = 4; + const DESERIALIZATION_TYPE: SerializationType = SerializationType::Flatbuffers; const SERIALIZATION_TYPE: SerializationType = SerializationType::Flatbuffers; type NewDataType = OffChainData; type OldDataType = OffChainDataDeprecated; diff --git a/rocks-db/src/migrations/spl2022.rs b/rocks-db/src/migrations/spl2022.rs index 1a1610757..8be7c2119 100644 --- a/rocks-db/src/migrations/spl2022.rs +++ b/rocks-db/src/migrations/spl2022.rs @@ -45,22 +45,12 @@ impl From for TokenAccount { impl<'a> ToFlatbuffersConverter<'a> for TokenAccount { type Target = TokenAccount; - - fn convert_to_fb( - &self, - builder: &mut flatbuffers::FlatBufferBuilder<'a>, - ) -> flatbuffers::WIPOffset { - todo!() - } - - fn convert_to_fb_bytes(&self) -> Vec { - todo!() - } } pub(crate) struct TokenAccounts2022ExtentionsMigration; impl RocksMigration for TokenAccounts2022ExtentionsMigration { const VERSION: u64 = 3; + const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; type NewDataType = TokenAccount; type 
OldDataType = TokenAccountWithoutExtentions; diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index 0de219ec8..2c88ee60c 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -33,6 +33,7 @@ pub enum SerializationType { pub trait RocksMigration { const VERSION: u64; + const DESERIALIZATION_TYPE: SerializationType; const SERIALIZATION_TYPE: SerializationType; type NewDataType: Sync + Serialize @@ -350,7 +351,7 @@ impl<'a> MigrationApplier<'a> { <<M as RocksMigration>::NewDataType as TypedColumn>::ValueType: 'static + Clone, <<M as RocksMigration>::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { - match M::SERIALIZATION_TYPE { + match M::DESERIALIZATION_TYPE { SerializationType::Bincode => deserialize::<M::OldDataType>(value).map_err(|e| { error!("migration data deserialize: {:?}, {}", key_decoded, e); e.into() }), @@ -362,7 +363,9 @@ impl<'a> MigrationApplier<'a> { }) } SerializationType::Flatbuffers => { - unreachable!("Flatbuffers migration is not supported yet") + unreachable!( + "Deserialization from Flatbuffers in terms of migration is not supported yet" + ) } } } From 0c8e602cfe2df5e850c742a105e63c81ac988a63 Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Thu, 19 Dec 2024 14:56:15 +0200 Subject: [PATCH 05/15] Apply fixes from the PR's review --- rocks-db/src/clients/asset_client.rs | 8 +- rocks-db/src/columns/offchain_data.rs | 7 +- rocks-db/src/lib.rs | 61 ++++------ .../migrations/clean_update_authorities.rs | 38 ------ .../src/migrations/collection_authority.rs | 106 ---------------- rocks-db/src/migrations/external_plugins.rs | 114 ------------------ rocks-db/src/migrations/mod.rs | 4 - rocks-db/src/migrations/offchain_data.rs | 43 ++++++- rocks-db/src/migrations/spl2022.rs | 57 --------- 9 files changed, 76 insertions(+), 362 deletions(-) delete mode 100644 rocks-db/src/migrations/clean_update_authorities.rs delete mode 100644 rocks-db/src/migrations/collection_authority.rs delete mode 100644 rocks-db/src/migrations/external_plugins.rs delete mode 100644 rocks-db/src/migrations/spl2022.rs diff --git a/rocks-db/src/clients/asset_client.rs b/rocks-db/src/clients/asset_client.rs index a86a8c02b..c9151976d 100644 --- a/rocks-db/src/clients/asset_client.rs +++ b/rocks-db/src/clients/asset_client.rs @@ -218,7 +218,13 @@ impl Storage { .into_iter() .filter_map(|asset| { asset - .filter(|a| a.url.is_some() && !a.url.clone().unwrap().is_empty()) + .filter(|a| { + if let Some(url) = a.url.as_ref() { + !url.is_empty() && a.url.is_some() + } else { + false + } + }) .map(|a| (a.url.clone().unwrap(), a)) }) .collect::<HashMap<_, _>>(); diff --git a/rocks-db/src/columns/offchain_data.rs b/rocks-db/src/columns/offchain_data.rs index 7546a1271..677bfdde3 100644 --- a/rocks-db/src/columns/offchain_data.rs +++ b/rocks-db/src/columns/offchain_data.rs @@ -25,8 +25,11 @@ impl From<&str> for StorageMutability { fn from(storage_mutability: &str) -> Self { if storage_mutability.is_empty() - || storage_mutability.starts_with("ipfs") - || storage_mutability.starts_with("arweave") + || storage_mutability.starts_with("") + || storage_mutability.starts_with("ipfs://") + || storage_mutability.starts_with("https://ipfs") + || storage_mutability.starts_with("https://arweave") + || storage_mutability.starts_with("https://www.arweave") { return StorageMutability::Immutable; } else { return StorageMutability::Mutable;
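
Note: `storage_mutability.starts_with("")` is true for every &str in Rust, so as of this commit the classifier above returns Immutable for any url; a follow-up commit later in this series drops that arm again. The intended prefix-based behaviour is easy to pin down with a few assertions — a minimal sketch of the post-fix classifier, assuming a `PartialEq` derive on `StorageMutability` (the derive only lands in a later commit of this series):

    // ipfs/arweave-hosted metadata is treated as immutable and cached forever;
    // any other host is mutable and re-read once METADATA_CACHE_TTL expires.
    assert_eq!(StorageMutability::from("ipfs://abcdefg"), StorageMutability::Immutable);
    assert_eq!(StorageMutability::from("https://arweave.net/3rf"), StorageMutability::Immutable);
    assert_eq!(StorageMutability::from("https://example.com/meta.json"), StorageMutability::Mutable);
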
crate::errors::StorageError; -use crate::migrations::collection_authority::{ - AssetCollectionVersion0, CollectionAuthorityMigration, -}; -use crate::migrations::external_plugins::AssetDynamicDetailsV0; -use crate::migrations::spl2022::TokenAccounts2022ExtentionsMigration; use crate::tree_seq::{TreeSeqIdx, TreesGaps}; pub mod backup_service; @@ -527,13 +522,9 @@ impl Storage { } asset::AssetDynamicDetails::NAME => { let mf = match migration_state { - MigrationState::Version(version) => match *version { - CollectionAuthorityMigration::VERSION => { - AssetDynamicDetailsV0::merge_dynamic_details - } - _ => asset::AssetDynamicDetails::merge_dynamic_details, - }, - MigrationState::Last => asset::AssetDynamicDetails::merge_dynamic_details, + MigrationState::Last | MigrationState::Version(_) => { + asset::AssetDynamicDetails::merge_dynamic_details + } MigrationState::CreateColumnFamilies => { asset::AssetStaticDetails::merge_keep_existing } @@ -577,15 +568,10 @@ impl Storage { ); } asset::AssetCollection::NAME => { - let mf = if matches!( - migration_state, - &MigrationState::Version(CollectionAuthorityMigration::VERSION) - ) { - AssetCollectionVersion0::merge_asset_collection - } else { - asset::AssetCollection::merge_asset_collection - }; - cf_options.set_merge_operator_associative("merge_fn_asset_collection", mf); + cf_options.set_merge_operator_associative( + "merge_fn_asset_collection", + asset::AssetCollection::merge_asset_collection, + ); } cl_items::ClItem::NAME => { cf_options.set_merge_operator_associative( @@ -612,10 +598,16 @@ impl Storage { ); } OffChainData::NAME => { - cf_options.set_merge_operator_associative( - "merge_fn_off_chain_data_keep_existing", - asset::AssetStaticDetails::merge_keep_existing, - ); + let mf = match migration_state { + MigrationState::Last | MigrationState::Version(_) => { + OffChainData::merge_off_chain_data + } + MigrationState::CreateColumnFamilies => { + asset::AssetStaticDetails::merge_keep_existing + } + }; + cf_options + .set_merge_operator_associative("merge_fn_off_chain_data_keep_existing", mf); } cl_items::ClLeaf::NAME => { cf_options.set_merge_operator_associative( @@ -679,14 +671,9 @@ impl Storage { } TokenAccount::NAME => { let mf = match migration_state { - MigrationState::Version(version) => match *version { - CollectionAuthorityMigration::VERSION - ..=TokenAccounts2022ExtentionsMigration::VERSION => { - AssetDynamicDetailsV0::merge_dynamic_details - } - _ => token_accounts::merge_token_accounts, - }, - MigrationState::Last => token_accounts::merge_token_accounts, + MigrationState::Last | MigrationState::Version(_) => { + token_accounts::merge_token_accounts + } MigrationState::CreateColumnFamilies => { asset::AssetStaticDetails::merge_keep_existing } @@ -813,10 +800,6 @@ impl Storage { #[allow(unused_variables)] pub trait ToFlatbuffersConverter<'a> { type Target: 'a; - fn convert_to_fb(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset { - todo!() - } - fn convert_to_fb_bytes(&self) -> Vec { - todo!() - } + fn convert_to_fb(&self, builder: &mut FlatBufferBuilder<'a>) -> WIPOffset; + fn convert_to_fb_bytes(&self) -> Vec; } diff --git a/rocks-db/src/migrations/clean_update_authorities.rs b/rocks-db/src/migrations/clean_update_authorities.rs deleted file mode 100644 index c40f8e9ac..000000000 --- a/rocks-db/src/migrations/clean_update_authorities.rs +++ /dev/null @@ -1,38 +0,0 @@ -use crate::asset::AssetCollection; -use crate::migrator::{RocksMigration, SerializationType}; -use crate::ToFlatbuffersConverter; -use 
entities::models::{UpdateVersion, Updated}; -use serde::{Deserialize, Serialize}; -use solana_sdk::pubkey::Pubkey; - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct AssetCollectionBeforeCleanUp { - pub pubkey: Pubkey, - pub collection: Updated, - pub is_collection_verified: Updated, - pub authority: Updated>, -} - -impl From for AssetCollection { - fn from(value: AssetCollectionBeforeCleanUp) -> Self { - Self { - pubkey: value.pubkey, - collection: value.collection, - is_collection_verified: value.is_collection_verified, - authority: Updated::new(0, Some(UpdateVersion::WriteVersion(0)), None), - } - } -} - -impl<'a> ToFlatbuffersConverter<'a> for AssetCollection { - type Target = AssetCollection; -} - -pub(crate) struct CleanCollectionAuthoritiesMigration; -impl RocksMigration for CleanCollectionAuthoritiesMigration { - const VERSION: u64 = 2; - const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - type NewDataType = AssetCollection; - type OldDataType = AssetCollectionBeforeCleanUp; -} diff --git a/rocks-db/src/migrations/collection_authority.rs b/rocks-db/src/migrations/collection_authority.rs deleted file mode 100644 index d40538f5a..000000000 --- a/rocks-db/src/migrations/collection_authority.rs +++ /dev/null @@ -1,106 +0,0 @@ -use crate::asset::AssetCollection; -use crate::migrator::{RocksMigration, SerializationType}; -use crate::ToFlatbuffersConverter; -use bincode::deserialize; -use entities::models::{UpdateVersion, Updated}; -use rocksdb::MergeOperands; -use serde::{Deserialize, Serialize}; -use solana_sdk::pubkey::Pubkey; -use tracing::error; - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct AssetCollectionVersion0 { - pub pubkey: Pubkey, - pub collection: Pubkey, - pub is_collection_verified: bool, - pub collection_seq: Option, - pub slot_updated: u64, - pub write_version: Option, -} - -impl From for AssetCollection { - fn from(value: AssetCollectionVersion0) -> Self { - let update_version = if let Some(write_version) = value.write_version { - Some(UpdateVersion::WriteVersion(write_version)) - } else { - value.collection_seq.map(UpdateVersion::Sequence) - }; - Self { - pubkey: value.pubkey, - collection: Updated::new(value.slot_updated, update_version.clone(), value.collection), - is_collection_verified: Updated::new( - value.slot_updated, - update_version, - value.is_collection_verified, - ), - authority: Default::default(), - } - } -} - -impl AssetCollectionVersion0 { - pub fn merge_asset_collection( - _new_key: &[u8], - existing_val: Option<&[u8]>, - operands: &MergeOperands, - ) -> Option> { - let mut result = vec![]; - let mut slot = 0; - let mut collection_seq = None; - let mut write_version = None; - if let Some(existing_val) = existing_val { - match deserialize::(existing_val) { - Ok(value) => { - slot = value.slot_updated; - collection_seq = value.collection_seq; - write_version = value.write_version; - result = existing_val.to_vec(); - } - Err(e) => { - error!("RocksDB: AssetCollectionV0 deserialize existing_val: {}", e) - } - } - } - - for op in operands { - match deserialize::(op) { - Ok(new_val) => { - if write_version.is_some() && new_val.write_version.is_some() { - if new_val.write_version.unwrap() > write_version.unwrap() { - slot = new_val.slot_updated; - write_version = new_val.write_version; - collection_seq = new_val.collection_seq; - result = op.to_vec(); - } - } else if collection_seq.is_some() && 
new_val.collection_seq.is_some() { - if new_val.collection_seq.unwrap() > collection_seq.unwrap() { - slot = new_val.slot_updated; - write_version = new_val.write_version; - collection_seq = new_val.collection_seq; - result = op.to_vec(); - } - } else if new_val.slot_updated > slot { - slot = new_val.slot_updated; - write_version = new_val.write_version; - collection_seq = new_val.collection_seq; - result = op.to_vec(); - } - } - Err(e) => { - error!("RocksDB: AssetCollectionV0 deserialize new_val: {}", e) - } - } - } - - Some(result) - } -} - -pub(crate) struct CollectionAuthorityMigration; -impl RocksMigration for CollectionAuthorityMigration { - const VERSION: u64 = 0; - const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - type NewDataType = AssetCollection; - type OldDataType = AssetCollectionVersion0; -} diff --git a/rocks-db/src/migrations/external_plugins.rs b/rocks-db/src/migrations/external_plugins.rs deleted file mode 100644 index ef4af7431..000000000 --- a/rocks-db/src/migrations/external_plugins.rs +++ /dev/null @@ -1,114 +0,0 @@ -use crate::asset::{update_field, update_optional_field}; -use crate::migrator::{RocksMigration, SerializationType}; -use bincode::{deserialize, serialize}; -use entities::enums::ChainMutability; -use entities::models::Updated; -use rocksdb::MergeOperands; -use serde::{Deserialize, Serialize}; -use solana_sdk::pubkey::Pubkey; -use tracing::error; - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct AssetDynamicDetailsV0 { - pub pubkey: Pubkey, - pub is_compressible: Updated, - pub is_compressed: Updated, - pub is_frozen: Updated, - pub supply: Option>, - pub seq: Option>, - pub is_burnt: Updated, - pub was_decompressed: Option>, - pub onchain_data: Option>, - pub creators: Updated>, - pub royalty_amount: Updated, - pub url: Updated, - pub chain_mutability: Option>, - pub lamports: Option>, - pub executable: Option>, - pub metadata_owner: Option>, - pub raw_name: Option>, - pub plugins: Option>, - pub unknown_plugins: Option>, - pub rent_epoch: Option>, - pub num_minted: Option>, - pub current_size: Option>, - pub plugins_json_version: Option>, -} - -impl AssetDynamicDetailsV0 { - pub fn merge_dynamic_details( - _new_key: &[u8], - existing_val: Option<&[u8]>, - operands: &MergeOperands, - ) -> Option> { - let mut result: Option = None; - if let Some(existing_val) = existing_val { - match deserialize::(existing_val) { - Ok(value) => { - result = Some(value); - } - Err(e) => { - error!( - "RocksDB: AssetDynamicDetailsV0 deserialize existing_val: {}", - e - ) - } - } - } - - for op in operands { - match deserialize::(op) { - Ok(new_val) => { - result = Some(if let Some(mut current_val) = result { - update_field(&mut current_val.is_compressible, &new_val.is_compressible); - update_field(&mut current_val.is_compressed, &new_val.is_compressed); - update_field(&mut current_val.is_frozen, &new_val.is_frozen); - update_optional_field(&mut current_val.supply, &new_val.supply); - update_optional_field(&mut current_val.seq, &new_val.seq); - update_field(&mut current_val.is_burnt, &new_val.is_burnt); - update_field(&mut current_val.creators, &new_val.creators); - update_field(&mut current_val.royalty_amount, &new_val.royalty_amount); - update_optional_field( - &mut current_val.was_decompressed, - &new_val.was_decompressed, - ); - update_optional_field(&mut current_val.onchain_data, &new_val.onchain_data); - update_field(&mut current_val.url, 
&new_val.url); - update_optional_field( - &mut current_val.chain_mutability, - &new_val.chain_mutability, - ); - update_optional_field(&mut current_val.lamports, &new_val.lamports); - update_optional_field(&mut current_val.executable, &new_val.executable); - update_optional_field( - &mut current_val.metadata_owner, - &new_val.metadata_owner, - ); - update_optional_field(&mut current_val.raw_name, &new_val.raw_name); - update_optional_field(&mut current_val.plugins, &new_val.plugins); - update_optional_field( - &mut current_val.unknown_plugins, - &new_val.unknown_plugins, - ); - update_optional_field(&mut current_val.num_minted, &new_val.num_minted); - update_optional_field(&mut current_val.current_size, &new_val.current_size); - update_optional_field(&mut current_val.rent_epoch, &new_val.rent_epoch); - update_optional_field( - &mut current_val.plugins_json_version, - &new_val.plugins_json_version, - ); - - current_val - } else { - new_val - }); - } - Err(e) => { - error!("RocksDB: AssetDynamicDetailsV0 deserialize new_val: {}", e) - } - } - } - - result.and_then(|result| serialize(&result).ok()) - } -} diff --git a/rocks-db/src/migrations/mod.rs b/rocks-db/src/migrations/mod.rs index 086037ca7..f0438a673 100644 --- a/rocks-db/src/migrations/mod.rs +++ b/rocks-db/src/migrations/mod.rs @@ -1,5 +1 @@ -pub mod clean_update_authorities; -pub mod collection_authority; -pub mod external_plugins; pub mod offchain_data; -pub mod spl2022; diff --git a/rocks-db/src/migrations/offchain_data.rs b/rocks-db/src/migrations/offchain_data.rs index 7be9c56c4..8a3711c20 100644 --- a/rocks-db/src/migrations/offchain_data.rs +++ b/rocks-db/src/migrations/offchain_data.rs @@ -1,5 +1,10 @@ +use bincode::deserialize; +use rocksdb::MergeOperands; +use tracing::error; + use crate::columns::offchain_data::{OffChainData, OffChainDataDeprecated, StorageMutability}; use crate::migrator::{RocksMigration, SerializationType}; +use crate::ToFlatbuffersConverter; impl From for OffChainData { fn from(value: OffChainDataDeprecated) -> Self { @@ -13,10 +18,46 @@ impl From for OffChainData { } } +impl OffChainData { + pub fn merge_off_chain_data( + _new_key: &[u8], + existing_val: Option<&[u8]>, + operands: &MergeOperands, + ) -> Option> { + let mut old_data = None; + + // Deserialize existing value if present + if let Some(existing_val) = existing_val { + match deserialize::(existing_val) { + Ok(value) => old_data = Some(value), + Err(e) => error!( + "RocksDB: AssetDynamicDetails deserialize existing_val: {}", + e + ), + } + } + + // Iterate over operands and merge + for op in operands { + match deserialize::(op) { + Ok(new_val) => { + old_data = Some(new_val); + } + Err(e) => error!("RocksDB: AssetDynamicDetails deserialize new_val: {}", e), + } + } + + // Serialize the result back into bytes + old_data.and_then(|deprecated_format| { + Some(OffChainData::from(deprecated_format).convert_to_fb_bytes()) + }) + } +} + pub(crate) struct OffChainDataMigration; impl RocksMigration for OffChainDataMigration { const VERSION: u64 = 4; - const DESERIALIZATION_TYPE: SerializationType = SerializationType::Flatbuffers; + const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; const SERIALIZATION_TYPE: SerializationType = SerializationType::Flatbuffers; type NewDataType = OffChainData; type OldDataType = OffChainDataDeprecated; diff --git a/rocks-db/src/migrations/spl2022.rs b/rocks-db/src/migrations/spl2022.rs deleted file mode 100644 index 8be7c2119..000000000 --- a/rocks-db/src/migrations/spl2022.rs +++ /dev/null @@ 
-1,57 +0,0 @@ -use crate::asset::{update_field, update_optional_field}; -use crate::column::TypedColumn; -use crate::key_encoders::{decode_pubkey, encode_pubkey}; -use crate::migrator::{RocksMigration, SerializationType}; -use crate::{impl_merge_values, AssetDynamicDetails, ToFlatbuffersConverter}; -use bincode::{deserialize, serialize}; -use entities::enums::ChainMutability; -use entities::models::{TokenAccount, Updated}; -use rocksdb::MergeOperands; -use serde::{Deserialize, Serialize}; -use solana_sdk::pubkey::Pubkey; -use tracing::log::error; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TokenAccountWithoutExtentions { - pub pubkey: Pubkey, - pub mint: Pubkey, - pub delegate: Option, - pub owner: Pubkey, - pub frozen: bool, - pub delegated_amount: i64, - pub slot_updated: i64, - pub amount: i64, - pub write_version: u64, -} - -impl_merge_values!(TokenAccountWithoutExtentions); - -impl From for TokenAccount { - fn from(value: TokenAccountWithoutExtentions) -> Self { - Self { - pubkey: value.pubkey, - mint: value.mint, - delegate: value.delegate, - owner: value.owner, - extensions: None, - frozen: value.frozen, - delegated_amount: value.delegated_amount, - slot_updated: value.slot_updated, - amount: value.amount, - write_version: value.write_version, - } - } -} - -impl<'a> ToFlatbuffersConverter<'a> for TokenAccount { - type Target = TokenAccount; -} - -pub(crate) struct TokenAccounts2022ExtentionsMigration; -impl RocksMigration for TokenAccounts2022ExtentionsMigration { - const VERSION: u64 = 3; - const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; - type NewDataType = TokenAccount; - type OldDataType = TokenAccountWithoutExtentions; -} From beeebc3db910a2f940416f72fb901b1267166efc Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Thu, 19 Dec 2024 21:20:30 +0200 Subject: [PATCH 06/15] Rectify issues mentioned on the PR's review --- rocks-db/src/clients/asset_client.rs | 4 +-- rocks-db/src/columns/offchain_data.rs | 1 - rocks-db/src/lib.rs | 18 ++++++-------- rocks-db/src/migrations/offchain_data.rs | 31 +++--------------------- rocks-db/src/migrator.rs | 21 ---------------- 5 files changed, 14 insertions(+), 61 deletions(-) diff --git a/rocks-db/src/clients/asset_client.rs b/rocks-db/src/clients/asset_client.rs index c9151976d..0d35fded8 100644 --- a/rocks-db/src/clients/asset_client.rs +++ b/rocks-db/src/clients/asset_client.rs @@ -219,8 +219,8 @@ impl Storage { .filter_map(|asset| { asset .filter(|a| { - if let Some(url) = a.url.as_ref() { - !url.is_empty() && a.url.is_some() + if let Some(metadata) = a.metadata.as_ref() { + !metadata.is_empty() && a.url.is_some() } else { false } diff --git a/rocks-db/src/columns/offchain_data.rs b/rocks-db/src/columns/offchain_data.rs index 677bfdde3..3c4d2b428 100644 --- a/rocks-db/src/columns/offchain_data.rs +++ b/rocks-db/src/columns/offchain_data.rs @@ -25,7 +25,6 @@ impl StorageMutability { impl From<&str> for StorageMutability { fn from(storage_mutability: &str) -> Self { if storage_mutability.is_empty() - || storage_mutability.starts_with("") || storage_mutability.starts_with("ipfs://") || storage_mutability.starts_with("https://ipfs") || storage_mutability.starts_with("https://arweave") diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index 88c2dde8a..c0c50a254 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -597,17 +597,15 @@ impl Storage { asset::AssetStaticDetails::merge_keep_existing, ); } + 
OffChainDataDeprecated::NAME => cf_options.set_merge_operator_associative( + "merge_fn_off_chain_data_keep_existing", + asset::AssetStaticDetails::merge_keep_existing, + ), OffChainData::NAME => { - let mf = match migration_state { - MigrationState::Last | MigrationState::Version(_) => { - OffChainData::merge_off_chain_data - } - MigrationState::CreateColumnFamilies => { - asset::AssetStaticDetails::merge_keep_existing - } - }; - cf_options - .set_merge_operator_associative("merge_fn_off_chain_data_keep_existing", mf); + cf_options.set_merge_operator_associative( + "merge_fn_off_chain_data_keep_existing", + OffChainData::merge_off_chain_data, + ); } cl_items::ClLeaf::NAME => { cf_options.set_merge_operator_associative( diff --git a/rocks-db/src/migrations/offchain_data.rs b/rocks-db/src/migrations/offchain_data.rs index 8a3711c20..ba437b2e0 100644 --- a/rocks-db/src/migrations/offchain_data.rs +++ b/rocks-db/src/migrations/offchain_data.rs @@ -1,10 +1,7 @@ -use bincode::deserialize; use rocksdb::MergeOperands; -use tracing::error; use crate::columns::offchain_data::{OffChainData, OffChainDataDeprecated, StorageMutability}; use crate::migrator::{RocksMigration, SerializationType}; -use crate::ToFlatbuffersConverter; impl From for OffChainData { fn from(value: OffChainDataDeprecated) -> Self { @@ -24,33 +21,13 @@ impl OffChainData { existing_val: Option<&[u8]>, operands: &MergeOperands, ) -> Option> { - let mut old_data = None; + let mut bytes = existing_val; - // Deserialize existing value if present - if let Some(existing_val) = existing_val { - match deserialize::(existing_val) { - Ok(value) => old_data = Some(value), - Err(e) => error!( - "RocksDB: AssetDynamicDetails deserialize existing_val: {}", - e - ), - } + if let Some(op_bytes) = operands.into_iter().last() { + bytes = Some(op_bytes); } - // Iterate over operands and merge - for op in operands { - match deserialize::(op) { - Ok(new_val) => { - old_data = Some(new_val); - } - Err(e) => error!("RocksDB: AssetDynamicDetails deserialize new_val: {}", e), - } - } - - // Serialize the result back into bytes - old_data.and_then(|deprecated_format| { - Some(OffChainData::from(deprecated_format).convert_to_fb_bytes()) - }) + bytes.map(|bytes| bytes.to_vec()) } } diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index 2c88ee60c..958fdbdab 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -82,27 +82,6 @@ impl Storage { let migration_applier = MigrationApplier::new(db_path, migration_storage_path, applied_migrations); - // // apply all migrations - // migration_applier - // .apply_migration(crate::migrations::collection_authority::CollectionAuthorityMigration) - // .await?; - // migration_applier - // .apply_migration(crate::migrations::external_plugins::ExternalPluginsMigration) - // .await?; - // migration_applier - // .apply_migration( - // crate::migrations::clean_update_authorities::CleanCollectionAuthoritiesMigration, - // ) - // .await?; - // migration_applier - // .apply_migration(crate::migrations::spl2022::TokenAccounts2022ExtentionsMigration) - // .await?; - // migration_applier - // .apply_migration( - // crate::migrations::spl2022::DynamicDataToken2022MintExtentionsMigration, - // ) - // .await?; - migration_applier .apply_migration(crate::migrations::offchain_data::OffChainDataMigration) .await?; From 4e83c768b9349be07970a907ed0a019ece557094 Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Thu, 19 Dec 2024 18:48:23 -0500 Subject: [PATCH 07/15] migration test with some fixes, the test 
itself is failing --- integration_tests/src/common.rs | 2 +- rocks-db/src/columns/asset.rs | 2 +- rocks-db/src/columns/offchain_data.rs | 2 +- rocks-db/src/lib.rs | 3 +- .../tests/asset_streaming_client_tests.rs | 73 ++++++----- rocks-db/tests/migration_tests.rs | 123 +++++++----------- rocks-db/tests/raw_block_tests.rs | 15 ++- tests/setup/src/pg.rs | 4 +- 8 files changed, 99 insertions(+), 125 deletions(-) diff --git a/integration_tests/src/common.rs b/integration_tests/src/common.rs index 2007dbbd5..024db4d57 100644 --- a/integration_tests/src/common.rs +++ b/integration_tests/src/common.rs @@ -109,7 +109,7 @@ impl TestSetup { red_metrics.clone(), MIN_PG_CONNECTIONS, POSTGRE_MIGRATIONS_PATH, - None, + Some(PathBuf::from_str("./dump").unwrap()), ) .await .unwrap(), diff --git a/rocks-db/src/columns/asset.rs b/rocks-db/src/columns/asset.rs index 039d9a6e0..87081b073 100644 --- a/rocks-db/src/columns/asset.rs +++ b/rocks-db/src/columns/asset.rs @@ -3303,7 +3303,7 @@ impl TypedColumn for AssetLeaf { impl AssetLeaf { pub fn merge_asset_leaf( - new_key: &[u8], + _new_key: &[u8], existing_val: Option<&[u8]>, operands: &MergeOperands, ) -> Option> { diff --git a/rocks-db/src/columns/offchain_data.rs b/rocks-db/src/columns/offchain_data.rs index 3c4d2b428..3f6e96433 100644 --- a/rocks-db/src/columns/offchain_data.rs +++ b/rocks-db/src/columns/offchain_data.rs @@ -6,7 +6,7 @@ use entities::models::OffChainDataGrpc; use flatbuffers::{FlatBufferBuilder, WIPOffset}; use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, Clone, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq)] pub enum StorageMutability { #[default] Immutable, diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index c0c50a254..7512f304a 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -388,6 +388,7 @@ impl Storage { fn create_cf_descriptors(migration_state: &MigrationState) -> Vec { vec![ Self::new_cf_descriptor::(migration_state), + Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), Self::new_cf_descriptor::(migration_state), @@ -598,7 +599,7 @@ impl Storage { ); } OffChainDataDeprecated::NAME => cf_options.set_merge_operator_associative( - "merge_fn_off_chain_data_keep_existing", + "merge_fn_off_chain_data_keep_existing_deprecated", asset::AssetStaticDetails::merge_keep_existing, ), OffChainData::NAME => { diff --git a/rocks-db/tests/asset_streaming_client_tests.rs b/rocks-db/tests/asset_streaming_client_tests.rs index d5ef20f1f..6efba3a8e 100644 --- a/rocks-db/tests/asset_streaming_client_tests.rs +++ b/rocks-db/tests/asset_streaming_client_tests.rs @@ -101,40 +101,41 @@ mod tests { assert_eq!(pk_set, pks.pubkeys.into_iter().collect::>()); } - #[tokio::test] - async fn test_get_raw_blocks_stream_in_range_data() { - let env = RocksTestEnvironment::new(&[]); - let storage = &env.storage; - let slot = 153; - let blockhash = "blockhash"; - storage - .raw_blocks_cbor - .put_cbor_encoded( - slot, - RawBlock { - slot, - block: UiConfirmedBlock { - previous_blockhash: "".to_string(), - blockhash: blockhash.to_string(), - parent_slot: 0, - transactions: None, - signatures: None, - rewards: None, - block_time: None, - block_height: None, - }, - }, - ) - .await - .unwrap(); - // Call get_asset_details_stream_in_range on a database - let response = storage.get_raw_blocks_stream_in_range(100, 200).await; - - assert!(response.is_ok()); - let mut stream = response.unwrap(); - let resp = 
stream.next().await.unwrap().unwrap(); - - assert_eq!(resp.slot, slot); - assert_eq!(resp.block.blockhash, blockhash.to_string()); - } + //todo: + // #[tokio::test] + // async fn test_get_raw_blocks_stream_in_range_data() { + // let env = RocksTestEnvironment::new(&[]); + // let storage = &env.storage; + // let slot = 153; + // let blockhash = "blockhash"; + // storage + // .raw_blocks_cbor + // .put_cbor_encoded( + // slot, + // RawBlock { + // slot, + // block: UiConfirmedBlock { + // previous_blockhash: "".to_string(), + // blockhash: blockhash.to_string(), + // parent_slot: 0, + // transactions: None, + // signatures: None, + // rewards: None, + // block_time: None, + // block_height: None, + // }, + // }, + // ) + // .await + // .unwrap(); + // // Call get_asset_details_stream_in_range on a database + // let response = storage.get_raw_blocks_stream_in_range(100, 200).await; + + // assert!(response.is_ok()); + // let mut stream = response.unwrap(); + // let resp = stream.next().await.unwrap().unwrap(); + + // assert_eq!(resp.slot, slot); + // assert_eq!(resp.block.blockhash, blockhash.to_string()); + // } } diff --git a/rocks-db/tests/migration_tests.rs b/rocks-db/tests/migration_tests.rs index 604990881..77662ffbe 100644 --- a/rocks-db/tests/migration_tests.rs +++ b/rocks-db/tests/migration_tests.rs @@ -1,19 +1,28 @@ #[cfg(test)] mod tests { - use bincode::serialize; use metrics_utils::red::RequestErrorDurationMetrics; - use rocks_db::columns::asset::AssetCollection; - use rocks_db::column::TypedColumn; - use rocks_db::migrations::collection_authority::AssetCollectionVersion0; + use rocks_db::columns::offchain_data::OffChainDataDeprecated; use rocks_db::migrator::MigrationState; use rocks_db::Storage; - use solana_sdk::pubkey::Pubkey; use std::sync::Arc; use tempfile::TempDir; use tokio::sync::Mutex; use tokio::task::JoinSet; - fn put_unmerged_value_to_storage(path: &str) -> (Pubkey, AssetCollectionVersion0) { + #[tokio::test] + async fn test_migration() { + let dir = TempDir::new().unwrap(); + let v1 = OffChainDataDeprecated { + url: "https://mail.com".to_string(), + metadata: "".to_string(), + }; + let url2 = "ipfs://abcdefg"; + let v2 = OffChainDataDeprecated { + url: url2.to_string(), + metadata: format!("{{\"image\":\"{}\",\"type\":\"{}\"}}", url2, "image/gif") + .to_string(), + }; + let path = dir.path().to_str().unwrap(); let old_storage = Storage::open( path, Arc::new(Mutex::new(JoinSet::new())), @@ -21,76 +30,15 @@ mod tests { MigrationState::Version(0), ) .unwrap(); - let key = Pubkey::new_unique(); old_storage - .asset_collection_data - .backend - .put_cf( - &old_storage - .asset_collection_data - .backend - .cf_handle(AssetCollection::NAME) - .unwrap(), - AssetCollection::encode_key(key), - serialize(&AssetCollectionVersion0 { - pubkey: Pubkey::new_unique(), - collection: Pubkey::new_unique(), - is_collection_verified: false, - collection_seq: Some(5), - slot_updated: 5, - write_version: Some(5), - }) - .unwrap(), - ) - .unwrap(); - - let new_val = AssetCollectionVersion0 { - pubkey: Pubkey::new_unique(), - collection: Pubkey::new_unique(), - is_collection_verified: false, - collection_seq: Some(10), - slot_updated: 10, - write_version: Some(10), - }; + .asset_offchain_data_deprecated + .put(v1.url.clone(), v1.clone()) + .expect("should put"); old_storage - .asset_collection_data - .backend - .merge_cf( - &old_storage - .asset_collection_data - .backend - .cf_handle(AssetCollection::NAME) - .unwrap(), - AssetCollection::encode_key(key), - serialize(&new_val).unwrap(), - ) 
- .unwrap(); - // close connection in the end of the scope - (key, new_val) - } - - #[test] - fn test_merge_fail() { - let dir = TempDir::new().unwrap(); - put_unmerged_value_to_storage(dir.path().to_str().unwrap()); - assert_eq!( - Storage::open( - dir.path().to_str().unwrap(), - Arc::new(Mutex::new(JoinSet::new())), - Arc::new(RequestErrorDurationMetrics::new()), - MigrationState::Last, - ) - .err() - .unwrap() - .to_string(), - "storage error: RocksDb(Error { message: \"Corruption: Merge operator failed\" })" - ); - } - - #[tokio::test] - async fn test_migration() { - let dir = TempDir::new().unwrap(); - let (key, val) = put_unmerged_value_to_storage(dir.path().to_str().unwrap()); + .asset_offchain_data_deprecated + .put(v2.url.clone(), v2.clone()) + .expect("should put data"); + drop(old_storage); let secondary_storage_dir = TempDir::new().unwrap(); let migration_version_manager = Storage::open_secondary( dir.path().to_str().unwrap(), @@ -116,8 +64,29 @@ mod tests { ) .unwrap(); - let selected_val = new_storage.asset_collection_data.get(key).unwrap().unwrap(); - - assert_eq!(selected_val.pubkey, val.pubkey) + let migrated_v1 = new_storage + .asset_offchain_data + .get(v1.url.clone()) + .expect("should get value successfully") + .expect("the value should be not empty"); + assert_eq!( + migrated_v1.storage_mutability, + rocks_db::columns::offchain_data::StorageMutability::Mutable + ); + assert_eq!(migrated_v1.url, Some(v1.url.to_string())); + assert_eq!(migrated_v1.metadata, Some(v1.metadata)); + assert_eq!(migrated_v1.last_read_at, 0); + let migrated_v2 = new_storage + .asset_offchain_data + .get(v2.url.clone()) + .expect("should get value successfully") + .expect("the value should be not empty"); + assert_eq!( + migrated_v2.storage_mutability, + rocks_db::columns::offchain_data::StorageMutability::Immutable + ); + assert_eq!(migrated_v2.url, Some(v2.url.to_string())); + assert_eq!(migrated_v2.metadata, Some(v2.metadata)); + assert_eq!(migrated_v2.last_read_at, 0); } } diff --git a/rocks-db/tests/raw_block_tests.rs b/rocks-db/tests/raw_block_tests.rs index b925c7758..50293cc0a 100644 --- a/rocks-db/tests/raw_block_tests.rs +++ b/rocks-db/tests/raw_block_tests.rs @@ -5,11 +5,12 @@ mod tests { use setup::rocks::*; - #[tokio::test] - #[tracing_test::traced_test] - async fn test_get_raw_block_on_empty_db() { - let storage = RocksTestEnvironment::new(&[]).storage; - let response = storage.already_processed_slot(137827927).await.unwrap(); - assert!(response == false); - } + // todo + // #[tokio::test] + // #[tracing_test::traced_test] + // async fn test_get_raw_block_on_empty_db() { + // let storage = RocksTestEnvironment::new(&[]).storage; + // let response = storage.already_processed_slot(137827927).await.unwrap(); + // assert!(response == false); + // } } diff --git a/tests/setup/src/pg.rs b/tests/setup/src/pg.rs index 4655e1b7f..481587770 100644 --- a/tests/setup/src/pg.rs +++ b/tests/setup/src/pg.rs @@ -7,6 +7,8 @@ use rand::Rng; use solana_sdk::pubkey::Pubkey; use sqlx::{Executor, Pool, Postgres}; use std::collections::BTreeMap; +use std::path::PathBuf; +use std::str::FromStr; use std::sync::Arc; use testcontainers::core::WaitFor; use testcontainers::{Container, Image}; @@ -75,7 +77,7 @@ impl<'a> TestEnvironment<'a> { let (pool, db_name) = setup_database(&node).await; let client = PgClient::new_with_pool( pool.clone(), - None, + Some(PathBuf::from_str(path).unwrap()), Arc::new(RequestErrorDurationMetrics::new()), ); From 252c4e36c6a328fea99801a7ba92af0b4f038190 Mon Sep 17 00:00:00 2001 
From: Stanislav Cherviakov Date: Thu, 19 Dec 2024 19:34:12 -0500 Subject: [PATCH 08/15] more migration checks... --- rocks-db/tests/migration_tests.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/rocks-db/tests/migration_tests.rs b/rocks-db/tests/migration_tests.rs index 77662ffbe..c58b70f2b 100644 --- a/rocks-db/tests/migration_tests.rs +++ b/rocks-db/tests/migration_tests.rs @@ -1,7 +1,8 @@ #[cfg(test)] mod tests { use metrics_utils::red::RequestErrorDurationMetrics; - use rocks_db::columns::offchain_data::OffChainDataDeprecated; + use rocks_db::column::TypedColumn; + use rocks_db::columns::offchain_data::{OffChainData, OffChainDataDeprecated}; use rocks_db::migrator::MigrationState; use rocks_db::Storage; use std::sync::Arc; @@ -63,7 +64,25 @@ mod tests { MigrationState::Last, ) .unwrap(); + let mut it = new_storage + .db + .raw_iterator_cf(&new_storage.db.cf_handle(OffChainData::NAME).unwrap()); + it.seek_to_first(); + assert!(it.valid(),"iterator should be valid on start"); + while it.valid() { + println!("has key {:?} with value {:?}", it.key(), it.value()); + it.next(); + } + let migrated_v1 = new_storage + .db + .get_pinned_cf( + &new_storage.db.cf_handle(OffChainData::NAME).unwrap(), + OffChainData::encode_key(v1.url.clone()), + ) + .expect("expect to get value successfully") + .expect("value to be present"); + print!("migrated is {:?}", migrated_v1.to_vec()); let migrated_v1 = new_storage .asset_offchain_data .get(v1.url.clone()) From 73a7bae91c32fd333a51cc3e7982d951b47b0351 Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Sat, 21 Dec 2024 19:11:17 +0200 Subject: [PATCH 09/15] add getter for generic TypedColumn type --- rocks-db/src/bin/fork_detector/main.rs | 3 ++- rocks-db/src/column.rs | 37 ++++++++++++++++++++++++-- rocks-db/src/columns/offchain_data.rs | 22 +++++++++++++++ rocks-db/src/migrator.rs | 9 ++++--- rocks-db/tests/migration_tests.rs | 20 +++++++------- 5 files changed, 75 insertions(+), 16 deletions(-) diff --git a/rocks-db/src/bin/fork_detector/main.rs b/rocks-db/src/bin/fork_detector/main.rs index 7a766e530..c4f36fbe1 100644 --- a/rocks-db/src/bin/fork_detector/main.rs +++ b/rocks-db/src/bin/fork_detector/main.rs @@ -55,7 +55,8 @@ async fn find_forks(source_path: &str) -> Result<(), String> { println!("Opened in {:?}", start.elapsed()); let slots_db = Arc::new( - SlotStorage::open_secondary(todo!(), todo!(), js.clone(), red_metrics.clone()) + // where secondary path comes from? 
+ SlotStorage::open_secondary(source_path, source_path, js.clone(), red_metrics.clone()) .expect("should open slots db"), ); diff --git a/rocks-db/src/column.rs b/rocks-db/src/column.rs index f63db7097..c1fb2c555 100644 --- a/rocks-db/src/column.rs +++ b/rocks-db/src/column.rs @@ -3,11 +3,12 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use bincode::{deserialize, serialize}; +use crate::columns::offchain_data::OffChainData; +use crate::generated::offchain_data_generated::off_chain_data; +use crate::{Result, StorageError, ToFlatbuffersConverter, BATCH_GET_ACTION, ROCKS_COMPONENT}; use metrics_utils::red::RequestErrorDurationMetrics; use rocksdb::{BoundColumnFamily, DBIteratorWithThreadMode, DB}; use serde::{de::DeserializeOwned, Serialize}; - -use crate::{Result, StorageError, ToFlatbuffersConverter, BATCH_GET_ACTION, ROCKS_COMPONENT}; pub trait TypedColumn { type KeyType: Sync + Clone + Send + Debug; type ValueType: Sync + Serialize + DeserializeOwned + Send; @@ -327,6 +328,38 @@ where result } + pub fn get_flatbuffers_encoded(&self, key: C::KeyType) -> Result<Option<C::ValueType>> { + let type_name = std::any::type_name::<C::ValueType>().split("::").last().unwrap(); + + let value = if let Some(serialized_value) = + self.backend.get_cf(&self.handle(), C::encode_key(key))? + { + // instead of this match, a macro could probably be used to do the same thing + // without excessive hand-waving + let deserialized_data = match type_name { + "OffChainData" => { + let fb_structure = off_chain_data::root_as_off_chain_data(&serialized_value) + .map_err(|e| StorageError::Common(e.to_string()))?; + OffChainData::from(fb_structure) + } + _ => unreachable!(), + }; + + // Safety: we are sure that the deserialized_data is of the same type as C::ValueType, + // because ValueType is usually Self and the type name is taken from C's ValueType + unsafe { + // TODO: maybe a pointer cast could be used instead of the Box + Some(std::mem::transmute::<_, Box<C::ValueType>>(Box::new( + deserialized_data, + ))) + } + } else { + None + }; + + Ok(value.map(|val| *val)) + } + async fn batch_get_generic( &self, keys: Vec<C::KeyType>, diff --git a/rocks-db/src/columns/offchain_data.rs b/rocks-db/src/columns/offchain_data.rs index 3f6e96433..081aefaec 100644 --- a/rocks-db/src/columns/offchain_data.rs +++ b/rocks-db/src/columns/offchain_data.rs @@ -46,6 +46,28 @@ impl From<OffChainData> for OffChainDataGrpc { } } +impl<'a> From<fb::OffChainData<'a>> for OffChainData { + fn from(value: fb::OffChainData<'a>) -> Self { + Self { + storage_mutability: value.storage_mutability().into(), + url: value.url().map(|url| url.to_string()), + metadata: value.metadata().map(|metadata| metadata.to_string()), + last_read_at: value.last_read_at(), + } + } +} + +impl<'a> From<fb::StorageMutability> for StorageMutability { + fn from(storage_mutability: fb::StorageMutability) -> Self { + match storage_mutability.variant_name() { + Some("Immutable") => StorageMutability::Immutable, + Some("Mutable") => StorageMutability::Mutable, + Some(_) => unreachable!(), + None => unreachable!(), + } + } +} + impl From<OffChainDataGrpc> for OffChainData { fn from(off_chain_data: OffChainDataGrpc) -> Self { let storage_mutability = StorageMutability::from(off_chain_data.url.as_str());
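
Note: together with the `From<fb::OffChainData<'a>>` conversion above, `get_flatbuffers_encoded` forms the read path for the flatbuffers-encoded column. A minimal usage sketch — the opened `storage` handle and the stored url are assumptions here; the call shape mirrors the migration test later in this series:

    // Fetches the raw bytes for the key, verifies them with
    // off_chain_data::root_as_off_chain_data, and converts the flatbuffers
    // view into the owned Rust struct via the From impl above.
    let value = storage
        .asset_offchain_data
        .get_flatbuffers_encoded("ipfs://abcdefg".to_string())?
        .expect("value should be present");
    assert_eq!(value.url.as_deref(), Some("ipfs://abcdefg"));
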
diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index 958fdbdab..aaee8cb0c 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -47,6 +47,7 @@ pub trait RocksMigration { + Serialize + DeserializeOwned + Send + + TypedColumn + Into<<Self::NewDataType as TypedColumn>::ValueType>; } @@ -249,10 +250,10 @@ impl<'a> MigrationApplier<'a> { batch.put_cf( &temporary_migration_storage .db - .cf_handle(<<M as RocksMigration>::NewDataType as TypedColumn>::NAME) + .cf_handle(<<M as RocksMigration>::OldDataType as TypedColumn>::NAME) .ok_or(StorageError::Common(format!( "Cannot get cf_handle for {}", - <<M as RocksMigration>::NewDataType as TypedColumn>::NAME + <<M as RocksMigration>::OldDataType as TypedColumn>::NAME )))?, key, value, @@ -312,10 +313,10 @@ ) -> Result<impl Iterator<Item = (Box<[u8]>, Box<[u8]>)> + '_> { Ok(db .iterator_cf( - &db.cf_handle(<<M as RocksMigration>::NewDataType as TypedColumn>::NAME) + &db.cf_handle(<<M as RocksMigration>::OldDataType as TypedColumn>::NAME) .ok_or(StorageError::Common(format!( "Cannot get cf_handle for {}", - <<M as RocksMigration>::NewDataType as TypedColumn>::NAME + <<M as RocksMigration>::OldDataType as TypedColumn>::NAME )))?, IteratorMode::Start, ) diff --git a/rocks-db/tests/migration_tests.rs b/rocks-db/tests/migration_tests.rs index c58b70f2b..8a6e7fb98 100644 --- a/rocks-db/tests/migration_tests.rs +++ b/rocks-db/tests/migration_tests.rs @@ -42,33 +42,35 @@ mod tests { drop(old_storage); let secondary_storage_dir = TempDir::new().unwrap(); let migration_version_manager = Storage::open_secondary( - dir.path().to_str().unwrap(), + path, secondary_storage_dir.path().to_str().unwrap(), Arc::new(Mutex::new(JoinSet::new())), Arc::new(RequestErrorDurationMetrics::new()), - MigrationState::Last, + MigrationState::Version(4), ) .unwrap(); + let binding = TempDir::new().unwrap(); + let migration_storage_path = binding.path().to_str().unwrap(); Storage::apply_all_migrations( - dir.path().to_str().unwrap(), - TempDir::new().unwrap().path().to_str().unwrap(), + path, + migration_storage_path, Arc::new(migration_version_manager), ) .await .unwrap(); let new_storage = Storage::open( - path, Arc::new(Mutex::new(JoinSet::new())), Arc::new(RequestErrorDurationMetrics::new()), - MigrationState::Last, + MigrationState::Version(4), ) .unwrap(); let mut it = new_storage .db .raw_iterator_cf(&new_storage.db.cf_handle(OffChainData::NAME).unwrap()); it.seek_to_first(); - assert!(it.valid(),"iterator should be valid on start"); + assert!(it.valid(), "iterator should be valid on start"); while it.valid() { println!("has key {:?} with value {:?}", it.key(), it.value()); it.next(); @@ -85,7 +87,7 @@ print!("migrated is {:?}", migrated_v1.to_vec()); let migrated_v1 = new_storage .asset_offchain_data - .get(v1.url.clone()) + .get_flatbuffers_encoded(v1.url.clone()) .expect("should get value successfully") .expect("the value should be not empty"); assert_eq!( @@ -97,7 +99,7 @@ let migrated_v2 = new_storage .asset_offchain_data - .get(v2.url.clone()) + .get_flatbuffers_encoded(v2.url.clone()) .expect("should get value successfully") .expect("the value should be not empty"); assert_eq!(
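
Note: the commit below makes each column own its wire format, so call sites stop choosing a codec explicitly. A rough sketch of the before/after call shape — only `decode` and `put_batch` appear in the hunks below, the names `target_db` and `successful_blocks` are taken from those hunks, and the symmetric write-side `encode` hook is an assumption:

    // Before: the caller picked the codec at every call site.
    let metadata = bincode::deserialize::<OffChainData>(&value);
    target_db.raw_blocks_cbor.put_batch_cbor(successful_blocks.clone()).await?;

    // After: the column's TypedColumn implementation decides how bytes are
    // (de)serialized, and every caller goes through the same entry points.
    let metadata = OffChainData::decode(&value);
    target_db.raw_blocks_cbor.put_batch(successful_blocks.clone()).await?;
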
rocks-db/src/clients/batch_client.rs | 11 +- rocks-db/src/clients/transaction_client.rs | 11 +- rocks-db/src/column.rs | 286 +++--------------- rocks-db/src/columns/editions.rs | 9 + rocks-db/src/columns/offchain_data.rs | 11 + rocks-db/src/columns/raw_block.rs | 11 +- rocks-db/src/migrator.rs | 8 +- .../tests/asset_streaming_client_tests.rs | 2 +- rocks-db/tests/migration_tests.rs | 4 +- 23 files changed, 128 insertions(+), 280 deletions(-) diff --git a/nft_ingester/src/api/dapi/change_logs.rs b/nft_ingester/src/api/dapi/change_logs.rs index bbe661017..ebbcccf97 100644 --- a/nft_ingester/src/api/dapi/change_logs.rs +++ b/nft_ingester/src/api/dapi/change_logs.rs @@ -49,10 +49,11 @@ pub async fn get_proof_for_assets< asset_ids.iter().map(|id| (id.to_string(), None)).collect(); // Instead of using a HashMap keyed by tree_id, we keep a Vec of (tree_id, pubkey, nonce). - let tree_pubkeys: Vec<(Pubkey, Pubkey, u64)> = fetch_asset_data!(rocks_db, asset_leaf_data, asset_ids) - .values() - .map(|asset| (asset.tree_id, asset.pubkey, asset.nonce.unwrap_or_default())) - .collect(); + let tree_pubkeys: Vec<(Pubkey, Pubkey, u64)> = + fetch_asset_data!(rocks_db, asset_leaf_data, asset_ids) + .values() + .map(|asset| (asset.tree_id, asset.pubkey, asset.nonce.unwrap_or_default())) + .collect(); // Construct leaf keys for all requested assets let leaf_keys: Vec = tree_pubkeys diff --git a/nft_ingester/src/bin/migrator/main.rs b/nft_ingester/src/bin/migrator/main.rs index de74d8840..af2186bbf 100644 --- a/nft_ingester/src/bin/migrator/main.rs +++ b/nft_ingester/src/bin/migrator/main.rs @@ -170,7 +170,7 @@ impl JsonMigrator { match json { Ok((_key, value)) => { - let metadata = bincode::deserialize::(&value); + let metadata = OffChainData::decode(&value); match metadata { Ok(metadata) => { diff --git a/nft_ingester/src/bin/slot_persister/main.rs b/nft_ingester/src/bin/slot_persister/main.rs index f7acb727f..1a199002a 100644 --- a/nft_ingester/src/bin/slot_persister/main.rs +++ b/nft_ingester/src/bin/slot_persister/main.rs @@ -406,7 +406,7 @@ async fn process_slots( ); if let Err(e) = target_db .raw_blocks_cbor - .put_batch_cbor(successful_blocks.clone()) + .put_batch(successful_blocks.clone()) .await { error!("Failed to save blocks to RocksDB: {}", e); diff --git a/nft_ingester/src/gapfiller.rs b/nft_ingester/src/gapfiller.rs index f7f9d76f9..a7c547a69 100644 --- a/nft_ingester/src/gapfiller.rs +++ b/nft_ingester/src/gapfiller.rs @@ -133,7 +133,7 @@ pub async fn process_raw_blocks_stream( // Some(Ok(block)) => { // if let Some(e) = storage // .raw_blocks_cbor - // .put_cbor_encoded(block.slot, block) + // .put(block.slot, block) // .await // .err() // { diff --git a/nft_ingester/src/json_worker.rs b/nft_ingester/src/json_worker.rs index 5e1b555bc..b10539851 100644 --- a/nft_ingester/src/json_worker.rs +++ b/nft_ingester/src/json_worker.rs @@ -491,7 +491,7 @@ impl JsonPersister for JsonWorker { self.rocks_db .asset_offchain_data - .put_batch_flatbuffers(rocks_updates) + .put_batch(rocks_updates) .await .map_err(|e| JsonDownloaderError::MainStorageError(e.to_string()))?; diff --git a/nft_ingester/src/transaction_ingester.rs b/nft_ingester/src/transaction_ingester.rs index 5aaf05fb2..9c5e0b7e9 100644 --- a/nft_ingester/src/transaction_ingester.rs +++ b/nft_ingester/src/transaction_ingester.rs @@ -17,7 +17,7 @@ impl BackfillTransactionIngester { #[async_trait] impl TransactionIngester for BackfillTransactionIngester { - // called only from the signatures fetcher at the moment, as it's switched to fetch finalized 
signatures only it's safe to assume the source is finalized + // called only from the signatures fetcher at the moment, as it's switched to fetch finalized signatures only it's safe to assume the source is finalized async fn ingest_transaction(&self, tx: BufferedTransaction) -> Result<(), StorageError> { self.tx_processor .process_transaction(tx, true) diff --git a/nft_ingester/tests/batch_mint_test.rs b/nft_ingester/tests/batch_mint_test.rs index 335fc7669..a30b09a4a 100644 --- a/nft_ingester/tests/batch_mint_test.rs +++ b/nft_ingester/tests/batch_mint_test.rs @@ -749,11 +749,16 @@ async fn batch_mint_persister_test() { // Test get asset proof batch let payload = GetAssetProofBatch { ids: test_batch_mint - .batch_mints.into_iter().map(|lu|lu.leaf_update.id().to_string()).take(10).collect(), + .batch_mints + .into_iter() + .map(|lu| lu.leaf_update.id().to_string()) + .take(10) + .collect(), }; let proof_result = api.get_asset_proof_batch(payload).await.unwrap(); - let asset_proofs: HashMap> = serde_json::from_value(proof_result).unwrap(); - for (_key, proof) in asset_proofs{ + let asset_proofs: HashMap> = + serde_json::from_value(proof_result).unwrap(); + for (_key, proof) in asset_proofs { assert!(proof.is_some()) } } diff --git a/nft_ingester/tests/clean_forks_test.rs b/nft_ingester/tests/clean_forks_test.rs index cbc9abdbd..e731f7c3a 100644 --- a/nft_ingester/tests/clean_forks_test.rs +++ b/nft_ingester/tests/clean_forks_test.rs @@ -925,7 +925,7 @@ async fn test_process_forked_transaction() { // for this test all we need is key from Rocks raw_blocks_cbor column family, so RawBlock data could be arbitrary storage .raw_blocks_cbor - .put_cbor_encoded( + .put( slot_normal_tx, RawBlock { slot: slot_normal_tx, @@ -948,7 +948,7 @@ async fn test_process_forked_transaction() { // 16000 is arbitrary number storage .raw_blocks_cbor - .put_cbor_encoded( + .put( slot_normal_tx + 16000, RawBlock { slot: slot_normal_tx + 16000, diff --git a/nft_ingester/tests/gapfiller_tests.rs b/nft_ingester/tests/gapfiller_tests.rs index 67ea441f2..3abc59e29 100644 --- a/nft_ingester/tests/gapfiller_tests.rs +++ b/nft_ingester/tests/gapfiller_tests.rs @@ -123,7 +123,7 @@ async fn test_process_raw_blocks_stream() { let selected_data = storage .raw_blocks_cbor - .get_cbor_encoded(slot) + .get_async(slot) .await .unwrap() .unwrap(); diff --git a/nft_ingester/tests/process_accounts.rs b/nft_ingester/tests/process_accounts.rs index 1b0ce051e..b57203b95 100644 --- a/nft_ingester/tests/process_accounts.rs +++ b/nft_ingester/tests/process_accounts.rs @@ -274,7 +274,7 @@ mod tests { .rocks_env .storage .token_metadata_edition_cbor - .get_cbor_encoded(first_edition) + .get_async(first_edition) .await .unwrap() .unwrap(); @@ -282,7 +282,7 @@ mod tests { .rocks_env .storage .token_metadata_edition_cbor - .get_cbor_encoded(second_edition) + .get_async(second_edition) .await .unwrap() .unwrap(); diff --git a/nft_ingester/tests/sequence_consistent_tests.rs b/nft_ingester/tests/sequence_consistent_tests.rs index 588374cfa..67cf3f9b3 100644 --- a/nft_ingester/tests/sequence_consistent_tests.rs +++ b/nft_ingester/tests/sequence_consistent_tests.rs @@ -122,7 +122,10 @@ mod tests { let it = storage .tree_seq_idx .pairs_iterator(storage.tree_seq_idx.iter_start()); - let slots: Vec<_> = it.filter(|((k, _), _)| *k == tree_key).map(|((_, seq), _)| seq).collect(); + let slots: Vec<_> = it + .filter(|((k, _), _)| *k == tree_key) + .map(|((_, seq), _)| seq) + .collect(); assert_eq!(slots, vec![39738, 39739, 39740, 39741, 39742]); } } diff 
--git a/rocks-db/src/batch_savers.rs b/rocks-db/src/batch_savers.rs index 21746c532..10e768e71 100644 --- a/rocks-db/src/batch_savers.rs +++ b/rocks-db/src/batch_savers.rs @@ -147,7 +147,7 @@ impl BatchSaveStorage { pub fn store_edition(&mut self, key: Pubkey, edition: &TokenMetadataEdition) -> Result<()> { self.storage .token_metadata_edition_cbor - .merge_with_batch_cbor(&mut self.batch, key, edition)?; + .merge_with_batch(&mut self.batch, key, edition)?; Ok(()) } pub fn store_inscription(&mut self, inscription: &InscriptionInfo) -> Result<()> { diff --git a/rocks-db/src/clients/asset_client.rs b/rocks-db/src/clients/asset_client.rs index 0d35fded8..d053f2396 100644 --- a/rocks-db/src/clients/asset_client.rs +++ b/rocks-db/src/clients/asset_client.rs @@ -308,7 +308,7 @@ impl Storage { ) -> Result> { let first_batch = self .token_metadata_edition_cbor - .batch_get_cbor(edition_keys) + .batch_get(edition_keys) .await?; let mut edition_data_list = Vec::new(); let mut parent_keys = Vec::new(); @@ -333,7 +333,7 @@ impl Storage { if !parent_keys.is_empty() { let master_edition_map = self .token_metadata_edition_cbor - .batch_get_cbor(parent_keys) + .batch_get(parent_keys) .await? .into_iter() .filter_map(|e| { diff --git a/rocks-db/src/clients/asset_streaming_client.rs b/rocks-db/src/clients/asset_streaming_client.rs index fe2c35df9..1f00f4c64 100644 --- a/rocks-db/src/clients/asset_streaming_client.rs +++ b/rocks-db/src/clients/asset_streaming_client.rs @@ -178,7 +178,7 @@ async fn get_complete_asset_details( let token_metadata_edition = if let Some(edition_address) = static_data.edition_address { Storage::column::(backend.clone(), metrics.clone()) - .get_cbor_encoded(edition_address) + .get_async(edition_address) .await? } else { None @@ -188,7 +188,7 @@ async fn get_complete_asset_details( Some(TokenMetadataEdition::MasterEdition(master_edition)) => (None, Some(master_edition)), Some(TokenMetadataEdition::EditionV1(edition)) => { let parent = Storage::column::(backend.clone(), metrics.clone()) - .get_cbor_encoded(edition.parent) + .get_async(edition.parent) .await?; let master_edition = if let Some(TokenMetadataEdition::MasterEdition(master_edition)) = parent { diff --git a/rocks-db/src/clients/batch_client.rs b/rocks-db/src/clients/batch_client.rs index 544d12ffe..24a797ede 100644 --- a/rocks-db/src/clients/batch_client.rs +++ b/rocks-db/src/clients/batch_client.rs @@ -571,14 +571,14 @@ impl Storage { )?; } if let Some(edition) = data.edition { - self.token_metadata_edition_cbor.merge_with_batch_cbor( + self.token_metadata_edition_cbor.merge_with_batch( &mut batch, edition.key, &TokenMetadataEdition::EditionV1(edition), )?; } if let Some(master_edition) = data.master_edition { - self.token_metadata_edition_cbor.merge_with_batch_cbor( + self.token_metadata_edition_cbor.merge_with_batch( &mut batch, master_edition.key, &TokenMetadataEdition::MasterEdition(master_edition), @@ -587,11 +587,8 @@ impl Storage { if let Some(off_chain_data) = data.offchain_data { let url = off_chain_data.url.clone(); let off_chain_data = OffChainData::from(off_chain_data); - self.asset_offchain_data.merge_with_batch_flatbuffers( - &mut batch, - url, - &off_chain_data, - )?; + self.asset_offchain_data + .merge_with_batch(&mut batch, url, &off_chain_data)?; } if let Some(spl_mint) = data.spl_mint { self.spl_mints diff --git a/rocks-db/src/clients/transaction_client.rs b/rocks-db/src/clients/transaction_client.rs index 33775fc4e..f7920ae42 100644 --- a/rocks-db/src/clients/transaction_client.rs +++ 
b/rocks-db/src/clients/transaction_client.rs
@@ -39,8 +39,13 @@ impl Storage {
         is_from_finalized_source: bool,
     ) -> Result<(), StorageError> {
         let mut batch = rocksdb::WriteBatch::default();
-        self.store_transaction_result_with_batch(&mut batch, tx, with_signatures, is_from_finalized_source)
-            .await?;
+        self.store_transaction_result_with_batch(
+            &mut batch,
+            tx,
+            with_signatures,
+            is_from_finalized_source,
+        )
+        .await?;
         self.write_batch(batch)
             .await
             .map_err(|e| StorageError::Common(e.to_string()))?;
@@ -137,7 +142,7 @@ impl Storage {
         }
 
         if let Some(ref offchain_data) = update.offchain_data_update {
-            if let Err(e) = self.asset_offchain_data.merge_with_batch_flatbuffers(
+            if let Err(e) = self.asset_offchain_data.merge_with_batch(
                 batch,
                 offchain_data.url.clone().expect("Url should not be empty"),
                 offchain_data,
diff --git a/rocks-db/src/column.rs b/rocks-db/src/column.rs
index c1fb2c555..a1d562885 100644
--- a/rocks-db/src/column.rs
+++ b/rocks-db/src/column.rs
@@ -1,11 +1,7 @@
 use std::fmt::Debug;
 use std::{collections::HashMap, marker::PhantomData, sync::Arc};
 
-use bincode::{deserialize, serialize};
-
-use crate::columns::offchain_data::OffChainData;
-use crate::generated::offchain_data_generated::off_chain_data;
-use crate::{Result, StorageError, ToFlatbuffersConverter, BATCH_GET_ACTION, ROCKS_COMPONENT};
+use crate::{Result, StorageError, BATCH_GET_ACTION, ROCKS_COMPONENT};
 use metrics_utils::red::RequestErrorDurationMetrics;
 use rocksdb::{BoundColumnFamily, DBIteratorWithThreadMode, DB};
 use serde::{de::DeserializeOwned, Serialize};
@@ -22,6 +18,19 @@ pub trait TypedColumn {
 
     fn encode_key(index: Self::KeyType) -> Vec<u8>;
 
     fn decode_key(bytes: Vec<u8>) -> Result<Self::KeyType>;
+
+    /// Decodes the value for a column from its bytes.
+    /// Uses bincode by default; should be overridden if the type uses any other format.
+    fn decode(bytes: &[u8]) -> Result<Self::ValueType> {
+        let decoded = bincode::deserialize::<Self::ValueType>(bytes)?;
+        Ok(decoded)
+    }
+
+    /// Encodes the value for a column to bytes.
+    /// Uses bincode by default; should be overridden if the type uses any other format.
+    fn encode(v: &Self::ValueType) -> Result<Vec<u8>> {
+        bincode::serialize(v).map_err(|e| e.into())
+    }
 }
 
 #[derive(Debug)]
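The `decode`/`encode` pair above is the whole codec contract: columns that are happy with bincode inherit the defaults, and any other on-disk format is two method overrides away. A hedged sketch of a hypothetical JSON-backed column — the `JsonBlob` type, its column-family name, and the serde_json dependency are illustrative, not part of this patch:

    pub struct JsonBlob;

    impl TypedColumn for JsonBlob {
        type KeyType = String;
        type ValueType = serde_json::Value;
        const NAME: &'static str = "JSON_BLOB"; // hypothetical column family

        fn encode_key(key: String) -> Vec<u8> {
            encode_string(key)
        }

        fn decode_key(bytes: Vec<u8>) -> Result<Self::KeyType> {
            decode_string(bytes)
        }

        // Only the codec changes; every get/put/merge helper on Column<C>
        // picks these overrides up automatically.
        fn decode(bytes: &[u8]) -> Result<Self::ValueType> {
            serde_json::from_slice(bytes).map_err(|e| StorageError::Common(e.to_string()))
        }

        fn encode(v: &Self::ValueType) -> Result<Vec<u8>> {
            serde_json::to_vec(v).map_err(|e| StorageError::Common(e.to_string()))
        }
    }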
@@ -51,36 +60,14 @@ where
             .map_err(|e| StorageError::Common(e.to_string()))?
     }
 
-    pub async fn put_cbor_encoded(&self, key: C::KeyType, value: C::ValueType) -> Result<()> {
-        let backend = self.backend.clone();
-        tokio::task::spawn_blocking(move || Self::put_cbor_encoded_sync(backend, key, value))
-            .await
-            .map_err(|e| StorageError::Common(e.to_string()))?
-    }
-
     pub fn put(&self, key: C::KeyType, value: C::ValueType) -> Result<()> {
         Self::put_sync(self.backend.clone(), key, value)
     }
 
-    fn put_cbor_encoded_sync(backend: Arc<DB>, key: C::KeyType, value: C::ValueType) -> Result<()> {
-        let serialized_value =
-            serde_cbor::to_vec(&value).map_err(|e| StorageError::Common(e.to_string()))?;
-        Self::put_sync_raw(backend, key, serialized_value, C::NAME)
-    }
-
     fn put_sync(backend: Arc<DB>, key: C::KeyType, value: C::ValueType) -> Result<()> {
-        let serialized_value = serialize(&value)?;
-        Self::put_sync_raw(backend, key, serialized_value, C::NAME)
-    }
-
-    fn put_sync_raw(
-        backend: Arc<DB>,
-        key: C::KeyType,
-        serialized_value: Vec<u8>,
-        col_name: &str,
-    ) -> Result<()> {
+        let serialized_value = C::encode(&value)?;
         backend.put_cf(
-            &backend.cf_handle(col_name).unwrap(),
+            &backend.cf_handle(C::NAME).unwrap(),
             C::encode_key(key),
             serialized_value,
         )?;
@@ -95,7 +82,7 @@ where
     }
 
     pub fn merge_sync(backend: Arc<DB>, key: C::KeyType, value: C::ValueType) -> Result<()> {
-        let serialized_value = serialize(&value)?;
+        let serialized_value = C::encode(&value)?;
 
         backend.merge_cf(
             &backend.cf_handle(C::NAME).unwrap(),
@@ -106,44 +93,15 @@ where
         Ok(())
     }
 
-    fn merge_with_batch_generic<F>(
-        &self,
-        batch: &mut rocksdb::WriteBatchWithTransaction<false>,
-        key: C::KeyType,
-        value: &C::ValueType,
-        serialize_fn: F,
-    ) -> Result<()>
-    where
-        F: Fn(&C::ValueType) -> Result<Vec<u8>>,
-    {
-        let serialized_value = serialize_fn(value)?;
-        batch.merge_cf(&self.handle(), C::encode_key(key), serialized_value);
-        Ok(())
-    }
-
     pub fn merge_with_batch(
         &self,
         batch: &mut rocksdb::WriteBatchWithTransaction<false>,
         key: C::KeyType,
         value: &C::ValueType,
     ) -> Result<()> {
-        self.merge_with_batch_generic(batch, key, value, |v| {
-            serialize(v).map_err(|e| StorageError::Common(e.to_string()))
-        })
-    }
-
-    pub fn merge_with_batch_flatbuffers(
-        &self,
-        batch: &mut rocksdb::WriteBatchWithTransaction<false>,
-        key: C::KeyType,
-        value: &C::ValueType,
-    ) -> Result<()>
-    where
-        C::ValueType: for<'a> ToFlatbuffersConverter<'a>,
-    {
-        self.merge_with_batch_generic(batch, key, value, |v| {
-            Ok(ToFlatbuffersConverter::convert_to_fb_bytes(v))
-        })
+        let serialized_value = C::encode(value).map_err(|e| StorageError::Common(e.to_string()))?;
+        batch.merge_cf(&self.handle(), C::encode_key(key), serialized_value);
+        Ok(())
     }
 
     pub(crate) fn merge_with_batch_raw(
@@ -156,58 +114,31 @@ where
         Ok(())
     }
 
-    pub fn merge_with_batch_cbor(
-        &self,
-        batch: &mut rocksdb::WriteBatchWithTransaction<false>,
-        key: C::KeyType,
-        value: &C::ValueType,
-    ) -> Result<()> {
-        self.merge_with_batch_generic(batch, key, value, |v| {
-            serde_cbor::to_vec(v).map_err(|e| StorageError::Common(e.to_string()))
-        })
-    }
-
     pub(crate) fn put_with_batch(
         &self,
         batch: &mut rocksdb::WriteBatchWithTransaction<false>,
         key: C::KeyType,
         value: &C::ValueType,
     ) -> Result<()> {
-        let serialized_value = serialize(value)?;
+        let serialized_value = C::encode(value)?;
         batch.put_cf(&self.handle(), C::encode_key(key), serialized_value);
         Ok(())
     }
 
-    async fn merge_batch_generic<F>(
-        &self,
-        values: HashMap<C::KeyType, C::ValueType>,
-        serialize_fn: F,
-    ) -> Result<()>
-    where
-        F: Fn(&C::ValueType) -> Result<Vec<u8>> + Copy + Send + 'static,
-    {
+    pub async fn merge_batch(&self, values: HashMap<C::KeyType, C::ValueType>) -> Result<()> {
         let db = self.backend.clone();
         let values = values.clone();
-        tokio::task::spawn_blocking(move || {
-            Self::merge_batch_sync_generic(db, values, serialize_fn)
-        })
-        .await
-        .map_err(|e| StorageError::Common(e.to_string()))?
+ tokio::task::spawn_blocking(move || Self::merge_batch_sync(db, values)) + .await + .map_err(|e| StorageError::Common(e.to_string()))? } - fn merge_batch_sync_generic( - backend: Arc, - values: HashMap, - serialize_fn: F, - ) -> Result<()> - where - F: Fn(&C::ValueType) -> Result>, - { + fn merge_batch_sync(backend: Arc, values: HashMap) -> Result<()> { let mut batch = rocksdb::WriteBatchWithTransaction::::default(); for (k, v) in values.iter() { - let serialized_value = serialize_fn(v)?; + let serialized_value = C::encode(v).map_err(|e| StorageError::Common(e.to_string()))?; batch.merge_cf( &backend.cf_handle(C::NAME).unwrap(), C::encode_key(k.clone()), @@ -218,46 +149,18 @@ where Ok(()) } - pub async fn merge_batch(&self, values: HashMap) -> Result<()> { - self.merge_batch_generic(values, |v| { - serialize(v).map_err(|e| StorageError::Common(e.to_string())) - }) - .await - } - - pub async fn merge_batch_cbor(&self, values: HashMap) -> Result<()> { - self.merge_batch_generic(values, |v| { - serde_cbor::to_vec(v).map_err(|e| StorageError::Common(e.to_string())) - }) - .await - } - - async fn put_batch_generic( - &self, - values: HashMap, - serialize_fn: F, - ) -> Result<()> - where - F: Fn(&C::ValueType) -> Result> + Copy + Send + 'static, - { + pub async fn put_batch(&self, values: HashMap) -> Result<()> { let db = self.backend.clone(); let values = values.clone(); - tokio::task::spawn_blocking(move || Self::put_batch_sync_generic(db, values, serialize_fn)) + tokio::task::spawn_blocking(move || Self::put_batch_sync(db, values)) .await .map_err(|e| StorageError::Common(e.to_string()))? } - fn put_batch_sync_generic( - backend: Arc, - values: HashMap, - serialize_fn: F, - ) -> Result<()> - where - F: Fn(&C::ValueType) -> Result>, - { + fn put_batch_sync(backend: Arc, values: HashMap) -> Result<()> { let mut batch = rocksdb::WriteBatchWithTransaction::::default(); for (k, v) in values.iter() { - let serialized_value = serialize_fn(v)?; + let serialized_value = C::encode(v).map_err(|e| StorageError::Common(e.to_string()))?; batch.put_cf( &backend.cf_handle(C::NAME).unwrap(), C::encode_key(k.clone()), @@ -268,114 +171,43 @@ where Ok(()) } - pub async fn put_batch(&self, values: HashMap) -> Result<()> { - self.put_batch_generic(values, |v| { - serialize(v).map_err(|e| StorageError::Common(e.to_string())) - }) - .await - } - - pub async fn put_batch_cbor(&self, values: HashMap) -> Result<()> { - self.put_batch_generic(values, |v| { - serde_cbor::to_vec(v).map_err(|e| StorageError::Common(e.to_string())) - }) - .await + fn get_raw(backend: Arc, key: C::KeyType) -> Result>> { + let r = backend.get_cf(&backend.cf_handle(C::NAME).unwrap(), C::encode_key(key))?; + Ok(r) } - pub async fn put_batch_flatbuffers( - &self, - values: HashMap, - ) -> Result<()> - where - C::ValueType: for<'a> ToFlatbuffersConverter<'a>, - { - self.put_batch_generic(values, |v| { - Ok(ToFlatbuffersConverter::convert_to_fb_bytes(v)) - }) - .await - } - - pub async fn get_cbor_encoded(&self, key: C::KeyType) -> Result> { + pub async fn get_async(&self, key: C::KeyType) -> Result> { let mut result = Ok(None); - let backend = self.backend.clone(); - let res = tokio::task::spawn_blocking(move || Self::get_raw(backend, key)) - .await - .map_err(|e| StorageError::Common(e.to_string()))??; - - if let Some(serialized_value) = res { - let value = serde_cbor::from_slice(&serialized_value) - .map_err(|e| StorageError::Common(e.to_string()))?; + let self_clone = self.clone(); + if let Some(serialized_value) = + 
+            tokio::task::spawn_blocking(move || Self::get_raw(backend, key))
+                .await
+                .map_err(|e| StorageError::Common(e.to_string()))??
+        {
+            let value = C::decode(&serialized_value)?;
             result = Ok(Some(value))
         }
         result
     }
 
-    fn get_raw(backend: Arc<DB>, key: C::KeyType) -> Result<Option<Vec<u8>>> {
-        let r = backend.get_cf(&backend.cf_handle(C::NAME).unwrap(), C::encode_key(key))?;
-        Ok(r)
-    }
-
     pub fn get(&self, key: C::KeyType) -> Result<Option<C::ValueType>> {
         let mut result = Ok(None);
         if let Some(serialized_value) = self.backend.get_cf(&self.handle(), C::encode_key(key))? {
-            let value = deserialize(&serialized_value)?;
+            let value = C::decode(&serialized_value)?;
             result = Ok(Some(value))
         }
         result
     }
 
-    pub fn get_flatbuffers_encoded(&self, key: C::KeyType) -> Result<Option<C::ValueType>> {
-        let type_name = std::any::type_name::<C::ValueType>().split("::").last().unwrap();
-
-        let value = if let Some(serialized_value) =
-            self.backend.get_cf(&self.handle(), C::encode_key(key))?
-        {
-            // A macro could generate this match instead of hand-writing a dispatch arm
-            // per value-type name.
-            let deserialized_data = match type_name {
-                "OffChainData" => {
-                    let fb_structure = off_chain_data::root_as_off_chain_data(&serialized_value)
-                        .map_err(|e| StorageError::Common(e.to_string()))?;
-                    OffChainData::from(fb_structure)
-                }
-                _ => unreachable!(),
-            };
-
-            // Safety: `deserialized_data` is of the same type as `C::ValueType`, because the
-            // match arm above is selected by `C::ValueType`'s own type name.
-            unsafe {
-                // TODO: a raw pointer cast could be used here instead of boxing.
-                Some(std::mem::transmute::<_, Box<C::ValueType>>(Box::new(
-                    deserialized_data,
-                )))
-            }
-        } else {
-            None
-        };
-
-        Ok(value.map(|val| *val))
-    }
-
-    async fn batch_get_generic<F>(
-        &self,
-        keys: Vec<C::KeyType>,
-        deserialize_fn: F,
-    ) -> Result<Vec<Option<C::ValueType>>>
-    where
-        F: Fn(&[u8]) -> Result<C::ValueType> + Copy + Send + 'static,
-    {
+    pub async fn batch_get(&self, keys: Vec<C::KeyType>) -> Result<Vec<Option<C::ValueType>>> {
         let start_time = chrono::Utc::now();
         let db = self.backend.clone();
         let keys = keys.clone();
-        match tokio::task::spawn_blocking(move || {
-            Self::batch_get_sync_generic(db, keys, deserialize_fn)
-        })
-        .await
-        {
+        match tokio::task::spawn_blocking(move || Self::batch_get_sync(db, keys)).await {
             Ok(res) => {
                 self.red_metrics.observe_request(
                     ROCKS_COMPONENT,
@@ -393,14 +225,10 @@ where
         }
     }
 
-    fn batch_get_sync_generic<F>(
+    fn batch_get_sync(
         backend: Arc<DB>,
         keys: Vec<C::KeyType>,
-        deserialize_fn: F,
-    ) -> Result<Vec<Option<C::ValueType>>>
-    where
-        F: Fn(&[u8]) -> Result<C::ValueType>,
-    {
+    ) -> Result<Vec<Option<C::ValueType>>> {
         backend
             .batched_multi_get_cf(
                 &backend.cf_handle(C::NAME).unwrap(),
@@ -410,27 +238,13 @@ where
             .into_iter()
             .map(|res| {
                 res.map_err(StorageError::from).and_then(|opt| {
-                    opt.map(|pinned| deserialize_fn(pinned.as_ref()))
+                    opt.map(|pinned| C::decode(pinned.as_ref()).map_err(StorageError::from))
                         .transpose()
                 })
             })
             .collect()
     }
 
-    pub async fn batch_get(&self, keys: Vec<C::KeyType>) -> Result<Vec<Option<C::ValueType>>> {
-        self.batch_get_generic(keys, |bytes| {
-            deserialize::<C::ValueType>(bytes).map_err(StorageError::from)
-        })
-        .await
-    }
-
-    pub async fn batch_get_cbor(&self, keys: Vec<C::KeyType>) -> Result<Vec<Option<C::ValueType>>> {
-        self.batch_get_generic(keys, |bytes| {
-            serde_cbor::from_slice(bytes).map_err(|e| StorageError::Common(e.to_string()))
-        })
-        .await
-    }
-
     #[allow(clippy::type_complexity)]
     fn to_pairs_generic(
         &self,
@@ -440,7 +254,7 @@ where
         it.filter_map(|r| r.ok())
             .filter_map(|(key_bytes, val_bytes)| {
                 let k_op = C::decode_key(key_bytes.to_vec()).ok();
-                let v_op = deserialize::<C::ValueType>(&val_bytes).ok();
+                let v_op = C::decode(&val_bytes).ok();
                 k_op.zip(v_op)
             })
             .take(num)
@@ -454,7 +268,7 @@ where
         it.filter_map(|r| r.ok())
            .filter_map(|(key_bytes, val_bytes)| {
                let k_op = C::decode_key(key_bytes.to_vec()).ok();
-                let v_op = deserialize::<C::ValueType>(&val_bytes).ok();
+                let v_op = C::decode(&val_bytes).ok();
                k_op.zip(v_op)
            })
     }
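With the codec behind the trait, call sites shrink to the plain accessors regardless of the on-disk format. A sketch of what a caller looks like after this patch — `storage`, `edition_key`, `url`, and `off_chain_data` are hypothetical variables; the column names and methods are the ones used elsewhere in this series:

    // TokenMetadataEdition decodes through its serde_cbor override...
    let edition = storage
        .token_metadata_edition_cbor
        .get_async(edition_key)
        .await?;

    // ...while OffChainData encodes through its flatbuffers override;
    // the calling code is identical either way.
    let mut updates = HashMap::new();
    updates.insert(url.clone(), off_chain_data.clone());
    storage.asset_offchain_data.put_batch(updates).await?;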
diff --git a/rocks-db/src/columns/editions.rs b/rocks-db/src/columns/editions.rs
index 3055b9ed4..bd1db9b3e 100644
--- a/rocks-db/src/columns/editions.rs
+++ b/rocks-db/src/columns/editions.rs
@@ -1,4 +1,5 @@
 use crate::column::TypedColumn;
+use crate::errors::StorageError;
 use crate::key_encoders::{decode_pubkey, encode_pubkey};
 use crate::Result;
 use entities::enums::TokenMetadataEdition;
@@ -19,6 +20,14 @@ impl TypedColumn for TokenMetadataEdition {
     fn decode_key(bytes: Vec<u8>) -> Result<Self::KeyType> {
         decode_pubkey(bytes)
     }
+
+    fn decode(bytes: &[u8]) -> Result<Self::ValueType> {
+        serde_cbor::from_slice(bytes).map_err(|e| StorageError::Common(e.to_string()))
+    }
+
+    fn encode(v: &Self::ValueType) -> Result<Vec<u8>> {
+        serde_cbor::to_vec(&v).map_err(|e| StorageError::Common(e.to_string()))
+    }
 }
 
 pub fn merge_token_metadata_edition(
diff --git a/rocks-db/src/columns/offchain_data.rs b/rocks-db/src/columns/offchain_data.rs
index 081aefaec..66dc7abbc 100644
--- a/rocks-db/src/columns/offchain_data.rs
+++ b/rocks-db/src/columns/offchain_data.rs
@@ -1,4 +1,5 @@
 use crate::column::TypedColumn;
+use crate::errors::StorageError;
 use crate::generated::offchain_data_generated::off_chain_data as fb;
 use crate::key_encoders::{decode_string, encode_string};
 use crate::{Result, ToFlatbuffersConverter};
@@ -159,4 +160,14 @@ impl TypedColumn for OffChainData {
     fn decode_key(bytes: Vec<u8>) -> Result<Self::KeyType> {
         decode_string(bytes)
     }
+
+    fn decode(bytes: &[u8]) -> Result<Self::ValueType> {
+        let fb_structure =
+            fb::root_as_off_chain_data(bytes).map_err(|e| StorageError::Common(e.to_string()))?;
+        Ok(OffChainData::from(fb_structure))
+    }
+
+    fn encode(v: &Self::ValueType) -> Result<Vec<u8>> {
+        Ok(ToFlatbuffersConverter::convert_to_fb_bytes(v))
+    }
 }
diff --git a/rocks-db/src/columns/raw_block.rs b/rocks-db/src/columns/raw_block.rs
index f65b027d2..5db0400fd 100644
--- a/rocks-db/src/columns/raw_block.rs
+++ b/rocks-db/src/columns/raw_block.rs
@@ -1,5 +1,6 @@
 use std::sync::Arc;
 
+use crate::errors::StorageError;
 use crate::SlotStorage;
 use crate::{column::TypedColumn, key_encoders};
 use async_trait::async_trait;
@@ -20,6 +21,14 @@ impl TypedColumn for RawBlock {
     fn decode_key(bytes: Vec<u8>) -> crate::Result<Self::KeyType> {
         key_encoders::decode_u64(bytes)
     }
+
+    fn decode(bytes: &[u8]) -> crate::Result<Self::ValueType> {
+        serde_cbor::from_slice(bytes).map_err(|e| StorageError::Common(e.to_string()))
+    }
+
+    fn encode(v: &Self::ValueType) -> crate::Result<Vec<u8>> {
+        serde_cbor::to_vec(&v).map_err(|e| StorageError::Common(e.to_string()))
+    }
 }
 
 #[async_trait]
@@ -31,7 +40,7 @@ impl BlockProducer for SlotStorage {
     ) -> Result<RawBlock, InterfaceStorageError> {
         let raw_block = self
             .raw_blocks_cbor
-            .get_cbor_encoded(slot)
+            .get_async(slot)
             .await
             .map_err(|e| InterfaceStorageError::Common(e.to_string()))?;
         if raw_block.is_none() {
diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs
index aaee8cb0c..74f7c7e26 100644
--- a/rocks-db/src/migrator.rs
+++ b/rocks-db/src/migrator.rs
@@ -362,12 +362,6 @@ impl<'a> MigrationApplier<'a> {
         'static + Clone + ToFlatbuffersConverter<'b>,
         <<M as RocksMigration>::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq,
     {
-        match M::SERIALIZATION_TYPE {
-            SerializationType::Bincode => column.put_batch(std::mem::take(batch)).await,
-            SerializationType::Cbor => column.put_batch_cbor(std::mem::take(batch)).await,
-            SerializationType::Flatbuffers => {
-                column.put_batch_flatbuffers(std::mem::take(batch)).await
-            }
-        }
+        column.put_batch(std::mem::take(batch)).await
     }
 }
diff
--git a/rocks-db/tests/asset_streaming_client_tests.rs b/rocks-db/tests/asset_streaming_client_tests.rs index 6efba3a8e..c5bbaad3a 100644 --- a/rocks-db/tests/asset_streaming_client_tests.rs +++ b/rocks-db/tests/asset_streaming_client_tests.rs @@ -110,7 +110,7 @@ mod tests { // let blockhash = "blockhash"; // storage // .raw_blocks_cbor - // .put_cbor_encoded( + // .put( // slot, // RawBlock { // slot, diff --git a/rocks-db/tests/migration_tests.rs b/rocks-db/tests/migration_tests.rs index 8a6e7fb98..73fea707c 100644 --- a/rocks-db/tests/migration_tests.rs +++ b/rocks-db/tests/migration_tests.rs @@ -87,7 +87,7 @@ mod tests { print!("migrated is {:?}", migrated_v1.to_vec()); let migrated_v1 = new_storage .asset_offchain_data - .get_flatbuffers_encoded(v1.url.clone()) + .get(v1.url.clone()) .expect("should get value successfully") .expect("the value should be not empty"); assert_eq!( @@ -99,7 +99,7 @@ mod tests { assert_eq!(migrated_v1.last_read_at, 0); let migrated_v2 = new_storage .asset_offchain_data - .get_flatbuffers_encoded(v2.url.clone()) + .get(v2.url.clone()) .expect("should get value successfully") .expect("the value should be not empty"); assert_eq!( From 83d3681fb7536182907ea16af923d4ee4abaf41f Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Thu, 26 Dec 2024 19:19:00 +0000 Subject: [PATCH 11/15] fmt for generated data --- .../src/generated/offchain_data_generated.rs | 640 ++++++++++-------- 1 file changed, 348 insertions(+), 292 deletions(-) diff --git a/rocks-db/src/generated/offchain_data_generated.rs b/rocks-db/src/generated/offchain_data_generated.rs index 7e2410cca..9f32970bb 100644 --- a/rocks-db/src/generated/offchain_data_generated.rs +++ b/rocks-db/src/generated/offchain_data_generated.rs @@ -1,10 +1,9 @@ // automatically generated by the FlatBuffers compiler, do not modify - // @generated -use core::mem; use core::cmp::Ordering; +use core::mem; extern crate flatbuffers; use self::flatbuffers::{EndianScalar, Follow}; @@ -12,315 +11,372 @@ use self::flatbuffers::{EndianScalar, Follow}; #[allow(unused_imports, dead_code)] pub mod off_chain_data { - use core::mem; - use core::cmp::Ordering; + use core::cmp::Ordering; + use core::mem; - extern crate flatbuffers; - use self::flatbuffers::{EndianScalar, Follow}; + extern crate flatbuffers; + use self::flatbuffers::{EndianScalar, Follow}; -#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] -pub const ENUM_MIN_STORAGE_MUTABILITY: i8 = 0; -#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] -pub const ENUM_MAX_STORAGE_MUTABILITY: i8 = 1; -#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] -#[allow(non_camel_case_types)] -pub const ENUM_VALUES_STORAGE_MUTABILITY: [StorageMutability; 2] = [ - StorageMutability::Immutable, - StorageMutability::Mutable, -]; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MIN_STORAGE_MUTABILITY: i8 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_STORAGE_MUTABILITY: i8 = 1; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." 
+ )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_STORAGE_MUTABILITY: [StorageMutability; 2] = + [StorageMutability::Immutable, StorageMutability::Mutable]; -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] -#[repr(transparent)] -pub struct StorageMutability(pub i8); -#[allow(non_upper_case_globals)] -impl StorageMutability { - pub const Immutable: Self = Self(0); - pub const Mutable: Self = Self(1); + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct StorageMutability(pub i8); + #[allow(non_upper_case_globals)] + impl StorageMutability { + pub const Immutable: Self = Self(0); + pub const Mutable: Self = Self(1); - pub const ENUM_MIN: i8 = 0; - pub const ENUM_MAX: i8 = 1; - pub const ENUM_VALUES: &'static [Self] = &[ - Self::Immutable, - Self::Mutable, - ]; - /// Returns the variant's name or "" if unknown. - pub fn variant_name(self) -> Option<&'static str> { - match self { - Self::Immutable => Some("Immutable"), - Self::Mutable => Some("Mutable"), - _ => None, + pub const ENUM_MIN: i8 = 0; + pub const ENUM_MAX: i8 = 1; + pub const ENUM_VALUES: &'static [Self] = &[Self::Immutable, Self::Mutable]; + /// Returns the variant's name or "" if unknown. + pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::Immutable => Some("Immutable"), + Self::Mutable => Some("Mutable"), + _ => None, + } + } } - } -} -impl core::fmt::Debug for StorageMutability { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - if let Some(name) = self.variant_name() { - f.write_str(name) - } else { - f.write_fmt(format_args!("", self.0)) + impl core::fmt::Debug for StorageMutability { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } } - } -} -impl<'a> flatbuffers::Follow<'a> for StorageMutability { - type Inner = Self; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - let b = flatbuffers::read_scalar_at::(buf, loc); - Self(b) - } -} - -impl flatbuffers::Push for StorageMutability { - type Output = StorageMutability; - #[inline] - unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { - flatbuffers::emplace_scalar::(dst, self.0); + impl<'a> flatbuffers::Follow<'a> for StorageMutability { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = flatbuffers::read_scalar_at::(buf, loc); + Self(b) + } } -} - -impl flatbuffers::EndianScalar for StorageMutability { - type Scalar = i8; - #[inline] - fn to_little_endian(self) -> i8 { - self.0.to_le() - } - #[inline] - #[allow(clippy::wrong_self_convention)] - fn from_little_endian(v: i8) -> Self { - let b = i8::from_le(v); - Self(b) - } -} -impl<'a> flatbuffers::Verifiable for StorageMutability { - #[inline] - fn run_verifier( - v: &mut flatbuffers::Verifier, pos: usize - ) -> Result<(), flatbuffers::InvalidFlatbuffer> { - use self::flatbuffers::Verifiable; - i8::run_verifier(v, pos) - } -} + impl flatbuffers::Push for StorageMutability { + type Output = StorageMutability; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + flatbuffers::emplace_scalar::(dst, self.0); + } + } -impl flatbuffers::SimpleToVerifyInSlice for StorageMutability {} -pub enum OffChainDataOffset {} -#[derive(Copy, Clone, PartialEq)] + impl flatbuffers::EndianScalar for StorageMutability { + type Scalar = i8; + #[inline] + fn 
to_little_endian(self) -> i8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: i8) -> Self { + let b = i8::from_le(v); + Self(b) + } + } -pub struct OffChainData<'a> { - pub _tab: flatbuffers::Table<'a>, -} + impl<'a> flatbuffers::Verifiable for StorageMutability { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + i8::run_verifier(v, pos) + } + } -impl<'a> flatbuffers::Follow<'a> for OffChainData<'a> { - type Inner = OffChainData<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: flatbuffers::Table::new(buf, loc) } - } -} + impl flatbuffers::SimpleToVerifyInSlice for StorageMutability {} + pub enum OffChainDataOffset {} + #[derive(Copy, Clone, PartialEq)] -impl<'a> OffChainData<'a> { - pub const VT_STORAGE_MUTABILITY: flatbuffers::VOffsetT = 4; - pub const VT_URL: flatbuffers::VOffsetT = 6; - pub const VT_METADATA: flatbuffers::VOffsetT = 8; - pub const VT_LAST_READ_AT: flatbuffers::VOffsetT = 10; + pub struct OffChainData<'a> { + pub _tab: flatbuffers::Table<'a>, + } - #[inline] - pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { - OffChainData { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args OffChainDataArgs<'args> - ) -> flatbuffers::WIPOffset> { - let mut builder = OffChainDataBuilder::new(_fbb); - builder.add_last_read_at(args.last_read_at); - if let Some(x) = args.metadata { builder.add_metadata(x); } - if let Some(x) = args.url { builder.add_url(x); } - builder.add_storage_mutability(args.storage_mutability); - builder.finish() - } + impl<'a> flatbuffers::Follow<'a> for OffChainData<'a> { + type Inner = OffChainData<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: flatbuffers::Table::new(buf, loc), + } + } + } + impl<'a> OffChainData<'a> { + pub const VT_STORAGE_MUTABILITY: flatbuffers::VOffsetT = 4; + pub const VT_URL: flatbuffers::VOffsetT = 6; + pub const VT_METADATA: flatbuffers::VOffsetT = 8; + pub const VT_LAST_READ_AT: flatbuffers::VOffsetT = 10; - #[inline] - pub fn storage_mutability(&self) -> StorageMutability { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(OffChainData::VT_STORAGE_MUTABILITY, Some(StorageMutability::Immutable)).unwrap()} - } - #[inline] - pub fn url(&self) -> Option<&'a str> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::>(OffChainData::VT_URL, None)} - } - #[inline] - pub fn metadata(&self) -> Option<&'a str> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::>(OffChainData::VT_METADATA, None)} - } - #[inline] - pub fn last_read_at(&self) -> i64 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(OffChainData::VT_LAST_READ_AT, Some(0)).unwrap()} - } -} + #[inline] + pub unsafe fn init_from_table(table: flatbuffers::Table<'a>) -> Self { + OffChainData { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: 
flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args OffChainDataArgs<'args>, + ) -> flatbuffers::WIPOffset> { + let mut builder = OffChainDataBuilder::new(_fbb); + builder.add_last_read_at(args.last_read_at); + if let Some(x) = args.metadata { + builder.add_metadata(x); + } + if let Some(x) = args.url { + builder.add_url(x); + } + builder.add_storage_mutability(args.storage_mutability); + builder.finish() + } -impl flatbuffers::Verifiable for OffChainData<'_> { - #[inline] - fn run_verifier( - v: &mut flatbuffers::Verifier, pos: usize - ) -> Result<(), flatbuffers::InvalidFlatbuffer> { - use self::flatbuffers::Verifiable; - v.visit_table(pos)? - .visit_field::("storage_mutability", Self::VT_STORAGE_MUTABILITY, false)? - .visit_field::>("url", Self::VT_URL, false)? - .visit_field::>("metadata", Self::VT_METADATA, false)? - .visit_field::("last_read_at", Self::VT_LAST_READ_AT, false)? - .finish(); - Ok(()) - } -} -pub struct OffChainDataArgs<'a> { - pub storage_mutability: StorageMutability, - pub url: Option>, - pub metadata: Option>, - pub last_read_at: i64, -} -impl<'a> Default for OffChainDataArgs<'a> { - #[inline] - fn default() -> Self { - OffChainDataArgs { - storage_mutability: StorageMutability::Immutable, - url: None, - metadata: None, - last_read_at: 0, + #[inline] + pub fn storage_mutability(&self) -> StorageMutability { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::( + OffChainData::VT_STORAGE_MUTABILITY, + Some(StorageMutability::Immutable), + ) + .unwrap() + } + } + #[inline] + pub fn url(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>(OffChainData::VT_URL, None) + } + } + #[inline] + pub fn metadata(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::>(OffChainData::VT_METADATA, None) + } + } + #[inline] + pub fn last_read_at(&self) -> i64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(OffChainData::VT_LAST_READ_AT, Some(0)) + .unwrap() + } + } } - } -} -pub struct OffChainDataBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { - fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, - start_: flatbuffers::WIPOffset, -} -impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> OffChainDataBuilder<'a, 'b, A> { - #[inline] - pub fn add_storage_mutability(&mut self, storage_mutability: StorageMutability) { - self.fbb_.push_slot::(OffChainData::VT_STORAGE_MUTABILITY, storage_mutability, StorageMutability::Immutable); - } - #[inline] - pub fn add_url(&mut self, url: flatbuffers::WIPOffset<&'b str>) { - self.fbb_.push_slot_always::>(OffChainData::VT_URL, url); - } - #[inline] - pub fn add_metadata(&mut self, metadata: flatbuffers::WIPOffset<&'b str>) { - self.fbb_.push_slot_always::>(OffChainData::VT_METADATA, metadata); - } - #[inline] - pub fn add_last_read_at(&mut self, last_read_at: i64) { - self.fbb_.push_slot::(OffChainData::VT_LAST_READ_AT, last_read_at, 0); - } - #[inline] - pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>) -> OffChainDataBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - OffChainDataBuilder { - fbb_: _fbb, - start_: start, + impl flatbuffers::Verifiable for 
OffChainData<'_> { + #[inline] + fn run_verifier( + v: &mut flatbuffers::Verifier, + pos: usize, + ) -> Result<(), flatbuffers::InvalidFlatbuffer> { + use self::flatbuffers::Verifiable; + v.visit_table(pos)? + .visit_field::( + "storage_mutability", + Self::VT_STORAGE_MUTABILITY, + false, + )? + .visit_field::>("url", Self::VT_URL, false)? + .visit_field::>( + "metadata", + Self::VT_METADATA, + false, + )? + .visit_field::("last_read_at", Self::VT_LAST_READ_AT, false)? + .finish(); + Ok(()) + } + } + pub struct OffChainDataArgs<'a> { + pub storage_mutability: StorageMutability, + pub url: Option>, + pub metadata: Option>, + pub last_read_at: i64, + } + impl<'a> Default for OffChainDataArgs<'a> { + #[inline] + fn default() -> Self { + OffChainDataArgs { + storage_mutability: StorageMutability::Immutable, + url: None, + metadata: None, + last_read_at: 0, + } + } } - } - #[inline] - pub fn finish(self) -> flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - flatbuffers::WIPOffset::new(o.value()) - } -} -impl core::fmt::Debug for OffChainData<'_> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let mut ds = f.debug_struct("OffChainData"); - ds.field("storage_mutability", &self.storage_mutability()); - ds.field("url", &self.url()); - ds.field("metadata", &self.metadata()); - ds.field("last_read_at", &self.last_read_at()); - ds.finish() - } -} -#[inline] -/// Verifies that a buffer of bytes contains a `OffChainData` -/// and returns it. -/// Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `root_as_off_chain_data_unchecked`. -pub fn root_as_off_chain_data(buf: &[u8]) -> Result { - flatbuffers::root::(buf) -} -#[inline] -/// Verifies that a buffer of bytes contains a size prefixed -/// `OffChainData` and returns it. -/// Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `size_prefixed_root_as_off_chain_data_unchecked`. -pub fn size_prefixed_root_as_off_chain_data(buf: &[u8]) -> Result { - flatbuffers::size_prefixed_root::(buf) -} -#[inline] -/// Verifies, with the given options, that a buffer of bytes -/// contains a `OffChainData` and returns it. -/// Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `root_as_off_chain_data_unchecked`. -pub fn root_as_off_chain_data_with_opts<'b, 'o>( - opts: &'o flatbuffers::VerifierOptions, - buf: &'b [u8], -) -> Result, flatbuffers::InvalidFlatbuffer> { - flatbuffers::root_with_opts::>(opts, buf) -} -#[inline] -/// Verifies, with the given verifier options, that a buffer of -/// bytes contains a size prefixed `OffChainData` and returns -/// it. Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `root_as_off_chain_data_unchecked`. -pub fn size_prefixed_root_as_off_chain_data_with_opts<'b, 'o>( - opts: &'o flatbuffers::VerifierOptions, - buf: &'b [u8], -) -> Result, flatbuffers::InvalidFlatbuffer> { - flatbuffers::size_prefixed_root_with_opts::>(opts, buf) -} -#[inline] -/// Assumes, without verification, that a buffer of bytes contains a OffChainData and returns it. -/// # Safety -/// Callers must trust the given bytes do indeed contain a valid `OffChainData`. 
-pub unsafe fn root_as_off_chain_data_unchecked(buf: &[u8]) -> OffChainData { - flatbuffers::root_unchecked::(buf) -} -#[inline] -/// Assumes, without verification, that a buffer of bytes contains a size prefixed OffChainData and returns it. -/// # Safety -/// Callers must trust the given bytes do indeed contain a valid size prefixed `OffChainData`. -pub unsafe fn size_prefixed_root_as_off_chain_data_unchecked(buf: &[u8]) -> OffChainData { - flatbuffers::size_prefixed_root_unchecked::(buf) -} -#[inline] -pub fn finish_off_chain_data_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>( - fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, - root: flatbuffers::WIPOffset>) { - fbb.finish(root, None); -} + pub struct OffChainDataBuilder<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> { + fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + start_: flatbuffers::WIPOffset, + } + impl<'a: 'b, 'b, A: flatbuffers::Allocator + 'a> OffChainDataBuilder<'a, 'b, A> { + #[inline] + pub fn add_storage_mutability(&mut self, storage_mutability: StorageMutability) { + self.fbb_.push_slot::( + OffChainData::VT_STORAGE_MUTABILITY, + storage_mutability, + StorageMutability::Immutable, + ); + } + #[inline] + pub fn add_url(&mut self, url: flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::>(OffChainData::VT_URL, url); + } + #[inline] + pub fn add_metadata(&mut self, metadata: flatbuffers::WIPOffset<&'b str>) { + self.fbb_ + .push_slot_always::>(OffChainData::VT_METADATA, metadata); + } + #[inline] + pub fn add_last_read_at(&mut self, last_read_at: i64) { + self.fbb_ + .push_slot::(OffChainData::VT_LAST_READ_AT, last_read_at, 0); + } + #[inline] + pub fn new( + _fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + ) -> OffChainDataBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + OffChainDataBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + flatbuffers::WIPOffset::new(o.value()) + } + } -#[inline] -pub fn finish_size_prefixed_off_chain_data_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>(fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, root: flatbuffers::WIPOffset>) { - fbb.finish_size_prefixed(root, None); -} -} // pub mod OffChainData + impl core::fmt::Debug for OffChainData<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut ds = f.debug_struct("OffChainData"); + ds.field("storage_mutability", &self.storage_mutability()); + ds.field("url", &self.url()); + ds.field("metadata", &self.metadata()); + ds.field("last_read_at", &self.last_read_at()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `OffChainData` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_off_chain_data_unchecked`. + pub fn root_as_off_chain_data( + buf: &[u8], + ) -> Result { + flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `OffChainData` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_off_chain_data_unchecked`. 
+ pub fn size_prefixed_root_as_off_chain_data( + buf: &[u8], + ) -> Result { + flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `OffChainData` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_off_chain_data_unchecked`. + pub fn root_as_off_chain_data_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `OffChainData` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_off_chain_data_unchecked`. + pub fn size_prefixed_root_as_off_chain_data_with_opts<'b, 'o>( + opts: &'o flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, flatbuffers::InvalidFlatbuffer> { + flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a OffChainData and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `OffChainData`. + pub unsafe fn root_as_off_chain_data_unchecked(buf: &[u8]) -> OffChainData { + flatbuffers::root_unchecked::(buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed OffChainData and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `OffChainData`. 
+ pub unsafe fn size_prefixed_root_as_off_chain_data_unchecked(buf: &[u8]) -> OffChainData { + flatbuffers::size_prefixed_root_unchecked::(buf) + } + #[inline] + pub fn finish_off_chain_data_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish(root, None); + } + #[inline] + pub fn finish_size_prefixed_off_chain_data_buffer<'a, 'b, A: flatbuffers::Allocator + 'a>( + fbb: &'b mut flatbuffers::FlatBufferBuilder<'a, A>, + root: flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, None); + } +} // pub mod OffChainData From 5410cd2a22dbe89e4e4ab3250ab5bdea746d326f Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Thu, 26 Dec 2024 19:42:58 +0000 Subject: [PATCH 12/15] imports minor cleanup --- nft_ingester/src/json_worker.rs | 1 - nft_ingester/src/sequence_consistent.rs | 1 - rocks-db/src/column.rs | 1 - rocks-db/src/columns/asset.rs | 3 +-- rocks-db/src/columns/cl_items.rs | 5 +---- rocks-db/src/lib.rs | 2 +- 6 files changed, 3 insertions(+), 10 deletions(-) diff --git a/nft_ingester/src/json_worker.rs b/nft_ingester/src/json_worker.rs index b10539851..aa13e902d 100644 --- a/nft_ingester/src/json_worker.rs +++ b/nft_ingester/src/json_worker.rs @@ -1,5 +1,4 @@ use crate::api::dapi::rpc_asset_convertors::parse_files; -use crate::config::{setup_config, IngesterConfig, INGESTER_CONFIG_PREFIX}; use async_trait::async_trait; use entities::enums::TaskStatus; use entities::models::JsonDownloadTask; diff --git a/nft_ingester/src/sequence_consistent.rs b/nft_ingester/src/sequence_consistent.rs index 5b507d10d..ff22667b4 100644 --- a/nft_ingester/src/sequence_consistent.rs +++ b/nft_ingester/src/sequence_consistent.rs @@ -1,7 +1,6 @@ use entities::models::TreeState; use interface::signature_persistence::BlockConsumer; use interface::slot_getter::FinalizedSlotGetter; -use interface::slots_dumper::SlotsDumper; use interface::{ sequence_consistent::SequenceConsistentManager, signature_persistence::BlockProducer, }; diff --git a/rocks-db/src/column.rs b/rocks-db/src/column.rs index a1d562885..8c64a3f8f 100644 --- a/rocks-db/src/column.rs +++ b/rocks-db/src/column.rs @@ -179,7 +179,6 @@ where pub async fn get_async(&self, key: C::KeyType) -> Result> { let mut result = Ok(None); let backend = self.backend.clone(); - let self_clone = self.clone(); if let Some(serialized_value) = tokio::task::spawn_blocking(move || Self::get_raw(backend, key)) .await diff --git a/rocks-db/src/columns/asset.rs b/rocks-db/src/columns/asset.rs index 87081b073..c6325b3d5 100644 --- a/rocks-db/src/columns/asset.rs +++ b/rocks-db/src/columns/asset.rs @@ -10,10 +10,9 @@ use entities::models::{ use flatbuffers::{FlatBufferBuilder, WIPOffset}; use rocksdb::MergeOperands; use serde::{Deserialize, Serialize}; -use solana_sdk::bs58; use solana_sdk::{hash::Hash, pubkey::Pubkey}; use std::cmp::{max, Ordering}; -use tracing::{error, warn}; +use tracing::error; use crate::generated::asset_generated::asset as fb; use crate::key_encoders::{decode_pubkey, decode_u64_pubkey, encode_pubkey, encode_u64_pubkey}; diff --git a/rocks-db/src/columns/cl_items.rs b/rocks-db/src/columns/cl_items.rs index e48197758..0769741e5 100644 --- a/rocks-db/src/columns/cl_items.rs +++ b/rocks-db/src/columns/cl_items.rs @@ -1,13 +1,10 @@ use std::collections::HashMap; -use bincode::deserialize; use entities::models::{AssetSignature, AssetSignatureKey}; use rocksdb::MergeOperands; use serde::{Deserialize, Serialize}; -use solana_sdk::bs58; use 
solana_sdk::pubkey::Pubkey; -use spl_account_compression::events::ChangeLogEventV1; -use tracing::{debug, error, warn}; +use tracing::{debug, error}; use crate::column::TypedColumn; use crate::key_encoders::{decode_u64_pubkey, encode_u64_pubkey}; diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index 7512f304a..311218cb4 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -25,7 +25,7 @@ use asset::{ }; use rocksdb::{ColumnFamilyDescriptor, IteratorMode, Options, DB}; -use crate::migrator::{MigrationState, MigrationVersions, RocksMigration}; +use crate::migrator::{MigrationState, MigrationVersions}; use column::{Column, TypedColumn}; use entities::enums::TokenMetadataEdition; From 0fe96e5e6e92e5d377b7ded2ad7081b6818efcba Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Mon, 30 Dec 2024 17:42:01 +0000 Subject: [PATCH 13/15] cleaned up unnecessary trait --- rocks-db/src/migrator.rs | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index 74f7c7e26..14db759bc 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -35,14 +35,7 @@ pub trait RocksMigration { const VERSION: u64; const DESERIALIZATION_TYPE: SerializationType; const SERIALIZATION_TYPE: SerializationType; - type NewDataType: Sync - + Serialize - + DeserializeOwned - + Send - + TypedColumn - // that restrictrion breaks the backward compatibility for the previous migrations - // however, it's the simplest way to provide the migration to flatbuffers - + ToFlatbuffersConverter<'static>; + type NewDataType: Sync + Serialize + DeserializeOwned + Send + TypedColumn; type OldDataType: Sync + Serialize + DeserializeOwned @@ -185,8 +178,7 @@ impl<'a> MigrationApplier<'a> { async fn apply_migration(&self, _: M) -> Result<()> where - for<'b> <::NewDataType as TypedColumn>::ValueType: - 'static + Clone + ToFlatbuffersConverter<'b>, + for<'b> <::NewDataType as TypedColumn>::ValueType: 'static + Clone, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { if self.applied_migration_versions.contains(&M::VERSION) { @@ -278,8 +270,7 @@ impl<'a> MigrationApplier<'a> { column: &Column, ) -> Result<()> where - for<'b> <::NewDataType as TypedColumn>::ValueType: - 'static + Clone + ToFlatbuffersConverter<'b>, + for<'b> <::NewDataType as TypedColumn>::ValueType: 'static + Clone, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { let mut batch = HashMap::new(); @@ -358,8 +349,7 @@ impl<'a> MigrationApplier<'a> { column: &Column, ) -> Result<()> where - for<'b> <::NewDataType as TypedColumn>::ValueType: - 'static + Clone + ToFlatbuffersConverter<'b>, + for<'b> <::NewDataType as TypedColumn>::ValueType: 'static + Clone, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { column.put_batch(std::mem::take(batch)).await From 42460dcd33a1f79b5e8e475934e25c1d29ef9c52 Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Mon, 30 Dec 2024 17:44:27 +0000 Subject: [PATCH 14/15] cleaned up unnecessary lifetime spec --- rocks-db/src/migrator.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index 14db759bc..0e5103a6c 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -178,7 +178,7 @@ impl<'a> MigrationApplier<'a> { async fn apply_migration(&self, _: M) -> Result<()> where - for<'b> <::NewDataType as TypedColumn>::ValueType: 'static + Clone, + <::NewDataType as TypedColumn>::ValueType: 'static + Clone, 
<::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { if self.applied_migration_versions.contains(&M::VERSION) { @@ -270,7 +270,7 @@ impl<'a> MigrationApplier<'a> { column: &Column, ) -> Result<()> where - for<'b> <::NewDataType as TypedColumn>::ValueType: 'static + Clone, + <::NewDataType as TypedColumn>::ValueType: 'static + Clone, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { let mut batch = HashMap::new(); @@ -349,7 +349,7 @@ impl<'a> MigrationApplier<'a> { column: &Column, ) -> Result<()> where - for<'b> <::NewDataType as TypedColumn>::ValueType: 'static + Clone, + <::NewDataType as TypedColumn>::ValueType: 'static + Clone, <::NewDataType as TypedColumn>::KeyType: 'static + Hash + Eq, { column.put_batch(std::mem::take(batch)).await From d7d6e6ed159daf137c3f7cc11e6485fd37fc303c Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Mon, 6 Jan 2025 16:42:49 +0000 Subject: [PATCH 15/15] using the right column name for slots storage --- rocks-db/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rocks-db/src/lib.rs b/rocks-db/src/lib.rs index 311218cb4..4af7092e8 100644 --- a/rocks-db/src/lib.rs +++ b/rocks-db/src/lib.rs @@ -93,7 +93,7 @@ impl SlotStorage { } pub fn cf_names() -> Vec<&'static str> { - vec![RawBlock::NAME, MigrationVersions::NAME, OffChainData::NAME] + vec![RawBlock::NAME, MigrationVersions::NAME, OffChainDataDeprecated::NAME] } pub fn open

(