Commit 6bfe376

add current token datas

bowenyang007 committed Sep 23, 2022
1 parent 1279411 commit 6bfe376

Showing 16 changed files with 1,072 additions and 654 deletions.
72 changes: 32 additions & 40 deletions crates/indexer/migrations/2022-09-04-194128_add_token_data/up.sql
@@ -1,63 +1,61 @@
 -- Your SQL goes here
 -- tracks tokens per version
 CREATE TABLE tokens (
-  creator_address VARCHAR(100) NOT NULL,
-  collection_name_hash VARCHAR(64) NOT NULL,
-  name_hash VARCHAR(64) NOT NULL,
-  collection_name TEXT NOT NULL,
-  name TEXT NOT NULL,
+  -- sha256 of creator + collection_name + name
+  token_data_id_hash VARCHAR(64) NOT NULL,
   property_version NUMERIC NOT NULL,
   transaction_version BIGINT NOT NULL,
+  creator_address VARCHAR(66) NOT NULL,
+  collection_name VARCHAR(128) NOT NULL,
+  name VARCHAR(128) NOT NULL,
   token_properties jsonb NOT NULL,
   inserted_at TIMESTAMP NOT NULL DEFAULT NOW(),
   -- Constraints
   PRIMARY KEY (
-    creator_address,
-    collection_name_hash,
-    name_hash,
+    token_data_id_hash,
     property_version,
     transaction_version
   )
 );
+CREATE INDEX token_crea_cn_name_index ON tokens (creator_address, collection_name, name);
 CREATE INDEX token_insat_index ON tokens (inserted_at);
 -- tracks who owns tokens at certain version
 CREATE TABLE token_ownerships (
-  creator_address VARCHAR(66) NOT NULL,
-  collection_name_hash VARCHAR(64) NOT NULL,
-  name_hash VARCHAR(64) NOT NULL,
-  collection_name TEXT NOT NULL,
-  name TEXT NOT NULL,
+  -- sha256 of creator + collection_name + name
+  token_data_id_hash VARCHAR(64) NOT NULL,
   property_version NUMERIC NOT NULL,
   transaction_version BIGINT NOT NULL,
+  table_handle VARCHAR(66) NOT NULL,
+  creator_address VARCHAR(66) NOT NULL,
+  collection_name VARCHAR(128) NOT NULL,
+  name VARCHAR(128) NOT NULL,
   owner_address VARCHAR(66),
   amount NUMERIC NOT NULL,
-  table_handle VARCHAR(66) NOT NULL,
   table_type TEXT,
   inserted_at TIMESTAMP NOT NULL DEFAULT NOW(),
   -- Constraints
   PRIMARY KEY (
-    creator_address,
-    collection_name_hash,
-    name_hash,
+    token_data_id_hash,
     property_version,
     transaction_version,
     table_handle
   )
 );
-CREATE INDEX to_owner ON token_ownerships (owner_address);
+CREATE INDEX to_owner_index ON token_ownerships (owner_address);
+CREATE INDEX to_crea_cn_name_index ON token_ownerships (creator_address, collection_name, name);
 CREATE INDEX to_insat_index ON token_ownerships (inserted_at);
 -- tracks token metadata
 CREATE TABLE token_datas (
-  creator_address VARCHAR(66) NOT NULL,
-  collection_name_hash VARCHAR(64) NOT NULL,
-  name_hash VARCHAR(64) NOT NULL,
-  collection_name TEXT NOT NULL,
-  name TEXT NOT NULL,
+  -- sha256 of creator + collection_name + name
+  token_data_id_hash VARCHAR(64) NOT NULL,
   transaction_version BIGINT NOT NULL,
+  creator_address VARCHAR(66) NOT NULL,
+  collection_name VARCHAR(128) NOT NULL,
+  name VARCHAR(128) NOT NULL,
   maximum NUMERIC NOT NULL,
   supply NUMERIC NOT NULL,
   largest_property_version NUMERIC NOT NULL,
-  metadata_uri TEXT NOT NULL,
+  metadata_uri VARCHAR(512) NOT NULL,
   payee_address VARCHAR(66) NOT NULL,
   royalty_points_numerator NUMERIC NOT NULL,
   royalty_points_denominator NUMERIC NOT NULL,
@@ -69,33 +67,27 @@ CREATE TABLE token_datas (
   default_properties jsonb NOT NULL,
   inserted_at TIMESTAMP NOT NULL DEFAULT NOW(),
   -- Constraints
-  PRIMARY KEY (
-    creator_address,
-    collection_name_hash,
-    name_hash,
-    transaction_version
-  )
+  PRIMARY KEY (token_data_id_hash, transaction_version)
 );
+CREATE INDEX td_crea_cn_name_index ON token_datas (creator_address, collection_name, name);
 CREATE INDEX td_insat_index ON token_datas (inserted_at);
 -- tracks collection metadata
 CREATE TABLE collection_datas (
+  -- sha256 of creator + collection_name
+  collection_data_id_hash VARCHAR(64) NOT NULL,
+  transaction_version BIGINT NOT NULL,
   creator_address VARCHAR(66) NOT NULL,
-  collection_name_hash VARCHAR(64) NOT NULL,
-  collection_name TEXT NOT NULL,
+  collection_name VARCHAR(128) NOT NULL,
   description TEXT NOT NULL,
-  transaction_version BIGINT NOT NULL,
-  metadata_uri TEXT NOT NULL,
+  metadata_uri VARCHAR(512) NOT NULL,
   supply NUMERIC NOT NULL,
   maximum NUMERIC NOT NULL,
   maximum_mutable BOOLEAN NOT NULL,
   uri_mutable BOOLEAN NOT NULL,
   description_mutable BOOLEAN NOT NULL,
   inserted_at TIMESTAMP NOT NULL DEFAULT NOW(),
   -- Constraints
-  PRIMARY KEY (
-    creator_address,
-    collection_name_hash,
-    transaction_version
-  )
+  PRIMARY KEY (collection_data_id_hash, transaction_version)
 );
-CREATE INDEX cd_insat_index ON collection_datas (inserted_at);
+CREATE INDEX cd_crea_cn_index ON collection_datas (creator_address, collection_name);
+CREATE INDEX cd_insat_index ON collection_datas (inserted_at);
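
Worth noting about the reworked schema: token_data_id_hash is the sha256 of creator + collection_name + name, so a single 64-char hex column replaces the old composite creator/collection/name key in every primary key. A minimal sketch of the resulting lookup, using a placeholder digest:

    SELECT name, property_version, transaction_version
    FROM tokens
    WHERE token_data_id_hash = 'abc123'  -- placeholder; real values are 64-char hex digests
    ORDER BY transaction_version DESC;

The token_crea_cn_name_index keeps the human-readable path (creator_address, collection_name, name) queryable as well.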
4 changes: 4 additions & 0 deletions (new migration, down.sql; full path not shown)
@@ -0,0 +1,4 @@
+-- This file should undo anything in `up.sql`
+DROP TABLE IF EXISTS current_token_ownerships;
+DROP TABLE IF EXISTS current_token_datas;
+DROP TABLE IF EXISTS current_collection_datas;
67 changes: 67 additions & 0 deletions (new migration, up.sql; full path not shown)
@@ -0,0 +1,67 @@
+-- Your SQL goes here
+-- tracks tokens in owner's tokenstore
+CREATE TABLE current_token_ownerships (
+  -- sha256 of creator + collection_name + name
+  token_data_id_hash VARCHAR(64) NOT NULL,
+  property_version NUMERIC NOT NULL,
+  owner_address VARCHAR(66) NOT NULL,
+  creator_address VARCHAR(66) NOT NULL,
+  collection_name VARCHAR(128) NOT NULL,
+  name VARCHAR(128) NOT NULL,
+  amount NUMERIC NOT NULL,
+  token_properties jsonb NOT NULL,
+  last_transaction_version BIGINT NOT NULL,
+  inserted_at TIMESTAMP NOT NULL DEFAULT NOW(),
+  -- Constraints
+  PRIMARY KEY (
+    token_data_id_hash,
+    property_version,
+    owner_address
+  )
+);
+CREATE INDEX curr_to_crea_cn_name_index ON current_token_ownerships (creator_address, collection_name, name);
+CREATE INDEX curr_to_owner_index ON current_token_ownerships (owner_address);
+CREATE INDEX curr_to_insat_index ON current_token_ownerships (inserted_at);
+-- tracks latest token metadata
+CREATE TABLE current_token_datas (
+  -- sha256 of creator + collection_name + name
+  token_data_id_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL,
+  creator_address VARCHAR(66) NOT NULL,
+  collection_name VARCHAR(128) NOT NULL,
+  name VARCHAR(128) NOT NULL,
+  maximum NUMERIC NOT NULL,
+  supply NUMERIC NOT NULL,
+  largest_property_version NUMERIC NOT NULL,
+  metadata_uri VARCHAR(512) NOT NULL,
+  payee_address VARCHAR(66) NOT NULL,
+  royalty_points_numerator NUMERIC NOT NULL,
+  royalty_points_denominator NUMERIC NOT NULL,
+  maximum_mutable BOOLEAN NOT NULL,
+  uri_mutable BOOLEAN NOT NULL,
+  description_mutable BOOLEAN NOT NULL,
+  properties_mutable BOOLEAN NOT NULL,
+  royalty_mutable BOOLEAN NOT NULL,
+  default_properties jsonb NOT NULL,
+  last_transaction_version BIGINT NOT NULL,
+  inserted_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE INDEX curr_td_crea_cn_name_index ON current_token_datas (creator_address, collection_name, name);
+CREATE INDEX curr_td_insat_index ON current_token_datas (inserted_at);
+-- tracks latest collection metadata
+CREATE TABLE current_collection_datas (
+  -- sha256 of creator + collection_name
+  collection_data_id_hash VARCHAR(64) UNIQUE PRIMARY KEY NOT NULL,
+  creator_address VARCHAR(66) NOT NULL,
+  collection_name VARCHAR(128) NOT NULL,
+  description TEXT NOT NULL,
+  metadata_uri VARCHAR(512) NOT NULL,
+  supply NUMERIC NOT NULL,
+  maximum NUMERIC NOT NULL,
+  maximum_mutable BOOLEAN NOT NULL,
+  uri_mutable BOOLEAN NOT NULL,
+  description_mutable BOOLEAN NOT NULL,
+  last_transaction_version BIGINT NOT NULL,
+  inserted_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE INDEX curr_cd_crea_cn_index ON current_collection_datas (creator_address, collection_name);
+CREATE INDEX curr_cd_insat_index ON current_collection_datas (inserted_at);
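
Unlike the per-version tables above, these current_* tables hold one row per entity and track freshness via last_transaction_version. A sketch of the kind of read curr_to_owner_index supports, using a placeholder address:

    SELECT token_data_id_hash, property_version, amount
    FROM current_token_ownerships
    WHERE owner_address = '0x42'  -- placeholder account address
      AND amount > 0;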
48 changes: 44 additions & 4 deletions crates/indexer/src/database.rs
@@ -5,15 +5,24 @@
 #![allow(clippy::extra_unused_lifetimes)]
 use crate::util::remove_null_bytes;
 use diesel::{
-    pg::PgConnection,
+    pg::{Pg, PgConnection},
+    query_builder::{AstPass, Query, QueryFragment},
     r2d2::{ConnectionManager, PoolError, PooledConnection},
-    RunQueryDsl,
+    QueryResult, RunQueryDsl,
 };
 use std::{cmp::min, sync::Arc};
 
 pub type PgPool = diesel::r2d2::Pool<ConnectionManager<PgConnection>>;
 pub type PgDbPool = Arc<PgPool>;
 pub type PgPoolConnection = PooledConnection<ConnectionManager<PgConnection>>;
+#[derive(QueryId)]
+/// Using this will append a where clause at the end of the string upsert function, e.g.
+/// INSERT INTO ... ON CONFLICT DO UPDATE SET ... WHERE "transaction_version" = excluded."transaction_version"
+/// This is needed when we want to maintain a table with only the latest state
+pub struct UpsertFilterLatestTransactionQuery<T> {
+    query: T,
+    where_clause: Option<&'static str>,
+}
 
 pub const MAX_DIESEL_PARAM_SIZE: u16 = u16::MAX;

@@ -60,19 +69,50 @@ pub fn execute_with_better_error<
 >(
     conn: &mut PgConnection,
     query: diesel::query_builder::InsertStatement<T, U>,
+    mut additional_where_clause: Option<&'static str>,
 ) -> diesel::QueryResult<usize>
 where
     <T as diesel::QuerySource>::FromClause: diesel::query_builder::QueryFragment<diesel::pg::Pg>,
 {
-    let debug = diesel::debug_query::<diesel::pg::Pg, _>(&query).to_string();
+    let original_query = diesel::debug_query::<diesel::pg::Pg, _>(&query).to_string();
+    // This is needed because if we don't insert any row, then diesel makes a call like this
+    // SELECT 1 FROM TABLE WHERE 1=0
+    if original_query.to_lowercase().contains("where") {
+        additional_where_clause = None;
+    }
+    let final_query = UpsertFilterLatestTransactionQuery {
+        query,
+        where_clause: additional_where_clause,
+    };
+    let debug = diesel::debug_query::<diesel::pg::Pg, _>(&final_query).to_string();
     aptos_logger::debug!("Executing query: {:?}", debug);
-    let res = query.execute(conn);
+    let res = final_query.execute(conn);
     if let Err(ref e) = res {
         aptos_logger::warn!("Error running query: {:?}\n{}", e, debug);
     }
     res
 }
+
+/// Section below is required to modify the query.
+impl<T: Query> Query for UpsertFilterLatestTransactionQuery<T> {
+    type SqlType = T::SqlType;
+}
+
+impl<T> RunQueryDsl<PgConnection> for UpsertFilterLatestTransactionQuery<T> {}
+
+impl<T> QueryFragment<Pg> for UpsertFilterLatestTransactionQuery<T>
+where
+    T: QueryFragment<Pg>,
+{
+    fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> {
+        self.query.walk_ast(out.reborrow())?;
+        if let Some(w) = self.where_clause {
+            out.push_sql(w);
+        }
+        Ok(())
+    }
+}
 
 #[cfg(test)]
 mod test {
     use super::*;
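
Taken together with the migration above, the appended clause turns a plain upsert into one that only overwrites a row when the incoming state is at least as new. A hand-written equivalent against current_collection_datas (placeholder values; the exact guard each processor passes is an assumption here, inferred from the doc comment):

    INSERT INTO current_collection_datas (
      collection_data_id_hash, creator_address, collection_name, description,
      metadata_uri, supply, maximum, maximum_mutable, uri_mutable,
      description_mutable, last_transaction_version
    ) VALUES (
      'abc123', '0x42', 'Example Collection', 'demo row',
      'https://example.com/meta.json', 10, 100, true, true, true, 7
    )
    ON CONFLICT (collection_data_id_hash) DO UPDATE SET
      supply = excluded.supply,
      last_transaction_version = excluded.last_transaction_version
    WHERE current_collection_datas.last_transaction_version <= excluded.last_transaction_version;

The contains("where") check matters because, when there are no rows to insert, Diesel emits a dummy statement like SELECT 1 FROM table WHERE 1=0, and appending a second WHERE to that would produce invalid SQL.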
28 changes: 7 additions & 21 deletions crates/indexer/src/indexer/tailer.rs
@@ -105,6 +105,7 @@ impl Tailer {
         diesel::insert_into(ledger_infos::table).values(LedgerInfo {
             chain_id: new_chain_id,
         }),
+        None,
     )
     .context(r#"Error updating chain_id!"#)
     .map(|_| new_chain_id as u64)
@@ -316,28 +317,13 @@ mod test {
 }
 
 pub fn wipe_database(conn: &mut PgPoolConnection) {
-    for table in [
-        "collection_datas",
-        "tokens",
-        "token_datas",
-        "token_ownerships",
-        "signatures",
-        "move_modules",
-        "move_resources",
-        "table_items",
-        "table_metadatas",
-        "write_set_changes",
-        "events",
-        "user_transactions",
-        "block_metadata_transactions",
-        "transactions",
-        "processor_statuses",
-        "ledger_infos",
-        "__diesel_schema_migrations",
+    for command in [
+        "DROP SCHEMA public CASCADE",
+        "CREATE SCHEMA public",
+        "GRANT ALL ON SCHEMA public TO postgres",
+        "GRANT ALL ON SCHEMA public TO public",
     ] {
-        diesel::sql_query(format!("DROP TABLE IF EXISTS {} CASCADE", table))
-            .execute(conn)
-            .unwrap();
+        diesel::sql_query(command).execute(conn).unwrap();
     }
 }

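
The test helper now resets by dropping and recreating the whole public schema instead of enumerating tables, so newly added tables (like the current_* ones) cannot be missed by the teardown. A quick sanity check of the reset (a sketch, assuming everything lives in the default public schema):

    -- Expected to return zero rows immediately after wipe_database runs.
    SELECT table_name
    FROM information_schema.tables
    WHERE table_schema = 'public';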
1 change: 1 addition & 0 deletions crates/indexer/src/indexer/transaction_processor.rs
@@ -160,6 +160,7 @@ pub trait TransactionProcessor: Send + Sync + Debug {
             dsl::details.eq(excluded(dsl::details)),
             dsl::last_updated.eq(excluded(dsl::last_updated)),
         )),
+        None,
     )
     .expect("Error updating Processor Status!");
}
Expand Down

