1045 - additional fork handling #1048

Merged
merged 6 commits into from
Nov 8, 2021
@@ -447,7 +447,10 @@ class GenericPlatformDiscoveryOperationsTest
Set(
Attribute("big_map_id", "Big map id", DataType.Decimal, None, KeyType.UniqueKey, "big_maps"),
Attribute("key_type", "Key type", DataType.String, None, KeyType.NonKey, "big_maps"),
Attribute("value_type", "Value type", DataType.String, None, KeyType.NonKey, "big_maps")
Attribute("value_type", "Value type", DataType.String, None, KeyType.NonKey, "big_maps"),
Attribute("fork_id", "Fork id", DataType.String, None, KeyType.UniqueKey, "big_maps"),
Attribute("block_level", "Block level", DataType.LargeInt, None, KeyType.NonKey, "big_maps"),
Attribute("invalidated_asof", "Invalidated asof", DataType.DateTime, None, KeyType.NonKey, "big_maps")
)
)
}
@@ -472,7 +475,9 @@ class GenericPlatformDiscoveryOperationsTest
Attribute("block_level", "Block level", DataType.LargeInt, None, KeyType.NonKey, "big_map_contents"),
Attribute("timestamp", "Timestamp", DataType.DateTime, None, KeyType.NonKey, "big_map_contents"),
Attribute("cycle", "Cycle", DataType.Int, None, KeyType.NonKey, "big_map_contents"),
Attribute("period", "Period", DataType.Int, None, KeyType.NonKey, "big_map_contents")
Attribute("period", "Period", DataType.Int, None, KeyType.NonKey, "big_map_contents"),
Attribute("fork_id", "Fork id", DataType.String, None, KeyType.UniqueKey, "big_map_contents"),
Attribute("invalidated_asof", "Invalidated asof", DataType.DateTime, None, KeyType.NonKey, "big_map_contents")
)
)
}
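The updated expectations above reflect the pattern this PR applies to every fork-aware table: each gains a fork_id discriminator and an optional invalidated_asof timestamp. A minimal sketch of that shared shape, using names from the diff (the trait itself is illustrative and not part of the PR):

import java.sql.Timestamp

// Illustrative only: the common shape of the fork-aware rows in this PR.
// fork_id records which fork a row was indexed on; invalidated_asof is set
// later if fork handling decides the row belongs to an abandoned branch.
trait ForkAware {
  def forkId: String
  def invalidatedAsof: Option[Timestamp]
}

The generated row classes below (BigMapContentsRow, BigMapContentsHistoryRow, BigMapsRow) all match this shape.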
@@ -1486,7 +1486,9 @@ trait Tables {
* @param blockLevel Database column block_level SqlType(int8), Default(None)
* @param timestamp Database column timestamp SqlType(timestamp), Default(None)
* @param cycle Database column cycle SqlType(int4), Default(None)
- * @param period Database column period SqlType(int4), Default(None) */
+ * @param period Database column period SqlType(int4), Default(None)
+ * @param forkId Database column fork_id SqlType(varchar)
+ * @param invalidatedAsof Database column invalidated_asof SqlType(timestamp), Default(None) */
case class BigMapContentsRow(
bigMapId: scala.math.BigDecimal,
key: String,
@@ -1497,7 +1499,9 @@ trait Tables {
blockLevel: Option[Long] = None,
timestamp: Option[java.sql.Timestamp] = None,
cycle: Option[Int] = None,
- period: Option[Int] = None
+ period: Option[Int] = None,
+ forkId: String,
+ invalidatedAsof: Option[java.sql.Timestamp] = None
)

/** GetResult implicit for fetching BigMapContentsRow objects using plain SQL queries */
@@ -1521,7 +1525,9 @@ trait Tables {
<<?[Long],
<<?[java.sql.Timestamp],
<<?[Int],
- <<?[Int]
+ <<?[Int],
+ <<[String],
+ <<?[java.sql.Timestamp]
)
)
}
@@ -1530,7 +1536,20 @@ trait Tables {
class BigMapContents(_tableTag: Tag)
extends profile.api.Table[BigMapContentsRow](_tableTag, Some("tezos"), "big_map_contents") {
def * =
- (bigMapId, key, keyHash, operationGroupId, value, valueMicheline, blockLevel, timestamp, cycle, period) <> (BigMapContentsRow.tupled, BigMapContentsRow.unapply)
+ (
+   bigMapId,
+   key,
+   keyHash,
+   operationGroupId,
+   value,
+   valueMicheline,
+   blockLevel,
+   timestamp,
+   cycle,
+   period,
+   forkId,
+   invalidatedAsof
+ ) <> (BigMapContentsRow.tupled, BigMapContentsRow.unapply)

/** Maps whole row to an option. Useful for outer joins. */
def ? =
@@ -1545,11 +1564,14 @@ trait Tables {
blockLevel,
timestamp,
cycle,
- period
+ period,
+ Rep.Some(forkId),
+ invalidatedAsof
)
).shaped.<>(
{ r =>
- import r._; _1.map(_ => BigMapContentsRow.tupled((_1.get, _2.get, _3, _4, _5, _6, _7, _8, _9, _10)))
+ import r._;
+ _1.map(_ => BigMapContentsRow.tupled((_1.get, _2.get, _3, _4, _5, _6, _7, _8, _9, _10, _11.get, _12)))
},
(_: Any) => throw new Exception("Inserting into ? projection not supported.")
)
@@ -1584,8 +1606,15 @@ trait Tables {
/** Database column period SqlType(int4), Default(None) */
val period: Rep[Option[Int]] = column[Option[Int]]("period", O.Default(None))

+ /** Database column fork_id SqlType(varchar) */
+ val forkId: Rep[String] = column[String]("fork_id")
+
+ /** Database column invalidated_asof SqlType(timestamp), Default(None) */
+ val invalidatedAsof: Rep[Option[java.sql.Timestamp]] =
+   column[Option[java.sql.Timestamp]]("invalidated_asof", O.Default(None))

/** Primary key of BigMapContents (database name big_map_contents_pkey) */
val pk = primaryKey("big_map_contents_pkey", (bigMapId, key))
val pk = primaryKey("big_map_contents_pkey", (bigMapId, key, forkId))

/** Index over (bigMapId) (database name big_map_id_idx) */
val index1 = index("big_map_id_idx", bigMapId)
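Since fork handling marks rows invalid instead of deleting them, read paths that want only live data have to filter on invalidated_asof. A hedged sketch of such a query against the table above, assuming the generated trait is instantiated as the usual Tables object over the Postgres profile (the query itself is illustrative, not from the PR):

import slick.jdbc.PostgresProfile.api._

// Rows never touched by fork invalidation form the "live" view of the table.
val liveBigMapContents =
  Tables.BigMapContents.filter(_.invalidatedAsof.isEmpty)

// A key lookup now has to account for the widened primary key:
// (big_map_id, key) alone is no longer unique, only (big_map_id, key, fork_id) is.
def liveValue(mapId: scala.math.BigDecimal, k: String) =
  liveBigMapContents
    .filter(r => r.bigMapId === mapId && r.key === k)
    .map(_.value)
    .result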
@@ -1609,7 +1638,9 @@ trait Tables {
* @param blockLevel Database column block_level SqlType(int8), Default(None)
* @param timestamp Database column timestamp SqlType(timestamp), Default(None)
* @param cycle Database column cycle SqlType(int4), Default(None)
- * @param period Database column period SqlType(int4), Default(None) */
+ * @param period Database column period SqlType(int4), Default(None)
+ * @param forkId Database column fork_id SqlType(varchar)
+ * @param invalidatedAsof Database column invalidated_asof SqlType(timestamp), Default(None) */
case class BigMapContentsHistoryRow(
bigMapId: scala.math.BigDecimal,
key: String,
@@ -1619,7 +1650,9 @@ trait Tables {
blockLevel: Option[Long] = None,
timestamp: Option[java.sql.Timestamp] = None,
cycle: Option[Int] = None,
- period: Option[Int] = None
+ period: Option[Int] = None,
+ forkId: String,
+ invalidatedAsof: Option[java.sql.Timestamp] = None
)

/** GetResult implicit for fetching BigMapContentsHistoryRow objects using plain SQL queries */
@@ -1642,7 +1675,9 @@ trait Tables {
<<?[Long],
<<?[java.sql.Timestamp],
<<?[Int],
- <<?[Int]
+ <<?[Int],
+ <<[String],
+ <<?[java.sql.Timestamp]
)
)
}
@@ -1651,17 +1686,31 @@ trait Tables {
class BigMapContentsHistory(_tableTag: Tag)
extends profile.api.Table[BigMapContentsHistoryRow](_tableTag, Some("tezos"), "big_map_contents_history") {
def * =
- (bigMapId, key, keyHash, operationGroupId, value, blockLevel, timestamp, cycle, period) <> (BigMapContentsHistoryRow.tupled, BigMapContentsHistoryRow.unapply)
+ (bigMapId, key, keyHash, operationGroupId, value, blockLevel, timestamp, cycle, period, forkId, invalidatedAsof) <> (BigMapContentsHistoryRow.tupled, BigMapContentsHistoryRow.unapply)

/** Maps whole row to an option. Useful for outer joins. */
def ? =
- ((Rep.Some(bigMapId), Rep.Some(key), keyHash, operationGroupId, value, blockLevel, timestamp, cycle, period)).shaped
-   .<>(
-     { r =>
-       import r._; _1.map(_ => BigMapContentsHistoryRow.tupled((_1.get, _2.get, _3, _4, _5, _6, _7, _8, _9)))
-     },
-     (_: Any) => throw new Exception("Inserting into ? projection not supported.")
+ (
+   (
+     Rep.Some(bigMapId),
+     Rep.Some(key),
+     keyHash,
+     operationGroupId,
+     value,
+     blockLevel,
+     timestamp,
+     cycle,
+     period,
+     Rep.Some(forkId),
+     invalidatedAsof
+   )
+ ).shaped.<>(
+   { r =>
+     import r._;
+     _1.map(_ => BigMapContentsHistoryRow.tupled((_1.get, _2.get, _3, _4, _5, _6, _7, _8, _9, _10.get, _11)))
+   },
+   (_: Any) => throw new Exception("Inserting into ? projection not supported.")
+ )

/** Database column big_map_id SqlType(numeric) */
val bigMapId: Rep[scala.math.BigDecimal] = column[scala.math.BigDecimal]("big_map_id")
@@ -1689,46 +1738,80 @@ trait Tables {

/** Database column period SqlType(int4), Default(None) */
val period: Rep[Option[Int]] = column[Option[Int]]("period", O.Default(None))

+ /** Database column fork_id SqlType(varchar) */
+ val forkId: Rep[String] = column[String]("fork_id")
+
+ /** Database column invalidated_asof SqlType(timestamp), Default(None) */
+ val invalidatedAsof: Rep[Option[java.sql.Timestamp]] =
+   column[Option[java.sql.Timestamp]]("invalidated_asof", O.Default(None))
}

/** Collection-like TableQuery object for table BigMapContentsHistory */
lazy val BigMapContentsHistory = new TableQuery(tag => new BigMapContentsHistory(tag))

/** Entity class storing rows of table BigMaps
- * @param bigMapId Database column big_map_id SqlType(numeric), PrimaryKey
+ * @param bigMapId Database column big_map_id SqlType(numeric)
  * @param keyType Database column key_type SqlType(varchar), Default(None)
- * @param valueType Database column value_type SqlType(varchar), Default(None) */
+ * @param valueType Database column value_type SqlType(varchar), Default(None)
+ * @param forkId Database column fork_id SqlType(varchar)
+ * @param blockLevel Database column block_level SqlType(int8), Default(None)
+ * @param invalidatedAsof Database column invalidated_asof SqlType(timestamp), Default(None) */
case class BigMapsRow(
bigMapId: scala.math.BigDecimal,
keyType: Option[String] = None,
- valueType: Option[String] = None
+ valueType: Option[String] = None,
+ forkId: String,
+ blockLevel: Option[Long] = None,
+ invalidatedAsof: Option[java.sql.Timestamp] = None
)

/** GetResult implicit for fetching BigMapsRow objects using plain SQL queries */
- implicit def GetResultBigMapsRow(implicit e0: GR[scala.math.BigDecimal], e1: GR[Option[String]]): GR[BigMapsRow] =
-   GR { prs =>
-     import prs._
-     BigMapsRow.tupled((<<[scala.math.BigDecimal], <<?[String], <<?[String]))
-   }
+ implicit def GetResultBigMapsRow(
+   implicit e0: GR[scala.math.BigDecimal],
+   e1: GR[Option[String]],
+   e2: GR[String],
+   e3: GR[Option[Long]],
+   e4: GR[Option[java.sql.Timestamp]]
+ ): GR[BigMapsRow] = GR { prs =>
+   import prs._
+   BigMapsRow.tupled(
+     (<<[scala.math.BigDecimal], <<?[String], <<?[String], <<[String], <<?[Long], <<?[java.sql.Timestamp])
+   )
+ }

/** Table description of table big_maps. Objects of this class serve as prototypes for rows in queries. */
class BigMaps(_tableTag: Tag) extends profile.api.Table[BigMapsRow](_tableTag, Some("tezos"), "big_maps") {
- def * = (bigMapId, keyType, valueType) <> (BigMapsRow.tupled, BigMapsRow.unapply)
+ def * =
+   (bigMapId, keyType, valueType, forkId, blockLevel, invalidatedAsof) <> (BigMapsRow.tupled, BigMapsRow.unapply)

/** Maps whole row to an option. Useful for outer joins. */
def ? =
- ((Rep.Some(bigMapId), keyType, valueType)).shaped.<>({ r =>
-   import r._; _1.map(_ => BigMapsRow.tupled((_1.get, _2, _3)))
+ ((Rep.Some(bigMapId), keyType, valueType, Rep.Some(forkId), blockLevel, invalidatedAsof)).shaped.<>({ r =>
+   import r._; _1.map(_ => BigMapsRow.tupled((_1.get, _2, _3, _4.get, _5, _6)))
}, (_: Any) => throw new Exception("Inserting into ? projection not supported."))

- /** Database column big_map_id SqlType(numeric), PrimaryKey */
- val bigMapId: Rep[scala.math.BigDecimal] = column[scala.math.BigDecimal]("big_map_id", O.PrimaryKey)
+ /** Database column big_map_id SqlType(numeric) */
+ val bigMapId: Rep[scala.math.BigDecimal] = column[scala.math.BigDecimal]("big_map_id")

/** Database column key_type SqlType(varchar), Default(None) */
val keyType: Rep[Option[String]] = column[Option[String]]("key_type", O.Default(None))

/** Database column value_type SqlType(varchar), Default(None) */
val valueType: Rep[Option[String]] = column[Option[String]]("value_type", O.Default(None))

+ /** Database column fork_id SqlType(varchar) */
+ val forkId: Rep[String] = column[String]("fork_id")
+
+ /** Database column block_level SqlType(int8), Default(None) */
+ val blockLevel: Rep[Option[Long]] = column[Option[Long]]("block_level", O.Default(None))
+
+ /** Database column invalidated_asof SqlType(timestamp), Default(None) */
+ val invalidatedAsof: Rep[Option[java.sql.Timestamp]] =
+   column[Option[java.sql.Timestamp]]("invalidated_asof", O.Default(None))
+
+ /** Primary key of BigMaps (database name big_maps_pkey) */
+ val pk = primaryKey("big_maps_pkey", (bigMapId, forkId))
}

/** Collection-like TableQuery object for table BigMaps */
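The relaxed key means the same on-chain big map may legitimately appear once per fork. A small illustration with invented values (both rows can coexist, which the old big_map_id-only primary key would have rejected):

// Hypothetical rows: one big map indexed on two competing forks.
val onCurrentFork =
  BigMapsRow(bigMapId = BigDecimal(17), keyType = Some("address"), valueType = Some("nat"), forkId = "fork-A")
val onAbandonedFork =
  BigMapsRow(bigMapId = BigDecimal(17), keyType = Some("address"), valueType = Some("nat"), forkId = "fork-B")
// Under big_maps_pkey = (big_map_id, fork_id) both inserts succeed; before this
// change the second would have violated the single-column primary key.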
11 changes: 9 additions & 2 deletions conseil-lorre/src/main/resources/application.conf
@@ -37,10 +37,10 @@ lorre {
block-rights-fetching-is-on: true
block-rights-fetching-is-on: ${?CONSEIL_LORRE_BLOCK_RIGHTS_FETCHING_ENABLED}
metadata-fetching-is-on: false
- metadata-fetching-is-on: ${?CONSEIL_LORRE_BLOCK_RIGHTS_FETCHING_ENABLED}
+ metadata-fetching-is-on: ${?CONSEIL_LORRE_METADATA_FETCHING_ENABLED}
+ fork-handling-is-on: false
+ fork-handling-is-on: ${?CONSEIL_LORRE_FORK_DETECTION_ENABLED}
- registered-tokens-is-on: true
+ registered-tokens-is-on: false
registered-tokens-is-on: ${?CONSEIL_LORRE_REGISTERED_TOKENS_FETCHING_ENABLED}
}

@@ -99,4 +99,11 @@ lorre {
interval: 360 minutes # interval between fetches
}

+ fork-handling {
+   backtrack-levels: 100 // how many levels back we should check for the forks
+   backtrack-levels: ${?CONSEIL_LORRE_FORK_HANDLING_BAKCTRACK_LEVELS}
+   backtrack-interval: 120 // every how many iterations we should check for forks
+   backtrack-interval: ${?CONSEIL_LORRE_FORK_HANDLING_BAKCTRACK_INTERVAL}
+ }

Review thread on the fork-handling defaults:

Contributor: Are you sure 100 levels will be enough every 120 iterations?

Contributor Author: I've used the default values from #1045, but honestly, getting the right values here is trickier than it seems. For example, let's assume that blocks at depth 10 or more from the head are safe from forks, that Lorre starts fully indexed, and that there are no new blocks on the node. The backtracking fork check looks at the last 10 indexed blocks and everything is fine. It then waits a number of iterations until the next check, so we need to estimate how many blocks will be baked in the meantime, which depends on a few things: sleep_interval and backtrack-interval. Under those assumptions, backtrack-levels should be roughly ((blocks_baked_during(sleep_interval) + blocks_baked_during_processing) * backtrack-interval) + 10 + 1. That is only an estimate and we can tune it afterwards, but I definitely think we should follow this rule: backtrack-levels > backtrack-interval.

}
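The sizing rule from the thread can be made concrete with a worked example. The numbers below are assumptions for illustration only (roughly one block per minute, a two-minute sleep, one block's worth of processing per iteration, and a fork-safe depth of 10), not values from the PR:

// Reviewer's rule of thumb:
// backtrack-levels >= (blocksDuringSleep + blocksDuringProcessing) * backtrackInterval + safeDepth + 1
val blocksDuringSleep      = 2   // blocks baked while Lorre sleeps between iterations
val blocksDuringProcessing = 1   // blocks baked while one iteration runs
val backtrackInterval      = 120 // iterations between fork checks (the default above)
val safeDepth              = 10  // depth at which blocks are assumed fork-safe

val minBacktrackLevels =
  (blocksDuringSleep + blocksDuringProcessing) * backtrackInterval + safeDepth + 1
// = 3 * 120 + 11 = 371, well above the default backtrack-levels of 100, which is
// why the author stresses backtrack-levels > backtrack-interval and tuning later.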
@@ -24,6 +24,7 @@ final case class LorreConfiguration(
blockRightsFetching: BakingAndEndorsingRights,
tokenContracts: TokenContracts,
metadataFetching: TzipMetadata,
+ forkHandling: ForkHandling,
enabledFeatures: Features
)

@@ -67,6 +68,11 @@ final case class TokenContracts(
interval: FiniteDuration
)

+ final case class ForkHandling(
+   backtrackLevels: Int,
+   backtrackInterval: Int
+ )

/** sodium library references */
final case class SodiumConfiguration(libraryPath: String) extends AnyVal with Product with Serializable

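The new ForkHandling case class mirrors the lorre.fork-handling block added to application.conf. A sketch of how it could be materialized from that block, assuming pureconfig with its default kebab-case field mapping (whether this project wires it up exactly this way is not shown in the diff):

import pureconfig._
import pureconfig.generic.auto._

// Hypothetical loading: kebab-case keys (backtrack-levels, backtrack-interval)
// map onto the camelCase fields backtrackLevels and backtrackInterval.
val forkHandling: ForkHandling =
  ConfigSource.default.at("lorre.fork-handling").loadOrThrow[ForkHandling]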
@@ -1025,6 +1025,9 @@ object TezosDatabaseOperations extends ConseilLogSupport {
lazy val tokenBalances = EntityTableInvalidator(TokenBalances)(_.blockLevel, _.invalidatedAsof, _.forkId)
lazy val governance = EntityTableInvalidator(Governance)(_.level.ifNull(-1L), _.invalidatedAsof, _.forkId)
lazy val fees = EntityTableInvalidator(Fees)(_.level.ifNull(-1L), _.invalidatedAsof, _.forkId)
+ lazy val bigMaps = EntityTableInvalidator(BigMaps)(_.blockLevel.ifNull(-1L), _.invalidatedAsof, _.forkId)
+ lazy val bigMapContents = EntityTableInvalidator(BigMapContents)(_.blockLevel.ifNull(-1L), _.invalidatedAsof, _.forkId)
+ lazy val bigMapContentsHistory = EntityTableInvalidator(BigMapContentsHistory)(_.blockLevel.ifNull(-1L), _.invalidatedAsof, _.forkId)

/** Deletes entries for the registry of processed chain events.
* Due to a fork, those events will need to be processed again over the new fork
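The three new invalidators plug the big map tables into the same machinery already used by the other entity tables above. EntityTableInvalidator's implementation is not part of this diff, but from its arguments (a level column, invalidated_asof, fork_id) the update it issues plausibly looks like this sketch, which marks rows instead of deleting them:

import java.sql.Timestamp
import slick.jdbc.PostgresProfile.api._

// Sketch: flag big map rows at or above the fork level as invalidated,
// stamping when they were invalidated and which fork superseded them.
// Read paths can then keep filtering on invalidatedAsof.isEmpty.
def invalidateBigMaps(forkLevel: Long, asOf: Timestamp, forkId: String): DBIO[Int] =
  Tables.BigMaps
    .filter(_.blockLevel.ifNull(-1L) >= forkLevel)
    .map(row => (row.invalidatedAsof, row.forkId))
    .update((Some(asOf), forkId))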