diff --git a/packages/eventindexer/.l1.env b/packages/eventindexer/.l1.env index e13f1d9c7a1..e137fc178ce 100644 --- a/packages/eventindexer/.l1.env +++ b/packages/eventindexer/.l1.env @@ -1,16 +1,16 @@ HTTP_PORT=4100 PROMETHEUS_HTTP_PORT=6063 -MYSQL_USER=root -MYSQL_PASSWORD=root -MYSQL_DATABASE=eventindexer -MYSQL_HOST=localhost:3306 -MYSQL_MAX_IDLE_CONNS=50 -MYSQL_MAX_OPEN_CONNS=3000 -MYSQL_CONN_MAX_LIFETIME_IN_MS=100000 -L1_TAIKO_ADDRESS=0x6375394335f34848b850114b66A49D6F47f2cdA8 -BRIDGE_ADDRESS=0x7D992599E1B8b4508Ba6E2Ba97893b4C36C23A28 -PROVER_POOL_ADDRESS=0x7D992599E1B8b4508Ba6E2Ba97893b4C36C23A28 -RPC_URL=wss://l1ws.test.taiko.xyz +DATABASE_USER=root +DATABASE_PASSWORD=root +DATABASE_NAME=eventindexer +DATABASE_HOST=localhost:3306 +DATABASE_MAX_IDLE_CONNS=50 +DATABASE_MAX_OPEN_CONNS=3000 +DATABASE_CONN_MAX_LIFETIME_IN_MS=100000 +L1_TAIKO_ADDRESS=0x0DCd1Bf9A1b36cE34237eEaFef220932846BCD82 +BRIDGE_ADDRESS=0x9A9f2CCfdE556A7E9Ff0848998Aa4a0CFD8863AE +RPC_URL=wss://l1ws.internal.taiko.xyz CORS_ORIGINS=* -BLOCK_BATCH_SIZE=100 +BLOCK_BATCH_SIZE=10 CACHE_INTERVAL_IN_SECONDS=60 +LAYER=l1 \ No newline at end of file diff --git a/packages/eventindexer/.l2.env b/packages/eventindexer/.l2.env index daff48fb32d..7950ff835cd 100644 --- a/packages/eventindexer/.l2.env +++ b/packages/eventindexer/.l2.env @@ -1,15 +1,14 @@ -HTTP_PORT=4100 -PROMETHEUS_HTTP_PORT=6063 -MYSQL_USER=root -MYSQL_PASSWORD=root -MYSQL_DATABASE=eventindexer -MYSQL_HOST=localhost:3306 -MYSQL_MAX_IDLE_CONNS=50 -MYSQL_MAX_OPEN_CONNS=3000 -MYSQL_CONN_MAX_LIFETIME_IN_MS=100000 -PROVER_POOL_ADDRESS=0x7D992599E1B8b4508Ba6E2Ba97893b4C36C23A28 -SWAP_ADDRESSES=0x501f63210aE6D7Eeb50DaE74DA5Ae407515ee246,0x926815A3fb587DDF5e2d2A03ea235630c0A53a16,0x2223D60359736532958DF6a4E9A5e4A5a71729A1 -RPC_URL=wss://ws.test.taiko.xyz +HTTP_PORT=4009 +METRICS_HTTP_PORT=6067 +DATABASE_USER=root +DATABASE_PASSWORD=root +DATABASE_NAME=eventindexer +DATABASE_HOST=localhost:3306 +DATABASE_MAX_IDLE_CONNS=50 
+DATABASE_MAX_OPEN_CONNS=3000 +DATABASE_CONN_MAX_LIFETIME_IN_MS=100000 +RPC_URL=wss://ws.internal.taiko.xyz CORS_ORIGINS=* -BLOCK_BATCH_SIZE=1000 +BLOCK_BATCH_SIZE=100 CACHE_INTERVAL_IN_SECONDS=60 +LAYER=l2 diff --git a/packages/eventindexer/README.md b/packages/eventindexer/README.md index 34ea0ee5318..aabcf07dc91 100644 --- a/packages/eventindexer/README.md +++ b/packages/eventindexer/README.md @@ -10,3 +10,9 @@ Catches events, stores them in the database to be queried via API. run `cp .default.env .env`, and add your own private key as `RELAYER_ECDSA_KEY` in `.env`. You need to be running a MySQL instance, and replace all the `MYSQL_` env vars with yours. Run `go run cmd/main.go --help` to see a list of possible configuration flags, or `go run cmd/main.go` to run with defaults, which will process messages from L1 to L2, and from L2 to L1, and start indexing blocks from 0. + +# Block data + +1. parse data +2. store +3. cron job that updates every 24 hours diff --git a/packages/eventindexer/account.go b/packages/eventindexer/account.go new file mode 100644 index 00000000000..819af85caaa --- /dev/null +++ b/packages/eventindexer/account.go @@ -0,0 +1,18 @@ +package eventindexer + +import ( + "context" + "time" + + "github.com/ethereum/go-ethereum/common" +) + +type Account struct { + ID int `json:"id"` + Address string `json:"address"` + TransactedAt time.Time `json:"transactedAt"` +} + +type AccountRepository interface { + Save(ctx context.Context, address common.Address, transactedAt time.Time) error +} diff --git a/packages/eventindexer/block.go b/packages/eventindexer/block.go index cb8e724e835..30bad3d8035 100644 --- a/packages/eventindexer/block.go +++ b/packages/eventindexer/block.go @@ -1,30 +1,44 @@ package eventindexer import ( + "context" "math/big" + "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) -// Block is a database model representing simple header types +// ProcessedBlock is a database model 
representing simple header types // to keep track of our most recently processed block number and hash. -type Block struct { +type ProcessedBlock struct { ID int `json:"id"` Height uint64 `json:"blockHeight" gorm:"column:block_height"` Hash string `json:"hash"` ChainID int64 `json:"chainID"` } -// SaveBlockOpts is required to store a new block -type SaveBlockOpts struct { +// SaveProcessedBlockOpts is required to store a new block +type SaveProcessedBlockOpts struct { Height uint64 Hash common.Hash ChainID *big.Int } -// BlockRepository defines methods necessary for interacting with +// ProcessedBlockRepository defines methods necessary for interacting with // the block store. +type ProcessedBlockRepository interface { + Save(opts SaveProcessedBlockOpts) error + GetLatestBlockProcessed(chainID *big.Int) (*ProcessedBlock, error) +} + +type Block struct { + ID int `json:"id"` + ChainID int64 `json:"chainID"` + BlockID int64 `json:"blockID"` + TransactedAt time.Time `json:"transactedAt"` +} + type BlockRepository interface { - Save(opts SaveBlockOpts) error - GetLatestBlockProcessed(chainID *big.Int) (*Block, error) + Save(ctx context.Context, tx *types.Block, chainID *big.Int) error } diff --git a/packages/eventindexer/chart.go b/packages/eventindexer/chart.go new file mode 100644 index 00000000000..088ecfdf203 --- /dev/null +++ b/packages/eventindexer/chart.go @@ -0,0 +1,21 @@ +package eventindexer + +import "context" + +type ChartResponse struct { + Chart []ChartItem `json:"chart"` +} + +type ChartItem struct { + Date string `json:"date"` + Value string `json:"value"` +} + +type ChartRepository interface { + Find( + ctx context.Context, + task string, + start string, + end string, + ) (*ChartResponse, error) +} diff --git a/packages/eventindexer/cli/cli.go b/packages/eventindexer/cli/cli.go deleted file mode 100644 index 19cd47d2ac7..00000000000 --- a/packages/eventindexer/cli/cli.go +++ /dev/null @@ -1,297 +0,0 @@ -package cli - -import ( - "context" - "fmt" - 
"log" - "os" - "strconv" - "strings" - "time" - - "github.com/labstack/echo/v4" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/rpc" - "github.com/joho/godotenv" - "github.com/pkg/errors" - "github.com/taikoxyz/taiko-mono/packages/eventindexer" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/db" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/http" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/indexer" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/repo" - - "gorm.io/driver/mysql" - "gorm.io/gorm" - "gorm.io/gorm/logger" -) - -var ( - envVars = []string{ - "HTTP_PORT", - "RPC_URL", - "MYSQL_USER", - "MYSQL_DATABASE", - "MYSQL_HOST", - "PROMETHEUS_HTTP_PORT", - } - - defaultBlockBatchSize = 2 - defaultSubscriptionBackoff = 600 * time.Second -) - -func Run( - mode eventindexer.Mode, - watchMode eventindexer.WatchMode, - httpOnly eventindexer.HTTPOnly, - indexNfts eventindexer.IndexNFTS, -) { - if err := loadAndValidateEnv(); err != nil { - log.Fatal(err) - } - - db, err := openDBConnection(eventindexer.DBConnectionOpts{ - Name: os.Getenv("MYSQL_USER"), - Password: os.Getenv("MYSQL_PASSWORD"), - Database: os.Getenv("MYSQL_DATABASE"), - Host: os.Getenv("MYSQL_HOST"), - OpenFunc: func(dsn string) (eventindexer.DB, error) { - gormDB, err := gorm.Open(mysql.Open(dsn), &gorm.Config{ - Logger: logger.Default.LogMode(logger.Silent), - }) - if err != nil { - return nil, err - } - - return db.New(gormDB), nil - }, - }) - - if err != nil { - log.Fatal(err) - } - - ethClient, err := ethclient.Dial(os.Getenv("RPC_URL")) - if err != nil { - log.Fatal(err) - } - - srv, err := newHTTPServer(db, ethClient) - if err != nil { - log.Fatal(err) - } - - forever := make(chan struct{}) - - go func() { - if err := srv.Start(fmt.Sprintf(":%v", os.Getenv("HTTP_PORT"))); err != nil { - log.Fatal(err) - } - }() - - if !httpOnly { - eventRepository, err := repo.NewEventRepository(db) - 
if err != nil { - log.Fatal(err) - } - - statRepository, err := repo.NewStatRepository(db) - if err != nil { - log.Fatal(err) - } - - blockRepository, err := repo.NewBlockRepository(db) - if err != nil { - log.Fatal(err) - } - - blockBatchSize, err := strconv.Atoi(os.Getenv("BLOCK_BATCH_SIZE")) - if err != nil || blockBatchSize <= 0 { - blockBatchSize = defaultBlockBatchSize - } - - var subscriptionBackoff time.Duration - - subscriptionBackoffInSeconds, err := strconv.Atoi(os.Getenv("SUBSCRIPTION_BACKOFF_IN_SECONDS")) - if err != nil || subscriptionBackoffInSeconds <= 0 { - subscriptionBackoff = defaultSubscriptionBackoff - } else { - subscriptionBackoff = time.Duration(subscriptionBackoffInSeconds) * time.Second - } - - rpcClient, err := rpc.DialContext(context.Background(), os.Getenv("RPC_URL")) - if err != nil { - log.Fatal(err) - } - - var nftBalanceRepo eventindexer.NFTBalanceRepository - - if indexNfts { - nftBalanceRepo, err = repo.NewNFTBalanceRepository(db) - if err != nil { - log.Fatal(err) - } - } - - i, err := indexer.NewService(indexer.NewServiceOpts{ - EventRepo: eventRepository, - BlockRepo: blockRepository, - StatRepo: statRepository, - NFTBalanceRepo: nftBalanceRepo, - EthClient: ethClient, - RPCClient: rpcClient, - SrcTaikoAddress: common.HexToAddress(os.Getenv("L1_TAIKO_ADDRESS")), - SrcBridgeAddress: common.HexToAddress(os.Getenv("BRIDGE_ADDRESS")), - SrcSwapAddresses: stringsToAddresses(strings.Split(os.Getenv("SWAP_ADDRESSES"), ",")), - BlockBatchSize: uint64(blockBatchSize), - SubscriptionBackoff: subscriptionBackoff, - IndexNFTs: bool(indexNfts), - }) - if err != nil { - log.Fatal(err) - } - - var filterFunc indexer.FilterFunc = indexer.L1FilterFunc - - if os.Getenv("L1_TAIKO_ADDRESS") == "" { - filterFunc = indexer.L2FilterFunc - } - - go func() { - if err := i.FilterThenSubscribe(context.Background(), mode, watchMode, filterFunc); err != nil { - log.Fatal(err) - } - }() - } - - <-forever -} - -func stringsToAddresses(s []string) 
[]common.Address { - a := []common.Address{} - - for _, v := range s { - if v != "" { - a = append(a, common.HexToAddress(v)) - } - } - - return a -} - -func openDBConnection(opts eventindexer.DBConnectionOpts) (eventindexer.DB, error) { - dsn := "" - if opts.Password == "" { - dsn = fmt.Sprintf( - "%v@tcp(%v)/%v?charset=utf8mb4&parseTime=True&loc=Local", - opts.Name, - opts.Host, - opts.Database, - ) - } else { - dsn = fmt.Sprintf( - "%v:%v@tcp(%v)/%v?charset=utf8mb4&parseTime=True&loc=Local", - opts.Name, - opts.Password, - opts.Host, - opts.Database, - ) - } - - db, err := opts.OpenFunc(dsn) - if err != nil { - return nil, err - } - - sqlDB, err := db.DB() - if err != nil { - return nil, err - } - - var ( - defaultMaxIdleConns = 50 - defaultMaxOpenConns = 200 - defaultConnMaxLifetime = 10 * time.Second - ) - - maxIdleConns, err := strconv.Atoi(os.Getenv("MYSQL_MAX_IDLE_CONNS")) - if err != nil || maxIdleConns <= 0 { - maxIdleConns = defaultMaxIdleConns - } - - maxOpenConns, err := strconv.Atoi(os.Getenv("MYSQL_MAX_OPEN_CONNS")) - if err != nil || maxOpenConns <= 0 { - maxOpenConns = defaultMaxOpenConns - } - - var maxLifetime time.Duration - - connMaxLifetime, err := strconv.Atoi(os.Getenv("MYSQL_CONN_MAX_LIFETIME_IN_MS")) - if err != nil || connMaxLifetime <= 0 { - maxLifetime = defaultConnMaxLifetime - } else { - maxLifetime = time.Duration(connMaxLifetime) - } - - // SetMaxOpenConns sets the maximum number of open connections to the database. - sqlDB.SetMaxOpenConns(maxOpenConns) - - // SetMaxIdleConns sets the maximum number of connections in the idle connection pool. - sqlDB.SetMaxIdleConns(maxIdleConns) - - // SetConnMaxLifetime sets the maximum amount of time a connection may be reused. 
- sqlDB.SetConnMaxLifetime(maxLifetime) - - return db, nil -} - -func loadAndValidateEnv() error { - _ = godotenv.Load() - - missing := make([]string, 0) - - for _, v := range envVars { - e := os.Getenv(v) - if e == "" { - missing = append(missing, v) - } - } - - if len(missing) == 0 { - return nil - } - - return errors.Errorf("Missing env vars: %v", missing) -} - -func newHTTPServer(db eventindexer.DB, l1EthClient *ethclient.Client) (*http.Server, error) { - eventRepo, err := repo.NewEventRepository(db) - if err != nil { - return nil, err - } - - statRepo, err := repo.NewStatRepository(db) - if err != nil { - return nil, err - } - - nftBalanceRepo, err := repo.NewNFTBalanceRepository(db) - if err != nil { - return nil, err - } - - srv, err := http.NewServer(http.NewServerOpts{ - EventRepo: eventRepo, - StatRepo: statRepo, - NFTBalanceRepo: nftBalanceRepo, - Echo: echo.New(), - CorsOrigins: strings.Split(os.Getenv("CORS_ORIGINS"), ","), - EthClient: l1EthClient, - }) - if err != nil { - return nil, err - } - - return srv, nil -} diff --git a/packages/eventindexer/cmd/flags/common.go b/packages/eventindexer/cmd/flags/common.go new file mode 100644 index 00000000000..cce7b249140 --- /dev/null +++ b/packages/eventindexer/cmd/flags/common.go @@ -0,0 +1,88 @@ +package flags + +import ( + "github.com/urfave/cli/v2" +) + +var ( + commonCategory = "COMMON" + indexerCategory = "INDEXER" + generatorCategory = "GENERATOR" +) + +var ( + DatabaseUsername = &cli.StringFlag{ + Name: "db.username", + Usage: "Database connection username", + Required: true, + Category: commonCategory, + EnvVars: []string{"DATABASE_USER"}, + } + DatabasePassword = &cli.StringFlag{ + Name: "db.password", + Usage: "Database connection password", + Required: true, + Category: commonCategory, + EnvVars: []string{"DATABASE_PASSWORD"}, + } + DatabaseHost = &cli.StringFlag{ + Name: "db.host", + Usage: "Database connection host", + Required: true, + Category: commonCategory, + EnvVars: 
[]string{"DATABASE_HOST"}, + } + DatabaseName = &cli.StringFlag{ + Name: "db.name", + Usage: "Database connection name", + Required: true, + Category: commonCategory, + EnvVars: []string{"DATABASE_NAME"}, + } +) + +var ( + DatabaseMaxIdleConns = &cli.Uint64Flag{ + Name: "db.maxIdleConns", + Usage: "Database max idle connections", + Value: 50, + Category: commonCategory, + EnvVars: []string{"DATABASE_MAX_IDLE_CONNS"}, + } + DatabaseMaxOpenConns = &cli.Uint64Flag{ + Name: "db.maxOpenConns", + Usage: "Database max open connections", + Value: 200, + Category: commonCategory, + EnvVars: []string{"DATABASE_MAX_OPEN_CONNS"}, + } + DatabaseConnMaxLifetime = &cli.Uint64Flag{ + Name: "db.connMaxLifetime", + Usage: "Database connection max lifetime in seconds", + Value: 10, + Category: commonCategory, + EnvVars: []string{"DATABASE_CONN_MAX_LIFETIME"}, + } +) + +// All common flags. +var CommonFlags = []cli.Flag{ + // required + DatabaseUsername, + DatabasePassword, + DatabaseHost, + DatabaseName, + DatabaseMaxIdleConns, + DatabaseConnMaxLifetime, + DatabaseMaxOpenConns, +} + +// MergeFlags merges the given flag slices. +func MergeFlags(groups ...[]cli.Flag) []cli.Flag { + var merged []cli.Flag + for _, group := range groups { + merged = append(merged, group...) 
+ } + + return merged +} diff --git a/packages/eventindexer/cmd/flags/generator.go b/packages/eventindexer/cmd/flags/generator.go new file mode 100644 index 00000000000..b9388df43ac --- /dev/null +++ b/packages/eventindexer/cmd/flags/generator.go @@ -0,0 +1,16 @@ +package flags + +import "github.com/urfave/cli/v2" + +var ( + GenesisDate = &cli.StringFlag{ + Name: "genesisDate", + Usage: "Genesis date to start generating data from, YYYY-MM-DD", + Required: true, + Category: generatorCategory, + EnvVars: []string{"GENESIS_DATE"}, + } +) +var GeneratorFlags = MergeFlags(CommonFlags, []cli.Flag{ + GenesisDate, +}) diff --git a/packages/eventindexer/cmd/flags/indexer.go b/packages/eventindexer/cmd/flags/indexer.go new file mode 100644 index 00000000000..3b19820b05b --- /dev/null +++ b/packages/eventindexer/cmd/flags/indexer.go @@ -0,0 +1,136 @@ +package flags + +import "github.com/urfave/cli/v2" + +// required flags +var ( + RPCUrl = &cli.StringFlag{ + Name: "rpcUrl", + Usage: "RPC URL for the source chain", + Required: true, + Category: commonCategory, + EnvVars: []string{"RPC_URL"}, + } +) + +// optional flags +var ( + HTTPPort = &cli.Uint64Flag{ + Name: "http.port", + Usage: "Port to run http server on", + Category: indexerCategory, + Required: false, + Value: 4102, + EnvVars: []string{"HTTP_PORT"}, + } + MetricsHTTPPort = &cli.Uint64Flag{ + Name: "metrics.port", + Usage: "Port to run metrics http server on", + Category: indexerCategory, + Required: false, + Value: 6061, + EnvVars: []string{"METRICS_HTTP_PORT"}, + } + ETHClientTimeout = &cli.Uint64Flag{ + Name: "ethClientTimeout", + Usage: "Timeout for eth client and contract binding calls", + Category: indexerCategory, + Required: false, + Value: 10, + EnvVars: []string{"ETH_CLIENT_TIMEOUT"}, + } + L1TaikoAddress = &cli.StringFlag{ + Name: "l1TaikoAddress", + Usage: "Address of the TaikoL1 contract", + Required: false, + Category: indexerCategory, + EnvVars: []string{"L1_TAIKO_ADDRESS"}, + } + BridgeAddress = 
&cli.StringFlag{ + Name: "bridgeAddress", + Usage: "Address of the Bridge contract", + Required: false, + Category: indexerCategory, + EnvVars: []string{"BRIDGE_ADDRESS"}, + } + SwapAddresses = &cli.StringFlag{ + Name: "swapAddresses", + Usage: "Comma-delimited list of Swap contract addresses", + Required: false, + Category: indexerCategory, + EnvVars: []string{"SWAP_ADDRESSES"}, + } + CORSOrigins = &cli.StringFlag{ + Name: "http.corsOrigins", + Usage: "Comma-delimited list of cors origins", + Required: false, + Value: "*", + Category: indexerCategory, + } + BlockBatchSize = &cli.Uint64Flag{ + Name: "blockBatchSize", + Usage: "Block batch size when iterating through blocks", + Value: 10, + Required: false, + Category: indexerCategory, + EnvVars: []string{"BLOCK_BATCH_SIZE"}, + } + SubscriptionBackoff = &cli.Uint64Flag{ + Name: "subscriptionBackoff", + Usage: "Subscription backoff in seconds", + Value: 30, + Required: false, + Category: indexerCategory, + EnvVars: []string{"SUBSCRIPTION_BACKOFF_IN_SECONDS"}, + } + SyncMode = &cli.StringFlag{ + Name: "syncMode", + Usage: "Mode of syncing. Pass in 'sync' to continue, and 'resync' to start from genesis again.", + Value: "sync", + Category: indexerCategory, + EnvVars: []string{"SYNC_MODE"}, + } + WatchMode = &cli.StringFlag{ + Name: "watchMode", + Usage: `Mode of watching the chain. 
Options are: + filter: only filter the chain, when caught up, exit + subscribe: do not filter the chain, only subscribe to new events + filter-and-subscribe: the default behavior, filter the chain and subscribe when caught up + `, + Value: "filter-and-subscribe", + Category: indexerCategory, + EnvVars: []string{"WATCH_MODE"}, + } + IndexNFTs = &cli.StringFlag{ + Name: "indexNfts", + Usage: "Whether to index nft transfer events or not", + Required: false, + Category: indexerCategory, + EnvVars: []string{"INDEX_NFTS"}, + } + Layer = &cli.StringFlag{ + Name: "layer", + Usage: "Which layer indexing is occurring on", + Required: false, + Value: "l1", + Category: indexerCategory, + EnvVars: []string{"LAYER"}, + } +) + +var IndexerFlags = MergeFlags(CommonFlags, []cli.Flag{ + RPCUrl, + // optional + L1TaikoAddress, + HTTPPort, + MetricsHTTPPort, + BridgeAddress, + SwapAddresses, + CORSOrigins, + BlockBatchSize, + SubscriptionBackoff, + SyncMode, + WatchMode, + IndexNFTs, + Layer, +}) diff --git a/packages/eventindexer/cmd/main.go b/packages/eventindexer/cmd/main.go index a43520605e0..53f652255aa 100644 --- a/packages/eventindexer/cmd/main.go +++ b/packages/eventindexer/cmd/main.go @@ -1,45 +1,59 @@ package main import ( - "flag" - - "github.com/taikoxyz/taiko-mono/packages/eventindexer" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/cli" + "fmt" + "log" + "os" + + "github.com/joho/godotenv" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/cmd/flags" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/cmd/utils" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/generator" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/indexer" + "github.com/urfave/cli/v2" ) func main() { - modePtr := flag.String("mode", string(eventindexer.SyncMode), `mode to run in. 
- options: - sync: continue syncing from previous block - resync: restart syncing from block 0 - fromBlock: restart syncing from specified block number - `) - - watchModePtr := flag.String("watch-mode", string(eventindexer.FilterAndSubscribeWatchMode), `watch mode to run in. - options: - filter: only filter previous messages - subscribe: only subscribe to new messages - filter-and-subscribe: catch up on all previous messages, then subscribe to new messages - `) - - httpOnlyPtr := flag.Bool("http-only", false, `only run an http server and don't index blocks. - options: - true: only run an http server, dont index blocks - false: run an http server and index blocks - `) - - indexNfts := flag.Bool("index-nfts", false, `index nft transfer events. - options: - true: index - false: dont index - `) - - flag.Parse() - - cli.Run( - eventindexer.Mode(*modePtr), - eventindexer.WatchMode(*watchModePtr), - eventindexer.HTTPOnly(*httpOnlyPtr), - eventindexer.IndexNFTS(*indexNfts), - ) + app := cli.NewApp() + + log.SetOutput(os.Stdout) + // attempt to load a .env file to overwrite CLI flags, but allow it to not + // exist. + + envFile := os.Getenv("EVENTINDEXER_ENV_FILE") + if envFile == "" { + envFile = ".env" + } + + _ = godotenv.Load(envFile) + + app.Name = "Taiko EventIndexer" + app.Usage = "The taiko eventindexing software's command line interface" + app.Copyright = "Copyright 2021-2023 Taiko Labs" + app.Description = "Eventindexer implementation in Golang for Taiko protocol" + app.Authors = []*cli.Author{{Name: "Taiko Labs", Email: "info@taiko.xyz"}} + app.EnableBashCompletion = true + + // All supported sub commands. 
+ app.Commands = []*cli.Command{ + { + Name: "indexer", + Flags: flags.IndexerFlags, + Usage: "Starts the indexer software", + Description: "Taiko indexer software", + Action: utils.SubcommandAction(new(indexer.Indexer)), + }, + { + Name: "generator", + Flags: flags.GeneratorFlags, + Usage: "Starts the generator software", + Description: "Taiko time-series data generator", + Action: utils.SubcommandAction(new(generator.Generator)), + }, + } + + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } } diff --git a/packages/eventindexer/cmd/utils/subcommand_action.go b/packages/eventindexer/cmd/utils/subcommand_action.go new file mode 100644 index 00000000000..b4fa237b775 --- /dev/null +++ b/packages/eventindexer/cmd/utils/subcommand_action.go @@ -0,0 +1,62 @@ +package utils + +import ( + "context" + "os" + "os/signal" + "syscall" + + "log/slog" + + "github.com/taikoxyz/taiko-mono/packages/relayer/metrics" + "github.com/urfave/cli/v2" +) + +type SubcommandApplication interface { + InitFromCli(context.Context, *cli.Context) error + Name() string + Start() error + Close(context.Context) +} + +func SubcommandAction(app SubcommandApplication) cli.ActionFunc { + return func(c *cli.Context) error { + ctx, ctxClose := context.WithCancel(context.Background()) + defer func() { ctxClose() }() + + if err := app.InitFromCli(ctx, c); err != nil { + return err + } + + slog.Info("Starting Taiko relayer application", "name", app.Name()) + + if err := app.Start(); err != nil { + slog.Error("Starting application error", "name", app.Name(), "error", err) + return err + } + + _, startMetrics := metrics.Serve(ctx, c) + + if err := startMetrics(); err != nil { + slog.Error("Starting metrics server error", "error", err) + return err + } + + defer func() { + ctxClose() + app.Close(ctx) + slog.Info("Application stopped", "name", app.Name()) + }() + + quitCh := make(chan os.Signal, 1) + signal.Notify(quitCh, []os.Signal{ + os.Interrupt, + os.Kill, + 
syscall.SIGTERM, + syscall.SIGQUIT, + }...) + <-quitCh + + return nil + } +} diff --git a/packages/eventindexer/db/db.go b/packages/eventindexer/db/db.go index 4f557011cfc..9223263b3cb 100644 --- a/packages/eventindexer/db/db.go +++ b/packages/eventindexer/db/db.go @@ -2,7 +2,10 @@ package db import ( "database/sql" + "fmt" + "time" + "github.com/cyberhorsey/errors" "gorm.io/gorm" ) @@ -23,3 +26,59 @@ func New(gormdb *gorm.DB) *DB { gormdb: gormdb, } } + +var ( + ErrNoDB = errors.Validation.NewWithKeyAndDetail("ERR_NO_DB", "DB is required") +) + +type DBConnectionOpts struct { + Name string + Password string + Host string + Database string + MaxIdleConns uint64 + MaxOpenConns uint64 + MaxConnLifetime uint64 + OpenFunc func(dsn string) (*DB, error) +} + +func OpenDBConnection(opts DBConnectionOpts) (*DB, error) { + dsn := "" + if opts.Password == "" { + dsn = fmt.Sprintf( + "%v@tcp(%v)/%v?charset=utf8mb4&parseTime=True&loc=Local", + opts.Name, + opts.Host, + opts.Database, + ) + } else { + dsn = fmt.Sprintf( + "%v:%v@tcp(%v)/%v?charset=utf8mb4&parseTime=True&loc=Local", + opts.Name, + opts.Password, + opts.Host, + opts.Database, + ) + } + + db, err := opts.OpenFunc(dsn) + if err != nil { + return nil, err + } + + sqlDB, err := db.DB() + if err != nil { + return nil, err + } + + // SetMaxOpenConns sets the maximum number of open connections to the database. + sqlDB.SetMaxOpenConns(int(opts.MaxOpenConns)) + + // SetMaxIdleConns sets the maximum number of connections in the idle connection pool. + sqlDB.SetMaxIdleConns(int(opts.MaxIdleConns)) + + // SetConnMaxLifetime sets the maximum amount of time a connection may be reused. 
+ sqlDB.SetConnMaxLifetime(time.Duration(opts.MaxConnLifetime)) + + return db, nil +} diff --git a/packages/eventindexer/event.go b/packages/eventindexer/event.go index c7714d4d88b..288801324ac 100644 --- a/packages/eventindexer/event.go +++ b/packages/eventindexer/event.go @@ -5,6 +5,7 @@ import ( "database/sql" "math/big" "net/http" + "time" "github.com/morkid/paginate" "github.com/shopspring/decimal" @@ -37,6 +38,7 @@ type Event struct { To string `json:"to"` TokenID sql.NullInt64 `json:"tokenID"` ContractAddress string `json:"contractAddress"` + TransactedAt time.Time `json:"transactedAt"` } // SaveEventOpts @@ -52,6 +54,7 @@ type SaveEventOpts struct { To *string TokenID *int64 ContractAddress *string + TransactedAt time.Time } type UniqueProversResponse struct { diff --git a/packages/eventindexer/flags.go b/packages/eventindexer/flags.go deleted file mode 100644 index 76db6b018ef..00000000000 --- a/packages/eventindexer/flags.go +++ /dev/null @@ -1,22 +0,0 @@ -package eventindexer - -type Mode string - -var ( - SyncMode Mode = "sync" - ResyncMode Mode = "resync" - Modes = []Mode{SyncMode, ResyncMode} -) - -type WatchMode string - -var ( - FilterWatchMode WatchMode = "filter" - SubscribeWatchMode WatchMode = "subscribe" - FilterAndSubscribeWatchMode WatchMode = "filter-and-subscribe" - WatchModes = []WatchMode{FilterWatchMode, SubscribeWatchMode} -) - -type HTTPOnly bool - -type IndexNFTS bool diff --git a/packages/eventindexer/generator/config.go b/packages/eventindexer/generator/config.go new file mode 100644 index 00000000000..1cf9ec83675 --- /dev/null +++ b/packages/eventindexer/generator/config.go @@ -0,0 +1,73 @@ +package generator + +import ( + "database/sql" + "time" + + "github.com/taikoxyz/taiko-mono/packages/eventindexer/cmd/flags" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/db" + "github.com/urfave/cli/v2" + "gorm.io/driver/mysql" + "gorm.io/gorm" + "gorm.io/gorm/logger" +) + +type DB interface { + DB() (*sql.DB, error) + GormDB() 
*gorm.DB +} + +type Config struct { + // db configs + DatabaseUsername string + DatabasePassword string + DatabaseName string + DatabaseHost string + DatabaseMaxIdleConns uint64 + DatabaseMaxOpenConns uint64 + DatabaseMaxConnLifetime uint64 + MetricsHTTPPort uint64 + GenesisDate time.Time + OpenDBFunc func() (DB, error) +} + +// NewConfigFromCliContext creates a new config instance from command line flags. +func NewConfigFromCliContext(c *cli.Context) (*Config, error) { + date, err := time.Parse("2006-01-02", c.String(flags.GenesisDate.Name)) + if err != nil { + return nil, err + } + + return &Config{ + DatabaseUsername: c.String(flags.DatabaseUsername.Name), + DatabasePassword: c.String(flags.DatabasePassword.Name), + DatabaseName: c.String(flags.DatabaseName.Name), + DatabaseHost: c.String(flags.DatabaseHost.Name), + DatabaseMaxIdleConns: c.Uint64(flags.DatabaseMaxIdleConns.Name), + DatabaseMaxOpenConns: c.Uint64(flags.DatabaseMaxOpenConns.Name), + DatabaseMaxConnLifetime: c.Uint64(flags.DatabaseConnMaxLifetime.Name), + MetricsHTTPPort: c.Uint64(flags.MetricsHTTPPort.Name), + GenesisDate: date, + OpenDBFunc: func() (DB, error) { + return db.OpenDBConnection(db.DBConnectionOpts{ + Name: c.String(flags.DatabaseUsername.Name), + Password: c.String(flags.DatabasePassword.Name), + Database: c.String(flags.DatabaseName.Name), + Host: c.String(flags.DatabaseHost.Name), + MaxIdleConns: c.Uint64(flags.DatabaseMaxIdleConns.Name), + MaxOpenConns: c.Uint64(flags.DatabaseMaxOpenConns.Name), + MaxConnLifetime: c.Uint64(flags.DatabaseConnMaxLifetime.Name), + OpenFunc: func(dsn string) (*db.DB, error) { + gormDB, err := gorm.Open(mysql.Open(dsn), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Silent), + }) + if err != nil { + return nil, err + } + + return db.New(gormDB), nil + }, + }) + }, + }, nil +} diff --git a/packages/eventindexer/generator/config_test.go b/packages/eventindexer/generator/config_test.go new file mode 100644 index 00000000000..2731d49fb0f --- 
/dev/null +++ b/packages/eventindexer/generator/config_test.go @@ -0,0 +1,56 @@ +package generator + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/cmd/flags" + "github.com/urfave/cli/v2" +) + +func setupApp() *cli.App { + app := cli.NewApp() + app.Flags = flags.GeneratorFlags + app.Action = func(ctx *cli.Context) error { + _, err := NewConfigFromCliContext(ctx) + return err + } + + return app +} + +func TestNewConfigFromCliContext(t *testing.T) { + app := setupApp() + + app.Action = func(ctx *cli.Context) error { + c, err := NewConfigFromCliContext(ctx) + + assert.Nil(t, err) + assert.Equal(t, "dbuser", c.DatabaseUsername) + assert.Equal(t, "dbpass", c.DatabasePassword) + assert.Equal(t, "dbname", c.DatabaseName) + assert.Equal(t, "dbhost", c.DatabaseHost) + + wantTime, _ := time.Parse("2006-01-02", "2023-07-07") + assert.Equal(t, wantTime, c.GenesisDate) + + c.OpenDBFunc = func() (DB, error) { + return nil, nil + } + + assert.Nil(t, InitFromConfig(context.Background(), new(Generator), c)) + + return err + } + + assert.Nil(t, app.Run([]string{ + "TestNewConfigFromCliContext", + "-" + flags.DatabaseUsername.Name, "dbuser", + "-" + flags.DatabasePassword.Name, "dbpass", + "-" + flags.DatabaseHost.Name, "dbhost", + "-" + flags.DatabaseName.Name, "dbname", + "-" + flags.GenesisDate.Name, "2023-07-07", + })) +} diff --git a/packages/eventindexer/generator/generator.go b/packages/eventindexer/generator/generator.go new file mode 100644 index 00000000000..0b933732148 --- /dev/null +++ b/packages/eventindexer/generator/generator.go @@ -0,0 +1,391 @@ +package generator + +import ( + "context" + "errors" + "log/slog" + "strconv" + "syscall" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/taikoxyz/taiko-mono/packages/eventindexer" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/tasks" + "github.com/urfave/cli/v2" +) + +var ( + ZeroAddress = 
common.HexToAddress("0x0000000000000000000000000000000000000000") +) + +// Generator is a subcommand which is intended to be run on an interval, like +// a cronjob, to parse the indexed data from the database, and generate +// time series data that can easily be displayed via charting libraries. +type Generator struct { + db DB + genesisDate time.Time +} + +func (g *Generator) InitFromCli(ctx context.Context, c *cli.Context) error { + config, err := NewConfigFromCliContext(c) + if err != nil { + return err + } + + return InitFromConfig(ctx, g, config) +} + +func InitFromConfig(ctx context.Context, g *Generator, cfg *Config) error { + db, err := cfg.OpenDBFunc() + if err != nil { + return err + } + + g.db = db + g.genesisDate = cfg.GenesisDate + + return nil +} + +func (g *Generator) Name() string { + return "generator" +} + +func (g *Generator) Start() error { + slog.Info("generating time series data") + + if err := g.generateTimeSeriesData(context.Background()); err != nil { + return err + } + + if err := syscall.Kill(syscall.Getpid(), syscall.SIGTERM); err != nil { + return err + } + + return nil +} + +func (g *Generator) Close(ctx context.Context) { + sqlDB, err := g.db.DB() + if err != nil { + slog.Error("error getting sqldb when closing generator", "err", err.Error()) + } + + if err := sqlDB.Close(); err != nil { + slog.Error("error closing sqlbd connecting", "err", err.Error()) + } +} + +// generateTimeSeriesData iterates over each task and generates time series data. +func (g *Generator) generateTimeSeriesData(ctx context.Context) error { + for _, task := range tasks.Tasks { + if err := g.generateByTask(ctx, task); err != nil { + slog.Error("error generating for task", "task", task, "error", err.Error()) + return err + } + } + + return nil +} + +// generateByTask generates time series data for each day in between the current date +// and the most recently generated time series data, for the given task. 
+func (g *Generator) generateByTask(ctx context.Context, task string) error { + slog.Info("generating for task", "task", task) + + latestDate, err := g.getLatestDateByTask(ctx, task) + if err != nil { + return err + } + + currentDate := g.getCurrentDate() + if latestDate.AddDate(0, 0, 1).Compare(currentDate) == 0 { + slog.Info("data already generated up-to-date for task", "task", task, "date", latestDate.Format("2006-01-02")) + return nil + } + + // Loop through each date from latestDate to currentDate + for d := latestDate; d.Before(currentDate); d = d.AddDate(0, 0, 1) { + slog.Info("Processing", "task", task, "date", d.Format("2006-01-02"), "currentDate", currentDate.Format("2006-01-02")) + + result, err := g.queryByTask(task, d) + if err != nil { + slog.Info("Query failed", "task", task, "date", d.Format("2006-01-02"), "error", err.Error()) + return err + } + + slog.Info("Query successful", "task", task, "date", d.Format("2006-01-02"), "result", result) + + insertStmt := ` + INSERT INTO time_series_data(task, value, date) + VALUES (?, ?, ?)` + + err = g.db.GormDB().Exec(insertStmt, task, result, d.Format("2006-01-02")).Error + if err != nil { + slog.Info("Insert failed", "task", task, "date", d.Format("2006-01-02"), "error", err.Error()) + return err + } + + slog.Info("Processed", "task", task, "date", d.Format("2006-01-02")) + } + + return nil +} + +// getLatestDateByTask returns the last time time series data has been generated +// for the given task. +func (g *Generator) getLatestDateByTask(ctx context.Context, task string) (time.Time, error) { + var latestDateString string + + var latestDate time.Time + + q := `SELECT date FROM time_series_data WHERE task = ? 
ORDER BY date DESC LIMIT 1;` + + err := g.db.GormDB().Raw(q, task).Scan(&latestDateString).Error + + slog.Info("latestDateString", "task", task, "date", latestDateString) + + if err != nil || latestDateString == "" { + latestDate = g.genesisDate + } else { + latestDate, err = time.Parse("2006-01-02", latestDateString) + } + + if err != nil { + return time.Time{}, err + } + + slog.Info("latest date for task", "task", task, "latestDate", latestDate.Format("2006-01-02")) + + return latestDate, nil +} + +// getCurrentDate returns the current date in YYYY-MM-DD format +func (g *Generator) getCurrentDate() time.Time { + // Get current date + currentTime := time.Now() + currentDate := time.Date(currentTime.Year(), currentTime.Month(), currentTime.Day(), 0, 0, 0, 0, time.UTC) + + return currentDate +} + +// nolint: funlen +// queryByTask runs a database query which should return result data based on the +// task +func (g *Generator) queryByTask(task string, date time.Time) (string, error) { + dateString := date.Format("2006-01-02") + + var result string + + var err error + + switch task { + case tasks.BridgeMessagesSentPerDay: + err = g.eventCount(task, date, eventindexer.EventNameMessageSent, &result) + case tasks.TotalBridgeMessagesSent: + var dailyMsgSentCount int + + err = g.eventCount(task, date, eventindexer.EventNameMessageSent, &dailyMsgSentCount) + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyMsgSentCount + tsdResult) + case tasks.ProposeBlockTxPerDay: + err = g.eventCount(task, date, eventindexer.EventNameBlockProposed, &result) + case tasks.TotalProposeBlockTx: + var dailyProposerCount int + + err = g.eventCount(task, date, eventindexer.EventNameBlockProposed, &dailyProposerCount) + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = 
strconv.Itoa(dailyProposerCount + tsdResult) + case tasks.UniqueProposersPerDay: + query := "SELECT COUNT(DISTINCT address) FROM events WHERE event = ? AND DATE(transacted_at) = ?" + err = g.db.GormDB(). + Raw(query, eventindexer.EventNameBlockProposed, date). + Scan(&result).Error + case tasks.TotalUniqueProposers: + var dailyProposerCount int + + query := `SELECT COUNT(DISTINCT address) FROM events WHERE event = ? AND DATE(transacted_at) = ?` + + err = g.db.GormDB().Raw(query, eventindexer.EventNameBlockProposed, dateString).Scan(&dailyProposerCount).Error + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyProposerCount + tsdResult) + case tasks.UniqueProversPerDay: + query := "SELECT COUNT(DISTINCT address) FROM events WHERE event = ? AND DATE(transacted_at) = ?" + err = g.db.GormDB(). + Raw(query, eventindexer.EventNameBlockProven, date). + Scan(&result).Error + case tasks.TotalUniqueProvers: + var dailyProposerCount int + + query := `SELECT COUNT(DISTINCT address) FROM events WHERE event = ? AND DATE(transacted_at) = ?` + + err = g.db.GormDB().Raw(query, eventindexer.EventNameBlockProven, dateString).Scan(&dailyProposerCount).Error + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyProposerCount + tsdResult) + case tasks.ProveBlockTxPerDay: + query := "SELECT COUNT(*) FROM events WHERE event = ? AND DATE(transacted_at) = ?" + err = g.db.GormDB(). + Raw(query, eventindexer.EventNameBlockProven, date). + Scan(&result).Error + case tasks.TotalProveBlockTx: + var dailyProposerCount int + + query := `SELECT COUNT(*) FROM events WHERE event = ? 
AND DATE(transacted_at) = ?` + + err = g.db.GormDB().Raw(query, eventindexer.EventNameBlockProven, dateString).Scan(&dailyProposerCount).Error + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyProposerCount + tsdResult) + case tasks.AccountsPerDay: + query := `SELECT COUNT(*) FROM accounts WHERE DATE(transacted_at) = ?` + err = g.db.GormDB().Raw(query, dateString).Scan(&result).Error + case tasks.TotalAccounts: + var dailyAccountsCount int + + query := `SELECT COUNT(*) FROM accounts WHERE DATE(transacted_at) = ?` + + err = g.db.GormDB().Raw(query, dateString).Scan(&dailyAccountsCount).Error + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyAccountsCount + tsdResult) + case tasks.BlocksPerDay: + query := `SELECT COUNT(*) FROM blocks WHERE DATE(transacted_at) = ?` + err = g.db.GormDB().Raw(query, dateString).Scan(&result).Error + case tasks.TotalBlocks: + var dailyBlockCount int + + query := `SELECT COUNT(*) FROM blocks WHERE DATE(transacted_at) = ?` + + err = g.db.GormDB().Raw(query, dateString).Scan(&dailyBlockCount).Error + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyBlockCount + tsdResult) + case tasks.TransactionsPerDay: + query := `SELECT COUNT(*) FROM transactions WHERE DATE(transacted_at) = ?` + err = g.db.GormDB().Raw(query, dateString).Scan(&result).Error + case tasks.TotalTransactions: + var dailyTxCount int + + // get current days txs, get previous entry for the time series data, add them together. 
+ + query := `SELECT COUNT(*) FROM transactions WHERE DATE(transacted_at) = ?` + + err = g.db.GormDB().Raw(query, dateString).Scan(&dailyTxCount).Error + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyTxCount + tsdResult) + case tasks.ContractDeploymentsPerDay: + query := `SELECT COUNT(*) FROM transactions WHERE DATE(transacted_at) = ? AND contract_address != ?` + err = g.db.GormDB().Raw(query, dateString, ZeroAddress).Scan(&result).Error + case tasks.TotalContractDeployments: + var dailyContractCount int + + // get current days txs, get previous entry for the time series data, add them together. + query := `SELECT COUNT(*) FROM transactions WHERE DATE(transacted_at) = ? AND contract_address != ?` + + err = g.db.GormDB().Raw(query, dateString, ZeroAddress).Scan(&dailyContractCount).Error + if err != nil { + return "", err + } + + tsdResult, err := g.previousDayTsdResultByTask(task, date) + if err != nil { + return "", err + } + + result = strconv.Itoa(dailyContractCount + tsdResult) + default: + return "", errors.New("task not supported") + } + + if err != nil { + return "", err + } + + return result, nil +} + +// previousDayTsdResultByTask returns the previous day's time series data, based on +// task and time passed in. +func (g *Generator) previousDayTsdResultByTask(task string, date time.Time) (int, error) { + var tsdResult int + + tsdQuery := `SELECT value FROM time_series_data WHERE task = ? AND date = ?` + + err := g.db.GormDB().Raw(tsdQuery, task, date.AddDate(0, 0, -1).Format("2006-01-02")).Scan(&tsdResult).Error + if err != nil { + return 0, err + } + + return tsdResult, nil +} + +// eventCount is a helper method to query the database for the count of a specific event +// based on the date. 
+func (g *Generator) eventCount(task string, date time.Time, event string, result interface{}) error { + query := "SELECT COUNT(*) FROM events WHERE event = ? AND DATE(transacted_at) = ?" + + return g.db.GormDB(). + Raw(query, event, date). + Scan(result).Error +} diff --git a/packages/eventindexer/http/cache.go b/packages/eventindexer/http/cache.go index e9bbdd41bfc..5d6143d6768 100644 --- a/packages/eventindexer/http/cache.go +++ b/packages/eventindexer/http/cache.go @@ -1,9 +1,10 @@ package http var ( - CacheKeyUniqueProposers = "unique-proposers" - CacheKeyUniqueProvers = "unique-provers" - CacheKeyStats = "stats" - CacheKeyPOSStats = "pos-stats" - CacheKeyCurrentProvers = "current-provers" + CacheKeyUniqueProposers = "unique-proposers" + CacheKeyUniqueProvers = "unique-provers" + CacheKeyStats = "stats" + CacheKeyPOSStats = "pos-stats" + CacheKeyCurrentProvers = "current-provers" + CacheKeyTotalTransactions = "total-transactions" ) diff --git a/packages/eventindexer/http/get_by_address_and_event_test.go b/packages/eventindexer/http/get_by_address_and_event_test.go index 526a3679d9a..8923252bd60 100644 --- a/packages/eventindexer/http/get_by_address_and_event_test.go +++ b/packages/eventindexer/http/get_by_address_and_event_test.go @@ -7,6 +7,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/cyberhorsey/webutils/testutils" "github.com/labstack/echo/v4" @@ -18,11 +19,12 @@ func Test_GetByAddressAndEvent(t *testing.T) { srv := newTestServer("") _, err := srv.eventRepo.Save(context.Background(), eventindexer.SaveEventOpts{ - Name: "name", - Data: `{"Owner": "0x0000000000000000000000000000000000000123"}`, - ChainID: big.NewInt(167001), - Address: "0x123", - Event: eventindexer.EventNameBlockProposed, + Name: "name", + Data: `{"Owner": "0x0000000000000000000000000000000000000123"}`, + ChainID: big.NewInt(167001), + Address: "0x123", + Event: eventindexer.EventNameBlockProposed, + TransactedAt: time.Now(), }) assert.Equal(t, nil, err) diff 
--git a/packages/eventindexer/http/get_chart_by_task.go b/packages/eventindexer/http/get_chart_by_task.go new file mode 100644 index 00000000000..94dc06ef900 --- /dev/null +++ b/packages/eventindexer/http/get_chart_by_task.go @@ -0,0 +1,36 @@ +package http + +import ( + "net/http" + + "github.com/cyberhorsey/webutils" + "github.com/labstack/echo/v4" + "github.com/patrickmn/go-cache" + "github.com/taikoxyz/taiko-mono/packages/eventindexer" +) + +func (srv *Server) GetChartByTask(c echo.Context) error { + cached, found := srv.cache.Get(c.QueryParam("task")) + + var chart *eventindexer.ChartResponse + + var err error + + if found { + chart = cached.(*eventindexer.ChartResponse) + } else { + chart, err = srv.chartRepo.Find( + c.Request().Context(), + c.QueryParam("task"), + c.QueryParam("start"), + c.QueryParam("end"), + ) + if err != nil { + return webutils.LogAndRenderErrors(c, http.StatusUnprocessableEntity, err) + } + + srv.cache.Set(c.QueryParam("task"), chart, cache.DefaultExpiration) + } + + return c.JSON(http.StatusOK, chart) +} diff --git a/packages/eventindexer/http/get_count_by_address_and_event_test.go b/packages/eventindexer/http/get_count_by_address_and_event_test.go index bbcacc9ecfd..6a3d6819c62 100644 --- a/packages/eventindexer/http/get_count_by_address_and_event_test.go +++ b/packages/eventindexer/http/get_count_by_address_and_event_test.go @@ -7,6 +7,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/cyberhorsey/webutils/testutils" "github.com/labstack/echo/v4" @@ -18,11 +19,12 @@ func Test_GetCountByAddressAndEvent(t *testing.T) { srv := newTestServer("") _, err := srv.eventRepo.Save(context.Background(), eventindexer.SaveEventOpts{ - Name: "name", - Data: `{"Owner": "0x0000000000000000000000000000000000000123"}`, - ChainID: big.NewInt(167001), - Address: "0x123", - Event: eventindexer.EventNameBlockProposed, + Name: "name", + Data: `{"Owner": "0x0000000000000000000000000000000000000123"}`, + ChainID: big.NewInt(167001), + 
Address: "0x123", + Event: eventindexer.EventNameBlockProposed, + TransactedAt: time.Now(), }) assert.Equal(t, nil, err) diff --git a/packages/eventindexer/http/get_unique_proposers_test.go b/packages/eventindexer/http/get_unique_proposers_test.go index 709a9607806..bdfcfcb6dcc 100644 --- a/packages/eventindexer/http/get_unique_proposers_test.go +++ b/packages/eventindexer/http/get_unique_proposers_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/cyberhorsey/webutils/testutils" "github.com/labstack/echo/v4" @@ -17,11 +18,12 @@ func Test_GetUniqueProposers(t *testing.T) { srv := newTestServer("") _, err := srv.eventRepo.Save(context.Background(), eventindexer.SaveEventOpts{ - Name: "name", - Data: `{"Owner": "0x0000000000000000000000000000000000000123"}`, - ChainID: big.NewInt(167001), - Address: "0x123", - Event: eventindexer.EventNameBlockProposed, + Name: "name", + Data: `{"Owner": "0x0000000000000000000000000000000000000123"}`, + ChainID: big.NewInt(167001), + Address: "0x123", + Event: eventindexer.EventNameBlockProposed, + TransactedAt: time.Now(), }) assert.Equal(t, nil, err) diff --git a/packages/eventindexer/http/get_unique_provers_test.go b/packages/eventindexer/http/get_unique_provers_test.go index e6bf5bc426b..e0927b648be 100644 --- a/packages/eventindexer/http/get_unique_provers_test.go +++ b/packages/eventindexer/http/get_unique_provers_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/cyberhorsey/webutils/testutils" "github.com/labstack/echo/v4" @@ -17,11 +18,12 @@ func Test_GetUniqueProvers(t *testing.T) { srv := newTestServer("") _, err := srv.eventRepo.Save(context.Background(), eventindexer.SaveEventOpts{ - Name: "name", - Data: `{"Owner": "0x0000000000000000000000000000000000000123"}`, - ChainID: big.NewInt(167001), - Address: "0x123", - Event: eventindexer.EventNameBlockProven, + Name: "name", + Data: `{"Owner": 
"0x0000000000000000000000000000000000000123"}`, + ChainID: big.NewInt(167001), + Address: "0x123", + Event: eventindexer.EventNameBlockProven, + TransactedAt: time.Now(), }) assert.Equal(t, nil, err) diff --git a/packages/eventindexer/http/routes.go b/packages/eventindexer/http/routes.go index e89d1bc5760..34592124822 100644 --- a/packages/eventindexer/http/routes.go +++ b/packages/eventindexer/http/routes.go @@ -19,4 +19,8 @@ func (srv *Server) configureRoutes() { galaxeAPI.GET("/user-bridged", srv.UserBridged) galaxeAPI.GET("/user-swapped-on-taiko", srv.UserSwappedOnTaiko) galaxeAPI.GET("/user-added-liquidity", srv.UserAddedLiquidity) + + chartAPI := srv.echo.Group("/chart") + + chartAPI.GET("/chartByTask", srv.GetChartByTask) } diff --git a/packages/eventindexer/http/server.go b/packages/eventindexer/http/server.go index 241ffa4b417..501d67b1538 100644 --- a/packages/eventindexer/http/server.go +++ b/packages/eventindexer/http/server.go @@ -2,7 +2,6 @@ package http import ( "context" - "fmt" "net/http" "os" "time" @@ -12,7 +11,6 @@ import ( "github.com/patrickmn/go-cache" "github.com/taikoxyz/taiko-mono/packages/eventindexer" - echoprom "github.com/labstack/echo-contrib/prometheus" echo "github.com/labstack/echo/v4" ) @@ -21,6 +19,7 @@ type Server struct { eventRepo eventindexer.EventRepository statRepo eventindexer.StatRepository nftBalanceRepo eventindexer.NFTBalanceRepository + chartRepo eventindexer.ChartRepository cache *cache.Cache } @@ -29,6 +28,7 @@ type NewServerOpts struct { EventRepo eventindexer.EventRepository StatRepo eventindexer.StatRepository NFTBalanceRepo eventindexer.NFTBalanceRepository + ChartRepo eventindexer.ChartRepository EthClient *ethclient.Client CorsOrigins []string } @@ -69,6 +69,7 @@ func NewServer(opts NewServerOpts) (*Server, error) { eventRepo: opts.EventRepo, statRepo: opts.StatRepo, nftBalanceRepo: opts.NFTBalanceRepo, + chartRepo: opts.ChartRepo, cache: cache, } @@ -131,18 +132,4 @@ func (srv *Server) 
configureMiddleware(corsOrigins []string) { AllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept}, AllowMethods: []string{http.MethodGet, http.MethodHead}, })) - - srv.configureAndStartPrometheus() -} - -func (srv *Server) configureAndStartPrometheus() { - // Enable metrics middleware - p := echoprom.NewPrometheus("echo", nil) - p.Use(srv.echo) - e := echo.New() - p.SetMetricsPath(e) - - go func() { - _ = e.Start(fmt.Sprintf(":%v", os.Getenv("PROMETHEUS_HTTP_PORT"))) - }() } diff --git a/packages/eventindexer/http/server_test.go b/packages/eventindexer/http/server_test.go index 0ef682716ff..6204f6cc8f0 100644 --- a/packages/eventindexer/http/server_test.go +++ b/packages/eventindexer/http/server_test.go @@ -29,7 +29,6 @@ func newTestServer(url string) *Server { srv.configureMiddleware([]string{"*"}) srv.configureRoutes() - srv.configureAndStartPrometheus() return srv } @@ -135,19 +134,6 @@ func Test_Root(t *testing.T) { } } -func Test_Metrics(t *testing.T) { - srv := newTestServer("") - - req, _ := http.NewRequest(echo.GET, "/metrics", nil) - rec := httptest.NewRecorder() - - srv.ServeHTTP(rec, req) - - if rec.Code != http.StatusOK { - t.Fatalf("Test_Metrics expected code %v, got %v", http.StatusOK, rec.Code) - } -} - func Test_StartShutdown(t *testing.T) { srv := newTestServer("") diff --git a/packages/eventindexer/indexer/config.go b/packages/eventindexer/indexer/config.go new file mode 100644 index 00000000000..10279990797 --- /dev/null +++ b/packages/eventindexer/indexer/config.go @@ -0,0 +1,109 @@ +package indexer + +import ( + "database/sql" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/cmd/flags" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/db" + "github.com/urfave/cli/v2" + "gorm.io/driver/mysql" + "gorm.io/gorm" + "gorm.io/gorm/logger" +) + +type DB interface { + DB() (*sql.DB, error) + GormDB() *gorm.DB +} + +type Config struct { + // db configs + 
DatabaseUsername string + DatabasePassword string + DatabaseName string + DatabaseHost string + DatabaseMaxIdleConns uint64 + DatabaseMaxOpenConns uint64 + DatabaseMaxConnLifetime uint64 + RPCUrl string + HTTPPort uint64 + MetricsHTTPPort uint64 + ETHClientTimeout uint64 + L1TaikoAddress common.Address + BridgeAddress common.Address + SwapAddresses []common.Address + CORSOrigins []string + BlockBatchSize uint64 + Subscriptionbackoff uint64 + SyncMode SyncMode + WatchMode WatchMode + IndexNFTs bool + Layer string + OpenDBFunc func() (DB, error) +} + +// NewConfigFromCliContext creates a new config instance from command line flags. +func NewConfigFromCliContext(c *cli.Context) (*Config, error) { + // swapAddresses is a comma-delinated list of addresses to index, so we need to + // parse that from a single string. + swapAddresses := strings.Split(c.String(flags.SwapAddresses.Name), ",") + + swaps := make([]common.Address, 0) + + for _, v := range swapAddresses { + swaps = append(swaps, common.HexToAddress(v)) + } + + // and the same for CORS origins + + cors := make([]string, 0) + + cors = append(cors, strings.Split(c.String(flags.CORSOrigins.Name), ",")...) 
+ + return &Config{ + DatabaseUsername: c.String(flags.DatabaseUsername.Name), + DatabasePassword: c.String(flags.DatabasePassword.Name), + DatabaseName: c.String(flags.DatabaseName.Name), + DatabaseHost: c.String(flags.DatabaseHost.Name), + DatabaseMaxIdleConns: c.Uint64(flags.DatabaseMaxIdleConns.Name), + DatabaseMaxOpenConns: c.Uint64(flags.DatabaseMaxOpenConns.Name), + DatabaseMaxConnLifetime: c.Uint64(flags.DatabaseConnMaxLifetime.Name), + HTTPPort: c.Uint64(flags.HTTPPort.Name), + MetricsHTTPPort: c.Uint64(flags.MetricsHTTPPort.Name), + ETHClientTimeout: c.Uint64(flags.ETHClientTimeout.Name), + L1TaikoAddress: common.HexToAddress(c.String(flags.L1TaikoAddress.Name)), + BridgeAddress: common.HexToAddress(c.String(flags.BridgeAddress.Name)), + SwapAddresses: swaps, + CORSOrigins: cors, + BlockBatchSize: c.Uint64(flags.BlockBatchSize.Name), + Subscriptionbackoff: c.Uint64(flags.SubscriptionBackoff.Name), + RPCUrl: c.String(flags.RPCUrl.Name), + WatchMode: WatchMode(c.String(flags.WatchMode.Name)), + SyncMode: SyncMode(c.String(flags.SyncMode.Name)), + IndexNFTs: c.Bool(flags.IndexNFTs.Name), + Layer: c.String(flags.Layer.Name), + OpenDBFunc: func() (DB, error) { + return db.OpenDBConnection(db.DBConnectionOpts{ + Name: c.String(flags.DatabaseUsername.Name), + Password: c.String(flags.DatabasePassword.Name), + Database: c.String(flags.DatabaseName.Name), + Host: c.String(flags.DatabaseHost.Name), + MaxIdleConns: c.Uint64(flags.DatabaseMaxIdleConns.Name), + MaxOpenConns: c.Uint64(flags.DatabaseMaxOpenConns.Name), + MaxConnLifetime: c.Uint64(flags.DatabaseConnMaxLifetime.Name), + OpenFunc: func(dsn string) (*db.DB, error) { + gormDB, err := gorm.Open(mysql.Open(dsn), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Silent), + }) + if err != nil { + return nil, err + } + + return db.New(gormDB), nil + }, + }) + }, + }, nil +} diff --git a/packages/eventindexer/indexer/config_test.go b/packages/eventindexer/indexer/config_test.go new file mode 100644 index 
00000000000..cc59a482030 --- /dev/null +++ b/packages/eventindexer/indexer/config_test.go @@ -0,0 +1,66 @@ +package indexer + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/cmd/flags" + "github.com/urfave/cli/v2" +) + +var ( + httpPort = "1000" + metricsHttpPort = "1001" + l1TaikoAddress = "0x63FaC9201494f0bd17B9892B9fae4d52fe3BD377" + bridgeAddress = "0x73FaC9201494f0bd17B9892B9fae4d52fe3BD377" + swapAddresses = "0x33FaC9201494f0bd17B9892B9fae4d52fe3BD377,0x13FaC9201494f0bd17B9892B9fae4d52fe3BD377" +) + +func setupApp() *cli.App { + app := cli.NewApp() + app.Flags = flags.IndexerFlags + app.Action = func(ctx *cli.Context) error { + _, err := NewConfigFromCliContext(ctx) + return err + } + + return app +} + +func TestNewConfigFromCliContext(t *testing.T) { + app := setupApp() + + app.Action = func(ctx *cli.Context) error { + c, err := NewConfigFromCliContext(ctx) + + assert.Nil(t, err) + assert.Equal(t, "dbuser", c.DatabaseUsername) + assert.Equal(t, "dbpass", c.DatabasePassword) + assert.Equal(t, "dbname", c.DatabaseName) + assert.Equal(t, "dbhost", c.DatabaseHost) + assert.Equal(t, "rpcUrl", c.RPCUrl) + assert.Equal(t, uint64(1000), c.HTTPPort) + assert.Equal(t, uint64(1001), c.MetricsHTTPPort) + assert.Equal(t, common.HexToAddress(l1TaikoAddress), c.L1TaikoAddress) + assert.Equal(t, common.HexToAddress(bridgeAddress), c.BridgeAddress) + + // assert.Nil(t, InitFromConfig(context.Background(), new(Indexer), c)) + + return err + } + + assert.Nil(t, app.Run([]string{ + "TestNewConfigFromCliContext", + "-" + flags.DatabaseUsername.Name, "dbuser", + "-" + flags.DatabasePassword.Name, "dbpass", + "-" + flags.DatabaseHost.Name, "dbhost", + "-" + flags.DatabaseName.Name, "dbname", + "-" + flags.RPCUrl.Name, "rpcUrl", + "-" + flags.L1TaikoAddress.Name, l1TaikoAddress, + "-" + flags.BridgeAddress.Name, bridgeAddress, + "-" + flags.SwapAddresses.Name, 
swapAddresses, + "-" + flags.HTTPPort.Name, httpPort, + "-" + flags.MetricsHTTPPort.Name, metricsHttpPort, + })) +} diff --git a/packages/eventindexer/indexer/detect_and_handle_reorg.go b/packages/eventindexer/indexer/detect_and_handle_reorg.go index bc61ee7807f..2ae534a2369 100644 --- a/packages/eventindexer/indexer/detect_and_handle_reorg.go +++ b/packages/eventindexer/indexer/detect_and_handle_reorg.go @@ -6,15 +6,15 @@ import ( "github.com/pkg/errors" ) -func (svc *Service) detectAndHandleReorg(ctx context.Context, event string, blockID int64) error { - existingEvent, err := svc.eventRepo.FindByEventTypeAndBlockID(ctx, event, blockID) +func (indx *Indexer) detectAndHandleReorg(ctx context.Context, event string, blockID int64) error { + existingEvent, err := indx.eventRepo.FindByEventTypeAndBlockID(ctx, event, blockID) if err != nil { return errors.Wrap(err, "svc.eventRepo.FindByEventTypeAndBlockID") } if existingEvent != nil { // reorg detected - err := svc.eventRepo.Delete(ctx, existingEvent.ID) + err := indx.eventRepo.Delete(ctx, existingEvent.ID) if err != nil { return errors.Wrap(err, "svc.eventRepo.Delete") } diff --git a/packages/eventindexer/indexer/filter.go b/packages/eventindexer/indexer/filter.go index 9ff50bc2a86..8fe6a80fb9a 100644 --- a/packages/eventindexer/indexer/filter.go +++ b/packages/eventindexer/indexer/filter.go @@ -14,160 +14,130 @@ import ( type FilterFunc func( ctx context.Context, chainID *big.Int, - svc *Service, + indxr *Indexer, filterOpts *bind.FilterOpts, ) error // nolint -func L1FilterFunc( +func filterFunc( ctx context.Context, chainID *big.Int, - svc *Service, + indxr *Indexer, filterOpts *bind.FilterOpts, ) error { wg, ctx := errgroup.WithContext(ctx) - if svc.taikol1 != nil { + if indxr.taikol1 != nil { wg.Go(func() error { - blockProvenEvents, err := svc.taikol1.FilterBlockProven(filterOpts, nil) + blockProvenEvents, err := indxr.taikol1.FilterBlockProven(filterOpts, nil) if err != nil { - return errors.Wrap(err, 
"svc.taikol1.FilterBlockProven") + return errors.Wrap(err, "indxr.taikol1.FilterBlockProven") } - err = svc.saveBlockProvenEvents(ctx, chainID, blockProvenEvents) + err = indxr.saveBlockProvenEvents(ctx, chainID, blockProvenEvents) if err != nil { - return errors.Wrap(err, "svc.saveBlockProvenEvents") + return errors.Wrap(err, "indxr.saveBlockProvenEvents") } return nil }) wg.Go(func() error { - blockProposedEvents, err := svc.taikol1.FilterBlockProposed(filterOpts, nil, nil) + blockProposedEvents, err := indxr.taikol1.FilterBlockProposed(filterOpts, nil, nil) if err != nil { - return errors.Wrap(err, "svc.taikol1.FilterBlockProposed") + return errors.Wrap(err, "indxr.taikol1.FilterBlockProposed") } - err = svc.saveBlockProposedEvents(ctx, chainID, blockProposedEvents) + err = indxr.saveBlockProposedEvents(ctx, chainID, blockProposedEvents) if err != nil { - return errors.Wrap(err, "svc.saveBlockProposedEvents") + return errors.Wrap(err, "indxr.saveBlockProposedEvents") } return nil }) wg.Go(func() error { - blockVerifiedEvents, err := svc.taikol1.FilterBlockVerified(filterOpts, nil, nil) + blockVerifiedEvents, err := indxr.taikol1.FilterBlockVerified(filterOpts, nil, nil) if err != nil { - return errors.Wrap(err, "svc.taikol1.FilterBlockVerified") + return errors.Wrap(err, "indxr.taikol1.FilterBlockVerified") } - err = svc.saveBlockVerifiedEvents(ctx, chainID, blockVerifiedEvents) + err = indxr.saveBlockVerifiedEvents(ctx, chainID, blockVerifiedEvents) if err != nil { - return errors.Wrap(err, "svc.saveBlockVerifiedEvents") + return errors.Wrap(err, "indxr.saveBlockVerifiedEvents") } return nil }) } - if svc.bridge != nil { + if indxr.bridge != nil { wg.Go(func() error { - messagesSent, err := svc.bridge.FilterMessageSent(filterOpts, nil) + messagesSent, err := indxr.bridge.FilterMessageSent(filterOpts, nil) if err != nil { - return errors.Wrap(err, "svc.bridge.FilterMessageSent") + return errors.Wrap(err, "indxr.bridge.FilterMessageSent") } - err = 
svc.saveMessageSentEvents(ctx, chainID, messagesSent) + err = indxr.saveMessageSentEvents(ctx, chainID, messagesSent) if err != nil { - return errors.Wrap(err, "svc.saveMessageSentEvents") + return errors.Wrap(err, "indxr.saveMessageSentEvents") } return nil }) } - if svc.indexNfts { - wg.Go(func() error { - if err := svc.indexNFTTransfers(ctx, chainID, filterOpts.Start, *filterOpts.End); err != nil { - return errors.Wrap(err, "svc.indexNFTTransfers") - } - return nil - }) - } - - err := wg.Wait() - - if err != nil { - if errors.Is(err, context.Canceled) { - slog.Error("context cancelled") - return err + if indxr.swaps != nil { + for _, s := range indxr.swaps { + swap := s + + wg.Go(func() error { + swaps, err := swap.FilterSwap(filterOpts, nil, nil) + if err != nil { + return errors.Wrap(err, "indxr.bridge.FilterSwap") + } + + // only save ones above 0.01 ETH, this is only for Galaxe + // and we dont care about the rest + err = indxr.saveSwapEvents(ctx, chainID, swaps) + if err != nil { + return errors.Wrap(err, "indxr.saveSwapEvents") + } + + return nil + }) + + wg.Go(func() error { + liquidityAdded, err := swap.FilterMint(filterOpts, nil) + + if err != nil { + return errors.Wrap(err, "indxr.bridge.FilterMint") + } + + // only save ones above 0.1 ETH, this is only for Galaxe + // and we dont care about the rest + err = indxr.saveLiquidityAddedEvents(ctx, chainID, liquidityAdded) + if err != nil { + return errors.Wrap(err, "indxr.saveLiquidityAddedEvents") + } + + return nil + }) } - - return err - } - - return nil -} - -func L2FilterFunc( - ctx context.Context, - chainID *big.Int, - svc *Service, - filterOpts *bind.FilterOpts, -) error { - wg, ctx := errgroup.WithContext(ctx) - - for _, s := range svc.swaps { - swap := s - - wg.Go(func() error { - swaps, err := swap.FilterSwap(filterOpts, nil, nil) - if err != nil { - return errors.Wrap(err, "svc.bridge.FilterSwap") - } - - // only save ones above 0.01 ETH, this is only for Galaxe - // and we dont care about the 
rest - err = svc.saveSwapEvents(ctx, chainID, swaps) - if err != nil { - return errors.Wrap(err, "svc.saveSwapEvents") - } - - return nil - }) - - wg.Go(func() error { - liquidityAdded, err := swap.FilterMint(filterOpts, nil) - - if err != nil { - return errors.Wrap(err, "svc.bridge.FilterMint") - } - - // only save ones above 0.1 ETH, this is only for Galaxe - // and we dont care about the rest - err = svc.saveLiquidityAddedEvents(ctx, chainID, liquidityAdded) - if err != nil { - return errors.Wrap(err, "svc.saveLiquidityAddedEvents") - } - - return nil - }) } - if svc.indexNfts { - wg.Go(func() error { - if err := svc.indexNFTTransfers(ctx, chainID, filterOpts.Start, *filterOpts.End); err != nil { - return errors.Wrap(err, "svc.indexNFTTransfers") - } - return nil - }) - } + wg.Go(func() error { + if err := indxr.indexRawBlockData(ctx, chainID, filterOpts.Start, *filterOpts.End); err != nil { + return errors.Wrap(err, "indxr.indexRawBlockData") + } + return nil + }) err := wg.Wait() + if err != nil { if errors.Is(err, context.Canceled) { - slog.Error("context cancelled") + slog.Error("filter context cancelled") return err } diff --git a/packages/eventindexer/indexer/filter_then_subscribe.go b/packages/eventindexer/indexer/filter_then_subscribe.go index c2d4552f6c6..70f79113258 100644 --- a/packages/eventindexer/indexer/filter_then_subscribe.go +++ b/packages/eventindexer/indexer/filter_then_subscribe.go @@ -12,44 +12,42 @@ import ( "github.com/taikoxyz/taiko-mono/packages/eventindexer" ) -func (svc *Service) FilterThenSubscribe( +func (indxr *Indexer) filterThenSubscribe( ctx context.Context, - mode eventindexer.Mode, - watchMode eventindexer.WatchMode, filter FilterFunc, ) error { - chainID, err := svc.ethClient.ChainID(ctx) + chainID, err := indxr.ethClient.ChainID(ctx) if err != nil { - return errors.Wrap(err, "svc.ethClient.ChainID()") + return errors.Wrap(err, "indxr.ethClient.ChainID()") } - if watchMode == eventindexer.SubscribeWatchMode { - return 
svc.subscribe(ctx, chainID) + if indxr.watchMode == Subscribe { + return indxr.subscribe(ctx, chainID) } - if err := svc.setInitialProcessingBlockByMode(ctx, mode, chainID); err != nil { - return errors.Wrap(err, "svc.setInitialProcessingBlockByMode") + if err := indxr.setInitialProcessingBlockByMode(ctx, indxr.syncMode, chainID); err != nil { + return errors.Wrap(err, "indxr.setInitialProcessingBlockByMode") } - header, err := svc.ethClient.HeaderByNumber(ctx, nil) + header, err := indxr.ethClient.HeaderByNumber(ctx, nil) if err != nil { - return errors.Wrap(err, "svc.ethClient.HeaderByNumber") + return errors.Wrap(err, "indxr.ethClient.HeaderByNumber") } - if svc.processingBlockHeight == header.Number.Uint64() { + if indxr.processingBlockHeight == header.Number.Uint64() { slog.Info("indexing caught up subscribing to new incoming events", "chainID", chainID.Uint64()) - return svc.subscribe(ctx, chainID) + return indxr.subscribe(ctx, chainID) } slog.Info("getting batch of events", "chainID", chainID.Uint64(), - "startBlock", svc.processingBlockHeight, + "startBlock", indxr.processingBlockHeight, "endBlock", header.Number.Int64(), - "batchSize", svc.blockBatchSize, + "batchSize", indxr.blockBatchSize, ) - for i := svc.processingBlockHeight; i < header.Number.Uint64(); i += svc.blockBatchSize { - end := svc.processingBlockHeight + svc.blockBatchSize + for i := indxr.processingBlockHeight; i < header.Number.Uint64(); i += indxr.blockBatchSize { + end := indxr.processingBlockHeight + indxr.blockBatchSize // if the end of the batch is greater than the latest block number, set end // to the latest block number if end > header.Number.Uint64() { @@ -65,33 +63,33 @@ func (svc *Service) FilterThenSubscribe( fmt.Println() filterOpts := &bind.FilterOpts{ - Start: svc.processingBlockHeight, + Start: indxr.processingBlockHeight, End: &filterEnd, Context: ctx, } - if err := filter(ctx, chainID, svc, filterOpts); err != nil { + if err := filter(ctx, chainID, indxr, filterOpts); 
err != nil { return errors.Wrap(err, "filter") } - header, err := svc.ethClient.HeaderByNumber(ctx, big.NewInt(int64(end))) + header, err := indxr.ethClient.HeaderByNumber(ctx, big.NewInt(int64(end))) if err != nil { - return errors.Wrap(err, "svc.ethClient.HeaderByNumber") + return errors.Wrap(err, "indxr.ethClient.HeaderByNumber") } slog.Info("setting last processed block", "height", end, "hash", header.Hash().Hex()) - if err := svc.blockRepo.Save(eventindexer.SaveBlockOpts{ + if err := indxr.processedBlockRepo.Save(eventindexer.SaveProcessedBlockOpts{ Height: uint64(end), Hash: header.Hash(), ChainID: chainID, }); err != nil { - return errors.Wrap(err, "svc.blockRepo.Save") + return errors.Wrap(err, "indxr.blockRepo.Save") } eventindexer.BlocksProcessed.Inc() - svc.processingBlockHeight = uint64(end) + indxr.processingBlockHeight = uint64(end) } slog.Info( @@ -99,19 +97,19 @@ func (svc *Service) FilterThenSubscribe( "chainID", chainID.Uint64(), ) - latestBlock, err := svc.ethClient.HeaderByNumber(ctx, nil) + latestBlock, err := indxr.ethClient.HeaderByNumber(ctx, nil) if err != nil { - return errors.Wrap(err, "svc.ethclient.HeaderByNumber") + return errors.Wrap(err, "indxr.ethclient.HeaderByNumber") } - if svc.processingBlockHeight < latestBlock.Number.Uint64() { - return svc.FilterThenSubscribe(ctx, eventindexer.SyncMode, watchMode, filter) + if indxr.processingBlockHeight < latestBlock.Number.Uint64() { + return indxr.filterThenSubscribe(ctx, filter) } // we are caught up and specified not to subscribe, we can return now - if watchMode == eventindexer.FilterWatchMode { + if indxr.watchMode == Filter { return nil } - return svc.subscribe(ctx, chainID) + return indxr.subscribe(ctx, chainID) } diff --git a/packages/eventindexer/indexer/index_nft_transfers.go b/packages/eventindexer/indexer/index_nft_transfers.go index 8ca16608689..0a343ff5867 100644 --- a/packages/eventindexer/indexer/index_nft_transfers.go +++ 
b/packages/eventindexer/indexer/index_nft_transfers.go @@ -8,7 +8,6 @@ import ( "log/slog" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -29,28 +28,17 @@ var ( // indexNFTTransfers indexes from a given starting block to a given end block and parses all event logs // to find ERC721 or ERC1155 transfer events -func (svc *Service) indexNFTTransfers( +func (indxr *Indexer) indexNFTTransfers( ctx context.Context, chainID *big.Int, - start uint64, - end uint64, + logs []types.Log, ) error { - query := ethereum.FilterQuery{ - FromBlock: big.NewInt(int64(start)), - ToBlock: big.NewInt(int64(end)), - } - - logs, err := svc.ethClient.FilterLogs(ctx, query) - if err != nil { - return err - } - for _, vLog := range logs { - if !svc.isERC721Transfer(ctx, vLog) && !svc.isERC1155Transfer(ctx, vLog) { + if !indxr.isERC721Transfer(ctx, vLog) && !indxr.isERC1155Transfer(ctx, vLog) { continue } - if err := svc.saveNFTTransfer(ctx, chainID, vLog); err != nil { + if err := indxr.saveNFTTransfer(ctx, chainID, vLog); err != nil { return err } } @@ -59,7 +47,7 @@ func (svc *Service) indexNFTTransfers( } // isERC1155Transfer determines whether a given log is a valid ERC1155 transfer event -func (svc *Service) isERC1155Transfer(ctx context.Context, vLog types.Log) bool { +func (indxr *Indexer) isERC1155Transfer(ctx context.Context, vLog types.Log) bool { // malformed event if len(vLog.Topics) == 0 { return false @@ -76,7 +64,7 @@ func (svc *Service) isERC1155Transfer(ctx context.Context, vLog types.Log) bool } // isERC721Transfer determines whether a given log is a valid ERC721 transfer event -func (svc *Service) isERC721Transfer(ctx context.Context, vLog types.Log) bool { +func (indxr *Indexer) isERC721Transfer(ctx context.Context, vLog types.Log) bool { // malformed event if len(vLog.Topics) == 0 { return false @@ -99,20 +87,20 @@ func (svc *Service) 
isERC721Transfer(ctx context.Context, vLog types.Log) bool { // saveNFTTrasnfer parses the event logs and saves either an ERC721 or ERC1155 event, updating // users balances -func (svc *Service) saveNFTTransfer(ctx context.Context, chainID *big.Int, vLog types.Log) error { - if svc.isERC721Transfer(ctx, vLog) { - return svc.saveERC721Transfer(ctx, chainID, vLog) +func (indxr *Indexer) saveNFTTransfer(ctx context.Context, chainID *big.Int, vLog types.Log) error { + if indxr.isERC721Transfer(ctx, vLog) { + return indxr.saveERC721Transfer(ctx, chainID, vLog) } - if svc.isERC1155Transfer(ctx, vLog) { - return svc.saveERC1155Transfer(ctx, chainID, vLog) + if indxr.isERC1155Transfer(ctx, vLog) { + return indxr.saveERC1155Transfer(ctx, chainID, vLog) } return errors.New("nftTransferVlog not ERC721 or ERC1155") } // saveERC721Transfer updates the user's balances on the from and to of a ERC721 transfer event -func (svc *Service) saveERC721Transfer(ctx context.Context, chainID *big.Int, vLog types.Log) error { +func (indxr *Indexer) saveERC721Transfer(ctx context.Context, chainID *big.Int, vLog types.Log) error { from := fmt.Sprintf("0x%v", common.Bytes2Hex(vLog.Topics[1].Bytes()[12:])) to := fmt.Sprintf("0x%v", common.Bytes2Hex(vLog.Topics[2].Bytes()[12:])) @@ -129,7 +117,7 @@ func (svc *Service) saveERC721Transfer(ctx context.Context, chainID *big.Int, vL // increment To address's balance - _, err := svc.nftBalanceRepo.IncreaseBalance(ctx, eventindexer.UpdateNFTBalanceOpts{ + _, err := indxr.nftBalanceRepo.IncreaseBalance(ctx, eventindexer.UpdateNFTBalanceOpts{ ChainID: chainID.Int64(), Address: to, TokenID: tokenID, @@ -144,7 +132,7 @@ func (svc *Service) saveERC721Transfer(ctx context.Context, chainID *big.Int, vL // decrement From address's balance // ignore zero address since that is usually the "mint" if from != ZeroAddress.Hex() { - _, err = svc.nftBalanceRepo.SubtractBalance(ctx, eventindexer.UpdateNFTBalanceOpts{ + _, err = indxr.nftBalanceRepo.SubtractBalance(ctx, 
eventindexer.UpdateNFTBalanceOpts{ ChainID: chainID.Int64(), Address: from, TokenID: tokenID, @@ -162,7 +150,7 @@ func (svc *Service) saveERC721Transfer(ctx context.Context, chainID *big.Int, vL // saveERC1155Transfer parses and saves either a TransferSingle or TransferBatch event to // the database and updates the user's balances -func (svc *Service) saveERC1155Transfer(ctx context.Context, chainID *big.Int, vLog types.Log) error { +func (indxr *Indexer) saveERC1155Transfer(ctx context.Context, chainID *big.Int, vLog types.Log) error { from := fmt.Sprintf("0x%v", common.Bytes2Hex(vLog.Topics[2].Bytes()[12:])) to := fmt.Sprintf("0x%v", common.Bytes2Hex(vLog.Topics[3].Bytes()[12:])) @@ -212,7 +200,7 @@ func (svc *Service) saveERC1155Transfer(ctx context.Context, chainID *big.Int, v // increment To address's balance for _, transfer := range transfers { - _, err = svc.nftBalanceRepo.IncreaseBalance(ctx, eventindexer.UpdateNFTBalanceOpts{ + _, err = indxr.nftBalanceRepo.IncreaseBalance(ctx, eventindexer.UpdateNFTBalanceOpts{ ChainID: chainID.Int64(), Address: to, TokenID: transfer.ID.Int64(), @@ -226,7 +214,7 @@ func (svc *Service) saveERC1155Transfer(ctx context.Context, chainID *big.Int, v if from != ZeroAddress.Hex() { // decrement From address's balance - _, err = svc.nftBalanceRepo.SubtractBalance(ctx, eventindexer.UpdateNFTBalanceOpts{ + _, err = indxr.nftBalanceRepo.SubtractBalance(ctx, eventindexer.UpdateNFTBalanceOpts{ ChainID: chainID.Int64(), Address: from, TokenID: transfer.ID.Int64(), diff --git a/packages/eventindexer/indexer/index_raw_block_data.go b/packages/eventindexer/indexer/index_raw_block_data.go new file mode 100644 index 00000000000..edd89655d62 --- /dev/null +++ b/packages/eventindexer/indexer/index_raw_block_data.go @@ -0,0 +1,107 @@ +package indexer + +import ( + "context" + "log/slog" + "math/big" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +func (indxr *Indexer) 
indexRawBlockData( + ctx context.Context, + chainID *big.Int, + start uint64, + end uint64, +) error { + wg, ctx := errgroup.WithContext(ctx) + // BLOCK parsing + + slog.Info("indexRawBlockData", "start", start, "end", end) + + // only index block/transaction data on L2 + if indxr.layer == Layer2 { + for i := start; i < end; i++ { + id := i + + wg.Go(func() error { + slog.Info("processing block data", "blockNum", id) + + block, err := indxr.ethClient.BlockByNumber(ctx, big.NewInt(int64(id))) + if err != nil { + return errors.Wrap(err, "indxr.ethClient.BlockByNumber") + } + + if err := indxr.blockRepo.Save(ctx, block, chainID); err != nil { + return errors.Wrap(err, "indxr.blockRepo.Save") + } + + txs := block.Transactions() + + for _, tx := range txs { + slog.Info("transaction found", "hash", tx.Hash()) + receipt, err := indxr.ethClient.TransactionReceipt(ctx, tx.Hash()) + if err != nil { + return err + } + + sender, err := indxr.ethClient.TransactionSender(ctx, tx, block.Hash(), receipt.TransactionIndex) + if err != nil { + return err + } + + if err := indxr.accountRepo.Save(ctx, sender, time.Unix(int64(block.Time()), 0)); err != nil { + return err + } + + if err := indxr.txRepo.Save(ctx, + tx, + sender, + block.Number(), + time.Unix(int64(block.Time()), 0), + receipt.ContractAddress, + ); err != nil { + return err + } + } + + return nil + }) + } + } + + // LOGS parsing + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(start)), + ToBlock: big.NewInt(int64(end)), + } + + logs, err := indxr.ethClient.FilterLogs(ctx, query) + if err != nil { + return err + } + + // index NFT transfers + if indxr.indexNfts { + wg.Go(func() error { + if err := indxr.indexNFTTransfers(ctx, chainID, logs); err != nil { + return errors.Wrap(err, "svc.indexNFTTransfers") + } + return nil + }) + } + + if err := wg.Wait(); err != nil { + if errors.Is(err, context.Canceled) { + slog.Error("index raw block data context cancelled") + return err + } + + return err + } + + return nil 
+} diff --git a/packages/eventindexer/indexer/indexer.go b/packages/eventindexer/indexer/indexer.go new file mode 100644 index 00000000000..e1e8a53cd69 --- /dev/null +++ b/packages/eventindexer/indexer/indexer.go @@ -0,0 +1,251 @@ +package indexer + +import ( + "context" + "fmt" + "log/slog" + "sync" + "time" + + nethttp "net/http" + + "github.com/cyberhorsey/errors" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/labstack/echo/v4" + "github.com/taikoxyz/taiko-mono/packages/eventindexer" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/bridge" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/swap" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/taikol1" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/http" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/repo" + "github.com/urfave/cli/v2" +) + +var ( + ZeroAddress = common.HexToAddress("0x0000000000000000000000000000000000000000") +) + +var ( + Layer1 = "l1" + Layer2 = "l2" +) + +type WatchMode string + +var ( + Filter WatchMode = "filter" + Subscribe WatchMode = "subscribe" + FilterAndSubscribe WatchMode = "filter-and-subscribe" + WatchModes = []WatchMode{Filter, Subscribe, FilterAndSubscribe} +) + +type SyncMode string + +var ( + Sync SyncMode = "sync" + Resync SyncMode = "resync" + Modes = []SyncMode{Sync, Resync} +) + +type Indexer struct { + accountRepo eventindexer.AccountRepository + blockRepo eventindexer.BlockRepository + eventRepo eventindexer.EventRepository + processedBlockRepo eventindexer.ProcessedBlockRepository + statRepo eventindexer.StatRepository + nftBalanceRepo eventindexer.NFTBalanceRepository + txRepo eventindexer.TransactionRepository + + ethClient *ethclient.Client + + processingBlockHeight uint64 + + blockBatchSize uint64 + subscriptionBackoff time.Duration + + taikol1 *taikol1.TaikoL1 + bridge *bridge.Bridge + swaps []*swap.Swap + + httpPort uint64 + srv *http.Server + 
+ indexNfts bool + layer string + + wg *sync.WaitGroup + ctx context.Context + + watchMode WatchMode + syncMode SyncMode +} + +func (indxr *Indexer) Start() error { + indxr.ctx = context.Background() + go func() { + if err := indxr.srv.Start(fmt.Sprintf(":%v", indxr.httpPort)); err != nethttp.ErrServerClosed { + slog.Error("http srv start", "error", err.Error()) + } + }() + + indxr.wg.Add(1) + + go func() { + defer func() { + indxr.wg.Done() + }() + + if err := indxr.filterThenSubscribe( + indxr.ctx, + filterFunc, + ); err != nil { + slog.Error("error filtering and subscribing", "err", err.Error()) + } + }() + + return nil +} + +func (indxr *Indexer) Name() string { + return "indexer" +} + +func (indxr *Indexer) InitFromCli(ctx context.Context, c *cli.Context) error { + cfg, err := NewConfigFromCliContext(c) + if err != nil { + return err + } + + return InitFromConfig(ctx, indxr, cfg) +} + +// nolint: funlen +func InitFromConfig(ctx context.Context, i *Indexer, cfg *Config) error { + db, err := cfg.OpenDBFunc() + if err != nil { + return err + } + + accountRepository, err := repo.NewAccountRepository(db) + if err != nil { + return err + } + + eventRepository, err := repo.NewEventRepository(db) + if err != nil { + return err + } + + processedBlockRepository, err := repo.NewProcessedBlockRepository(db) + if err != nil { + return err + } + + blockRepository, err := repo.NewBlockRepository(db) + if err != nil { + return err + } + + chartRepository, err := repo.NewChartRepository(db) + if err != nil { + return err + } + + statRepository, err := repo.NewStatRepository(db) + if err != nil { + return err + } + + nftBalanceRepository, err := repo.NewNFTBalanceRepository(db) + if err != nil { + return err + } + + txRepository, err := repo.NewTransactionRepository(db) + if err != nil { + return err + } + + ethClient, err := ethclient.Dial(cfg.RPCUrl) + if err != nil { + return err + } + + var taikoL1 *taikol1.TaikoL1 + + if cfg.L1TaikoAddress.Hex() != ZeroAddress.Hex() { + 
taikoL1, err = taikol1.NewTaikoL1(cfg.L1TaikoAddress, ethClient) + if err != nil { + return errors.Wrap(err, "contracts.NewTaikoL1") + } + } + + var bridgeContract *bridge.Bridge + + if cfg.BridgeAddress.Hex() != ZeroAddress.Hex() { + bridgeContract, err = bridge.NewBridge(cfg.BridgeAddress, ethClient) + if err != nil { + return errors.Wrap(err, "contracts.NewBridge") + } + } + + var swapContracts []*swap.Swap + + if cfg.SwapAddresses != nil && len(cfg.SwapAddresses) > 0 { + for _, v := range cfg.SwapAddresses { + swapContract, err := swap.NewSwap(v, ethClient) + if err != nil { + return errors.Wrap(err, "contracts.NewBridge") + } + + swapContracts = append(swapContracts, swapContract) + } + } + + srv, err := http.NewServer(http.NewServerOpts{ + EventRepo: eventRepository, + StatRepo: statRepository, + NFTBalanceRepo: nftBalanceRepository, + ChartRepo: chartRepository, + Echo: echo.New(), + CorsOrigins: cfg.CORSOrigins, + EthClient: ethClient, + }) + if err != nil { + return err + } + + i.accountRepo = accountRepository + i.eventRepo = eventRepository + i.processedBlockRepo = processedBlockRepository + i.statRepo = statRepository + i.nftBalanceRepo = nftBalanceRepository + i.txRepo = txRepository + i.blockRepo = blockRepository + + i.ethClient = ethClient + i.taikol1 = taikoL1 + i.bridge = bridgeContract + i.swaps = swapContracts + i.blockBatchSize = cfg.BlockBatchSize + i.subscriptionBackoff = time.Duration(cfg.Subscriptionbackoff) * time.Second + i.srv = srv + i.httpPort = cfg.HTTPPort + i.wg = &sync.WaitGroup{} + + i.syncMode = cfg.SyncMode + i.watchMode = cfg.WatchMode + i.indexNfts = cfg.IndexNFTs + i.layer = cfg.Layer + + return nil +} + +func (indxr *Indexer) Close(ctx context.Context) { + if err := indxr.srv.Shutdown(ctx); err != nil { + slog.Error("srv shutdown", "error", err) + } + + indxr.wg.Wait() +} diff --git a/packages/eventindexer/indexer/save_block_proposed_event.go b/packages/eventindexer/indexer/save_block_proposed_event.go index 
aa653cccf39..6d2c2b027fd 100644 --- a/packages/eventindexer/indexer/save_block_proposed_event.go +++ b/packages/eventindexer/indexer/save_block_proposed_event.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "math/big" + "time" "log/slog" @@ -13,7 +14,7 @@ import ( "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/taikol1" ) -func (svc *Service) saveBlockProposedEvents( +func (indxr *Indexer) saveBlockProposedEvents( ctx context.Context, chainID *big.Int, events *taikol1.TaikoL1BlockProposedIterator, @@ -26,24 +27,24 @@ func (svc *Service) saveBlockProposedEvents( for { event := events.Event - if err := svc.detectAndHandleReorg(ctx, eventindexer.EventNameBlockProposed, event.BlockId.Int64()); err != nil { - return errors.Wrap(err, "svc.detectAndHandleReorg") + if err := indxr.detectAndHandleReorg(ctx, eventindexer.EventNameBlockProposed, event.BlockId.Int64()); err != nil { + return errors.Wrap(err, "indxr.detectAndHandleReorg") } - tx, _, err := svc.ethClient.TransactionByHash(ctx, event.Raw.TxHash) + tx, _, err := indxr.ethClient.TransactionByHash(ctx, event.Raw.TxHash) if err != nil { - return errors.Wrap(err, "svc.ethClient.TransactionByHash") + return errors.Wrap(err, "indxr.ethClient.TransactionByHash") } - sender, err := svc.ethClient.TransactionSender(ctx, tx, event.Raw.BlockHash, event.Raw.TxIndex) + sender, err := indxr.ethClient.TransactionSender(ctx, tx, event.Raw.BlockHash, event.Raw.TxIndex) if err != nil { - return errors.Wrap(err, "svc.ethClient.TransactionSender") + return errors.Wrap(err, "indxr.ethClient.TransactionSender") } - if err := svc.saveBlockProposedEvent(ctx, chainID, event, sender); err != nil { + if err := indxr.saveBlockProposedEvent(ctx, chainID, event, sender); err != nil { eventindexer.BlockProposedEventsProcessedError.Inc() - return errors.Wrap(err, "svc.saveBlockProposedEvent") + return errors.Wrap(err, "indxr.saveBlockProposedEvent") } if !events.Next() { @@ -52,7 +53,7 @@ func (svc *Service) 
saveBlockProposedEvents( } } -func (svc *Service) saveBlockProposedEvent( +func (indxr *Indexer) saveBlockProposedEvent( ctx context.Context, chainID *big.Int, event *taikol1.TaikoL1BlockProposed, @@ -69,7 +70,12 @@ func (svc *Service) saveBlockProposedEvent( assignedProver := event.Prover.Hex() - _, err = svc.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ + block, err := indxr.ethClient.BlockByNumber(ctx, new(big.Int).SetUint64(event.Raw.BlockNumber)) + if err != nil { + return errors.Wrap(err, "indxr.ethClient.BlockByNumber") + } + + _, err = indxr.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ Name: eventindexer.EventNameBlockProposed, Data: string(marshaled), ChainID: chainID, @@ -77,9 +83,10 @@ func (svc *Service) saveBlockProposedEvent( Address: sender.Hex(), BlockID: &blockID, AssignedProver: &assignedProver, + TransactedAt: time.Unix(int64(block.Time()), 0), }) if err != nil { - return errors.Wrap(err, "svc.eventRepo.Save") + return errors.Wrap(err, "indxr.eventRepo.Save") } eventindexer.BlockProposedEventsProcessed.Inc() diff --git a/packages/eventindexer/indexer/save_block_proven_event.go b/packages/eventindexer/indexer/save_block_proven_event.go index a445809fef6..5f17015e808 100644 --- a/packages/eventindexer/indexer/save_block_proven_event.go +++ b/packages/eventindexer/indexer/save_block_proven_event.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "math/big" + "time" "log/slog" @@ -18,7 +19,7 @@ var ( oracleProver = common.HexToAddress("0x0000000000000000000000000000000000000000") ) -func (svc *Service) saveBlockProvenEvents( +func (indxr *Indexer) saveBlockProvenEvents( ctx context.Context, chainID *big.Int, events *taikol1.TaikoL1BlockProvenIterator, @@ -31,14 +32,14 @@ func (svc *Service) saveBlockProvenEvents( for { event := events.Event - if err := svc.detectAndHandleReorg(ctx, eventindexer.EventNameBlockProven, event.BlockId.Int64()); err != nil { - return errors.Wrap(err, "svc.detectAndHandleReorg") + if err := 
indxr.detectAndHandleReorg(ctx, eventindexer.EventNameBlockProven, event.BlockId.Int64()); err != nil { + return errors.Wrap(err, "indxr.detectAndHandleReorg") } - if err := svc.saveBlockProvenEvent(ctx, chainID, event); err != nil { + if err := indxr.saveBlockProvenEvent(ctx, chainID, event); err != nil { eventindexer.BlockProvenEventsProcessedError.Inc() - return errors.Wrap(err, "svc.saveBlockProvenEvent") + return errors.Wrap(err, "indxr.saveBlockProvenEvent") } if !events.Next() { @@ -47,7 +48,7 @@ func (svc *Service) saveBlockProvenEvents( } } -func (svc *Service) saveBlockProvenEvent( +func (indxr *Indexer) saveBlockProvenEvent( ctx context.Context, chainID *big.Int, event *taikol1.TaikoL1BlockProven, @@ -63,45 +64,51 @@ func (svc *Service) saveBlockProvenEvent( blockID := event.BlockId.Int64() - _, err = svc.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ - Name: eventindexer.EventNameBlockProven, - Data: string(marshaled), - ChainID: chainID, - Event: eventindexer.EventNameBlockProven, - Address: event.Prover.Hex(), - BlockID: &blockID, + block, err := indxr.ethClient.BlockByNumber(ctx, new(big.Int).SetUint64(event.Raw.BlockNumber)) + if err != nil { + return errors.Wrap(err, "indxr.ethClient.BlockByNumber") + } + + _, err = indxr.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ + Name: eventindexer.EventNameBlockProven, + Data: string(marshaled), + ChainID: chainID, + Event: eventindexer.EventNameBlockProven, + Address: event.Prover.Hex(), + BlockID: &blockID, + TransactedAt: time.Unix(int64(block.Time()), 0), }) if err != nil { - return errors.Wrap(err, "svc.eventRepo.Save") + return errors.Wrap(err, "indxr.eventRepo.Save") } eventindexer.BlockProvenEventsProcessed.Inc() if event.Prover.Hex() != systemProver.Hex() && event.Prover.Hex() != oracleProver.Hex() { - if err := svc.updateAverageProofTime(ctx, event); err != nil { - return errors.Wrap(err, "svc.updateAverageProofTime") + if err := indxr.updateAverageProofTime(ctx, event); err != nil { + return 
errors.Wrap(err, "indxr.updateAverageProofTime") } } return nil } -func (svc *Service) updateAverageProofTime(ctx context.Context, event *taikol1.TaikoL1BlockProven) error { - block, err := svc.taikol1.GetBlock(nil, event.BlockId.Uint64()) +func (indxr *Indexer) updateAverageProofTime(ctx context.Context, event *taikol1.TaikoL1BlockProven) error { + block, err := indxr.taikol1.GetBlock(nil, event.BlockId.Uint64()) // will be unable to GetBlock for older blocks, just return nil, we dont // care about averageProofTime that much to be honest for older blocks if err != nil { return nil } - eventBlock, err := svc.ethClient.BlockByHash(ctx, event.Raw.BlockHash) + eventBlock, err := indxr.ethClient.BlockByHash(ctx, event.Raw.BlockHash) if err != nil { - return errors.Wrap(err, "svc.ethClient.BlockByHash") + return errors.Wrap(err, "indxr.ethClient.BlockByHash") } - stat, err := svc.statRepo.Find(ctx) + stat, err := indxr.statRepo.Find(ctx) if err != nil { - return errors.Wrap(err, "svc.statRepo.Find") + return errors.Wrap(err, "indxr.statRepo.Find") } proposedAt := block.ProposedAt @@ -138,11 +145,11 @@ func (svc *Service) updateAverageProofTime(ctx context.Context, event *taikol1.T newAverageProofTime.String(), ) - _, err = svc.statRepo.Save(ctx, eventindexer.SaveStatOpts{ + _, err = indxr.statRepo.Save(ctx, eventindexer.SaveStatOpts{ ProofTime: newAverageProofTime, }) if err != nil { - return errors.Wrap(err, "svc.statRepo.Save") + return errors.Wrap(err, "indxr.statRepo.Save") } return nil diff --git a/packages/eventindexer/indexer/save_block_verified_event.go b/packages/eventindexer/indexer/save_block_verified_event.go index c6dc38e33cb..b5efb763030 100644 --- a/packages/eventindexer/indexer/save_block_verified_event.go +++ b/packages/eventindexer/indexer/save_block_verified_event.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "math/big" + "time" "log/slog" @@ -12,7 +13,7 @@ import ( "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/taikol1" ) 
-func (svc *Service) saveBlockVerifiedEvents( +func (indxr *Indexer) saveBlockVerifiedEvents( ctx context.Context, chainID *big.Int, events *taikol1.TaikoL1BlockVerifiedIterator, @@ -25,14 +26,14 @@ func (svc *Service) saveBlockVerifiedEvents( for { event := events.Event - if err := svc.detectAndHandleReorg(ctx, eventindexer.EventNameBlockVerified, event.BlockId.Int64()); err != nil { - return errors.Wrap(err, "svc.detectAndHandleReorg") + if err := indxr.detectAndHandleReorg(ctx, eventindexer.EventNameBlockVerified, event.BlockId.Int64()); err != nil { + return errors.Wrap(err, "indxr.detectAndHandleReorg") } - if err := svc.saveBlockVerifiedEvent(ctx, chainID, event); err != nil { + if err := indxr.saveBlockVerifiedEvent(ctx, chainID, event); err != nil { eventindexer.BlockVerifiedEventsProcessedError.Inc() - return errors.Wrap(err, "svc.saveBlockVerifiedEvent") + return errors.Wrap(err, "indxr.saveBlockVerifiedEvent") } if !events.Next() { @@ -41,7 +42,7 @@ func (svc *Service) saveBlockVerifiedEvents( } } -func (svc *Service) saveBlockVerifiedEvent( +func (indxr *Indexer) saveBlockVerifiedEvent( ctx context.Context, chainID *big.Int, event *taikol1.TaikoL1BlockVerified, @@ -55,16 +56,22 @@ func (svc *Service) saveBlockVerifiedEvent( blockID := event.BlockId.Int64() - _, err = svc.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ - Name: eventindexer.EventNameBlockVerified, - Data: string(marshaled), - ChainID: chainID, - Event: eventindexer.EventNameBlockVerified, - Address: "", - BlockID: &blockID, + block, err := indxr.ethClient.BlockByNumber(ctx, new(big.Int).SetUint64(event.Raw.BlockNumber)) + if err != nil { + return errors.Wrap(err, "indxr.ethClient.BlockByNumber") + } + + _, err = indxr.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ + Name: eventindexer.EventNameBlockVerified, + Data: string(marshaled), + ChainID: chainID, + Event: eventindexer.EventNameBlockVerified, + Address: "", + BlockID: &blockID, + TransactedAt: time.Unix(int64(block.Time()), 0), }) 
if err != nil { - return errors.Wrap(err, "svc.eventRepo.Save") + return errors.Wrap(err, "indxr.eventRepo.Save") } eventindexer.BlockVerifiedEventsProcessed.Inc() diff --git a/packages/eventindexer/indexer/save_liquidity_added_event.go b/packages/eventindexer/indexer/save_liquidity_added_event.go index 8dd0dacf16a..25a30869f39 100644 --- a/packages/eventindexer/indexer/save_liquidity_added_event.go +++ b/packages/eventindexer/indexer/save_liquidity_added_event.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "math/big" + "time" "log/slog" @@ -17,7 +18,7 @@ var ( minLiquidityAddedAmount = big.NewInt(100000000000000000) ) -func (svc *Service) saveLiquidityAddedEvents( +func (indxr *Indexer) saveLiquidityAddedEvents( ctx context.Context, chainID *big.Int, events *swap.SwapMintIterator, @@ -30,10 +31,10 @@ func (svc *Service) saveLiquidityAddedEvents( for { event := events.Event - if err := svc.saveLiquidityAddedEvent(ctx, chainID, event); err != nil { + if err := indxr.saveLiquidityAddedEvent(ctx, chainID, event); err != nil { eventindexer.LiquidityAddedEventsProcessedError.Inc() - return errors.Wrap(err, "svc.saveSwapEvent") + return errors.Wrap(err, "indxr.saveSwapEvent") } if !events.Next() { @@ -42,12 +43,12 @@ func (svc *Service) saveLiquidityAddedEvents( } } -func (svc *Service) saveLiquidityAddedEvent( +func (indxr *Indexer) saveLiquidityAddedEvent( ctx context.Context, chainID *big.Int, event *swap.SwapMint, ) error { - tx, _, err := svc.ethClient.TransactionByHash(ctx, event.Raw.TxHash) + tx, _, err := indxr.ethClient.TransactionByHash(ctx, event.Raw.TxHash) if err != nil { return err } @@ -78,15 +79,21 @@ func (svc *Service) saveLiquidityAddedEvent( return errors.Wrap(err, "json.Marshal(event)") } - _, err = svc.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ - Name: eventindexer.EventNameMint, - Data: string(marshaled), - ChainID: chainID, - Event: eventindexer.EventNameMint, - Address: from.Hex(), + block, err := indxr.ethClient.BlockByNumber(ctx, 
new(big.Int).SetUint64(event.Raw.BlockNumber)) + if err != nil { + return errors.Wrap(err, "indxr.ethClient.BlockByNumber") + } + + _, err = indxr.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ + Name: eventindexer.EventNameMint, + Data: string(marshaled), + ChainID: chainID, + Event: eventindexer.EventNameMint, + Address: from.Hex(), + TransactedAt: time.Unix(int64(block.Time()), 0), }) if err != nil { - return errors.Wrap(err, "svc.eventRepo.Save") + return errors.Wrap(err, "indxr.eventRepo.Save") } eventindexer.LiquidityAddedEventsProcessed.Inc() diff --git a/packages/eventindexer/indexer/save_message_sent_event.go b/packages/eventindexer/indexer/save_message_sent_event.go index 47aa63f1003..5a0adae50e3 100644 --- a/packages/eventindexer/indexer/save_message_sent_event.go +++ b/packages/eventindexer/indexer/save_message_sent_event.go @@ -4,21 +4,16 @@ import ( "context" "encoding/json" "math/big" + "time" "log/slog" - "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" "github.com/taikoxyz/taiko-mono/packages/eventindexer" "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/bridge" ) -var ( - minEthAmount = new(big.Int).SetUint64(150000000000000000) - zeroHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") -) - -func (svc *Service) saveMessageSentEvents( +func (indxr *Indexer) saveMessageSentEvents( ctx context.Context, chainID *big.Int, events *bridge.BridgeMessageSentIterator, @@ -33,10 +28,10 @@ func (svc *Service) saveMessageSentEvents( slog.Info("new messageSent event", "owner", event.Message.From.Hex()) - if err := svc.saveMessageSentEvent(ctx, chainID, event); err != nil { + if err := indxr.saveMessageSentEvent(ctx, chainID, event); err != nil { eventindexer.MessageSentEventsProcessedError.Inc() - return errors.Wrap(err, "svc.saveMessageSentEvent") + return errors.Wrap(err, "indxr.saveMessageSentEvent") } if !events.Next() { @@ -45,43 +40,31 @@ func (svc *Service) saveMessageSentEvents( } 
} -func (svc *Service) saveMessageSentEvent( +func (indxr *Indexer) saveMessageSentEvent( ctx context.Context, chainID *big.Int, event *bridge.BridgeMessageSent, ) error { - // only save eth transfers - if event.Message.Data != nil && common.BytesToHash(event.Message.Data) != zeroHash { - slog.Info("skipping message sent event, is not eth transfer") - return nil - } - - // amount must be >= 0.15 eth - if event.Message.Value.Cmp(minEthAmount) < 0 { - slog.Info("skipping message sent event", - "value", - event.Message.Value.String(), - "requiredValue", - minEthAmount.String(), - ) - - return nil - } - marshaled, err := json.Marshal(event) if err != nil { return errors.Wrap(err, "json.Marshal(event)") } - _, err = svc.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ - Name: eventindexer.EventNameMessageSent, - Data: string(marshaled), - ChainID: chainID, - Event: eventindexer.EventNameMessageSent, - Address: event.Message.From.Hex(), + block, err := indxr.ethClient.BlockByNumber(ctx, new(big.Int).SetUint64(event.Raw.BlockNumber)) + if err != nil { + return errors.Wrap(err, "indxr.ethClient.BlockByNumber") + } + + _, err = indxr.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ + Name: eventindexer.EventNameMessageSent, + Data: string(marshaled), + ChainID: chainID, + Event: eventindexer.EventNameMessageSent, + Address: event.Message.From.Hex(), + TransactedAt: time.Unix(int64(block.Time()), 0), }) if err != nil { - return errors.Wrap(err, "svc.eventRepo.Save") + return errors.Wrap(err, "indxr.eventRepo.Save") } eventindexer.MessageSentEventsProcessed.Inc() diff --git a/packages/eventindexer/indexer/save_swap_event.go b/packages/eventindexer/indexer/save_swap_event.go index 621b830c3a6..2bddb63ae02 100644 --- a/packages/eventindexer/indexer/save_swap_event.go +++ b/packages/eventindexer/indexer/save_swap_event.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "math/big" + "time" "log/slog" @@ -18,7 +19,7 @@ var ( minTradeAmount = big.NewInt(10000000000000000) ) -func (svc 
*Service) saveSwapEvents( +func (indxr *Indexer) saveSwapEvents( ctx context.Context, chainID *big.Int, events *swap.SwapSwapIterator, @@ -31,10 +32,10 @@ func (svc *Service) saveSwapEvents( for { event := events.Event - if err := svc.saveSwapEvent(ctx, chainID, event); err != nil { + if err := indxr.saveSwapEvent(ctx, chainID, event); err != nil { eventindexer.SwapEventsProcessedError.Inc() - return errors.Wrap(err, "svc.saveSwapEvent") + return errors.Wrap(err, "indxr.saveSwapEvent") } if !events.Next() { @@ -43,7 +44,7 @@ func (svc *Service) saveSwapEvents( } } -func (svc *Service) saveSwapEvent( +func (indxr *Indexer) saveSwapEvent( ctx context.Context, chainID *big.Int, event *swap.SwapSwap, @@ -68,15 +69,21 @@ func (svc *Service) saveSwapEvent( return errors.Wrap(err, "json.Marshal(event)") } - _, err = svc.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ - Name: eventindexer.EventNameSwap, - Data: string(marshaled), - ChainID: chainID, - Event: eventindexer.EventNameSwap, - Address: fmt.Sprintf("0x%v", common.Bytes2Hex(event.Raw.Topics[2].Bytes()[12:])), + block, err := indxr.ethClient.BlockByNumber(ctx, new(big.Int).SetUint64(event.Raw.BlockNumber)) + if err != nil { + return errors.Wrap(err, "indxr.ethClient.BlockByNumber") + } + + _, err = indxr.eventRepo.Save(ctx, eventindexer.SaveEventOpts{ + Name: eventindexer.EventNameSwap, + Data: string(marshaled), + ChainID: chainID, + Event: eventindexer.EventNameSwap, + Address: fmt.Sprintf("0x%v", common.Bytes2Hex(event.Raw.Topics[2].Bytes()[12:])), + TransactedAt: time.Unix(int64(block.Time()), 0), }) if err != nil { - return errors.Wrap(err, "svc.eventRepo.Save") + return errors.Wrap(err, "indxr.eventRepo.Save") } eventindexer.SwapEventsProcessed.Inc() diff --git a/packages/eventindexer/indexer/service.go b/packages/eventindexer/indexer/service.go deleted file mode 100644 index 549d942d4ae..00000000000 --- a/packages/eventindexer/indexer/service.go +++ /dev/null @@ -1,119 +0,0 @@ -package indexer - -import ( - 
"time" - - "github.com/cyberhorsey/errors" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/rpc" - "github.com/taikoxyz/taiko-mono/packages/eventindexer" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/bridge" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/swap" - "github.com/taikoxyz/taiko-mono/packages/eventindexer/contracts/taikol1" -) - -var ( - ZeroAddress = common.HexToAddress("0x0000000000000000000000000000000000000000") -) - -type Service struct { - eventRepo eventindexer.EventRepository - blockRepo eventindexer.BlockRepository - statRepo eventindexer.StatRepository - nftBalanceRepo eventindexer.NFTBalanceRepository - ethClient *ethclient.Client - - processingBlockHeight uint64 - - blockBatchSize uint64 - subscriptionBackoff time.Duration - - taikol1 *taikol1.TaikoL1 - bridge *bridge.Bridge - swaps []*swap.Swap - - indexNfts bool -} - -type NewServiceOpts struct { - EventRepo eventindexer.EventRepository - BlockRepo eventindexer.BlockRepository - StatRepo eventindexer.StatRepository - NFTBalanceRepo eventindexer.NFTBalanceRepository - EthClient *ethclient.Client - RPCClient *rpc.Client - SrcTaikoAddress common.Address - SrcBridgeAddress common.Address - SrcSwapAddresses []common.Address - BlockBatchSize uint64 - SubscriptionBackoff time.Duration - IndexNFTs bool -} - -func NewService(opts NewServiceOpts) (*Service, error) { - if opts.EventRepo == nil { - return nil, eventindexer.ErrNoEventRepository - } - - if opts.IndexNFTs && opts.NFTBalanceRepo == nil { - return nil, eventindexer.ErrNoNFTBalanceRepository - } - - if opts.EthClient == nil { - return nil, eventindexer.ErrNoEthClient - } - - if opts.RPCClient == nil { - return nil, eventindexer.ErrNoRPCClient - } - - var taikoL1 *taikol1.TaikoL1 - - var err error - - if opts.SrcTaikoAddress.Hex() != ZeroAddress.Hex() { - taikoL1, err = taikol1.NewTaikoL1(opts.SrcTaikoAddress, opts.EthClient) - if 
err != nil { - return nil, errors.Wrap(err, "contracts.NewTaikoL1") - } - } - - var bridgeContract *bridge.Bridge - - if opts.SrcBridgeAddress.Hex() != ZeroAddress.Hex() { - bridgeContract, err = bridge.NewBridge(opts.SrcBridgeAddress, opts.EthClient) - if err != nil { - return nil, errors.Wrap(err, "contracts.NewBridge") - } - } - - var swapContracts []*swap.Swap - - if opts.SrcSwapAddresses != nil && len(opts.SrcSwapAddresses) > 0 { - for _, v := range opts.SrcSwapAddresses { - swapContract, err := swap.NewSwap(v, opts.EthClient) - if err != nil { - return nil, errors.Wrap(err, "contracts.NewBridge") - } - - swapContracts = append(swapContracts, swapContract) - } - } - - return &Service{ - eventRepo: opts.EventRepo, - blockRepo: opts.BlockRepo, - statRepo: opts.StatRepo, - nftBalanceRepo: opts.NFTBalanceRepo, - ethClient: opts.EthClient, - taikol1: taikoL1, - bridge: bridgeContract, - swaps: swapContracts, - - blockBatchSize: opts.BlockBatchSize, - subscriptionBackoff: opts.SubscriptionBackoff, - - indexNfts: opts.IndexNFTs, - }, nil -} diff --git a/packages/eventindexer/indexer/set_initial_processing_block_height.go b/packages/eventindexer/indexer/set_initial_processing_block_height.go index 709241f177a..2f2f9d16121 100644 --- a/packages/eventindexer/indexer/set_initial_processing_block_height.go +++ b/packages/eventindexer/indexer/set_initial_processing_block_height.go @@ -9,29 +9,29 @@ import ( "github.com/taikoxyz/taiko-mono/packages/eventindexer" ) -func (svc *Service) setInitialProcessingBlockByMode( +func (indxr *Indexer) setInitialProcessingBlockByMode( ctx context.Context, - mode eventindexer.Mode, + mode SyncMode, chainID *big.Int, ) error { var startingBlock uint64 = 0 // only check stateVars on L1, otherwise sync from 0 - if svc.taikol1 != nil { - stateVars, err := svc.taikol1.GetStateVariables(nil) + if indxr.taikol1 != nil { + stateVars, err := indxr.taikol1.GetStateVariables(nil) if err != nil { - return errors.Wrap(err, 
"svc.taikoL1.GetStateVariables") + return errors.Wrap(err, "indxr.taikoL1.GetStateVariables") } startingBlock = stateVars.GenesisHeight } switch mode { - case eventindexer.SyncMode: - latestProcessedBlock, err := svc.blockRepo.GetLatestBlockProcessed( + case Sync: + latestProcessedBlock, err := indxr.processedBlockRepo.GetLatestBlockProcessed( chainID, ) if err != nil { - return errors.Wrap(err, "svc.blockRepo.GetLatestBlock()") + return errors.Wrap(err, "indxr.processedBlockRepo.GetLatestBlock()") } if latestProcessedBlock.Height != 0 { @@ -40,11 +40,11 @@ func (svc *Service) setInitialProcessingBlockByMode( slog.Info("set processingBlockHeight", "startingBlock", startingBlock) - svc.processingBlockHeight = startingBlock + indxr.processingBlockHeight = startingBlock return nil - case eventindexer.ResyncMode: - svc.processingBlockHeight = startingBlock + case Resync: + indxr.processingBlockHeight = startingBlock return nil default: return eventindexer.ErrInvalidMode diff --git a/packages/eventindexer/indexer/subscribe.go b/packages/eventindexer/indexer/subscribe.go index 5dc905a3847..36db09e2e4d 100644 --- a/packages/eventindexer/indexer/subscribe.go +++ b/packages/eventindexer/indexer/subscribe.go @@ -18,31 +18,29 @@ import ( ) // subscribe subscribes to latest events -func (svc *Service) subscribe(ctx context.Context, chainID *big.Int) error { +func (indxr *Indexer) subscribe(ctx context.Context, chainID *big.Int) error { slog.Info("subscribing to new events") errChan := make(chan error) - if svc.taikol1 != nil { - go svc.subscribeBlockProven(ctx, chainID, errChan) - go svc.subscribeBlockProposed(ctx, chainID, errChan) - go svc.subscribeBlockVerified(ctx, chainID, errChan) + if indxr.taikol1 != nil { + go indxr.subscribeBlockProven(ctx, chainID, errChan) + go indxr.subscribeBlockProposed(ctx, chainID, errChan) + go indxr.subscribeBlockVerified(ctx, chainID, errChan) } - if svc.bridge != nil { - go svc.subscribeMessageSent(ctx, chainID, errChan) + if indxr.bridge 
!= nil { + go indxr.subscribeMessageSent(ctx, chainID, errChan) } - if svc.swaps != nil { - for _, swap := range svc.swaps { - go svc.subscribeSwap(ctx, swap, chainID, errChan) - go svc.subscribeLiquidityAdded(ctx, swap, chainID, errChan) + if indxr.swaps != nil { + for _, swap := range indxr.swaps { + go indxr.subscribeSwap(ctx, swap, chainID, errChan) + go indxr.subscribeLiquidityAdded(ctx, swap, chainID, errChan) } } - if svc.indexNfts { - go svc.subscribeNftTransfers(ctx, chainID, errChan) - } + go indxr.subscribeRawBlockData(ctx, chainID, errChan) // nolint: gosimple for { @@ -58,21 +56,23 @@ func (svc *Service) subscribe(ctx context.Context, chainID *big.Int) error { } } -func (svc *Service) subscribeNftTransfers( +func (indxr *Indexer) subscribeRawBlockData( ctx context.Context, chainID *big.Int, errChan chan error, ) { headers := make(chan *types.Header) - sub := event.ResubscribeErr(svc.subscriptionBackoff, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - slog.Error("svc.SubscribeNewHead", "error", err) - } - slog.Info("resubscribing to NewHead events for nft trasnfers") + sub := event.ResubscribeErr( + indxr.subscriptionBackoff, + func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + slog.Error("indxr.SubscribeNewHead", "error", err) + } + slog.Info("resubscribing to NewHead events for block data") - return svc.ethClient.SubscribeNewHead(ctx, headers) - }) + return indxr.ethClient.SubscribeNewHead(ctx, headers) + }) for { select { @@ -83,28 +83,32 @@ func (svc *Service) subscribeNftTransfers( slog.Error("sub.Err()", "error", err) errChan <- errors.Wrap(err, "sub.Err()") case header := <-headers: + slog.Info("new header", "header", header.Number) + go func() { - if err := svc.indexNFTTransfers(ctx, chainID, header.Number.Uint64(), header.Number.Uint64()); err != nil { - slog.Error("svc.indexNFTTransfers", "error", err) + if err := indxr.indexRawBlockData(ctx, chainID, 
header.Number.Uint64(), header.Number.Uint64()+1); err != nil { + slog.Error("indxr.indexRawBlockData", "error", err) } }() } } } -func (svc *Service) subscribeBlockProven(ctx context.Context, chainID *big.Int, errChan chan error) { +func (indxr *Indexer) subscribeBlockProven(ctx context.Context, chainID *big.Int, errChan chan error) { sink := make(chan *taikol1.TaikoL1BlockProven) - sub := event.ResubscribeErr(svc.subscriptionBackoff, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - log.Error("svc.taikoL1.WatchBlockProven", "error", err) - } - log.Info("resubscribing to BlockProven events") + sub := event.ResubscribeErr( + indxr.subscriptionBackoff, + func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + log.Error("indxr.taikoL1.WatchBlockProven", "error", err) + } + log.Info("resubscribing to BlockProven events") - return svc.taikol1.WatchBlockProven(&bind.WatchOpts{ - Context: ctx, - }, sink, nil) - }) + return indxr.taikol1.WatchBlockProven(&bind.WatchOpts{ + Context: ctx, + }, sink, nil) + }) defer sub.Unsubscribe() @@ -122,28 +126,28 @@ func (svc *Service) subscribeBlockProven(ctx context.Context, chainID *big.Int, "prover", event.Prover.Hex(), ) - if err := svc.saveBlockProvenEvent(ctx, chainID, event); err != nil { + if err := indxr.saveBlockProvenEvent(ctx, chainID, event); err != nil { eventindexer.BlockProvenEventsProcessedError.Inc() - log.Error("svc.subscribe, svc.saveBlockProvenEvent", "error", err) + log.Error("indxr.subscribe, indxr.saveBlockProvenEvent", "error", err) return } - block, err := svc.blockRepo.GetLatestBlockProcessed(chainID) + block, err := indxr.processedBlockRepo.GetLatestBlockProcessed(chainID) if err != nil { - slog.Error("svc.subscribe, svc.blockRepo.GetLatestBlockProcessed", "error", err) + slog.Error("indxr.subscribe, indxr.processedBlockRepo.GetLatestBlockProcessed", "error", err) return } if block.Height < event.Raw.BlockNumber { - err = 
svc.blockRepo.Save(eventindexer.SaveBlockOpts{ + err = indxr.processedBlockRepo.Save(eventindexer.SaveProcessedBlockOpts{ Height: event.Raw.BlockNumber, Hash: event.Raw.BlockHash, ChainID: chainID, }) if err != nil { - slog.Error("svc.subscribe, blockRepo.save", "error", err) + slog.Error("indxr.subscribe, blockRepo.save", "error", err) return } @@ -154,19 +158,21 @@ func (svc *Service) subscribeBlockProven(ctx context.Context, chainID *big.Int, } } -func (svc *Service) subscribeBlockProposed(ctx context.Context, chainID *big.Int, errChan chan error) { +func (indxr *Indexer) subscribeBlockProposed(ctx context.Context, chainID *big.Int, errChan chan error) { sink := make(chan *taikol1.TaikoL1BlockProposed) - sub := event.ResubscribeErr(svc.subscriptionBackoff, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - log.Error("svc.taikoL1.WatchBlockProposed", "error", err) - } - log.Info("resubscribing to BlockProposed events") + sub := event.ResubscribeErr( + indxr.subscriptionBackoff, + func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + log.Error("indxr.taikoL1.WatchBlockProposed", "error", err) + } + log.Info("resubscribing to BlockProposed events") - return svc.taikol1.WatchBlockProposed(&bind.WatchOpts{ - Context: ctx, - }, sink, nil, nil) - }) + return indxr.taikol1.WatchBlockProposed(&bind.WatchOpts{ + Context: ctx, + }, sink, nil, nil) + }) defer sub.Unsubscribe() @@ -182,45 +188,45 @@ func (svc *Service) subscribeBlockProposed(ctx context.Context, chainID *big.Int go func() { slog.Info("blockProposedEvent from subscription") - tx, _, err := svc.ethClient.TransactionByHash(ctx, event.Raw.TxHash) + tx, _, err := indxr.ethClient.TransactionByHash(ctx, event.Raw.TxHash) if err != nil { - slog.Error("svc.ethClient.TransactionByHash", "error", err) + slog.Error("indxr.ethClient.TransactionByHash", "error", err) return } - sender, err := svc.ethClient.TransactionSender(ctx, tx, event.Raw.BlockHash, 
event.Raw.TxIndex) + sender, err := indxr.ethClient.TransactionSender(ctx, tx, event.Raw.BlockHash, event.Raw.TxIndex) if err != nil { - slog.Error("svc.ethClient.TransactionSender", "error", err) + slog.Error("indxr.ethClient.TransactionSender", "error", err) return } slog.Info("blockProposed", "proposer", sender.Hex(), "blockID", event.BlockId.Uint64()) - if err := svc.saveBlockProposedEvent(ctx, chainID, event, sender); err != nil { + if err := indxr.saveBlockProposedEvent(ctx, chainID, event, sender); err != nil { eventindexer.BlockProposedEventsProcessedError.Inc() - slog.Error("svc.subscribe, svc.saveBlockProposedEvent", "error", err) + slog.Error("indxr.subscribe, indxr.saveBlockProposedEvent", "error", err) return } - block, err := svc.blockRepo.GetLatestBlockProcessed(chainID) + block, err := indxr.processedBlockRepo.GetLatestBlockProcessed(chainID) if err != nil { - slog.Error("svc.subscribe, svc.blockRepo.GetLatestBlockProcessed", "error", err) + slog.Error("indxr.subscribe, indxr.processedBlockRepo.GetLatestBlockProcessed", "error", err) return } if block.Height < event.Raw.BlockNumber { - err = svc.blockRepo.Save(eventindexer.SaveBlockOpts{ + err = indxr.processedBlockRepo.Save(eventindexer.SaveProcessedBlockOpts{ Height: event.Raw.BlockNumber, Hash: event.Raw.BlockHash, ChainID: chainID, }) if err != nil { - slog.Error("svc.subscribe, blockRepo.save", "error", err) + slog.Error("indxr.subscribe, blockRepo.save", "error", err) return } @@ -232,20 +238,22 @@ func (svc *Service) subscribeBlockProposed(ctx context.Context, chainID *big.Int } } -func (svc *Service) subscribeBlockVerified(ctx context.Context, chainID *big.Int, errChan chan error) { +func (indxr *Indexer) subscribeBlockVerified(ctx context.Context, chainID *big.Int, errChan chan error) { sink := make(chan *taikol1.TaikoL1BlockVerified) - sub := event.ResubscribeErr(svc.subscriptionBackoff, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - 
slog.Error("svc.taikoL1.WatchBlockVerified", "error", err) - } + sub := event.ResubscribeErr( + indxr.subscriptionBackoff, + func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + slog.Error("indxr.taikoL1.WatchBlockVerified", "error", err) + } - slog.Info("resubscribing to BlockVerified events") + slog.Info("resubscribing to BlockVerified events") - return svc.taikol1.WatchBlockVerified(&bind.WatchOpts{ - Context: ctx, - }, sink, nil, nil) - }) + return indxr.taikol1.WatchBlockVerified(&bind.WatchOpts{ + Context: ctx, + }, sink, nil, nil) + }) defer sub.Unsubscribe() @@ -261,27 +269,27 @@ func (svc *Service) subscribeBlockVerified(ctx context.Context, chainID *big.Int go func() { slog.Info("blockVerifiedEvent from subscription", "prover", event.Prover.Hex()) - if err := svc.saveBlockVerifiedEvent(ctx, chainID, event); err != nil { + if err := indxr.saveBlockVerifiedEvent(ctx, chainID, event); err != nil { eventindexer.BlockVerifiedEventsProcessedError.Inc() - slog.Error("svc.subscribe, svc.saveBlockVerifiedEvent", "error", err) + slog.Error("indxr.subscribe, indxr.saveBlockVerifiedEvent", "error", err) return } - block, err := svc.blockRepo.GetLatestBlockProcessed(chainID) + block, err := indxr.processedBlockRepo.GetLatestBlockProcessed(chainID) if err != nil { - slog.Error("svc.subscribe, svc.blockRepo.GetLatestBlockProcessed", "error", err) + slog.Error("indxr.subscribe, indxr.processedBlockRepo.GetLatestBlockProcessed", "error", err) return } if block.Height < event.Raw.BlockNumber { - err = svc.blockRepo.Save(eventindexer.SaveBlockOpts{ + err = indxr.processedBlockRepo.Save(eventindexer.SaveProcessedBlockOpts{ Height: event.Raw.BlockNumber, Hash: event.Raw.BlockHash, ChainID: chainID, }) if err != nil { - slog.Error("svc.subscribe, blockRepo.save", "error", err) + slog.Error("indxr.subscribe, blockRepo.save", "error", err) return } @@ -292,20 +300,22 @@ func (svc *Service) subscribeBlockVerified(ctx context.Context, chainID 
*big.Int } } -func (svc *Service) subscribeMessageSent(ctx context.Context, chainID *big.Int, errChan chan error) { +func (indxr *Indexer) subscribeMessageSent(ctx context.Context, chainID *big.Int, errChan chan error) { sink := make(chan *bridge.BridgeMessageSent) - sub := event.ResubscribeErr(svc.subscriptionBackoff, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - slog.Error("svc.taikoL1.WatchMessageSent", "error", err) - } + sub := event.ResubscribeErr( + indxr.subscriptionBackoff, + func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + slog.Error("indxr.taikoL1.WatchMessageSent", "error", err) + } - slog.Info("resubscribing to MessageSent events") + slog.Info("resubscribing to MessageSent events") - return svc.bridge.WatchMessageSent(&bind.WatchOpts{ - Context: ctx, - }, sink, nil) - }) + return indxr.bridge.WatchMessageSent(&bind.WatchOpts{ + Context: ctx, + }, sink, nil) + }) defer sub.Unsubscribe() @@ -321,28 +331,28 @@ func (svc *Service) subscribeMessageSent(ctx context.Context, chainID *big.Int, go func() { slog.Info("messageSentEvent", "owner", event.Message.From.Hex()) - if err := svc.saveMessageSentEvent(ctx, chainID, event); err != nil { + if err := indxr.saveMessageSentEvent(ctx, chainID, event); err != nil { eventindexer.MessageSentEventsProcessedError.Inc() - slog.Error("svc.subscribe, svc.saveMessageSentEvent", "error", err) + slog.Error("indxr.subscribe, indxr.saveMessageSentEvent", "error", err) return } - block, err := svc.blockRepo.GetLatestBlockProcessed(chainID) + block, err := indxr.processedBlockRepo.GetLatestBlockProcessed(chainID) if err != nil { - slog.Error("svc.subscribe, svc.blockRepo.GetLatestBlockProcessed", "error", err) + slog.Error("indxr.subscribe, indxr.processedBlockRepo.GetLatestBlockProcessed", "error", err) return } if block.Height < event.Raw.BlockNumber { - err = svc.blockRepo.Save(eventindexer.SaveBlockOpts{ + err = 
indxr.processedBlockRepo.Save(eventindexer.SaveProcessedBlockOpts{ Height: event.Raw.BlockNumber, Hash: event.Raw.BlockHash, ChainID: chainID, }) if err != nil { - slog.Error("svc.subscribe, blockRepo.save", "error", err) + slog.Error("indxr.subscribe, blockRepo.save", "error", err) return } @@ -353,19 +363,21 @@ func (svc *Service) subscribeMessageSent(ctx context.Context, chainID *big.Int, } } -func (svc *Service) subscribeSwap(ctx context.Context, s *swap.Swap, chainID *big.Int, errChan chan error) { +func (indxr *Indexer) subscribeSwap(ctx context.Context, s *swap.Swap, chainID *big.Int, errChan chan error) { sink := make(chan *swap.SwapSwap) - sub := event.ResubscribeErr(svc.subscriptionBackoff, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - slog.Error("s.WatchSwap", "error", err) - } - slog.Info("resubscribing to Swap events") + sub := event.ResubscribeErr( + indxr.subscriptionBackoff, + func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + slog.Error("s.WatchSwap", "error", err) + } + slog.Info("resubscribing to Swap events") - return s.WatchSwap(&bind.WatchOpts{ - Context: ctx, - }, sink, nil, nil) - }) + return s.WatchSwap(&bind.WatchOpts{ + Context: ctx, + }, sink, nil, nil) + }) defer sub.Unsubscribe() @@ -379,28 +391,28 @@ func (svc *Service) subscribeSwap(ctx context.Context, s *swap.Swap, chainID *bi errChan <- errors.Wrap(err, "sub.Err()") case event := <-sink: go func() { - if err := svc.saveSwapEvent(ctx, chainID, event); err != nil { + if err := indxr.saveSwapEvent(ctx, chainID, event); err != nil { eventindexer.SwapEventsProcessedError.Inc() - slog.Error("svc.subscribe, svc.saveSwapEvent", "error", err) + slog.Error("indxr.subscribe, indxr.saveSwapEvent", "error", err) return } - block, err := svc.blockRepo.GetLatestBlockProcessed(chainID) + block, err := indxr.processedBlockRepo.GetLatestBlockProcessed(chainID) if err != nil { - slog.Error("svc.subscribe, 
svc.blockRepo.GetLatestBlockProcessed", "error", err) + slog.Error("indxr.subscribe, indxr.processedBlockRepo.GetLatestBlockProcessed", "error", err) return } if block.Height < event.Raw.BlockNumber { - err = svc.blockRepo.Save(eventindexer.SaveBlockOpts{ + err = indxr.processedBlockRepo.Save(eventindexer.SaveProcessedBlockOpts{ Height: event.Raw.BlockNumber, Hash: event.Raw.BlockHash, ChainID: chainID, }) if err != nil { - slog.Error("svc.subscribe, blockRepo.save", "error", err) + slog.Error("indxr.subscribe, blockRepo.save", "error", err) return } @@ -411,19 +423,21 @@ func (svc *Service) subscribeSwap(ctx context.Context, s *swap.Swap, chainID *bi } } -func (svc *Service) subscribeLiquidityAdded(ctx context.Context, s *swap.Swap, chainID *big.Int, errChan chan error) { +func (indxr *Indexer) subscribeLiquidityAdded(ctx context.Context, s *swap.Swap, chainID *big.Int, errChan chan error) { sink := make(chan *swap.SwapMint) - sub := event.ResubscribeErr(svc.subscriptionBackoff, func(ctx context.Context, err error) (event.Subscription, error) { - if err != nil { - slog.Error("s.WatchMint", "error", err) - } - slog.Info("resubscribing to Swap events") + sub := event.ResubscribeErr( + indxr.subscriptionBackoff, + func(ctx context.Context, err error) (event.Subscription, error) { + if err != nil { + slog.Error("s.WatchMint", "error", err) + } + slog.Info("resubscribing to Swap events") - return s.WatchMint(&bind.WatchOpts{ - Context: ctx, - }, sink, nil) - }) + return s.WatchMint(&bind.WatchOpts{ + Context: ctx, + }, sink, nil) + }) defer sub.Unsubscribe() @@ -437,28 +451,28 @@ func (svc *Service) subscribeLiquidityAdded(ctx context.Context, s *swap.Swap, c errChan <- errors.Wrap(err, "sub.Err()") case event := <-sink: go func() { - if err := svc.saveLiquidityAddedEvent(ctx, chainID, event); err != nil { + if err := indxr.saveLiquidityAddedEvent(ctx, chainID, event); err != nil { eventindexer.SwapEventsProcessedError.Inc() - slog.Error("svc.subscribe, 
svc.saveLiquidityAddedEvent", "error", err) + slog.Error("indxr.subscribe, indxr.saveLiquidityAddedEvent", "error", err) return } - block, err := svc.blockRepo.GetLatestBlockProcessed(chainID) + block, err := indxr.processedBlockRepo.GetLatestBlockProcessed(chainID) if err != nil { - slog.Error("svc.subscribe, blockRepo.GetLatestBlockProcessed", "error", err) + slog.Error("indxr.subscribe, blockRepo.GetLatestBlockProcessed", "error", err) return } if block.Height < event.Raw.BlockNumber { - err = svc.blockRepo.Save(eventindexer.SaveBlockOpts{ + err = indxr.processedBlockRepo.Save(eventindexer.SaveProcessedBlockOpts{ Height: event.Raw.BlockNumber, Hash: event.Raw.BlockHash, ChainID: chainID, }) if err != nil { - slog.Error("svc.subscribe, svc.blockRepo.Save", "error", err) + slog.Error("indxr.subscribe, indxr.processedBlockRepo.Save", "error", err) return } diff --git a/packages/eventindexer/metrics/metrics.go b/packages/eventindexer/metrics/metrics.go new file mode 100644 index 00000000000..dfa424f25fa --- /dev/null +++ b/packages/eventindexer/metrics/metrics.go @@ -0,0 +1,34 @@ +package metrics + +import ( + "context" + "fmt" + + echoprom "github.com/labstack/echo-contrib/prometheus" + "github.com/labstack/echo/v4" + "github.com/labstack/gommon/log" + "github.com/taikoxyz/taiko-mono/packages/eventindexer/cmd/flags" + "github.com/urfave/cli/v2" + "golang.org/x/exp/slog" +) + +// Serve starts the metrics server on the given address, will be closed when the given +// context is cancelled. 
+func Serve(ctx context.Context, c *cli.Context) (*echo.Echo, func() error) { + // Enable metrics middleware + p := echoprom.NewPrometheus("echo", nil) + e := echo.New() + p.SetMetricsPath(e) + + go func() { + <-ctx.Done() + + if err := e.Shutdown(ctx); err != nil { + log.Error("Failed to close metrics server", "error", err) + } + }() + + slog.Info("Starting metrics server", "port", c.Uint64(flags.MetricsHTTPPort.Name)) + + return e, func() error { return e.Start(fmt.Sprintf(":%v", c.Uint64(flags.MetricsHTTPPort.Name))) } +} diff --git a/packages/eventindexer/metrics/metrics_test.go b/packages/eventindexer/metrics/metrics_test.go new file mode 100644 index 00000000000..5c4775e2e99 --- /dev/null +++ b/packages/eventindexer/metrics/metrics_test.go @@ -0,0 +1,64 @@ +package metrics + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/labstack/echo/v4" + "github.com/stretchr/testify/assert" + "github.com/taikoxyz/taiko-mono/packages/relayer/cmd/flags" + "github.com/urfave/cli/v2" +) + +func Test_Metrics(t *testing.T) { + app := cli.NewApp() + app.Flags = []cli.Flag{ + flags.MetricsHTTPPort, + } + + app.Action = func(c *cli.Context) error { + ctx, cancel := context.WithCancel(context.Background()) + + var e *echo.Echo + + var startFunc func() error + + var err error + + go func() { + e, startFunc = Serve(ctx, c) + + err = startFunc() + }() + + for e == nil && err == nil { + time.Sleep(1 * time.Second) + } + + assert.Nil(t, err) + assert.NotNil(t, e) + + req, _ := http.NewRequest(echo.GET, "/metrics", nil) + rec := httptest.NewRecorder() + + e.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("Test_Metrics expected code %v, got %v", http.StatusOK, rec.Code) + } + + cancel() + + assert.Nil(t, err) + + return nil + } + + assert.Nil(t, app.Run([]string{ + "TestMetrics", + "-" + flags.MetricsHTTPPort.Name, "5019", + })) +} diff --git a/packages/eventindexer/migrations/1666650599_create_events_table.sql 
b/packages/eventindexer/migrations/1666650599_create_events_table.sql index 92b0a5bceb0..e5b027fa73c 100644 --- a/packages/eventindexer/migrations/1666650599_create_events_table.sql +++ b/packages/eventindexer/migrations/1666650599_create_events_table.sql @@ -10,6 +10,7 @@ CREATE TABLE IF NOT EXISTS events ( block_id int DEFAULT NULL, amount DECIMAL(65, 0) DEFAULT NULL, assigned_prover VARCHAR(42) NOT NULL DEFAULT "", + transacted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP , updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP ); diff --git a/packages/eventindexer/migrations/20230906203839_create_transactions_table.sql b/packages/eventindexer/migrations/20230906203839_create_transactions_table.sql new file mode 100644 index 00000000000..62da266b6b7 --- /dev/null +++ b/packages/eventindexer/migrations/20230906203839_create_transactions_table.sql @@ -0,0 +1,21 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE IF NOT EXISTS transactions ( + id int NOT NULL PRIMARY KEY AUTO_INCREMENT, + chain_id int not null, + sender varchar(42) not null, + recipient varchar(42) default null, + block_id int not null, + amount DECIMAL(65, 0) DEFAULT NULL, + gas_price varchar(20) not null, + contract_address varchar(42) default "", + transacted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP , + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE transactions; +-- +goose StatementEnd diff --git a/packages/eventindexer/migrations/20230906203931_create_time_series_data_table.sql b/packages/eventindexer/migrations/20230906203931_create_time_series_data_table.sql new file mode 100644 index 00000000000..64bd23e013e --- /dev/null +++ b/packages/eventindexer/migrations/20230906203931_create_time_series_data_table.sql @@ -0,0 +1,17 @@ +-- +goose Up +-- +goose StatementBegin 
+CREATE TABLE IF NOT EXISTS time_series_data ( + id int NOT NULL PRIMARY KEY AUTO_INCREMENT, + task VARCHAR(40) NOT NULL, + value VARCHAR(100) NOT NULL, + date VARCHAR(20) NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP , + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + UNIQUE key `task_date` (`task`, `date`) +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE time_series_data; +-- +goose StatementEnd diff --git a/packages/eventindexer/migrations/20230906206839_create_blocks_table.sql b/packages/eventindexer/migrations/20230906206839_create_blocks_table.sql new file mode 100644 index 00000000000..0a057ee99df --- /dev/null +++ b/packages/eventindexer/migrations/20230906206839_create_blocks_table.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE IF NOT EXISTS blocks ( + id int NOT NULL PRIMARY KEY AUTO_INCREMENT, + chain_id int not null, + block_id int not null unique, + transacted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP , + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE blocks; +-- +goose StatementEnd diff --git a/packages/eventindexer/migrations/20230906208839_create_accounts_table.sql b/packages/eventindexer/migrations/20230906208839_create_accounts_table.sql new file mode 100644 index 00000000000..86f5c87f131 --- /dev/null +++ b/packages/eventindexer/migrations/20230906208839_create_accounts_table.sql @@ -0,0 +1,15 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE IF NOT EXISTS accounts ( + id int NOT NULL PRIMARY KEY AUTO_INCREMENT, + address varchar(42) not null unique, + transacted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP , + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +); + +-- +goose StatementEnd +-- +goose Down +-- 
+goose StatementBegin +DROP TABLE accounts; +-- +goose StatementEnd diff --git a/packages/eventindexer/mock/block_repository.go b/packages/eventindexer/mock/processed_block_repository.go similarity index 53% rename from packages/eventindexer/mock/block_repository.go rename to packages/eventindexer/mock/processed_block_repository.go index 0ad40f1e7ff..953ad16fb01 100644 --- a/packages/eventindexer/mock/block_repository.go +++ b/packages/eventindexer/mock/processed_block_repository.go @@ -8,21 +8,23 @@ import ( ) var ( - LatestBlock = &eventindexer.Block{ + LatestBlock = &eventindexer.ProcessedBlock{ Height: 100, Hash: "0x", ChainID: MockChainID.Int64(), } ) -type BlockRepository struct { +type ProcessedBlockRepository struct { } -func (r *BlockRepository) Save(opts eventindexer.SaveBlockOpts) error { +func (r *ProcessedBlockRepository) Save(opts eventindexer.SaveProcessedBlockOpts) error { return nil } -func (r *BlockRepository) GetLatestBlockProcessedForEvent(chainID *big.Int) (*eventindexer.Block, error) { +func (r *ProcessedBlockRepository) GetLatestBlockProcessedForEvent( + chainID *big.Int, +) (*eventindexer.ProcessedBlock, error) { if chainID.Int64() != MockChainID.Int64() { return nil, errors.New("error getting latest block processed for event") } diff --git a/packages/eventindexer/repo/account.go b/packages/eventindexer/repo/account.go new file mode 100644 index 00000000000..579a441331a --- /dev/null +++ b/packages/eventindexer/repo/account.go @@ -0,0 +1,58 @@ +package repo + +import ( + "context" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/taikoxyz/taiko-mono/packages/eventindexer" + "gorm.io/gorm" +) + +type AccountRepository struct { + db eventindexer.DB +} + +func NewAccountRepository(db eventindexer.DB) (*AccountRepository, error) { + if db == nil { + return nil, eventindexer.ErrNoDB + } + + return &AccountRepository{ + db: db, + }, nil +} + +func (r *AccountRepository) Save( + ctx 
 context.Context, + address common.Address, + transactedAt time.Time, +) error { + // only insert if address doesn't exist + a := &eventindexer.Account{} + + if err := r.db.GormDB().Where("address = ?", address.Hex()).First(a).Error; err != nil { + if err != gorm.ErrRecordNotFound { + return err + } + } + + if a.ID == 0 { + t := &eventindexer.Account{ + Address: address.Hex(), + TransactedAt: transactedAt, + } + + if err := r.db.GormDB().Create(t).Error; err != nil { + if strings.Contains(err.Error(), "Duplicate") { + return nil + } + + return errors.Wrap(err, "r.db.Create") + } + } + + return nil +} diff --git a/packages/eventindexer/repo/block.go b/packages/eventindexer/repo/block.go index 0203c34a2c4..f0a99030bbb 100644 --- a/packages/eventindexer/repo/block.go +++ b/packages/eventindexer/repo/block.go @@ -1,10 +1,14 @@ package repo import ( + "context" "math/big" + "strings" + "time" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" "github.com/taikoxyz/taiko-mono/packages/eventindexer" - "gorm.io/gorm" ) type BlockRepository struct { @@ -21,42 +25,31 @@ func NewBlockRepository(db eventindexer.DB) (*BlockRepository, error) { }, nil } -func (r *BlockRepository) startQuery() *gorm.DB { - return r.db.GormDB().Table("processed_blocks") -} - -func (r *BlockRepository) Save(opts eventindexer.SaveBlockOpts) error { - exists := &eventindexer.Block{} - _ = r.startQuery().Where("block_height = ?", opts.Height).Where("chain_id = ?", opts.ChainID.Int64()).First(exists) - // block processed already - if exists.Height == opts.Height { +func (r *BlockRepository) Save( + ctx context.Context, + block *types.Block, + chainID *big.Int, +) error { + // genesis block will have 0 time and no relevant information + if block.Time() == uint64(0) { return nil } + t := time.Unix(int64(block.Time()), 0) + b := &eventindexer.Block{ - Height: opts.Height, - Hash: opts.Hash.String(), - ChainID: opts.ChainID.Int64(), - } - if err := r.startQuery().Create(b).Error; err 
!= nil { - return err + ChainID: chainID.Int64(), + BlockID: block.Number().Int64(), + TransactedAt: t, } - return nil -} + if err := r.db.GormDB().Create(b).Error; err != nil { + if strings.Contains(err.Error(), "Duplicate") { + return nil + } -func (r *BlockRepository) GetLatestBlockProcessed(chainID *big.Int) (*eventindexer.Block, error) { - b := &eventindexer.Block{} - if err := r. - startQuery(). - Raw(`SELECT id, block_height, hash, chain_id - FROM processed_blocks - WHERE block_height = - ( SELECT MAX(block_height) from processed_blocks - WHERE chain_id = ? )`, chainID.Int64()). - FirstOrInit(b).Error; err != nil { - return nil, err + return errors.Wrap(err, "r.db.Create") } - return b, nil + return nil } diff --git a/packages/eventindexer/repo/chart_repo.go b/packages/eventindexer/repo/chart_repo.go new file mode 100644 index 00000000000..e1326c4a35c --- /dev/null +++ b/packages/eventindexer/repo/chart_repo.go @@ -0,0 +1,56 @@ +package repo + +import ( + "context" + + "github.com/taikoxyz/taiko-mono/packages/eventindexer" + "gorm.io/gorm" +) + +type ChartRepository struct { + db eventindexer.DB +} + +func NewChartRepository(db eventindexer.DB) (*ChartRepository, error) { + if db == nil { + return nil, eventindexer.ErrNoDB + } + + return &ChartRepository{ + db: db, + }, nil +} + +func (r *ChartRepository) getDB() *gorm.DB { + return r.db.GormDB().Table("time_series_data") +} + +func (r *ChartRepository) Find( + ctx context.Context, + task string, + start string, + end string, +) (*eventindexer.ChartResponse, error) { + q := `SELECT * FROM time_series_data + WHERE task = ? AND date BETWEEN ? AND ? 
+ ORDER BY date;` + + var tsd []*eventindexer.TimeSeriesData + + if err := r.getDB().Raw(q, task, start, end).Scan(&tsd).Error; err != nil { + return nil, err + } + + chart := &eventindexer.ChartResponse{ + Chart: make([]eventindexer.ChartItem, 0), + } + + for _, d := range tsd { + chart.Chart = append(chart.Chart, eventindexer.ChartItem{ + Date: d.Date, + Value: d.Value, + }) + } + + return chart, nil +} diff --git a/packages/eventindexer/repo/event.go b/packages/eventindexer/repo/event.go index b95934753e1..4b940615a6d 100644 --- a/packages/eventindexer/repo/event.go +++ b/packages/eventindexer/repo/event.go @@ -29,11 +29,12 @@ func NewEventRepository(db eventindexer.DB) (*EventRepository, error) { func (r *EventRepository) Save(ctx context.Context, opts eventindexer.SaveEventOpts) (*eventindexer.Event, error) { e := &eventindexer.Event{ - Data: datatypes.JSON(opts.Data), - ChainID: opts.ChainID.Int64(), - Name: opts.Name, - Event: opts.Event, - Address: opts.Address, + Data: datatypes.JSON(opts.Data), + ChainID: opts.ChainID.Int64(), + Name: opts.Name, + Event: opts.Event, + Address: opts.Address, + TransactedAt: opts.TransactedAt, } if opts.BlockID != nil { diff --git a/packages/eventindexer/repo/event_test.go b/packages/eventindexer/repo/event_test.go index adc8d3d2922..1b1897de18a 100644 --- a/packages/eventindexer/repo/event_test.go +++ b/packages/eventindexer/repo/event_test.go @@ -4,6 +4,7 @@ import ( "context" "math/big" "testing" + "time" "github.com/davecgh/go-spew/spew" "github.com/stretchr/testify/assert" @@ -13,20 +14,22 @@ import ( var ( blockID int64 = 1 dummyProveEventOpts = eventindexer.SaveEventOpts{ - Name: eventindexer.EventNameBlockProven, - Address: "0x123", - Data: "{\"data\":\"something\"}", - Event: eventindexer.EventNameBlockProven, - ChainID: big.NewInt(1), - BlockID: &blockID, + Name: eventindexer.EventNameBlockProven, + Address: "0x123", + Data: "{\"data\":\"something\"}", + Event: eventindexer.EventNameBlockProven, + ChainID: 
big.NewInt(1), + BlockID: &blockID, + TransactedAt: time.Now(), } dummyProposeEventOpts = eventindexer.SaveEventOpts{ - Name: eventindexer.EventNameBlockProposed, - Address: "0x123", - Data: "{\"data\":\"something\"}", - Event: eventindexer.EventNameBlockProposed, - ChainID: big.NewInt(1), - BlockID: &blockID, + Name: eventindexer.EventNameBlockProposed, + Address: "0x123", + Data: "{\"data\":\"something\"}", + Event: eventindexer.EventNameBlockProposed, + ChainID: big.NewInt(1), + BlockID: &blockID, + TransactedAt: time.Now(), } ) @@ -46,11 +49,12 @@ func TestIntegration_Event_Save(t *testing.T) { { "success", eventindexer.SaveEventOpts{ - Name: "test", - ChainID: big.NewInt(1), - Data: "{\"data\":\"something\"}", - Event: eventindexer.EventNameBlockProposed, - Address: "0x123", + Name: "test", + ChainID: big.NewInt(1), + Data: "{\"data\":\"something\"}", + Event: eventindexer.EventNameBlockProposed, + Address: "0x123", + TransactedAt: time.Now(), }, nil, }, diff --git a/packages/eventindexer/repo/processed_block.go b/packages/eventindexer/repo/processed_block.go new file mode 100644 index 00000000000..34104a430fd --- /dev/null +++ b/packages/eventindexer/repo/processed_block.go @@ -0,0 +1,62 @@ +package repo + +import ( + "math/big" + + "github.com/taikoxyz/taiko-mono/packages/eventindexer" + "gorm.io/gorm" +) + +type ProcessedBlockRepository struct { + db eventindexer.DB +} + +func NewProcessedBlockRepository(db eventindexer.DB) (*ProcessedBlockRepository, error) { + if db == nil { + return nil, eventindexer.ErrNoDB + } + + return &ProcessedBlockRepository{ + db: db, + }, nil +} + +func (r *ProcessedBlockRepository) startQuery() *gorm.DB { + return r.db.GormDB().Table("processed_blocks") +} + +func (r *ProcessedBlockRepository) Save(opts eventindexer.SaveProcessedBlockOpts) error { + exists := &eventindexer.ProcessedBlock{} + _ = r.startQuery().Where("block_height = ?", opts.Height).Where("chain_id = ?", opts.ChainID.Int64()).First(exists) + // block processed 
already + if exists.Height == opts.Height { + return nil + } + + b := &eventindexer.ProcessedBlock{ + Height: opts.Height, + Hash: opts.Hash.String(), + ChainID: opts.ChainID.Int64(), + } + if err := r.startQuery().Create(b).Error; err != nil { + return err + } + + return nil +} + +func (r *ProcessedBlockRepository) GetLatestBlockProcessed(chainID *big.Int) (*eventindexer.ProcessedBlock, error) { + b := &eventindexer.ProcessedBlock{} + if err := r. + startQuery(). + Raw(`SELECT id, block_height, hash, chain_id + FROM processed_blocks + WHERE block_height = + ( SELECT MAX(block_height) from processed_blocks + WHERE chain_id = ? )`, chainID.Int64()). + FirstOrInit(b).Error; err != nil { + return nil, err + } + + return b, nil +} diff --git a/packages/eventindexer/repo/block_test.go b/packages/eventindexer/repo/processed_block_test.go similarity index 84% rename from packages/eventindexer/repo/block_test.go rename to packages/eventindexer/repo/processed_block_test.go index eb25aba9bce..dc50d6095d7 100644 --- a/packages/eventindexer/repo/block_test.go +++ b/packages/eventindexer/repo/processed_block_test.go @@ -10,7 +10,7 @@ import ( "gopkg.in/go-playground/assert.v1" ) -func Test_NewBlockRepo(t *testing.T) { +func Test_NewProcessedBlockRepo(t *testing.T) { tests := []struct { name string db eventindexer.DB @@ -30,7 +30,7 @@ func Test_NewBlockRepo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := NewBlockRepository(tt.db) + _, err := NewProcessedBlockRepository(tt.db) assert.Equal(t, tt.wantErr, err) }) } @@ -42,16 +42,16 @@ func TestIntegration_Block_Save(t *testing.T) { defer close() - blockRepo, err := NewBlockRepository(db) + blockRepo, err := NewProcessedBlockRepository(db) assert.Equal(t, nil, err) tests := []struct { name string - opts eventindexer.SaveBlockOpts + opts eventindexer.SaveProcessedBlockOpts wantErr error }{ { "success", - eventindexer.SaveBlockOpts{ + eventindexer.SaveProcessedBlockOpts{ ChainID: 
big.NewInt(1), Height: 100, Hash: common.HexToHash("0x1234"), @@ -74,7 +74,7 @@ func TestIntegration_Block_GetLatestBlockProcessedForEvent(t *testing.T) { defer close() - blockRepo, err := NewBlockRepository(db) + blockRepo, err := NewProcessedBlockRepository(db) assert.Equal(t, nil, err) tests := []struct { name string diff --git a/packages/eventindexer/repo/transaction.go b/packages/eventindexer/repo/transaction.go new file mode 100644 index 00000000000..6e8f919ffca --- /dev/null +++ b/packages/eventindexer/repo/transaction.go @@ -0,0 +1,76 @@ +package repo + +import ( + "context" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/taikoxyz/taiko-mono/packages/eventindexer" +) + +var ( + ZeroAddress = common.HexToAddress("0x0000000000000000000000000000000000000000") +) + +type TransactionRepository struct { + db eventindexer.DB +} + +func NewTransactionRepository(db eventindexer.DB) (*TransactionRepository, error) { + if db == nil { + return nil, eventindexer.ErrNoDB + } + + return &TransactionRepository{ + db: db, + }, nil +} + +func (r *TransactionRepository) Save( + ctx context.Context, + tx *types.Transaction, + sender common.Address, + blockID *big.Int, + transactedAt time.Time, + contractAddress common.Address, +) error { + t := &eventindexer.Transaction{ + ChainID: tx.ChainId().Int64(), + Sender: sender.Hex(), + BlockID: blockID.Int64(), + GasPrice: tx.GasPrice().String(), + TransactedAt: transactedAt, + ContractAddress: contractAddress.Hex(), + } + + if to := tx.To(); to != nil { + t.Recipient = to.Hex() + } + + if tx.Value() != nil { + v, err := decimal.NewFromString(tx.Value().String()) + if err != nil { + return errors.Wrap(err, "decimal.NewFromString") + } + + t.Amount = decimal.NullDecimal{ + Valid: true, + Decimal: v, + } + } + + if err := r.db.GormDB().Create(t).Error; err != nil { + if 
strings.Contains(err.Error(), "Duplicate") { + return nil + } + + return errors.Wrap(err, "r.db.Create") + } + + return nil +} diff --git a/packages/eventindexer/tasks/tasks.go b/packages/eventindexer/tasks/tasks.go new file mode 100644 index 00000000000..971a8ffb717 --- /dev/null +++ b/packages/eventindexer/tasks/tasks.go @@ -0,0 +1,43 @@ +package tasks + +var ( + TotalTransactions = "total-transactions" + TransactionsPerDay = "transactions-per-day" + TotalBlocks = "total-blocks" + BlocksPerDay = "blocks-per-day" + TotalAccounts = "total-accounts" + AccountsPerDay = "accounts-per-day" + UniqueProposersPerDay = "unique-proposers-per-day" + TotalUniqueProposers = "total-proposers" + UniqueProversPerDay = "unique-provers-per-day" + TotalUniqueProvers = "total-provers" + TotalContractDeployments = "total-contract-deployments" + ContractDeploymentsPerDay = "contract-deployments-per-day" + ProveBlockTxPerDay = "prove-block-tx-per-day" + TotalProveBlockTx = "total-prove-block-tx" + ProposeBlockTxPerDay = "propose-block-tx-per-day" + TotalProposeBlockTx = "total-propose-block-tx" + BridgeMessagesSentPerDay = "bridge-messages-sent-per-day" + TotalBridgeMessagesSent = "total-bridge-messages-sent" +) + +var Tasks = []string{ + TotalTransactions, + TransactionsPerDay, + TotalBlocks, + BlocksPerDay, + TotalAccounts, + AccountsPerDay, + UniqueProposersPerDay, + TotalUniqueProposers, + UniqueProversPerDay, + TotalUniqueProvers, + TotalContractDeployments, + ContractDeploymentsPerDay, + ProveBlockTxPerDay, + TotalProveBlockTx, + ProposeBlockTxPerDay, + TotalProposeBlockTx, + BridgeMessagesSentPerDay, + TotalBridgeMessagesSent, +} diff --git a/packages/eventindexer/time_series_data.go b/packages/eventindexer/time_series_data.go new file mode 100644 index 00000000000..4d593b901b0 --- /dev/null +++ b/packages/eventindexer/time_series_data.go @@ -0,0 +1,12 @@ +package eventindexer + +import "time" + +type TimeSeriesData struct { + ID int + Task string + Value string + Date string + 
CreatedAt time.Time + UpdatedAt time.Time +} diff --git a/packages/eventindexer/transaction.go b/packages/eventindexer/transaction.go new file mode 100644 index 00000000000..0b8e51e55b7 --- /dev/null +++ b/packages/eventindexer/transaction.go @@ -0,0 +1,33 @@ +package eventindexer + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/shopspring/decimal" +) + +type Transaction struct { + ID int `json:"id"` + ChainID int64 `json:"chainID"` + Sender string `json:"sender"` + Recipient string `json:"recipient"` + BlockID int64 `json:"blockID"` + Amount decimal.NullDecimal `json:"amount"` + GasPrice string `json:"gasPrice"` + TransactedAt time.Time `json:"transactedAt"` + ContractAddress string `json:"contractAddress"` +} + +type TransactionRepository interface { + Save( + ctx context.Context, + tx *types.Transaction, + sender common.Address, + blockID *big.Int, + timestamp time.Time, + contractAddress common.Address) error +}