From 18660211c6403d4afcaf1bf4f9d10f9fe6be4e9b Mon Sep 17 00:00:00 2001 From: Daniel Porteous Date: Tue, 19 Nov 2024 14:01:25 +0000 Subject: [PATCH] Add --existing-hasura-url flag for indexer API in localnet --- crates/aptos/CHANGELOG.md | 3 + .../src/node/local_testnet/indexer_api.rs | 79 ++++++++++++++++--- 2 files changed, 72 insertions(+), 10 deletions(-) diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index d3d84666039ae..aea4358acadd1 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -4,6 +4,9 @@ All notable changes to the Aptos CLI will be captured in this file. This project ## Unreleased - Add option `--print-metadata-only` to `aptos move decompile` and `aptos move disassemble` to print out the metadata attached to the bytecode. +- Add `--existing-hasura-url` flag to localnet to tell it to use an existing Hasura instance instead of running Hasura itself. See https://github.com/aptos-labs/aptos-core/pull/15313. +- Add `--skip-metadata-apply` flag to localnet, in which case we won't try to apply the Hasura metadata. +- Upgrade the Hasura image we use from 2.40.2 to 2.44.0. 
## [4.5.0] - 2024/11/15 - Determine network from URL to make explorer links better for legacy users diff --git a/crates/aptos/src/node/local_testnet/indexer_api.rs b/crates/aptos/src/node/local_testnet/indexer_api.rs index 1967b1ceb2540..8ff415a879605 100644 --- a/crates/aptos/src/node/local_testnet/indexer_api.rs +++ b/crates/aptos/src/node/local_testnet/indexer_api.rs @@ -20,11 +20,11 @@ use clap::Parser; use futures::TryStreamExt; use maplit::{hashmap, hashset}; use reqwest::Url; -use std::{collections::HashSet, path::PathBuf}; +use std::{collections::HashSet, path::PathBuf, time::Duration}; use tracing::{info, warn}; const INDEXER_API_CONTAINER_NAME: &str = "local-testnet-indexer-api"; -const HASURA_IMAGE: &str = "hasura/graphql-engine:v2.40.2-ce"; +const HASURA_IMAGE: &str = "hasura/graphql-engine:v2.44.0-ce"; /// This Hasura metadata originates from the aptos-indexer-processors repo. /// @@ -47,22 +47,42 @@ const HASURA_METADATA: &str = include_str!("hasura_metadata.json"); /// Args related to running an indexer API for the localnet. #[derive(Debug, Parser)] pub struct IndexerApiArgs { - /// If set, we will run a postgres DB using Docker (unless - /// --use-host-postgres is set), run the standard set of indexer processors (see - /// --processors), and configure them to write to this DB, and run an API that lets - /// you access the data they write to storage. This is opt in because it requires - /// Docker to be installed on the host system. + /// If set, we will run a postgres DB using Docker (unless --use-host-postgres is + /// set), run the standard set of indexer processors (see --processors), and + /// configure them to write to this DB, and run an API that lets you access the data + /// they write to storage. This is opt in because it requires Docker to be installed + /// on the host system. #[clap(long, conflicts_with = "no_txn_stream")] pub with_indexer_api: bool, /// The port at which to run the indexer API. 
#[clap(long, default_value_t = 8090)] pub indexer_api_port: u16, + + /// If set, we will assume a Hasura instance is running at the given URL rather than + /// running our own. + /// + /// If set, we will not run the indexer API, and will instead assume that a Hasura + /// instance is running at the given URL. We will wait for it to become healthy by + /// waiting for / to return 200 and then apply the Hasura metadata. The URL should + /// look something like this: http://127.0.0.1:8090, assuming the Hasura instance is + /// running at port 8090. When the localnet shuts down, we will not attempt to stop + /// the Hasura instance; this is up to you to handle. If you're using this, you + /// should probably use `--use-host-postgres` as well, otherwise you won't be able + /// to start your Hasura instance because the DB we create won't exist yet. + #[clap(long)] + pub existing_hasura_url: Option<Url>, + + /// If set, we will not try to apply the Hasura metadata. + #[clap(long)] + pub skip_metadata_apply: bool, } #[derive(Clone, Debug)] pub struct IndexerApiManager { indexer_api_port: u16, + existing_hasura_url: Option<Url>, + skip_metadata_apply: bool, prerequisite_health_checkers: HashSet<HealthChecker>, test_dir: PathBuf, postgres_connection_string: String, @@ -77,6 +97,8 @@ impl IndexerApiManager { ) -> Result<Self> { Ok(Self { indexer_api_port: args.indexer_api_args.indexer_api_port, + existing_hasura_url: args.indexer_api_args.existing_hasura_url.clone(), + skip_metadata_apply: args.indexer_api_args.skip_metadata_apply, prerequisite_health_checkers, test_dir, postgres_connection_string, @@ -84,7 +106,10 @@ impl IndexerApiManager { } pub fn get_url(&self) -> Url { - Url::parse(&format!("http://127.0.0.1:{}", self.indexer_api_port)).unwrap() + match &self.existing_hasura_url { + Some(url) => url.clone(), + None => Url::parse(&format!("http://127.0.0.1:{}", self.indexer_api_port)).unwrap(), + } } } @@ -95,6 +120,10 @@ impl ServiceManager for IndexerApiManager { } async fn pre_run(&self) -> 
Result<()> { + if self.existing_hasura_url.is_some() { + return Ok(()); + } + // Confirm Docker is available. get_docker().await?; @@ -120,12 +149,15 @@ impl ServiceManager for IndexerApiManager { /// In this case we we return two HealthCheckers, one for whether the Hasura API /// is up at all and one for whether the metadata is applied. fn get_health_checkers(&self) -> HashSet<HealthChecker> { - hashset! { + let mut checkers = hashset! { // This first one just checks if the API is up at all. HealthChecker::Http(self.get_url(), "Indexer API".to_string()), + }; + if !self.skip_metadata_apply { // This second one checks if the metadata is applied. - HealthChecker::IndexerApiMetadata(self.get_url()), + checkers.insert(HealthChecker::IndexerApiMetadata(self.get_url())); } + checkers } fn get_prerequisite_health_checkers(&self) -> HashSet<&HealthChecker> { @@ -133,6 +165,25 @@ impl ServiceManager for IndexerApiManager { } async fn run_service(self: Box<Self>) -> Result<()> { + // If we're using an existing Hasura instance, we just do nothing. If the Hasura + // instance becomes unhealthy we print an error and exit. + if let Some(url) = self.existing_hasura_url { + info!("Using existing Hasura instance at {}", url); + // Periodically check that the Hasura instance is healthy. + let checker = HealthChecker::Http(url.clone(), "Indexer API".to_string()); + loop { + if let Err(e) = checker.wait(None).await { + eprintln!( + "Existing Hasura instance at {} became unhealthy: {}", + url, e + ); + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + return Ok(()); + } + setup_docker_logging(&self.test_dir, "indexer-api", INDEXER_API_CONTAINER_NAME)?; // This is somewhat hard to maintain. 
If it requires any further maintenance we @@ -239,6 +290,10 @@ impl ServiceManager for IndexerApiManager { } fn get_post_healthy_steps(&self) -> Vec<Box<dyn PostHealthyStep>> { + if self.skip_metadata_apply { + return vec![]; + } + /// There is no good way to apply Hasura metadata (the JSON format, anyway) to /// an instance of Hasura in a container at startup: /// @@ -267,6 +322,10 @@ impl ServiceManager for IndexerApiManager { } fn get_shutdown_steps(&self) -> Vec<Box<dyn ShutdownStep>> { + if self.existing_hasura_url.is_some() { + return vec![]; + } + // Unfortunately the Hasura container does not shut down when the CLI does and // there doesn't seem to be a good way to make it do so. To work around this, // we register a step that will stop the container on shutdown.