chore: add some log for meta startup #20301

Open · wants to merge 1 commit into base: main
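
This change brackets each meta-node startup step with a tracing::info! line, so the gap between consecutive timestamps exposes which step is slow. A minimal, self-contained sketch of that pattern (not part of this PR; it assumes a plain tracing_subscriber::fmt subscriber, while the real meta node installs its own logging setup):

use std::time::Duration;

#[tokio::main]
async fn main() {
    // Stand-in for the meta node's real logging setup.
    tracing_subscriber::fmt::init();

    tracing::info!("MetaSrvEnv started");
    // Stand-in for a slow startup step, e.g. the cluster_id read patched below.
    tokio::time::sleep(Duration::from_secs(2)).await;
    tracing::info!("HummockManager started");
    // The ~2 s gap between the two timestamped lines pinpoints the slow step.
}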
src/meta/node/src/server.rs (11 additions, 0 deletions)

@@ -320,6 +320,7 @@ pub async fn start_service_as_election_leader(
         meta_store_impl,
     )
     .await?;
+    tracing::info!("MetaSrvEnv started");
     let _ = env.may_start_watch_license_key_file()?;
     let system_params_reader = env.system_params_reader().await;

@@ -355,8 +356,10 @@ pub async fn start_service_as_election_leader(
         .await
         .unwrap(),
     );
+    tracing::info!("CompactorManager started");

     let heartbeat_srv = HeartbeatServiceImpl::new(metadata_manager.clone());
+    tracing::info!("HeartbeatServiceImpl started");

     let (compactor_streams_change_tx, compactor_streams_change_rx) =
         tokio::sync::mpsc::unbounded_channel();
@@ -372,6 +375,7 @@
     )
     .await
     .unwrap();
+    tracing::info!("HummockManager started");
     let object_store_media_type = hummock_manager.object_store_media_type();

     let meta_member_srv = MetaMemberServiceImpl::new(election_client.clone());
@@ -414,6 +418,7 @@

     let (barrier_scheduler, scheduled_barriers) =
         BarrierScheduler::new_pair(hummock_manager.clone(), meta_metrics.clone());
+    tracing::info!("BarrierScheduler started");

     // Initialize services.
     let backup_manager = BackupManager::new(
@@ -424,12 +429,14 @@
         system_params_reader.backup_storage_directory(),
     )
     .await?;
+    tracing::info!("BackupManager started");

     LocalSecretManager::init(
         opts.temp_secret_file_dir,
         env.cluster_id().to_string(),
         META_NODE_ID,
     );
+    tracing::info!("LocalSecretManager started");

     let notification_srv = NotificationServiceImpl::new(
         env.clone(),
@@ -439,6 +446,7 @@
         serving_vnode_mapping.clone(),
     )
     .await?;
+    tracing::info!("NotificationServiceImpl started");

     let source_manager = Arc::new(
         SourceManager::new(
@@ -449,8 +457,10 @@
         .await
         .unwrap(),
     );
+    tracing::info!("SourceManager started");

     let (sink_manager, shutdown_handle) = SinkCoordinatorManager::start_worker();
+    tracing::info!("SinkCoordinatorManager started");
     // TODO(shutdown): remove this as there's no need to gracefully shutdown some of these sub-tasks.
     let mut sub_tasks = vec![shutdown_handle];

@@ -470,6 +480,7 @@
         scale_controller.clone(),
     )
     .await;
+    tracing::info!("GlobalBarrierManager started");
     sub_tasks.push((join_handle, shutdown_rx));

     {
src/meta/src/hummock/manager/mod.rs (2 additions, 0 deletions)

@@ -514,6 +514,7 @@ async fn write_exclusive_cluster_id(
     const CLUSTER_ID_NAME: &str = "0";
     let cluster_id_dir = format!("{}/{}/", state_store_dir, CLUSTER_ID_DIR);
     let cluster_id_full_path = format!("{}{}", cluster_id_dir, CLUSTER_ID_NAME);
+    tracing::info!("try reading cluster_id");
PR author's comment on the added line: This is actually the slowest part now; we stat two non-existent objects, which takes about 2 s (apache/opendal#5569).

     match object_store.read(&cluster_id_full_path, ..).await {
         Ok(stored_cluster_id) => {
             let stored_cluster_id = String::from_utf8(stored_cluster_id.to_vec()).unwrap();
@@ -529,6 +530,7 @@
         }
         Err(e) => {
             if e.is_object_not_found_error() {
+                tracing::info!("cluster_id not found, writing cluster_id");
                 object_store
                     .upload(&cluster_id_full_path, Bytes::from(String::from(cluster_id)))
                     .await?;
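
To quantify the slow read called out in the author's comment above, one could wrap the object-store call in a timing helper. A hypothetical sketch (the helper and its names are not from the PR):

use std::future::Future;
use std::time::Instant;

/// Await `fut` and log how long it took under `label`.
async fn timed<T, F: Future<Output = T>>(label: &str, fut: F) -> T {
    let started = Instant::now();
    let out = fut.await;
    tracing::info!(
        step = label,
        elapsed_ms = started.elapsed().as_millis() as u64,
        "startup step finished"
    );
    out
}

Wrapping object_store.read(&cluster_id_full_path, ..) in timed("read cluster_id", ...) would confirm the roughly 2 s spent stat-ing the missing objects that apache/opendal#5569 tracks.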