Skip to content

Commit

Permalink
chore: Remove notes
Browse files Browse the repository at this point in the history
  • Loading branch information
morgsmccauley committed May 29, 2024
1 parent 7afde0b commit b25685b
Show file tree
Hide file tree
Showing 6 changed files with 1 addition and 43 deletions.
7 changes: 0 additions & 7 deletions TODO.md

This file was deleted.

3 changes: 0 additions & 3 deletions coordinator/src/indexer_state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,6 @@ struct OldIndexerState {
block_stream_synced_at: Option<u64>,
}

// NOTE We'll need to add more fields here - is there a way to gracefully handle non-existent
// fields during serde deserialization? it's annoying to always have to migrate this
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct IndexerState {
pub block_stream_synced_at: Option<u64>,
Expand All @@ -44,7 +42,6 @@ pub struct IndexerStateManagerImpl {
redis_client: RedisClient,
}

// NOTE we probably need a "list" method, which means storing all state ids in a Redis set
#[cfg_attr(test, mockall::automock)]
impl IndexerStateManagerImpl {
pub fn new(redis_client: RedisClient) -> Self {
Expand Down
15 changes: 0 additions & 15 deletions coordinator/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,26 +68,11 @@ async fn main() -> anyhow::Result<()> {
.migrate_state_if_needed(&indexer_registry)
.await?;

// NOTE Rather than filtering them here, we can pass `IndexerState` to the sync methods,
// and let them decide what to do. That would be a bit cleaner?
//
// This will also allow us to determine when an Indexer has been deleted, rather than
// implicitly relying on the existence of executors/block_streams. This is going to be
// important for deprovisioning.
let indexer_registry = indexer_state_manager
.filter_disabled_indexers(&indexer_registry)
.await?;

tokio::try_join!(
// NOTE this may need to be refactored into a combined "synchronise" function.
// The addition of DataLayer provisioning makes the process a bit more stateful, i.e.
// we need to do provisioning first, wait till it completes, and can then kick off
// executor/block_stream sync processes
//
// It's probably still helpful to encapsulate the block_stream/executor sync methods,
// as they are quite involved, but call them from an overall synchronise method
//
// We'll need to store the `ProvisioningStatus` in Redis, so we know when to poll
synchronise_executors(&indexer_registry, &executors_handler),
synchronise_block_streams(
&indexer_registry,
Expand Down
14 changes: 0 additions & 14 deletions runner/protos/data-layer.proto
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,7 @@ syntax = "proto3";

package data_layer;

// NOTE this will eventually be expanded to handle more granular operations
// such as truncating the database, or perhaps running migrations
service DataLayer {
// NOTE As this process can take a while, we need to handle this asynchronously.
// Therefore, this will trigger the provisioning process and return immediately.
// The client can then poll the CheckProvisioningStatus method to determine when the
// provisioning process has completed.
//
// Maybe we should call this TriggerProvisioning instead of Provision?
//
// Need to figure out how this process will actually be kicked off asynchronously,
// can we just fire a promise or do we need worker threads?

// Provisions the data layer (PostgreSQL + Hasura)
rpc Provision (ProvisionRequest) returns (ProvisionResponse);

Expand All @@ -23,8 +11,6 @@ service DataLayer {
}

message ProvisionRequest {
// TODO This is only a partial `IndexerConfig`, which may pose an issue as
// all the provisioning methods expect a full `IndexerConfig`
string account_id = 1;
string function_name = 2;
string schema = 3;
Expand Down
3 changes: 0 additions & 3 deletions runner/src/indexer/indexer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,6 @@ const defaultConfig: Config = {
hasuraEndpoint: process.env.HASURA_ENDPOINT,
};

// NOTE We need to remove provisioning from here,
// I'm thinking we remove knowledge of it entirely? and just inject the db parameters
// in from `StreamHandler`? Maybe a good opportunity to rename `StreamHandler` into something more appropriate
export default class Indexer {
DEFAULT_HASURA_ROLE: string;
IS_FIRST_EXECUTION: boolean = true;
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { type ServerUnaryCall, status } from '@grpc/grpc-js';

import { createDataLayerService, type ProvisioningTask } from './data-layer-service'; // Adjust this path accordingly
import { createDataLayerService, type ProvisioningTask } from './data-layer-service';
import { ProvisioningStatus } from '../../../generated/ProvisioningStatus';
import type Provisioner from '../../../provisioner';

Expand Down

0 comments on commit b25685b

Please sign in to comment.