chore: Disable new logs table while still in development #632

Merged: 1 commit, Apr 3, 2024
22 changes: 12 additions & 10 deletions runner/src/provisioner/provisioner.test.ts
@@ -99,14 +99,15 @@ describe('Provisioner', () => {
         ['GRANT ALL PRIVILEGES ON DATABASE morgs_near TO morgs_near'],
         ['REVOKE CONNECT ON DATABASE morgs_near FROM PUBLIC'],
       ]);
-      expect(cronPgClient.query.mock.calls).toEqual([
-        ['GRANT USAGE ON SCHEMA cron TO morgs_near'],
-        ['GRANT EXECUTE ON FUNCTION cron.schedule_in_database TO morgs_near;'],
-      ]);
-      expect(userPgClientQuery.mock.calls).toEqual([
-        ["SELECT cron.schedule_in_database('morgs_near_test_function_logs_create_partition', '0 1 * * *', $$SELECT fn_create_partition('morgs_near_test_function.__logs', CURRENT_DATE, '1 day', '2 day')$$, 'morgs_near');"],
-        ["SELECT cron.schedule_in_database('morgs_near_test_function_logs_delete_partition', '0 2 * * *', $$SELECT fn_delete_partition('morgs_near_test_function.__logs', CURRENT_DATE, '-15 day', '-14 day')$$, 'morgs_near');"]
-      ]);
+      // TODO re-enable once logs table is created
+      // expect(cronPgClient.query.mock.calls).toEqual([
+      //   ['GRANT USAGE ON SCHEMA cron TO morgs_near'],
+      //   ['GRANT EXECUTE ON FUNCTION cron.schedule_in_database TO morgs_near;'],
+      // ]);
+      // expect(userPgClientQuery.mock.calls).toEqual([
+      //   ["SELECT cron.schedule_in_database('morgs_near_test_function_logs_create_partition', '0 1 * * *', $$SELECT fn_create_partition('morgs_near_test_function.__logs', CURRENT_DATE, '1 day', '2 day')$$, 'morgs_near');"],
+      //   ["SELECT cron.schedule_in_database('morgs_near_test_function_logs_delete_partition', '0 2 * * *', $$SELECT fn_delete_partition('morgs_near_test_function.__logs', CURRENT_DATE, '-15 day', '-14 day')$$, 'morgs_near');"]
+      // ]);
       expect(hasuraClient.addDatasource).toBeCalledWith(sanitizedAccountId, password, sanitizedAccountId);
       expect(hasuraClient.createSchema).toBeCalledWith(sanitizedAccountId, schemaName);
       expect(hasuraClient.runMigrations).toBeCalledWith(sanitizedAccountId, schemaName, databaseSchema);
@@ -201,13 +201,14 @@ describe('Provisioner', () => {
       await expect(provisioner.provisionUserApi(accountId, functionName, databaseSchema)).rejects.toThrow('Failed to provision endpoint: Failed to add permissions to tables: some error');
     });

-    it('throws when grant cron access fails', async () => {
+    // TODO re-enable once logs table is created
+    it.skip('throws when grant cron access fails', async () => {
       cronPgClient.query = jest.fn().mockRejectedValue(error);

       await expect(provisioner.provisionUserApi(accountId, functionName, databaseSchema)).rejects.toThrow('Failed to provision endpoint: Failed to setup partitioned logs table: Failed to grant cron access: some error');
     });

-    it('throws when scheduling cron jobs fails', async () => {
+    it.skip('throws when scheduling cron jobs fails', async () => {
       userPgClientQuery = jest.fn().mockRejectedValueOnce(error);

       await expect(provisioner.provisionUserApi(accountId, functionName, databaseSchema)).rejects.toThrow('Failed to provision endpoint: Failed to setup partitioned logs table: Failed to schedule log partition jobs: some error');
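For reference, the assertions being commented out describe the pg_cron wiring the provisioner performs when it sets up the partitioned __logs table. Below is a rough sketch of that flow reconstructed from the SQL in the skipped expectations; the standalone function shape, parameter names, and inline client type are illustrative assumptions, not the actual provisioner.ts signature.

// Rough sketch (not the actual implementation) of the behaviour the skipped
// assertions describe. The "Sketch" name and PgQueryClient type are assumptions.
interface PgQueryClient {
  query: (sql: string) => Promise<unknown>;
}

async function setupPartitionedLogsTableSketch(
  cronPgClient: PgQueryClient,  // connection to the database hosting pg_cron
  userPgClient: PgQueryClient,  // connection for the provisioned user's database
  userName: string,             // e.g. 'morgs_near'
  databaseName: string,         // e.g. 'morgs_near'
  schemaName: string            // e.g. 'morgs_near_test_function'
): Promise<void> {
  // Let the user schedule jobs through pg_cron.
  await cronPgClient.query(`GRANT USAGE ON SCHEMA cron TO ${userName}`);
  await cronPgClient.query(`GRANT EXECUTE ON FUNCTION cron.schedule_in_database TO ${userName};`);

  // Daily at 01:00: create the upcoming partition of <schema>.__logs
  // (fn_create_partition is invoked with CURRENT_DATE and a '1 day'..'2 day' window).
  await userPgClient.query(
    `SELECT cron.schedule_in_database('${schemaName}_logs_create_partition', '0 1 * * *', ` +
    `$$SELECT fn_create_partition('${schemaName}.__logs', CURRENT_DATE, '1 day', '2 day')$$, '${databaseName}');`
  );

  // Daily at 02:00: drop partitions roughly two weeks old
  // ('-15 day'..'-14 day' relative to CURRENT_DATE).
  await userPgClient.query(
    `SELECT cron.schedule_in_database('${schemaName}_logs_delete_partition', '0 2 * * *', ` +
    `$$SELECT fn_delete_partition('${schemaName}.__logs', CURRENT_DATE, '-15 day', '-14 day')$$, '${databaseName}');`
  );
}

In the tests, cronPgClient and userPgClientQuery are Jest mocks, which is why the skipped assertions compare query.mock.calls against these exact SQL strings.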
3 changes: 2 additions & 1 deletion runner/src/provisioner/provisioner.ts
@@ -239,7 +239,8 @@ export default class Provisioner {
     await this.createSchema(databaseName, schemaName);
     await this.runMigrations(databaseName, schemaName, databaseSchema);

-    await this.setupPartitionedLogsTable(userName, databaseName, schemaName);
+    // TODO re-enable once logs table is created
+    // await this.setupPartitionedLogsTable(userName, databaseName, schemaName);

     const tableNames = await this.getTableNames(schemaName, databaseName);
     await this.trackTables(schemaName, tableNames, databaseName);
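Because the call is commented out rather than made configurable, re-enabling it later means editing provisionUserApi again and removing `.skip` from the two tests above. If a runtime toggle were preferred instead, a gate along the following lines could work; this is a hypothetical sketch, not part of this PR, and the LOGS_TABLE_ENABLED flag and wrapper name are assumptions.

// Hypothetical alternative (not in this PR): drive the logs-table setup from an
// environment flag so turning it back on is a configuration change rather than
// uncommenting code. The flag name and wrapper function are assumptions.
const LOGS_TABLE_ENABLED = process.env.LOGS_TABLE_ENABLED === 'true';

type SetupFn = (userName: string, databaseName: string, schemaName: string) => Promise<void>;

async function maybeSetupPartitionedLogsTable(
  setup: SetupFn,
  userName: string,
  databaseName: string,
  schemaName: string
): Promise<void> {
  if (!LOGS_TABLE_ENABLED) {
    return; // Skip while the new logs table is still in development.
  }
  await setup(userName, databaseName, schemaName);
}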