From c7c0744e3eed75291b2de6c76e672816cfee9008 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Fri, 21 Jun 2024 17:12:47 -0400 Subject: [PATCH 01/13] leases cronjobs rm leases conn.go comments clarification comment on duplicate field names unmerge txn interface lint remove extraneous comment better comment --- Justfile | 2 +- backend/controller/controller.go | 36 +- backend/controller/cronjobs/cronjobs.go | 9 +- .../cronjobs/cronjobs_integration_test.go | 8 +- backend/controller/cronjobs/cronjobs_test.go | 6 +- .../cronjobs/cronjobs_utils_test.go | 33 +- backend/controller/cronjobs/dal/dal.go | 101 ++++ backend/controller/cronjobs/sql/conn.go | 21 + backend/controller/cronjobs/sql/db.go | 32 ++ backend/controller/cronjobs/sql/models.go | 541 ++++++++++++++++++ backend/controller/cronjobs/sql/querier.go | 22 + backend/controller/cronjobs/sql/queries.sql | 59 ++ .../controller/cronjobs/sql/queries.sql.go | 252 ++++++++ backend/controller/dal/async_calls.go | 18 +- backend/controller/dal/dal.go | 118 +--- backend/controller/dal/fsm.go | 6 +- backend/controller/dal/fsm_test.go | 9 +- backend/controller/leases/dal/dal.go | 16 + backend/controller/{ => leases}/dal/lease.go | 6 +- .../controller/{ => leases}/dal/lease_test.go | 8 +- backend/controller/leases/sql/conn.go | 21 + backend/controller/leases/sql/db.go | 32 ++ backend/controller/leases/sql/models.go | 541 ++++++++++++++++++ backend/controller/leases/sql/querier.go | 23 + backend/controller/leases/sql/queries.sql | 37 ++ backend/controller/leases/sql/queries.sql.go | 97 ++++ backend/controller/sql/querier.go | 12 - backend/controller/sql/queries.sql | 98 ---- backend/controller/sql/queries.sql.go | 322 ----------- sqlc.yaml | 12 + 30 files changed, 1919 insertions(+), 579 deletions(-) create mode 100644 backend/controller/cronjobs/dal/dal.go create mode 100644 backend/controller/cronjobs/sql/conn.go create mode 100644 backend/controller/cronjobs/sql/db.go create mode 100644 backend/controller/cronjobs/sql/models.go create mode 100644 backend/controller/cronjobs/sql/querier.go create mode 100644 backend/controller/cronjobs/sql/queries.sql create mode 100644 backend/controller/cronjobs/sql/queries.sql.go create mode 100644 backend/controller/leases/dal/dal.go rename backend/controller/{ => leases}/dal/lease.go (95%) rename backend/controller/{ => leases}/dal/lease_test.go (93%) create mode 100644 backend/controller/leases/sql/conn.go create mode 100644 backend/controller/leases/sql/db.go create mode 100644 backend/controller/leases/sql/models.go create mode 100644 backend/controller/leases/sql/querier.go create mode 100644 backend/controller/leases/sql/queries.sql create mode 100644 backend/controller/leases/sql/queries.sql.go diff --git a/Justfile b/Justfile index 70ae697691..ab9244fa6d 100644 --- a/Justfile +++ b/Justfile @@ -66,7 +66,7 @@ init-db: # Regenerate SQLC code (requires init-db to be run first) build-sqlc: - @mk backend/controller/sql/{db.go,models.go,querier.go,queries.sql.go} common/configuration/sql/{db.go,models.go,querier.go,queries.sql.go} : backend/controller/sql/queries.sql common/configuration/sql/queries.sql backend/controller/sql/schema sqlc.yaml -- "just init-db && sqlc generate" + @mk backend/controller/sql/{db.go,models.go,querier.go,queries.sql.go} backend/controller/{cronjobs,leases}/sql/{db.go,models.go,querier.go,queries.sql.go} common/configuration/sql/{db.go,models.go,querier.go,queries.sql.go} : backend/controller/sql/queries.sql backend/controller/{cronjobs,leases}/sql/queries.sql 
common/configuration/sql/queries.sql backend/controller/sql/schema sqlc.yaml -- "just init-db && sqlc generate" # Build the ZIP files that are embedded in the FTL release binaries build-zips: build-kt-runtime diff --git a/backend/controller/controller.go b/backend/controller/controller.go index b02f395f21..6b51d89496 100644 --- a/backend/controller/controller.go +++ b/backend/controller/controller.go @@ -23,6 +23,7 @@ import ( "github.com/alecthomas/types/either" "github.com/alecthomas/types/optional" "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/jellydator/ttlcache/v3" "github.com/jpillora/backoff" "golang.org/x/exp/maps" @@ -34,9 +35,11 @@ import ( "github.com/TBD54566975/ftl" "github.com/TBD54566975/ftl/backend/controller/admin" "github.com/TBD54566975/ftl/backend/controller/cronjobs" + cronjobsdal "github.com/TBD54566975/ftl/backend/controller/cronjobs/dal" "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/ingress" "github.com/TBD54566975/ftl/backend/controller/leases" + leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/pubsub" "github.com/TBD54566975/ftl/backend/controller/scaling" "github.com/TBD54566975/ftl/backend/controller/scaling/localscaling" @@ -118,7 +121,13 @@ func Start(ctx context.Context, config Config, runnerScaling scaling.RunnerScali logger.Infof("Web console available at: %s", config.Bind) } - svc, err := New(ctx, dal, config, runnerScaling) + // Bring up the DB connection and DAL. + conn, err := pgxpool.New(ctx, config.DSN) + if err != nil { + return err + } + + svc, err := New(ctx, conn, config, runnerScaling) if err != nil { return err } @@ -128,7 +137,7 @@ func Start(ctx context.Context, config Config, runnerScaling scaling.RunnerScali sm := cf.SecretsFromContext(ctx) admin := admin.NewAdminService(cm, sm) - console := NewConsoleService(dal) + console := NewConsoleService(svc.dal) ingressHandler := http.Handler(svc) if len(config.AllowOrigins) > 0 { @@ -170,6 +179,7 @@ type ControllerListListener interface { } type Service struct { + pool *pgxpool.Pool dal *dal.DAL key model.ControllerKey deploymentLogsSink *deploymentLogsSink @@ -193,7 +203,7 @@ type Service struct { asyncCallsLock sync.Mutex } -func New(ctx context.Context, db *dal.DAL, config Config, runnerScaling scaling.RunnerScaling) (*Service, error) { +func New(ctx context.Context, pool *pgxpool.Pool, config Config, runnerScaling scaling.RunnerScaling) (*Service, error) { key := config.Key if config.Key.IsZero() { key = model.NewControllerKey(config.Bind.Hostname(), config.Bind.Port()) @@ -207,8 +217,14 @@ func New(ctx context.Context, db *dal.DAL, config Config, runnerScaling scaling. config.ControllerTimeout = time.Second * 5 } + db, err := dal.New(ctx, pool) + if err != nil { + return nil, err + } + svc := &Service{ - tasks: scheduledtask.New(ctx, key, db), + tasks: scheduledtask.New(ctx, key, leasesdal.New(pool)), + pool: pool, dal: db, key: key, deploymentLogsSink: newDeploymentLogsSink(ctx, db), @@ -220,7 +236,7 @@ func New(ctx context.Context, db *dal.DAL, config Config, runnerScaling scaling. 
svc.routes.Store(map[string][]dal.Route{}) svc.schema.Store(&schema.Schema{}) - cronSvc := cronjobs.New(ctx, key, svc.config.Advertise.Host, cronjobs.Config{Timeout: config.CronJobTimeout}, db, svc.tasks, svc.callWithRequest) + cronSvc := cronjobs.New(ctx, key, svc.config.Advertise.Host, cronjobs.Config{Timeout: config.CronJobTimeout}, cronjobsdal.New(pool), svc.tasks, svc.callWithRequest) svc.cronJobs = cronSvc svc.controllerListListeners = append(svc.controllerListListeners, cronSvc) @@ -772,7 +788,7 @@ func (s *Service) AcquireLease(ctx context.Context, stream *connect.BidiStream[f return connect.NewError(connect.CodeInternal, fmt.Errorf("could not receive lease request: %w", err)) } if lease == nil { - lease, _, err = s.dal.AcquireLease(ctx, leases.ModuleKey(msg.Module, msg.Key...), msg.Ttl.AsDuration(), optional.None[any]()) + lease, _, err = leasesdal.New(s.pool).AcquireLease(ctx, leases.ModuleKey(msg.Module, msg.Key...), msg.Ttl.AsDuration(), optional.None[any]()) if err != nil { if errors.Is(err, leases.ErrConflict) { return connect.NewError(connect.CodeResourceExhausted, fmt.Errorf("lease is held: %w", err)) @@ -810,7 +826,7 @@ func (s *Service) SendFSMEvent(ctx context.Context, req *connect.Request[ftlv1.S } defer tx.CommitOrRollback(ctx, &err) - instance, err := tx.AcquireFSMInstance(ctx, fsmKey, msg.Instance) + instance, err := tx.AcquireFSMInstance(ctx, fsmKey, msg.Instance, s.pool) if err != nil { return nil, connect.NewError(connect.CodeFailedPrecondition, fmt.Errorf("could not acquire fsm instance: %w", err)) } @@ -1271,7 +1287,7 @@ func (s *Service) executeAsyncCalls(ctx context.Context) (time.Duration, error) logger := log.FromContext(ctx) logger.Tracef("Acquiring async call") - call, err := s.dal.AcquireAsyncCall(ctx) + call, err := s.dal.AcquireAsyncCall(ctx, s.pool) if errors.Is(err, dalerrs.ErrNotFound) { logger.Tracef("No async calls to execute") return time.Second * 2, nil @@ -1342,7 +1358,7 @@ func (s *Service) executeAsyncCalls(ctx context.Context) (time.Duration, error) func (s *Service) onAsyncFSMCallCompletion(ctx context.Context, tx *dal.Tx, origin dal.AsyncOriginFSM, failed bool) error { logger := log.FromContext(ctx).Scope(origin.FSM.String()) - instance, err := tx.AcquireFSMInstance(ctx, origin.FSM, origin.Key) + instance, err := tx.AcquireFSMInstance(ctx, origin.FSM, origin.Key, s.pool) if err != nil { return fmt.Errorf("could not acquire lock on FSM instance: %w", err) } @@ -1387,7 +1403,7 @@ func (s *Service) onAsyncFSMCallCompletion(ctx context.Context, tx *dal.Tx, orig } func (s *Service) expireStaleLeases(ctx context.Context) (time.Duration, error) { - err := s.dal.ExpireLeases(ctx) + err := leasesdal.New(s.pool).ExpireLeases(ctx) if err != nil { return 0, fmt.Errorf("failed to expire leases: %w", err) } diff --git a/backend/controller/cronjobs/cronjobs.go b/backend/controller/cronjobs/cronjobs.go index 1efec7b203..941e933497 100644 --- a/backend/controller/cronjobs/cronjobs.go +++ b/backend/controller/cronjobs/cronjobs.go @@ -15,7 +15,8 @@ import ( "github.com/jpillora/backoff" "github.com/serialx/hashring" - "github.com/TBD54566975/ftl/backend/controller/dal" + "github.com/TBD54566975/ftl/backend/controller/cronjobs/dal" + parentdal "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/scheduledtask" ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1" schemapb "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1/schema" @@ -61,7 +62,7 @@ func (updatedHashRingEvent) cronJobEvent() {} 
type hashRingState struct { hashRing *hashring.HashRing - controllers []dal.Controller + controllers []parentdal.Controller idx int } @@ -408,7 +409,7 @@ func (s *Service) nextAttemptForJob(job model.CronJob, state *state, allowsNow b } // UpdatedControllerList synchronises the hash ring with the active controllers. -func (s *Service) UpdatedControllerList(ctx context.Context, controllers []dal.Controller) { +func (s *Service) UpdatedControllerList(ctx context.Context, controllers []parentdal.Controller) { logger := log.FromContext(ctx).Scope("cron") controllerIdx := -1 for idx, controller := range controllers { @@ -436,7 +437,7 @@ func (s *Service) UpdatedControllerList(ctx context.Context, controllers []dal.C } } - hashRing := hashring.New(slices.Map(controllers, func(c dal.Controller) string { return c.Key.String() })) + hashRing := hashring.New(slices.Map(controllers, func(c parentdal.Controller) string { return c.Key.String() })) s.hashRingState.Store(&hashRingState{ hashRing: hashRing, controllers: controllers, diff --git a/backend/controller/cronjobs/cronjobs_integration_test.go b/backend/controller/cronjobs/cronjobs_integration_test.go index 9e4d74507a..a8e9005851 100644 --- a/backend/controller/cronjobs/cronjobs_integration_test.go +++ b/backend/controller/cronjobs/cronjobs_integration_test.go @@ -9,7 +9,8 @@ import ( "testing" "time" - db "github.com/TBD54566975/ftl/backend/controller/dal" + db "github.com/TBD54566975/ftl/backend/controller/cronjobs/dal" + parentdb "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/sql/sqltest" in "github.com/TBD54566975/ftl/integration" "github.com/TBD54566975/ftl/internal/log" @@ -24,7 +25,8 @@ func TestServiceWithRealDal(t *testing.T) { t.Cleanup(cancel) conn := sqltest.OpenForTesting(ctx, t) - dal, err := db.New(ctx, conn) + dal := db.New(conn) + parentDAL, err := parentdb.New(ctx, conn) assert.NoError(t, err) // Using a real clock because real db queries use db clock @@ -36,7 +38,7 @@ func TestServiceWithRealDal(t *testing.T) { time.Sleep(2*time.Second - time.Duration(clk.Now().Nanosecond())*time.Nanosecond) } - testServiceWithDal(ctx, t, dal, clk) + testServiceWithDal(ctx, t, dal, parentDAL, clk) } func TestCron(t *testing.T) { diff --git a/backend/controller/cronjobs/cronjobs_test.go b/backend/controller/cronjobs/cronjobs_test.go index 2de588830f..775c3426e3 100644 --- a/backend/controller/cronjobs/cronjobs_test.go +++ b/backend/controller/cronjobs/cronjobs_test.go @@ -13,6 +13,7 @@ import ( xslices "golang.org/x/exp/slices" db "github.com/TBD54566975/ftl/backend/controller/dal" + "github.com/TBD54566975/ftl/backend/controller/sql/sqltest" ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1" "github.com/TBD54566975/ftl/backend/schema" "github.com/TBD54566975/ftl/internal/log" @@ -35,8 +36,11 @@ func TestServiceWithMockDal(t *testing.T) { lock: sync.Mutex{}, attemptCountMap: map[string]int{}, } + conn := sqltest.OpenForTesting(ctx, t) + parentDAL, err := db.New(ctx, conn) + assert.NoError(t, err) - testServiceWithDal(ctx, t, mockDal, clk) + testServiceWithDal(ctx, t, mockDal, parentDAL, clk) } func TestHashRing(t *testing.T) { diff --git a/backend/controller/cronjobs/cronjobs_utils_test.go b/backend/controller/cronjobs/cronjobs_utils_test.go index 0c35a5d342..572d9e2395 100644 --- a/backend/controller/cronjobs/cronjobs_utils_test.go +++ b/backend/controller/cronjobs/cronjobs_utils_test.go @@ -14,7 +14,8 @@ import ( "github.com/benbjohnson/clock" "github.com/jpillora/backoff" - db 
"github.com/TBD54566975/ftl/backend/controller/dal" + cronjobsdb "github.com/TBD54566975/ftl/backend/controller/cronjobs/dal" + parentdb "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/scheduledtask" ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1" "github.com/TBD54566975/ftl/backend/schema" @@ -23,9 +24,8 @@ import ( "github.com/TBD54566975/ftl/internal/slices" ) -type ExtendedDAL interface { - DAL - CreateDeployment(ctx context.Context, language string, moduleSchema *schema.Module, artefacts []db.DeploymentArtefact, ingressRoutes []db.IngressRoutingEntry, cronJobs []model.CronJob) (key model.DeploymentKey, err error) +type ParentDAL interface { + CreateDeployment(ctx context.Context, language string, moduleSchema *schema.Module, artefacts []parentdb.DeploymentArtefact, ingressRoutes []parentdb.IngressRoutingEntry, cronJobs []model.CronJob) (key model.DeploymentKey, err error) ReplaceDeployment(ctx context.Context, newDeploymentKey model.DeploymentKey, minReplicas int) (err error) } @@ -36,9 +36,10 @@ type mockDAL struct { attemptCountMap map[string]int } -var _ ExtendedDAL = &mockDAL{} +var _ ParentDAL = &mockDAL{} +var _ DAL = &mockDAL{} -func (d *mockDAL) CreateDeployment(ctx context.Context, language string, moduleSchema *schema.Module, artefacts []db.DeploymentArtefact, ingressRoutes []db.IngressRoutingEntry, cronJobs []model.CronJob) (key model.DeploymentKey, err error) { +func (d *mockDAL) CreateDeployment(ctx context.Context, language string, moduleSchema *schema.Module, artefacts []parentdb.DeploymentArtefact, ingressRoutes []parentdb.IngressRoutingEntry, cronJobs []model.CronJob) (key model.DeploymentKey, err error) { deploymentKey := model.NewDeploymentKey(moduleSchema.Name) d.jobs = []model.CronJob{} for _, job := range cronJobs { @@ -68,11 +69,11 @@ func (d *mockDAL) indexForJob(job model.CronJob) (int, error) { return -1, fmt.Errorf("job not found") } -func (d *mockDAL) StartCronJobs(ctx context.Context, jobs []model.CronJob) (attemptedJobs []db.AttemptedCronJob, err error) { +func (d *mockDAL) StartCronJobs(ctx context.Context, jobs []model.CronJob) (attemptedJobs []cronjobsdb.AttemptedCronJob, err error) { d.lock.Lock() defer d.lock.Unlock() - attemptedJobs = []db.AttemptedCronJob{} + attemptedJobs = []cronjobsdb.AttemptedCronJob{} now := d.clock.Now() for _, inputJob := range jobs { @@ -85,13 +86,13 @@ func (d *mockDAL) StartCronJobs(ctx context.Context, jobs []model.CronJob) (atte job.State = model.CronJobStateExecuting job.StartTime = d.clock.Now() d.jobs[i] = job - attemptedJobs = append(attemptedJobs, db.AttemptedCronJob{ + attemptedJobs = append(attemptedJobs, cronjobsdb.AttemptedCronJob{ CronJob: job, DidStartExecution: true, HasMinReplicas: true, }) } else { - attemptedJobs = append(attemptedJobs, db.AttemptedCronJob{ + attemptedJobs = append(attemptedJobs, cronjobsdb.AttemptedCronJob{ CronJob: job, DidStartExecution: false, HasMinReplicas: true, @@ -200,8 +201,8 @@ func newControllers(ctx context.Context, count int, dal DAL, clockFactory func() for _, c := range controllers { s := c.cronJobs go func() { - s.UpdatedControllerList(ctx, slices.Map(controllers, func(ctrl *controller) db.Controller { - return db.Controller{ + s.UpdatedControllerList(ctx, slices.Map(controllers, func(ctrl *controller) parentdb.Controller { + return parentdb.Controller{ Key: ctrl.key, } })) @@ -215,7 +216,7 @@ func newControllers(ctx context.Context, count int, dal DAL, clockFactory func() } // should be called when clk 
is half way between cron job executions (ie on an odd second) -func testServiceWithDal(ctx context.Context, t *testing.T, dal ExtendedDAL, clk clock.Clock) { +func testServiceWithDal(ctx context.Context, t *testing.T, dal DAL, parentDAL ParentDAL, clk clock.Clock) { t.Helper() verbCallCount := map[string]int{} @@ -224,12 +225,12 @@ func testServiceWithDal(ctx context.Context, t *testing.T, dal ExtendedDAL, clk moduleName := "initial" jobsToCreate := newJobs(t, moduleName, "*/2 * * * * * *", clk, 20) - deploymentKey, err := dal.CreateDeployment(ctx, "go", &schema.Module{ + deploymentKey, err := parentDAL.CreateDeployment(ctx, "go", &schema.Module{ Name: moduleName, - }, []db.DeploymentArtefact{}, []db.IngressRoutingEntry{}, jobsToCreate) + }, []parentdb.DeploymentArtefact{}, []parentdb.IngressRoutingEntry{}, jobsToCreate) assert.NoError(t, err) - err = dal.ReplaceDeployment(ctx, deploymentKey, 1) + err = parentDAL.ReplaceDeployment(ctx, deploymentKey, 1) assert.NoError(t, err) _ = newControllers(ctx, 5, dal, func() clock.Clock { return clk }, func(ctx context.Context, r *connect.Request[ftlv1.CallRequest], o optional.Option[model.RequestKey], s string) (*connect.Response[ftlv1.CallResponse], error) { diff --git a/backend/controller/cronjobs/dal/dal.go b/backend/controller/cronjobs/dal/dal.go new file mode 100644 index 0000000000..fadd5c369c --- /dev/null +++ b/backend/controller/cronjobs/dal/dal.go @@ -0,0 +1,101 @@ +// Package dal provides a data abstraction layer for cron jobs +package dal + +import ( + "context" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/TBD54566975/ftl/backend/controller/cronjobs/sql" + "github.com/TBD54566975/ftl/backend/schema" + "github.com/TBD54566975/ftl/db/dalerrs" + "github.com/TBD54566975/ftl/internal/model" + "github.com/TBD54566975/ftl/internal/slices" +) + +type DAL struct { + db sql.DBI +} + +func New(pool *pgxpool.Pool) *DAL { + return &DAL{db: sql.NewDB(pool)} +} + +func cronJobFromRow(row sql.GetCronJobsRow) model.CronJob { + return model.CronJob{ + Key: row.Key, + DeploymentKey: row.DeploymentKey, + Verb: schema.Ref{Module: row.Module, Name: row.Verb}, + Schedule: row.Schedule, + StartTime: row.StartTime, + NextExecution: row.NextExecution, + State: row.State, + } +} + +// GetCronJobs returns all cron jobs for deployments with min replicas > 0 +func (d *DAL) GetCronJobs(ctx context.Context) ([]model.CronJob, error) { + rows, err := d.db.GetCronJobs(ctx) + if err != nil { + return nil, dalerrs.TranslatePGError(err) + } + return slices.Map(rows, cronJobFromRow), nil +} + +type AttemptedCronJob struct { + DidStartExecution bool + HasMinReplicas bool + model.CronJob +} + +// StartCronJobs returns a full list of results so that the caller can update their list of jobs whether or not they successfully updated the row +func (d *DAL) StartCronJobs(ctx context.Context, jobs []model.CronJob) (attemptedJobs []AttemptedCronJob, err error) { + if len(jobs) == 0 { + return nil, nil + } + rows, err := d.db.StartCronJobs(ctx, slices.Map(jobs, func(job model.CronJob) string { return job.Key.String() })) + if err != nil { + return nil, dalerrs.TranslatePGError(err) + } + + attemptedJobs = []AttemptedCronJob{} + for _, row := range rows { + job := AttemptedCronJob{ + CronJob: model.CronJob{ + Key: row.Key, + DeploymentKey: row.DeploymentKey, + Verb: schema.Ref{Module: row.Module, Name: row.Verb}, + Schedule: row.Schedule, + StartTime: row.StartTime, + NextExecution: row.NextExecution, + State: row.State, + }, + DidStartExecution: row.Updated, + 
HasMinReplicas: row.HasMinReplicas, + } + attemptedJobs = append(attemptedJobs, job) + } + return attemptedJobs, nil +} + +// EndCronJob sets the status from executing to idle and updates the next execution time +// Can be called on the successful completion of a job, or if the job failed to execute (error or timeout) +func (d *DAL) EndCronJob(ctx context.Context, job model.CronJob, next time.Time) (model.CronJob, error) { + row, err := d.db.EndCronJob(ctx, next, job.Key, job.StartTime) + if err != nil { + return model.CronJob{}, dalerrs.TranslatePGError(err) + } + return cronJobFromRow(sql.GetCronJobsRow(row)), nil +} + +// GetStaleCronJobs returns a list of cron jobs that have been executing longer than the duration +func (d *DAL) GetStaleCronJobs(ctx context.Context, duration time.Duration) ([]model.CronJob, error) { + rows, err := d.db.GetStaleCronJobs(ctx, duration) + if err != nil { + return nil, dalerrs.TranslatePGError(err) + } + return slices.Map(rows, func(row sql.GetStaleCronJobsRow) model.CronJob { + return cronJobFromRow(sql.GetCronJobsRow(row)) + }), nil +} diff --git a/backend/controller/cronjobs/sql/conn.go b/backend/controller/cronjobs/sql/conn.go new file mode 100644 index 0000000000..065487cefa --- /dev/null +++ b/backend/controller/cronjobs/sql/conn.go @@ -0,0 +1,21 @@ +package sql + +type DBI interface { + Querier + Conn() ConnI +} + +type ConnI interface { + DBTX +} + +type DB struct { + conn ConnI + *Queries +} + +func NewDB(conn ConnI) *DB { + return &DB{conn: conn, Queries: New(conn)} +} + +func (d *DB) Conn() ConnI { return d.conn } diff --git a/backend/controller/cronjobs/sql/db.go b/backend/controller/cronjobs/sql/db.go new file mode 100644 index 0000000000..c4b45fb311 --- /dev/null +++ b/backend/controller/cronjobs/sql/db.go @@ -0,0 +1,32 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package sql + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" +) + +type DBTX interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx pgx.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/backend/controller/cronjobs/sql/models.go b/backend/controller/cronjobs/sql/models.go new file mode 100644 index 0000000000..6d2095f7ba --- /dev/null +++ b/backend/controller/cronjobs/sql/models.go @@ -0,0 +1,541 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.26.0 + +package sql + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "time" + + "github.com/TBD54566975/ftl/backend/controller/leases" + "github.com/TBD54566975/ftl/backend/schema" + "github.com/TBD54566975/ftl/internal/model" + "github.com/alecthomas/types/optional" + "github.com/google/uuid" +) + +type AsyncCallState string + +const ( + AsyncCallStatePending AsyncCallState = "pending" + AsyncCallStateExecuting AsyncCallState = "executing" + AsyncCallStateSuccess AsyncCallState = "success" + AsyncCallStateError AsyncCallState = "error" +) + +func (e *AsyncCallState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = AsyncCallState(s) + case string: + *e = AsyncCallState(s) + default: + return fmt.Errorf("unsupported scan type for AsyncCallState: %T", src) + } + return nil +} + +type NullAsyncCallState struct { + AsyncCallState AsyncCallState + Valid bool // Valid is true if AsyncCallState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullAsyncCallState) Scan(value interface{}) error { + if value == nil { + ns.AsyncCallState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.AsyncCallState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullAsyncCallState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.AsyncCallState), nil +} + +type ControllerState string + +const ( + ControllerStateLive ControllerState = "live" + ControllerStateDead ControllerState = "dead" +) + +func (e *ControllerState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = ControllerState(s) + case string: + *e = ControllerState(s) + default: + return fmt.Errorf("unsupported scan type for ControllerState: %T", src) + } + return nil +} + +type NullControllerState struct { + ControllerState ControllerState + Valid bool // Valid is true if ControllerState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullControllerState) Scan(value interface{}) error { + if value == nil { + ns.ControllerState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.ControllerState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullControllerState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.ControllerState), nil +} + +type CronJobState string + +const ( + CronJobStateIdle CronJobState = "idle" + CronJobStateExecuting CronJobState = "executing" +) + +func (e *CronJobState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = CronJobState(s) + case string: + *e = CronJobState(s) + default: + return fmt.Errorf("unsupported scan type for CronJobState: %T", src) + } + return nil +} + +type NullCronJobState struct { + CronJobState CronJobState + Valid bool // Valid is true if CronJobState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullCronJobState) Scan(value interface{}) error { + if value == nil { + ns.CronJobState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.CronJobState.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullCronJobState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.CronJobState), nil +} + +type EventType string + +const ( + EventTypeCall EventType = "call" + EventTypeLog EventType = "log" + EventTypeDeploymentCreated EventType = "deployment_created" + EventTypeDeploymentUpdated EventType = "deployment_updated" +) + +func (e *EventType) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = EventType(s) + case string: + *e = EventType(s) + default: + return fmt.Errorf("unsupported scan type for EventType: %T", src) + } + return nil +} + +type NullEventType struct { + EventType EventType + Valid bool // Valid is true if EventType is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullEventType) Scan(value interface{}) error { + if value == nil { + ns.EventType, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.EventType.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullEventType) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.EventType), nil +} + +type FsmStatus string + +const ( + FsmStatusRunning FsmStatus = "running" + FsmStatusCompleted FsmStatus = "completed" + FsmStatusFailed FsmStatus = "failed" +) + +func (e *FsmStatus) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = FsmStatus(s) + case string: + *e = FsmStatus(s) + default: + return fmt.Errorf("unsupported scan type for FsmStatus: %T", src) + } + return nil +} + +type NullFsmStatus struct { + FsmStatus FsmStatus + Valid bool // Valid is true if FsmStatus is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullFsmStatus) Scan(value interface{}) error { + if value == nil { + ns.FsmStatus, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.FsmStatus.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullFsmStatus) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.FsmStatus), nil +} + +type Origin string + +const ( + OriginIngress Origin = "ingress" + OriginCron Origin = "cron" + OriginPubsub Origin = "pubsub" +) + +func (e *Origin) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = Origin(s) + case string: + *e = Origin(s) + default: + return fmt.Errorf("unsupported scan type for Origin: %T", src) + } + return nil +} + +type NullOrigin struct { + Origin Origin + Valid bool // Valid is true if Origin is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullOrigin) Scan(value interface{}) error { + if value == nil { + ns.Origin, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.Origin.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullOrigin) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.Origin), nil +} + +type RunnerState string + +const ( + RunnerStateIdle RunnerState = "idle" + RunnerStateReserved RunnerState = "reserved" + RunnerStateAssigned RunnerState = "assigned" + RunnerStateDead RunnerState = "dead" +) + +func (e *RunnerState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = RunnerState(s) + case string: + *e = RunnerState(s) + default: + return fmt.Errorf("unsupported scan type for RunnerState: %T", src) + } + return nil +} + +type NullRunnerState struct { + RunnerState RunnerState + Valid bool // Valid is true if RunnerState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullRunnerState) Scan(value interface{}) error { + if value == nil { + ns.RunnerState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.RunnerState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullRunnerState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.RunnerState), nil +} + +type TopicSubscriptionState string + +const ( + TopicSubscriptionStateIdle TopicSubscriptionState = "idle" + TopicSubscriptionStateExecuting TopicSubscriptionState = "executing" +) + +func (e *TopicSubscriptionState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = TopicSubscriptionState(s) + case string: + *e = TopicSubscriptionState(s) + default: + return fmt.Errorf("unsupported scan type for TopicSubscriptionState: %T", src) + } + return nil +} + +type NullTopicSubscriptionState struct { + TopicSubscriptionState TopicSubscriptionState + Valid bool // Valid is true if TopicSubscriptionState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullTopicSubscriptionState) Scan(value interface{}) error { + if value == nil { + ns.TopicSubscriptionState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.TopicSubscriptionState.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullTopicSubscriptionState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.TopicSubscriptionState), nil +} + +type Artefact struct { + ID int64 + CreatedAt time.Time + Digest []byte + Content []byte +} + +type AsyncCall struct { + ID int64 + CreatedAt time.Time + LeaseID optional.Option[int64] + Verb schema.RefKey + State AsyncCallState + Origin string + ScheduledAt time.Time + Request []byte + Response []byte + Error optional.Option[string] + RemainingAttempts int32 + Backoff time.Duration + MaxBackoff time.Duration +} + +type Controller struct { + ID int64 + Key model.ControllerKey + Created time.Time + LastSeen time.Time + State ControllerState + Endpoint string +} + +type CronJob struct { + ID int64 + Key model.CronJobKey + DeploymentID int64 + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time + State model.CronJobState + ModuleName string +} + +type Deployment struct { + ID int64 + CreatedAt time.Time + ModuleID int64 + Key model.DeploymentKey + Schema *schema.Module + Labels []byte + MinReplicas int32 +} + +type DeploymentArtefact struct { + ArtefactID int64 + DeploymentID int64 + CreatedAt time.Time + Executable bool + Path string +} + +type Event struct { + ID int64 + TimeStamp time.Time + DeploymentID int64 + RequestID optional.Option[int64] + Type EventType + CustomKey1 optional.Option[string] + CustomKey2 optional.Option[string] + CustomKey3 optional.Option[string] + CustomKey4 optional.Option[string] + Payload json.RawMessage +} + +type FsmInstance struct { + ID int64 + CreatedAt time.Time + Fsm schema.RefKey + Key string + Status FsmStatus + CurrentState optional.Option[schema.RefKey] + DestinationState optional.Option[schema.RefKey] + AsyncCallID optional.Option[int64] +} + +type IngressRoute struct { + Method string + Path string + DeploymentID int64 + Module string + Verb string +} + +type Lease struct { + ID int64 + IdempotencyKey uuid.UUID + Key leases.Key + CreatedAt time.Time + ExpiresAt time.Time + Metadata []byte +} + +type Module struct { + ID int64 + Language string + Name string +} + +type ModuleConfiguration struct { + ID int64 + CreatedAt time.Time + Module optional.Option[string] + Name string + Value []byte +} + +type Request struct { + ID int64 + Origin Origin + Key model.RequestKey + SourceAddr string +} + +type Runner struct { + ID int64 + Key model.RunnerKey + Created time.Time + LastSeen time.Time + ReservationTimeout optional.Option[time.Time] + State RunnerState + Endpoint string + ModuleName optional.Option[string] + DeploymentID optional.Option[int64] + Labels []byte +} + +type Topic struct { + ID int64 + Key model.TopicKey + CreatedAt time.Time + ModuleID int64 + Name string + Type string + Head optional.Option[int64] +} + +type TopicEvent struct { + ID int64 + CreatedAt time.Time + Key model.TopicEventKey + TopicID int64 + Payload []byte +} + +type TopicSubscriber struct { + ID int64 + Key model.SubscriberKey + CreatedAt time.Time + TopicSubscriptionsID int64 + DeploymentID int64 + Sink schema.RefKey + RetryAttempts int32 + Backoff time.Duration + MaxBackoff time.Duration +} + +type TopicSubscription struct { + ID int64 + Key model.SubscriptionKey + CreatedAt time.Time + TopicID int64 + ModuleID int64 + DeploymentID int64 + Name string + Cursor optional.Option[int64] + State TopicSubscriptionState +} diff --git a/backend/controller/cronjobs/sql/querier.go b/backend/controller/cronjobs/sql/querier.go new file mode 100644 index 0000000000..9f8cb6a55a --- /dev/null +++ 
b/backend/controller/cronjobs/sql/querier.go @@ -0,0 +1,22 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package sql + +import ( + "context" + "time" + + "github.com/TBD54566975/ftl/internal/model" +) + +type Querier interface { + CreateCronJob(ctx context.Context, arg CreateCronJobParams) error + EndCronJob(ctx context.Context, nextExecution time.Time, key model.CronJobKey, startTime time.Time) (EndCronJobRow, error) + GetCronJobs(ctx context.Context) ([]GetCronJobsRow, error) + GetStaleCronJobs(ctx context.Context, dollar_1 time.Duration) ([]GetStaleCronJobsRow, error) + StartCronJobs(ctx context.Context, keys []string) ([]StartCronJobsRow, error) +} + +var _ Querier = (*Queries)(nil) diff --git a/backend/controller/cronjobs/sql/queries.sql b/backend/controller/cronjobs/sql/queries.sql new file mode 100644 index 0000000000..b16589ff2c --- /dev/null +++ b/backend/controller/cronjobs/sql/queries.sql @@ -0,0 +1,59 @@ +-- name: GetCronJobs :many +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state +FROM cron_jobs j + INNER JOIN deployments d on j.deployment_id = d.id +WHERE d.min_replicas > 0; + +-- name: CreateCronJob :exec +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + sqlc.arg('key')::cron_job_key, + (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), + sqlc.arg('module_name')::TEXT, + sqlc.arg('verb')::TEXT, + sqlc.arg('schedule')::TEXT, + sqlc.arg('start_time')::TIMESTAMPTZ, + sqlc.arg('next_execution')::TIMESTAMPTZ); + +-- name: StartCronJobs :many +WITH updates AS ( + UPDATE cron_jobs + SET state = 'executing', + start_time = (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ + WHERE key = ANY (sqlc.arg('keys')) + AND state = 'idle' + AND start_time < next_execution + AND (next_execution AT TIME ZONE 'utc') < (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ + RETURNING id, key, state, start_time, next_execution) +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, + COALESCE(u.start_time, j.start_time) as start_time, + COALESCE(u.next_execution, j.next_execution) as next_execution, + COALESCE(u.state, j.state) as state, + d.min_replicas > 0 as has_min_replicas, + CASE WHEN u.key IS NULL THEN FALSE ELSE TRUE END as updated +FROM cron_jobs j + INNER JOIN deployments d on j.deployment_id = d.id + LEFT JOIN updates u on j.id = u.id +WHERE j.key = ANY (sqlc.arg('keys')); + +-- name: EndCronJob :one +WITH j AS ( +UPDATE cron_jobs + SET state = 'idle', + next_execution = sqlc.arg('next_execution')::TIMESTAMPTZ + WHERE key = sqlc.arg('key')::cron_job_key + AND state = 'executing' + AND start_time = sqlc.arg('start_time')::TIMESTAMPTZ + RETURNING * +) +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state + FROM j + INNER JOIN deployments d on j.deployment_id = d.id + LIMIT 1; + +-- name: GetStaleCronJobs :many +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state +FROM cron_jobs j + INNER JOIN deployments d on j.deployment_id = d.id +WHERE state = 'executing' + AND start_time < (NOW() AT TIME ZONE 'utc') - $1::INTERVAL; diff --git a/backend/controller/cronjobs/sql/queries.sql.go b/backend/controller/cronjobs/sql/queries.sql.go new file mode 100644 index 0000000000..5199dc158a --- /dev/null +++ 
b/backend/controller/cronjobs/sql/queries.sql.go @@ -0,0 +1,252 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 +// source: queries.sql + +package sql + +import ( + "context" + "time" + + "github.com/TBD54566975/ftl/internal/model" +) + +const createCronJob = `-- name: CreateCronJob :exec +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + $1::cron_job_key, + (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), + $3::TEXT, + $4::TEXT, + $5::TEXT, + $6::TIMESTAMPTZ, + $7::TIMESTAMPTZ) +` + +type CreateCronJobParams struct { + Key model.CronJobKey + DeploymentKey model.DeploymentKey + ModuleName string + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time +} + +func (q *Queries) CreateCronJob(ctx context.Context, arg CreateCronJobParams) error { + _, err := q.db.Exec(ctx, createCronJob, + arg.Key, + arg.DeploymentKey, + arg.ModuleName, + arg.Verb, + arg.Schedule, + arg.StartTime, + arg.NextExecution, + ) + return err +} + +const endCronJob = `-- name: EndCronJob :one +WITH j AS ( +UPDATE cron_jobs + SET state = 'idle', + next_execution = $1::TIMESTAMPTZ + WHERE key = $2::cron_job_key + AND state = 'executing' + AND start_time = $3::TIMESTAMPTZ + RETURNING id, key, deployment_id, verb, schedule, start_time, next_execution, state, module_name +) +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state + FROM j + INNER JOIN deployments d on j.deployment_id = d.id + LIMIT 1 +` + +type EndCronJobRow struct { + Key model.CronJobKey + DeploymentKey model.DeploymentKey + Module string + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time + State model.CronJobState +} + +func (q *Queries) EndCronJob(ctx context.Context, nextExecution time.Time, key model.CronJobKey, startTime time.Time) (EndCronJobRow, error) { + row := q.db.QueryRow(ctx, endCronJob, nextExecution, key, startTime) + var i EndCronJobRow + err := row.Scan( + &i.Key, + &i.DeploymentKey, + &i.Module, + &i.Verb, + &i.Schedule, + &i.StartTime, + &i.NextExecution, + &i.State, + ) + return i, err +} + +const getCronJobs = `-- name: GetCronJobs :many +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state +FROM cron_jobs j + INNER JOIN deployments d on j.deployment_id = d.id +WHERE d.min_replicas > 0 +` + +type GetCronJobsRow struct { + Key model.CronJobKey + DeploymentKey model.DeploymentKey + Module string + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time + State model.CronJobState +} + +func (q *Queries) GetCronJobs(ctx context.Context) ([]GetCronJobsRow, error) { + rows, err := q.db.Query(ctx, getCronJobs) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetCronJobsRow + for rows.Next() { + var i GetCronJobsRow + if err := rows.Scan( + &i.Key, + &i.DeploymentKey, + &i.Module, + &i.Verb, + &i.Schedule, + &i.StartTime, + &i.NextExecution, + &i.State, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getStaleCronJobs = `-- name: GetStaleCronJobs :many +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state +FROM cron_jobs j + INNER JOIN deployments d on j.deployment_id = d.id +WHERE state = 
'executing' + AND start_time < (NOW() AT TIME ZONE 'utc') - $1::INTERVAL +` + +type GetStaleCronJobsRow struct { + Key model.CronJobKey + DeploymentKey model.DeploymentKey + Module string + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time + State model.CronJobState +} + +func (q *Queries) GetStaleCronJobs(ctx context.Context, dollar_1 time.Duration) ([]GetStaleCronJobsRow, error) { + rows, err := q.db.Query(ctx, getStaleCronJobs, dollar_1) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetStaleCronJobsRow + for rows.Next() { + var i GetStaleCronJobsRow + if err := rows.Scan( + &i.Key, + &i.DeploymentKey, + &i.Module, + &i.Verb, + &i.Schedule, + &i.StartTime, + &i.NextExecution, + &i.State, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const startCronJobs = `-- name: StartCronJobs :many +WITH updates AS ( + UPDATE cron_jobs + SET state = 'executing', + start_time = (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ + WHERE key = ANY ($1) + AND state = 'idle' + AND start_time < next_execution + AND (next_execution AT TIME ZONE 'utc') < (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ + RETURNING id, key, state, start_time, next_execution) +SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, + COALESCE(u.start_time, j.start_time) as start_time, + COALESCE(u.next_execution, j.next_execution) as next_execution, + COALESCE(u.state, j.state) as state, + d.min_replicas > 0 as has_min_replicas, + CASE WHEN u.key IS NULL THEN FALSE ELSE TRUE END as updated +FROM cron_jobs j + INNER JOIN deployments d on j.deployment_id = d.id + LEFT JOIN updates u on j.id = u.id +WHERE j.key = ANY ($1) +` + +type StartCronJobsRow struct { + Key model.CronJobKey + DeploymentKey model.DeploymentKey + Module string + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time + State model.CronJobState + HasMinReplicas bool + Updated bool +} + +func (q *Queries) StartCronJobs(ctx context.Context, keys []string) ([]StartCronJobsRow, error) { + rows, err := q.db.Query(ctx, startCronJobs, keys) + if err != nil { + return nil, err + } + defer rows.Close() + var items []StartCronJobsRow + for rows.Next() { + var i StartCronJobsRow + if err := rows.Scan( + &i.Key, + &i.DeploymentKey, + &i.Module, + &i.Verb, + &i.Schedule, + &i.StartTime, + &i.NextExecution, + &i.State, + &i.HasMinReplicas, + &i.Updated, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/backend/controller/dal/async_calls.go b/backend/controller/dal/async_calls.go index 06f3d7a5ce..b801aaf233 100644 --- a/backend/controller/dal/async_calls.go +++ b/backend/controller/dal/async_calls.go @@ -9,7 +9,9 @@ import ( "github.com/alecthomas/participle/v2" "github.com/alecthomas/types/either" + "github.com/jackc/pgx/v5/pgxpool" + leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/backend/schema" @@ -70,12 +72,12 @@ func ParseAsyncOrigin(origin string) (AsyncOrigin, error) { } type AsyncCall struct { - *Lease // May be nil - ID int64 - Origin AsyncOrigin - Verb schema.RefKey - Request json.RawMessage - ScheduledAt time.Time + *leasesdal.Lease // May be nil + ID int64 + Origin AsyncOrigin + Verb schema.RefKey + 
Request json.RawMessage + ScheduledAt time.Time RemainingAttempts int32 Backoff time.Duration @@ -85,7 +87,7 @@ type AsyncCall struct { // AcquireAsyncCall acquires a pending async call to execute. // // Returns ErrNotFound if there are no async calls to acquire. -func (d *DAL) AcquireAsyncCall(ctx context.Context) (call *AsyncCall, err error) { +func (d *DAL) AcquireAsyncCall(ctx context.Context, pool *pgxpool.Pool) (call *AsyncCall, err error) { tx, err := d.Begin(ctx) if err != nil { return nil, fmt.Errorf("failed to begin transaction: %w", err) @@ -106,7 +108,7 @@ func (d *DAL) AcquireAsyncCall(ctx context.Context) (call *AsyncCall, err error) if err != nil { return nil, fmt.Errorf("failed to parse origin key %q: %w", row.Origin, err) } - lease, _ := d.newLease(ctx, row.LeaseKey, row.LeaseIdempotencyKey, ttl) + lease, _ := leasesdal.New(pool).NewLease(ctx, row.LeaseKey, row.LeaseIdempotencyKey, ttl) return &AsyncCall{ ID: row.AsyncCallID, Verb: row.Verb, diff --git a/backend/controller/dal/dal.go b/backend/controller/dal/dal.go index dcc3e70e3a..71d4b83308 100644 --- a/backend/controller/dal/dal.go +++ b/backend/controller/dal/dal.go @@ -13,9 +13,11 @@ import ( "github.com/alecthomas/types/optional" "github.com/alecthomas/types/pubsub" sets "github.com/deckarep/golang-set/v2" + "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "google.golang.org/protobuf/proto" + cronjobssql "github.com/TBD54566975/ftl/backend/controller/cronjobs/sql" "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1" @@ -440,14 +442,21 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem logger := log.FromContext(ctx) // Start the transaction - tx, err := d.db.Begin(ctx) + tx, err := d.db.Conn().Begin(ctx) if err != nil { return model.DeploymentKey{}, fmt.Errorf("could not start transaction: %w", err) } - defer tx.CommitOrRollback(ctx, &err) + defer func() { + err := tx.Rollback(ctx) + if err != nil && !errors.Is(err, pgx.ErrTxClosed) { + panic(err) + } + }() - existingDeployment, err := d.checkForExistingDeployments(ctx, tx, moduleSchema, artefacts) + qtx := sql.New(d.db.Conn()).WithTx(tx) + + existingDeployment, err := d.checkForExistingDeployments(ctx, qtx, moduleSchema, artefacts) if err != nil { return model.DeploymentKey{}, err } else if !existingDeployment.IsZero() { @@ -465,7 +474,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } // TODO(aat): "schema" containing language? 
- _, err = tx.UpsertModule(ctx, language, moduleSchema.Name) + _, err = qtx.UpsertModule(ctx, language, moduleSchema.Name) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to upsert module: %w", dalerrs.TranslatePGError(err)) } @@ -476,7 +485,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem if !ok { continue } - err := tx.UpsertTopic(ctx, sql.UpsertTopicParams{ + err := qtx.UpsertTopic(ctx, sql.UpsertTopicParams{ Topic: model.NewTopicKey(moduleSchema.Name, t.Name), Module: moduleSchema.Name, Name: t.Name, @@ -490,13 +499,13 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem deploymentKey := model.NewDeploymentKey(moduleSchema.Name) // Create the deployment - err = tx.CreateDeployment(ctx, moduleSchema.Name, schemaBytes, deploymentKey) + err = qtx.CreateDeployment(ctx, moduleSchema.Name, schemaBytes, deploymentKey) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to create deployment: %w", dalerrs.TranslatePGError(err)) } uploadedDigests := slices.Map(artefacts, func(in DeploymentArtefact) []byte { return in.Digest[:] }) - artefactDigests, err := tx.GetArtefactDigests(ctx, uploadedDigests) + artefactDigests, err := qtx.GetArtefactDigests(ctx, uploadedDigests) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to get artefact digests: %w", err) } @@ -508,7 +517,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem // Associate the artefacts with the deployment for _, row := range artefactDigests { artefact := artefactsByDigest[sha256.FromBytes(row.Digest)] - err = tx.AssociateArtefactWithDeployment(ctx, sql.AssociateArtefactWithDeploymentParams{ + err = qtx.AssociateArtefactWithDeployment(ctx, sql.AssociateArtefactWithDeploymentParams{ Key: deploymentKey, ArtefactID: row.ID, Executable: artefact.Executable, @@ -520,7 +529,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } for _, ingressRoute := range ingressRoutes { - err = tx.CreateIngressRoute(ctx, sql.CreateIngressRouteParams{ + err = qtx.CreateIngressRoute(ctx, sql.CreateIngressRouteParams{ Key: deploymentKey, Method: ingressRoute.Method, Path: ingressRoute.Path, @@ -532,10 +541,12 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } } + cronjobsqtx := cronjobssql.New(d.db.Conn()).WithTx(tx) + for _, job := range cronJobs { // Start time must be calculated by the caller rather than generated by db // This ensures that nextExecution is after start time, otherwise the job will never be triggered - err := tx.CreateCronJob(ctx, sql.CreateCronJobParams{ + err := cronjobsqtx.CreateCronJob(ctx, cronjobssql.CreateCronJobParams{ Key: job.Key, DeploymentKey: deploymentKey, ModuleName: job.Verb.Module, @@ -549,6 +560,11 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } } + err = tx.Commit(ctx) + if err != nil { + return model.DeploymentKey{}, fmt.Errorf("failed to commit transaction: %w", dalerrs.TranslatePGError(err)) + } + return deploymentKey, nil } @@ -1000,84 +1016,6 @@ func (d *DAL) ExpireRunnerClaims(ctx context.Context) (int64, error) { return count, dalerrs.TranslatePGError(err) } -func cronJobFromRow(row sql.GetCronJobsRow) model.CronJob { - return model.CronJob{ - Key: row.Key, - DeploymentKey: row.DeploymentKey, - Verb: schema.Ref{Module: row.Module, Name: row.Verb}, - Schedule: row.Schedule, - StartTime: row.StartTime, - NextExecution: row.NextExecution, - State: row.State, - } -} - -// 
GetCronJobs returns all cron jobs for deployments with min replicas > 0 -func (d *DAL) GetCronJobs(ctx context.Context) ([]model.CronJob, error) { - rows, err := d.db.GetCronJobs(ctx) - if err != nil { - return nil, dalerrs.TranslatePGError(err) - } - return slices.Map(rows, cronJobFromRow), nil -} - -type AttemptedCronJob struct { - DidStartExecution bool - HasMinReplicas bool - model.CronJob -} - -// StartCronJobs returns a full list of results so that the caller can update their list of jobs whether or not they successfully updated the row -func (d *DAL) StartCronJobs(ctx context.Context, jobs []model.CronJob) (attemptedJobs []AttemptedCronJob, err error) { - if len(jobs) == 0 { - return nil, nil - } - rows, err := d.db.StartCronJobs(ctx, slices.Map(jobs, func(job model.CronJob) string { return job.Key.String() })) - if err != nil { - return nil, dalerrs.TranslatePGError(err) - } - - attemptedJobs = []AttemptedCronJob{} - for _, row := range rows { - job := AttemptedCronJob{ - CronJob: model.CronJob{ - Key: row.Key, - DeploymentKey: row.DeploymentKey, - Verb: schema.Ref{Module: row.Module, Name: row.Verb}, - Schedule: row.Schedule, - StartTime: row.StartTime, - NextExecution: row.NextExecution, - State: row.State, - }, - DidStartExecution: row.Updated, - HasMinReplicas: row.HasMinReplicas, - } - attemptedJobs = append(attemptedJobs, job) - } - return attemptedJobs, nil -} - -// EndCronJob sets the status from executing to idle and updates the next execution time -// Can be called on the successful completion of a job, or if the job failed to execute (error or timeout) -func (d *DAL) EndCronJob(ctx context.Context, job model.CronJob, next time.Time) (model.CronJob, error) { - row, err := d.db.EndCronJob(ctx, next, job.Key, job.StartTime) - if err != nil { - return model.CronJob{}, dalerrs.TranslatePGError(err) - } - return cronJobFromRow(sql.GetCronJobsRow(row)), nil -} - -// GetStaleCronJobs returns a list of cron jobs that have been executing longer than the duration -func (d *DAL) GetStaleCronJobs(ctx context.Context, duration time.Duration) ([]model.CronJob, error) { - rows, err := d.db.GetStaleCronJobs(ctx, duration) - if err != nil { - return nil, dalerrs.TranslatePGError(err) - } - return slices.Map(rows, func(row sql.GetStaleCronJobsRow) model.CronJob { - return cronJobFromRow(sql.GetCronJobsRow(row)) - }), nil -} - func (d *DAL) InsertLogEvent(ctx context.Context, log *LogEvent) error { attributes, err := json.Marshal(log.Attributes) if err != nil { @@ -1189,12 +1127,12 @@ func (d *DAL) GetActiveRunners(ctx context.Context) ([]Runner, error) { } // Check if a deployment exists that exactly matches the given artefacts and schema. 
-func (*DAL) checkForExistingDeployments(ctx context.Context, tx *sql.Tx, moduleSchema *schema.Module, artefacts []DeploymentArtefact) (model.DeploymentKey, error) { +func (*DAL) checkForExistingDeployments(ctx context.Context, qtx *sql.Queries, moduleSchema *schema.Module, artefacts []DeploymentArtefact) (model.DeploymentKey, error) { schemaBytes, err := schema.ModuleToBytes(moduleSchema) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to marshal schema: %w", err) } - existing, err := tx.GetDeploymentsWithArtefacts(ctx, + existing, err := qtx.GetDeploymentsWithArtefacts(ctx, sha256esToBytes(slices.Map(artefacts, func(in DeploymentArtefact) sha256.SHA256 { return in.Digest })), schemaBytes, int64(len(artefacts)), diff --git a/backend/controller/dal/fsm.go b/backend/controller/dal/fsm.go index a88f2c42ec..cf87fdf1bd 100644 --- a/backend/controller/dal/fsm.go +++ b/backend/controller/dal/fsm.go @@ -8,8 +8,10 @@ import ( "time" "github.com/alecthomas/types/optional" + "github.com/jackc/pgx/v5/pgxpool" "github.com/TBD54566975/ftl/backend/controller/leases" + leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/backend/schema" @@ -97,8 +99,8 @@ type FSMInstance struct { // AcquireFSMInstance returns an FSM instance, also acquiring a lease on it. // // The lease must be released by the caller. -func (d *DAL) AcquireFSMInstance(ctx context.Context, fsm schema.RefKey, instanceKey string) (*FSMInstance, error) { - lease, _, err := d.AcquireLease(ctx, leases.SystemKey("fsm_instance", fsm.String(), instanceKey), time.Second*5, optional.None[any]()) +func (d *DAL) AcquireFSMInstance(ctx context.Context, fsm schema.RefKey, instanceKey string, pool *pgxpool.Pool) (*FSMInstance, error) { + lease, _, err := leasesdal.New(pool).AcquireLease(ctx, leases.SystemKey("fsm_instance", fsm.String(), instanceKey), time.Second*5, optional.None[any]()) if err != nil { return nil, fmt.Errorf("failed to acquire FSM lease: %w", err) } diff --git a/backend/controller/dal/fsm_test.go b/backend/controller/dal/fsm_test.go index 68b2622ff4..c744f06865 100644 --- a/backend/controller/dal/fsm_test.go +++ b/backend/controller/dal/fsm_test.go @@ -8,6 +8,7 @@ import ( "github.com/alecthomas/assert/v2" "github.com/alecthomas/types/either" + leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/sql/sqltest" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/backend/schema" @@ -20,7 +21,7 @@ func TestSendFSMEvent(t *testing.T) { dal, err := New(ctx, conn) assert.NoError(t, err) - _, err = dal.AcquireAsyncCall(ctx) + _, err = dal.AcquireAsyncCall(ctx, conn) assert.IsError(t, err, dalerrs.ErrNotFound) ref := schema.RefKey{Module: "module", Name: "verb"} @@ -31,7 +32,7 @@ func TestSendFSMEvent(t *testing.T) { assert.IsError(t, err, dalerrs.ErrConflict) assert.EqualError(t, err, "transition already executing: conflict") - call, err := dal.AcquireAsyncCall(ctx) + call, err := dal.AcquireAsyncCall(ctx, conn) assert.NoError(t, err) t.Cleanup(func() { err := call.Lease.Release() @@ -48,12 +49,12 @@ func TestSendFSMEvent(t *testing.T) { }, Request: []byte(`{}`), } - assert.Equal(t, expectedCall, call, assert.Exclude[*Lease](), assert.Exclude[time.Time]()) + assert.Equal(t, expectedCall, call, assert.Exclude[*leasesdal.Lease](), assert.Exclude[time.Time]()) err = dal.CompleteAsyncCall(ctx, call, 
either.LeftOf[string]([]byte(`{}`)), func(tx *Tx) error { return nil }) assert.NoError(t, err) actual, err := dal.LoadAsyncCall(ctx, call.ID) assert.NoError(t, err) - assert.Equal(t, call, actual, assert.Exclude[*Lease](), assert.Exclude[time.Time]()) + assert.Equal(t, call, actual, assert.Exclude[*leasesdal.Lease](), assert.Exclude[time.Time]()) } diff --git a/backend/controller/leases/dal/dal.go b/backend/controller/leases/dal/dal.go new file mode 100644 index 0000000000..5226024b84 --- /dev/null +++ b/backend/controller/leases/dal/dal.go @@ -0,0 +1,16 @@ +// Package dal provides a data abstraction layer for leases +package dal + +import ( + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/TBD54566975/ftl/backend/controller/leases/sql" +) + +type DAL struct { + db sql.DBI +} + +func New(pool *pgxpool.Pool) *DAL { + return &DAL{db: sql.NewDB(pool)} +} diff --git a/backend/controller/dal/lease.go b/backend/controller/leases/dal/lease.go similarity index 95% rename from backend/controller/dal/lease.go rename to backend/controller/leases/dal/lease.go index d29625764c..cf6e4c913c 100644 --- a/backend/controller/dal/lease.go +++ b/backend/controller/leases/dal/lease.go @@ -11,7 +11,7 @@ import ( "github.com/google/uuid" "github.com/TBD54566975/ftl/backend/controller/leases" - "github.com/TBD54566975/ftl/backend/controller/sql" + "github.com/TBD54566975/ftl/backend/controller/leases/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/internal/log" ) @@ -102,11 +102,11 @@ func (d *DAL) AcquireLease(ctx context.Context, key leases.Key, ttl time.Duratio } return nil, nil, err } - leaseCtx, lease := d.newLease(ctx, key, idempotencyKey, ttl) + leaseCtx, lease := d.NewLease(ctx, key, idempotencyKey, ttl) return leaseCtx, lease, nil } -func (d *DAL) newLease(ctx context.Context, key leases.Key, idempotencyKey uuid.UUID, ttl time.Duration) (*Lease, context.Context) { +func (d *DAL) NewLease(ctx context.Context, key leases.Key, idempotencyKey uuid.UUID, ttl time.Duration) (*Lease, context.Context) { ctx, cancelCtx := context.WithCancel(ctx) lease := &Lease{ idempotencyKey: idempotencyKey, diff --git a/backend/controller/dal/lease_test.go b/backend/controller/leases/dal/lease_test.go similarity index 93% rename from backend/controller/dal/lease_test.go rename to backend/controller/leases/dal/lease_test.go index 72782a45a9..a7fa731f5f 100644 --- a/backend/controller/dal/lease_test.go +++ b/backend/controller/leases/dal/lease_test.go @@ -36,11 +36,10 @@ func TestLease(t *testing.T) { } ctx := log.ContextWithNewDefaultLogger(context.Background()) conn := sqltest.OpenForTesting(ctx, t) - dal, err := New(ctx, conn) - assert.NoError(t, err) + dal := New(conn) // TTL is too short, expect an error - _, _, err = dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*1, optional.None[any]()) + _, _, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*1, optional.None[any]()) assert.Error(t, err) leasei, leaseCtx, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) @@ -71,8 +70,7 @@ func TestExpireLeases(t *testing.T) { } ctx := log.ContextWithNewDefaultLogger(context.Background()) conn := sqltest.OpenForTesting(ctx, t) - dal, err := New(ctx, conn) - assert.NoError(t, err) + dal := New(conn) leasei, _, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) assert.NoError(t, err) diff --git a/backend/controller/leases/sql/conn.go b/backend/controller/leases/sql/conn.go new file mode 100644 
index 0000000000..065487cefa --- /dev/null +++ b/backend/controller/leases/sql/conn.go @@ -0,0 +1,21 @@ +package sql + +type DBI interface { + Querier + Conn() ConnI +} + +type ConnI interface { + DBTX +} + +type DB struct { + conn ConnI + *Queries +} + +func NewDB(conn ConnI) *DB { + return &DB{conn: conn, Queries: New(conn)} +} + +func (d *DB) Conn() ConnI { return d.conn } diff --git a/backend/controller/leases/sql/db.go b/backend/controller/leases/sql/db.go new file mode 100644 index 0000000000..c4b45fb311 --- /dev/null +++ b/backend/controller/leases/sql/db.go @@ -0,0 +1,32 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package sql + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" +) + +type DBTX interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx pgx.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/backend/controller/leases/sql/models.go b/backend/controller/leases/sql/models.go new file mode 100644 index 0000000000..6d2095f7ba --- /dev/null +++ b/backend/controller/leases/sql/models.go @@ -0,0 +1,541 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package sql + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "time" + + "github.com/TBD54566975/ftl/backend/controller/leases" + "github.com/TBD54566975/ftl/backend/schema" + "github.com/TBD54566975/ftl/internal/model" + "github.com/alecthomas/types/optional" + "github.com/google/uuid" +) + +type AsyncCallState string + +const ( + AsyncCallStatePending AsyncCallState = "pending" + AsyncCallStateExecuting AsyncCallState = "executing" + AsyncCallStateSuccess AsyncCallState = "success" + AsyncCallStateError AsyncCallState = "error" +) + +func (e *AsyncCallState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = AsyncCallState(s) + case string: + *e = AsyncCallState(s) + default: + return fmt.Errorf("unsupported scan type for AsyncCallState: %T", src) + } + return nil +} + +type NullAsyncCallState struct { + AsyncCallState AsyncCallState + Valid bool // Valid is true if AsyncCallState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullAsyncCallState) Scan(value interface{}) error { + if value == nil { + ns.AsyncCallState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.AsyncCallState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullAsyncCallState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.AsyncCallState), nil +} + +type ControllerState string + +const ( + ControllerStateLive ControllerState = "live" + ControllerStateDead ControllerState = "dead" +) + +func (e *ControllerState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = ControllerState(s) + case string: + *e = ControllerState(s) + default: + return fmt.Errorf("unsupported scan type for ControllerState: %T", src) + } + return nil +} + +type NullControllerState struct { + ControllerState ControllerState + Valid bool // Valid is true if ControllerState is not NULL +} + +// Scan implements the Scanner interface. 
+func (ns *NullControllerState) Scan(value interface{}) error { + if value == nil { + ns.ControllerState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.ControllerState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullControllerState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.ControllerState), nil +} + +type CronJobState string + +const ( + CronJobStateIdle CronJobState = "idle" + CronJobStateExecuting CronJobState = "executing" +) + +func (e *CronJobState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = CronJobState(s) + case string: + *e = CronJobState(s) + default: + return fmt.Errorf("unsupported scan type for CronJobState: %T", src) + } + return nil +} + +type NullCronJobState struct { + CronJobState CronJobState + Valid bool // Valid is true if CronJobState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullCronJobState) Scan(value interface{}) error { + if value == nil { + ns.CronJobState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.CronJobState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullCronJobState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.CronJobState), nil +} + +type EventType string + +const ( + EventTypeCall EventType = "call" + EventTypeLog EventType = "log" + EventTypeDeploymentCreated EventType = "deployment_created" + EventTypeDeploymentUpdated EventType = "deployment_updated" +) + +func (e *EventType) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = EventType(s) + case string: + *e = EventType(s) + default: + return fmt.Errorf("unsupported scan type for EventType: %T", src) + } + return nil +} + +type NullEventType struct { + EventType EventType + Valid bool // Valid is true if EventType is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullEventType) Scan(value interface{}) error { + if value == nil { + ns.EventType, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.EventType.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullEventType) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.EventType), nil +} + +type FsmStatus string + +const ( + FsmStatusRunning FsmStatus = "running" + FsmStatusCompleted FsmStatus = "completed" + FsmStatusFailed FsmStatus = "failed" +) + +func (e *FsmStatus) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = FsmStatus(s) + case string: + *e = FsmStatus(s) + default: + return fmt.Errorf("unsupported scan type for FsmStatus: %T", src) + } + return nil +} + +type NullFsmStatus struct { + FsmStatus FsmStatus + Valid bool // Valid is true if FsmStatus is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullFsmStatus) Scan(value interface{}) error { + if value == nil { + ns.FsmStatus, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.FsmStatus.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullFsmStatus) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.FsmStatus), nil +} + +type Origin string + +const ( + OriginIngress Origin = "ingress" + OriginCron Origin = "cron" + OriginPubsub Origin = "pubsub" +) + +func (e *Origin) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = Origin(s) + case string: + *e = Origin(s) + default: + return fmt.Errorf("unsupported scan type for Origin: %T", src) + } + return nil +} + +type NullOrigin struct { + Origin Origin + Valid bool // Valid is true if Origin is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullOrigin) Scan(value interface{}) error { + if value == nil { + ns.Origin, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.Origin.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullOrigin) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.Origin), nil +} + +type RunnerState string + +const ( + RunnerStateIdle RunnerState = "idle" + RunnerStateReserved RunnerState = "reserved" + RunnerStateAssigned RunnerState = "assigned" + RunnerStateDead RunnerState = "dead" +) + +func (e *RunnerState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = RunnerState(s) + case string: + *e = RunnerState(s) + default: + return fmt.Errorf("unsupported scan type for RunnerState: %T", src) + } + return nil +} + +type NullRunnerState struct { + RunnerState RunnerState + Valid bool // Valid is true if RunnerState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullRunnerState) Scan(value interface{}) error { + if value == nil { + ns.RunnerState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.RunnerState.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullRunnerState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.RunnerState), nil +} + +type TopicSubscriptionState string + +const ( + TopicSubscriptionStateIdle TopicSubscriptionState = "idle" + TopicSubscriptionStateExecuting TopicSubscriptionState = "executing" +) + +func (e *TopicSubscriptionState) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = TopicSubscriptionState(s) + case string: + *e = TopicSubscriptionState(s) + default: + return fmt.Errorf("unsupported scan type for TopicSubscriptionState: %T", src) + } + return nil +} + +type NullTopicSubscriptionState struct { + TopicSubscriptionState TopicSubscriptionState + Valid bool // Valid is true if TopicSubscriptionState is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullTopicSubscriptionState) Scan(value interface{}) error { + if value == nil { + ns.TopicSubscriptionState, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.TopicSubscriptionState.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullTopicSubscriptionState) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.TopicSubscriptionState), nil +} + +type Artefact struct { + ID int64 + CreatedAt time.Time + Digest []byte + Content []byte +} + +type AsyncCall struct { + ID int64 + CreatedAt time.Time + LeaseID optional.Option[int64] + Verb schema.RefKey + State AsyncCallState + Origin string + ScheduledAt time.Time + Request []byte + Response []byte + Error optional.Option[string] + RemainingAttempts int32 + Backoff time.Duration + MaxBackoff time.Duration +} + +type Controller struct { + ID int64 + Key model.ControllerKey + Created time.Time + LastSeen time.Time + State ControllerState + Endpoint string +} + +type CronJob struct { + ID int64 + Key model.CronJobKey + DeploymentID int64 + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time + State model.CronJobState + ModuleName string +} + +type Deployment struct { + ID int64 + CreatedAt time.Time + ModuleID int64 + Key model.DeploymentKey + Schema *schema.Module + Labels []byte + MinReplicas int32 +} + +type DeploymentArtefact struct { + ArtefactID int64 + DeploymentID int64 + CreatedAt time.Time + Executable bool + Path string +} + +type Event struct { + ID int64 + TimeStamp time.Time + DeploymentID int64 + RequestID optional.Option[int64] + Type EventType + CustomKey1 optional.Option[string] + CustomKey2 optional.Option[string] + CustomKey3 optional.Option[string] + CustomKey4 optional.Option[string] + Payload json.RawMessage +} + +type FsmInstance struct { + ID int64 + CreatedAt time.Time + Fsm schema.RefKey + Key string + Status FsmStatus + CurrentState optional.Option[schema.RefKey] + DestinationState optional.Option[schema.RefKey] + AsyncCallID optional.Option[int64] +} + +type IngressRoute struct { + Method string + Path string + DeploymentID int64 + Module string + Verb string +} + +type Lease struct { + ID int64 + IdempotencyKey uuid.UUID + Key leases.Key + CreatedAt time.Time + ExpiresAt time.Time + Metadata []byte +} + +type Module struct { + ID int64 + Language string + Name string +} + +type ModuleConfiguration struct { + ID int64 + CreatedAt time.Time + Module optional.Option[string] + Name string + Value []byte +} + +type Request struct { + ID int64 + Origin Origin + Key model.RequestKey + SourceAddr string +} + +type Runner struct { + ID int64 + Key model.RunnerKey + Created time.Time + LastSeen time.Time + ReservationTimeout optional.Option[time.Time] + State RunnerState + Endpoint string + ModuleName optional.Option[string] + DeploymentID optional.Option[int64] + Labels []byte +} + +type Topic struct { + ID int64 + Key model.TopicKey + CreatedAt time.Time + ModuleID int64 + Name string + Type string + Head optional.Option[int64] +} + +type TopicEvent struct { + ID int64 + CreatedAt time.Time + Key model.TopicEventKey + TopicID int64 + Payload []byte +} + +type TopicSubscriber struct { + ID int64 + Key model.SubscriberKey + CreatedAt time.Time + TopicSubscriptionsID int64 + DeploymentID int64 + Sink schema.RefKey + RetryAttempts int32 + Backoff time.Duration + MaxBackoff time.Duration +} + +type TopicSubscription struct { + ID int64 + Key model.SubscriptionKey + CreatedAt time.Time + TopicID int64 + ModuleID int64 + DeploymentID int64 + Name string + Cursor optional.Option[int64] + State TopicSubscriptionState +} diff --git a/backend/controller/leases/sql/querier.go b/backend/controller/leases/sql/querier.go new file mode 100644 index 0000000000..aed4b3dd8a --- /dev/null +++ 
b/backend/controller/leases/sql/querier.go @@ -0,0 +1,23 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package sql + +import ( + "context" + + "github.com/TBD54566975/ftl/backend/controller/leases" + "github.com/google/uuid" + "time" +) + +type Querier interface { + ExpireLeases(ctx context.Context) (int64, error) + GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) + NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) + ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) + RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) +} + +var _ Querier = (*Queries)(nil) diff --git a/backend/controller/leases/sql/queries.sql b/backend/controller/leases/sql/queries.sql new file mode 100644 index 0000000000..60a927022c --- /dev/null +++ b/backend/controller/leases/sql/queries.sql @@ -0,0 +1,37 @@ +-- name: NewLease :one +INSERT INTO leases ( + idempotency_key, + key, + expires_at, + metadata +) +VALUES ( + gen_random_uuid(), + @key::lease_key, + (NOW() AT TIME ZONE 'utc') + @ttl::interval, + sqlc.narg('metadata')::JSONB +) +RETURNING idempotency_key; + +-- name: RenewLease :one +UPDATE leases +SET expires_at = (NOW() AT TIME ZONE 'utc') + @ttl::interval +WHERE idempotency_key = @idempotency_key AND key = @key::lease_key +RETURNING true; + +-- name: ReleaseLease :one +DELETE FROM leases +WHERE idempotency_key = @idempotency_key AND key = @key::lease_key +RETURNING true; + +-- name: ExpireLeases :one +WITH expired AS ( + DELETE FROM leases + WHERE expires_at < NOW() AT TIME ZONE 'utc' + RETURNING 1 +) +SELECT COUNT(*) +FROM expired; + +-- name: GetLeaseInfo :one +SELECT expires_at, metadata FROM leases WHERE key = @key::lease_key; diff --git a/backend/controller/leases/sql/queries.sql.go b/backend/controller/leases/sql/queries.sql.go new file mode 100644 index 0000000000..e73302dcc6 --- /dev/null +++ b/backend/controller/leases/sql/queries.sql.go @@ -0,0 +1,97 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.26.0 +// source: queries.sql + +package sql + +import ( + "context" + "time" + + "github.com/TBD54566975/ftl/backend/controller/leases" + "github.com/google/uuid" +) + +const expireLeases = `-- name: ExpireLeases :one +WITH expired AS ( + DELETE FROM leases + WHERE expires_at < NOW() AT TIME ZONE 'utc' + RETURNING 1 +) +SELECT COUNT(*) +FROM expired +` + +func (q *Queries) ExpireLeases(ctx context.Context) (int64, error) { + row := q.db.QueryRow(ctx, expireLeases) + var count int64 + err := row.Scan(&count) + return count, err +} + +const getLeaseInfo = `-- name: GetLeaseInfo :one +SELECT expires_at, metadata FROM leases WHERE key = $1::lease_key +` + +type GetLeaseInfoRow struct { + ExpiresAt time.Time + Metadata []byte +} + +func (q *Queries) GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) { + row := q.db.QueryRow(ctx, getLeaseInfo, key) + var i GetLeaseInfoRow + err := row.Scan(&i.ExpiresAt, &i.Metadata) + return i, err +} + +const newLease = `-- name: NewLease :one +INSERT INTO leases ( + idempotency_key, + key, + expires_at, + metadata +) +VALUES ( + gen_random_uuid(), + $1::lease_key, + (NOW() AT TIME ZONE 'utc') + $2::interval, + $3::JSONB +) +RETURNING idempotency_key +` + +func (q *Queries) NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) { + row := q.db.QueryRow(ctx, newLease, key, ttl, metadata) + var idempotency_key uuid.UUID + err := row.Scan(&idempotency_key) + return idempotency_key, err +} + +const releaseLease = `-- name: ReleaseLease :one +DELETE FROM leases +WHERE idempotency_key = $1 AND key = $2::lease_key +RETURNING true +` + +func (q *Queries) ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { + row := q.db.QueryRow(ctx, releaseLease, idempotencyKey, key) + var column_1 bool + err := row.Scan(&column_1) + return column_1, err +} + +const renewLease = `-- name: RenewLease :one +UPDATE leases +SET expires_at = (NOW() AT TIME ZONE 'utc') + $1::interval +WHERE idempotency_key = $2 AND key = $3::lease_key +RETURNING true +` + +func (q *Queries) RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { + row := q.db.QueryRow(ctx, renewLease, ttl, idempotencyKey, key) + var column_1 bool + err := row.Scan(&column_1) + return column_1, err +} diff --git a/backend/controller/sql/querier.go b/backend/controller/sql/querier.go index 3d14409a71..63354c1339 100644 --- a/backend/controller/sql/querier.go +++ b/backend/controller/sql/querier.go @@ -8,11 +8,9 @@ import ( "context" "time" - "github.com/TBD54566975/ftl/backend/controller/leases" "github.com/TBD54566975/ftl/backend/schema" "github.com/TBD54566975/ftl/internal/model" "github.com/alecthomas/types/optional" - "github.com/google/uuid" ) type Querier interface { @@ -25,15 +23,12 @@ type Querier interface { // Create a new artefact and return the artefact ID. 
CreateArtefact(ctx context.Context, digest []byte, content []byte) (int64, error) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams) (int64, error) - CreateCronJob(ctx context.Context, arg CreateCronJobParams) error CreateDeployment(ctx context.Context, moduleName string, schema []byte, key model.DeploymentKey) error CreateIngressRoute(ctx context.Context, arg CreateIngressRouteParams) error CreateRequest(ctx context.Context, origin Origin, key model.RequestKey, sourceAddr string) error DeleteSubscribers(ctx context.Context, deployment model.DeploymentKey) error DeleteSubscriptions(ctx context.Context, deployment model.DeploymentKey) error DeregisterRunner(ctx context.Context, key model.RunnerKey) (int64, error) - EndCronJob(ctx context.Context, nextExecution time.Time, key model.CronJobKey, startTime time.Time) (EndCronJobRow, error) - ExpireLeases(ctx context.Context) (int64, error) ExpireRunnerReservations(ctx context.Context) (int64, error) FailAsyncCall(ctx context.Context, error string, iD int64) (bool, error) FailAsyncCallWithRetry(ctx context.Context, arg FailAsyncCallWithRetryParams) (bool, error) @@ -48,7 +43,6 @@ type Querier interface { GetArtefactContentRange(ctx context.Context, start int32, count int32, iD int64) ([]byte, error) // Return the digests that exist in the database. GetArtefactDigests(ctx context.Context, digests [][]byte) ([]GetArtefactDigestsRow, error) - GetCronJobs(ctx context.Context) ([]GetCronJobsRow, error) GetDeployment(ctx context.Context, key model.DeploymentKey) (GetDeploymentRow, error) // Get all artefacts matching the given digests. GetDeploymentArtefacts(ctx context.Context, deploymentID int64) ([]GetDeploymentArtefactsRow, error) @@ -63,7 +57,6 @@ type Querier interface { GetIdleRunners(ctx context.Context, labels []byte, limit int64) ([]Runner, error) // Get the runner endpoints corresponding to the given ingress route. 
GetIngressRoutes(ctx context.Context, method string) ([]GetIngressRoutesRow, error) - GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) GetModulesByID(ctx context.Context, ids []int64) ([]Module, error) GetNextEventForSubscription(ctx context.Context, consumptionDelay time.Duration, topic model.TopicKey, cursor optional.Option[model.TopicEventKey]) (GetNextEventForSubscriptionRow, error) GetProcessList(ctx context.Context) ([]GetProcessListRow, error) @@ -75,7 +68,6 @@ type Querier interface { GetRunnerState(ctx context.Context, key model.RunnerKey) (RunnerState, error) GetRunnersForDeployment(ctx context.Context, key model.DeploymentKey) ([]GetRunnersForDeploymentRow, error) GetSchemaForDeployment(ctx context.Context, key model.DeploymentKey) (*schema.Module, error) - GetStaleCronJobs(ctx context.Context, dollar_1 time.Duration) ([]GetStaleCronJobsRow, error) // Results may not be ready to be scheduled yet due to event consumption delay // Sorting ensures that brand new events (that may not be ready for consumption) // don't prevent older events from being consumed @@ -90,15 +82,11 @@ type Querier interface { KillStaleControllers(ctx context.Context, timeout time.Duration) (int64, error) KillStaleRunners(ctx context.Context, timeout time.Duration) (int64, error) LoadAsyncCall(ctx context.Context, id int64) (AsyncCall, error) - NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) PublishEventForTopic(ctx context.Context, arg PublishEventForTopicParams) error - ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) - RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) ReplaceDeployment(ctx context.Context, oldDeployment model.DeploymentKey, newDeployment model.DeploymentKey, minReplicas int32) (int64, error) // Find an idle runner and reserve it for the given deployment. ReserveRunner(ctx context.Context, reservationTimeout time.Time, deploymentKey model.DeploymentKey, labels []byte) (Runner, error) SetDeploymentDesiredReplicas(ctx context.Context, key model.DeploymentKey, minReplicas int32) error - StartCronJobs(ctx context.Context, keys []string) ([]StartCronJobsRow, error) // Start a new FSM transition, populating the destination state and async call ID. // // "key" is the unique identifier for the FSM execution. 
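For reference, a minimal usage sketch (illustrative only, not part of this patch series) of the leases DAL introduced above, mirroring the AcquireLease/Release calls in lease_test.go. The DSN, function name, lease key, and 10-second TTL are assumptions for the example; note that a later patch in this series (03/13) moves this DAL back into backend/controller/dal.

package main

import (
	"context"
	"time"

	"github.com/alecthomas/types/optional"
	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/TBD54566975/ftl/backend/controller/leases"
	leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal"
)

// acquireExampleLease acquires a short-lived system lease and releases it,
// following the pattern used in lease_test.go.
func acquireExampleLease(ctx context.Context) error {
	// Assumed DSN; in the controller the pool is created from config.DSN.
	pool, err := pgxpool.New(ctx, "postgres://localhost:5432/ftl")
	if err != nil {
		return err
	}
	defer pool.Close()

	dal := leasesdal.New(pool)

	// Acquire a lease with a 10s TTL and no metadata. leaseCtx is cancelled
	// when the lease is released or can no longer be renewed.
	leasei, leaseCtx, err := dal.AcquireLease(ctx, leases.SystemKey("example"), 10*time.Second, optional.None[any]())
	if err != nil {
		return err
	}
	lease := leasei.(*leasesdal.Lease) // concrete lease type, as asserted in lease_test.go
	defer func() { _ = lease.Release() }()

	// ... do work guarded by the lease, checking leaseCtx for loss of the lease ...
	if err := leaseCtx.Err(); err != nil {
		return err
	}
	return nil
}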
diff --git a/backend/controller/sql/queries.sql b/backend/controller/sql/queries.sql index f766822048..bef9ebd0a8 100644 --- a/backend/controller/sql/queries.sql +++ b/backend/controller/sql/queries.sql @@ -282,66 +282,6 @@ WITH rows AS ( SELECT COUNT(*) FROM rows; --- name: GetCronJobs :many -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state -FROM cron_jobs j - INNER JOIN deployments d on j.deployment_id = d.id -WHERE d.min_replicas > 0; - --- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - sqlc.arg('key')::cron_job_key, - (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), - sqlc.arg('module_name')::TEXT, - sqlc.arg('verb')::TEXT, - sqlc.arg('schedule')::TEXT, - sqlc.arg('start_time')::TIMESTAMPTZ, - sqlc.arg('next_execution')::TIMESTAMPTZ); - --- name: StartCronJobs :many -WITH updates AS ( - UPDATE cron_jobs - SET state = 'executing', - start_time = (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ - WHERE key = ANY (sqlc.arg('keys')) - AND state = 'idle' - AND start_time < next_execution - AND (next_execution AT TIME ZONE 'utc') < (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ - RETURNING id, key, state, start_time, next_execution) -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, - COALESCE(u.start_time, j.start_time) as start_time, - COALESCE(u.next_execution, j.next_execution) as next_execution, - COALESCE(u.state, j.state) as state, - d.min_replicas > 0 as has_min_replicas, - CASE WHEN u.key IS NULL THEN FALSE ELSE TRUE END as updated -FROM cron_jobs j - INNER JOIN deployments d on j.deployment_id = d.id - LEFT JOIN updates u on j.id = u.id -WHERE j.key = ANY (sqlc.arg('keys')); - --- name: EndCronJob :one -WITH j AS ( -UPDATE cron_jobs - SET state = 'idle', - next_execution = sqlc.arg('next_execution')::TIMESTAMPTZ - WHERE key = sqlc.arg('key')::cron_job_key - AND state = 'executing' - AND start_time = sqlc.arg('start_time')::TIMESTAMPTZ - RETURNING * -) -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state - FROM j - INNER JOIN deployments d on j.deployment_id = d.id - LIMIT 1; - --- name: GetStaleCronJobs :many -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state -FROM cron_jobs j - INNER JOIN deployments d on j.deployment_id = d.id -WHERE state = 'executing' - AND start_time < (NOW() AT TIME ZONE 'utc') - $1::INTERVAL; - -- name: InsertLogEvent :exec INSERT INTO events (deployment_id, request_id, time_stamp, custom_key_1, type, payload) VALUES ((SELECT id FROM deployments d WHERE d.key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), @@ -462,44 +402,6 @@ INSERT INTO events (deployment_id, request_id, type, VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id; --- name: NewLease :one -INSERT INTO leases ( - idempotency_key, - key, - expires_at, - metadata -) -VALUES ( - gen_random_uuid(), - @key::lease_key, - (NOW() AT TIME ZONE 'utc') + @ttl::interval, - sqlc.narg('metadata')::JSONB -) -RETURNING idempotency_key; - --- name: RenewLease :one -UPDATE leases -SET expires_at = (NOW() AT TIME ZONE 'utc') + @ttl::interval -WHERE idempotency_key = @idempotency_key AND key = @key::lease_key -RETURNING true; - --- name: ReleaseLease :one -DELETE FROM leases -WHERE idempotency_key = 
@idempotency_key AND key = @key::lease_key -RETURNING true; - --- name: ExpireLeases :one -WITH expired AS ( - DELETE FROM leases - WHERE expires_at < NOW() AT TIME ZONE 'utc' - RETURNING 1 -) -SELECT COUNT(*) -FROM expired; - --- name: GetLeaseInfo :one -SELECT expires_at, metadata FROM leases WHERE key = @key::lease_key; - -- name: CreateAsyncCall :one INSERT INTO async_calls (verb, origin, request, remaining_attempts, backoff, max_backoff) VALUES (@verb, @origin, @request, @remaining_attempts, @backoff::interval, @max_backoff::interval) diff --git a/backend/controller/sql/queries.sql.go b/backend/controller/sql/queries.sql.go index 1f5a26efb8..ff4e2d0fa9 100644 --- a/backend/controller/sql/queries.sql.go +++ b/backend/controller/sql/queries.sql.go @@ -177,41 +177,6 @@ func (q *Queries) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams return id, err } -const createCronJob = `-- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - $1::cron_job_key, - (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), - $3::TEXT, - $4::TEXT, - $5::TEXT, - $6::TIMESTAMPTZ, - $7::TIMESTAMPTZ) -` - -type CreateCronJobParams struct { - Key model.CronJobKey - DeploymentKey model.DeploymentKey - ModuleName string - Verb string - Schedule string - StartTime time.Time - NextExecution time.Time -} - -func (q *Queries) CreateCronJob(ctx context.Context, arg CreateCronJobParams) error { - _, err := q.db.Exec(ctx, createCronJob, - arg.Key, - arg.DeploymentKey, - arg.ModuleName, - arg.Verb, - arg.Schedule, - arg.StartTime, - arg.NextExecution, - ) - return err -} - const createDeployment = `-- name: CreateDeployment :exec INSERT INTO deployments (module_id, "schema", "key") VALUES ((SELECT id FROM modules WHERE name = $1::TEXT LIMIT 1), $2::BYTEA, $3::deployment_key) @@ -302,66 +267,6 @@ func (q *Queries) DeregisterRunner(ctx context.Context, key model.RunnerKey) (in return count, err } -const endCronJob = `-- name: EndCronJob :one -WITH j AS ( -UPDATE cron_jobs - SET state = 'idle', - next_execution = $1::TIMESTAMPTZ - WHERE key = $2::cron_job_key - AND state = 'executing' - AND start_time = $3::TIMESTAMPTZ - RETURNING id, key, deployment_id, verb, schedule, start_time, next_execution, state, module_name -) -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state - FROM j - INNER JOIN deployments d on j.deployment_id = d.id - LIMIT 1 -` - -type EndCronJobRow struct { - Key model.CronJobKey - DeploymentKey model.DeploymentKey - Module string - Verb string - Schedule string - StartTime time.Time - NextExecution time.Time - State model.CronJobState -} - -func (q *Queries) EndCronJob(ctx context.Context, nextExecution time.Time, key model.CronJobKey, startTime time.Time) (EndCronJobRow, error) { - row := q.db.QueryRow(ctx, endCronJob, nextExecution, key, startTime) - var i EndCronJobRow - err := row.Scan( - &i.Key, - &i.DeploymentKey, - &i.Module, - &i.Verb, - &i.Schedule, - &i.StartTime, - &i.NextExecution, - &i.State, - ) - return i, err -} - -const expireLeases = `-- name: ExpireLeases :one -WITH expired AS ( - DELETE FROM leases - WHERE expires_at < NOW() AT TIME ZONE 'utc' - RETURNING 1 -) -SELECT COUNT(*) -FROM expired -` - -func (q *Queries) ExpireLeases(ctx context.Context) (int64, error) { - row := q.db.QueryRow(ctx, expireLeases) - var count int64 - err := row.Scan(&count) - return count, err -} - const 
expireRunnerReservations = `-- name: ExpireRunnerReservations :one WITH rows AS ( UPDATE runners @@ -723,53 +628,6 @@ func (q *Queries) GetArtefactDigests(ctx context.Context, digests [][]byte) ([]G return items, nil } -const getCronJobs = `-- name: GetCronJobs :many -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state -FROM cron_jobs j - INNER JOIN deployments d on j.deployment_id = d.id -WHERE d.min_replicas > 0 -` - -type GetCronJobsRow struct { - Key model.CronJobKey - DeploymentKey model.DeploymentKey - Module string - Verb string - Schedule string - StartTime time.Time - NextExecution time.Time - State model.CronJobState -} - -func (q *Queries) GetCronJobs(ctx context.Context) ([]GetCronJobsRow, error) { - rows, err := q.db.Query(ctx, getCronJobs) - if err != nil { - return nil, err - } - defer rows.Close() - var items []GetCronJobsRow - for rows.Next() { - var i GetCronJobsRow - if err := rows.Scan( - &i.Key, - &i.DeploymentKey, - &i.Module, - &i.Verb, - &i.Schedule, - &i.StartTime, - &i.NextExecution, - &i.State, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - const getDeployment = `-- name: GetDeployment :one SELECT d.id, d.created_at, d.module_id, d.key, d.schema, d.labels, d.min_replicas, m.language, m.name AS module_name, d.min_replicas FROM deployments d @@ -1168,22 +1026,6 @@ func (q *Queries) GetIngressRoutes(ctx context.Context, method string) ([]GetIng return items, nil } -const getLeaseInfo = `-- name: GetLeaseInfo :one -SELECT expires_at, metadata FROM leases WHERE key = $1::lease_key -` - -type GetLeaseInfoRow struct { - ExpiresAt time.Time - Metadata []byte -} - -func (q *Queries) GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) { - row := q.db.QueryRow(ctx, getLeaseInfo, key) - var i GetLeaseInfoRow - err := row.Scan(&i.ExpiresAt, &i.Metadata) - return i, err -} - const getModulesByID = `-- name: GetModulesByID :many SELECT id, language, name FROM modules @@ -1530,54 +1372,6 @@ func (q *Queries) GetSchemaForDeployment(ctx context.Context, key model.Deployme return schema, err } -const getStaleCronJobs = `-- name: GetStaleCronJobs :many -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state -FROM cron_jobs j - INNER JOIN deployments d on j.deployment_id = d.id -WHERE state = 'executing' - AND start_time < (NOW() AT TIME ZONE 'utc') - $1::INTERVAL -` - -type GetStaleCronJobsRow struct { - Key model.CronJobKey - DeploymentKey model.DeploymentKey - Module string - Verb string - Schedule string - StartTime time.Time - NextExecution time.Time - State model.CronJobState -} - -func (q *Queries) GetStaleCronJobs(ctx context.Context, dollar_1 time.Duration) ([]GetStaleCronJobsRow, error) { - rows, err := q.db.Query(ctx, getStaleCronJobs, dollar_1) - if err != nil { - return nil, err - } - defer rows.Close() - var items []GetStaleCronJobsRow - for rows.Next() { - var i GetStaleCronJobsRow - if err := rows.Scan( - &i.Key, - &i.DeploymentKey, - &i.Module, - &i.Verb, - &i.Schedule, - &i.StartTime, - &i.NextExecution, - &i.State, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - const getSubscriptionsNeedingUpdate = `-- name: GetSubscriptionsNeedingUpdate :many SELECT subs.key::subscription_key 
as key, @@ -1942,29 +1736,6 @@ func (q *Queries) LoadAsyncCall(ctx context.Context, id int64) (AsyncCall, error return i, err } -const newLease = `-- name: NewLease :one -INSERT INTO leases ( - idempotency_key, - key, - expires_at, - metadata -) -VALUES ( - gen_random_uuid(), - $1::lease_key, - (NOW() AT TIME ZONE 'utc') + $2::interval, - $3::JSONB -) -RETURNING idempotency_key -` - -func (q *Queries) NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) { - row := q.db.QueryRow(ctx, newLease, key, ttl, metadata) - var idempotency_key uuid.UUID - err := row.Scan(&idempotency_key) - return idempotency_key, err -} - const publishEventForTopic = `-- name: PublishEventForTopic :exec INSERT INTO topic_events ( "key", @@ -2001,33 +1772,6 @@ func (q *Queries) PublishEventForTopic(ctx context.Context, arg PublishEventForT return err } -const releaseLease = `-- name: ReleaseLease :one -DELETE FROM leases -WHERE idempotency_key = $1 AND key = $2::lease_key -RETURNING true -` - -func (q *Queries) ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { - row := q.db.QueryRow(ctx, releaseLease, idempotencyKey, key) - var column_1 bool - err := row.Scan(&column_1) - return column_1, err -} - -const renewLease = `-- name: RenewLease :one -UPDATE leases -SET expires_at = (NOW() AT TIME ZONE 'utc') + $1::interval -WHERE idempotency_key = $2 AND key = $3::lease_key -RETURNING true -` - -func (q *Queries) RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { - row := q.db.QueryRow(ctx, renewLease, ttl, idempotencyKey, key) - var column_1 bool - err := row.Scan(&column_1) - return column_1, err -} - const replaceDeployment = `-- name: ReplaceDeployment :one WITH update_container AS ( UPDATE deployments AS d @@ -2097,72 +1841,6 @@ func (q *Queries) SetDeploymentDesiredReplicas(ctx context.Context, key model.De return err } -const startCronJobs = `-- name: StartCronJobs :many -WITH updates AS ( - UPDATE cron_jobs - SET state = 'executing', - start_time = (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ - WHERE key = ANY ($1) - AND state = 'idle' - AND start_time < next_execution - AND (next_execution AT TIME ZONE 'utc') < (NOW() AT TIME ZONE 'utc')::TIMESTAMPTZ - RETURNING id, key, state, start_time, next_execution) -SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, - COALESCE(u.start_time, j.start_time) as start_time, - COALESCE(u.next_execution, j.next_execution) as next_execution, - COALESCE(u.state, j.state) as state, - d.min_replicas > 0 as has_min_replicas, - CASE WHEN u.key IS NULL THEN FALSE ELSE TRUE END as updated -FROM cron_jobs j - INNER JOIN deployments d on j.deployment_id = d.id - LEFT JOIN updates u on j.id = u.id -WHERE j.key = ANY ($1) -` - -type StartCronJobsRow struct { - Key model.CronJobKey - DeploymentKey model.DeploymentKey - Module string - Verb string - Schedule string - StartTime time.Time - NextExecution time.Time - State model.CronJobState - HasMinReplicas bool - Updated bool -} - -func (q *Queries) StartCronJobs(ctx context.Context, keys []string) ([]StartCronJobsRow, error) { - rows, err := q.db.Query(ctx, startCronJobs, keys) - if err != nil { - return nil, err - } - defer rows.Close() - var items []StartCronJobsRow - for rows.Next() { - var i StartCronJobsRow - if err := rows.Scan( - &i.Key, - &i.DeploymentKey, - &i.Module, - &i.Verb, - &i.Schedule, - &i.StartTime, - &i.NextExecution, - &i.State, - 
&i.HasMinReplicas, - &i.Updated, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - const startFSMTransition = `-- name: StartFSMTransition :one INSERT INTO fsm_instances ( fsm, diff --git a/sqlc.yaml b/sqlc.yaml index cd5fd3dcf3..2b17199bf3 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -138,6 +138,18 @@ sql: - sqlc/db-prepare # - postgresql-query-too-costly - postgresql-no-seq-scan + - <<: *daldir + queries: "backend/controller/cronjobs/sql/queries.sql" + gen: + go: + <<: *gengo + out: "backend/controller/cronjobs/sql" + - <<: *daldir + queries: "backend/controller/leases/sql/queries.sql" + gen: + go: + <<: *gengo + out: "backend/controller/leases/sql" - <<: *daldir queries: "common/configuration/sql/queries.sql" gen: From a723b4e409c51035d881ccdd13a2c84dda9c434f Mon Sep 17 00:00:00 2001 From: Denise Li Date: Mon, 1 Jul 2024 15:53:34 -0400 Subject: [PATCH 02/13] lint --- backend/controller/controller.go | 4 ++-- backend/controller/cronjobs/dal/dal.go | 11 ++++++----- cmd/ftl-controller/main.go | 4 +++- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/backend/controller/controller.go b/backend/controller/controller.go index 6b51d89496..79f6e98137 100644 --- a/backend/controller/controller.go +++ b/backend/controller/controller.go @@ -124,7 +124,7 @@ func Start(ctx context.Context, config Config, runnerScaling scaling.RunnerScali // Bring up the DB connection and DAL. conn, err := pgxpool.New(ctx, config.DSN) if err != nil { - return err + return fmt.Errorf("failed to bring up DB connection: %w", err) } svc, err := New(ctx, conn, config, runnerScaling) @@ -219,7 +219,7 @@ func New(ctx context.Context, pool *pgxpool.Pool, config Config, runnerScaling s db, err := dal.New(ctx, pool) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create DAL: %w", err) } svc := &Service{ diff --git a/backend/controller/cronjobs/dal/dal.go b/backend/controller/cronjobs/dal/dal.go index fadd5c369c..9408465204 100644 --- a/backend/controller/cronjobs/dal/dal.go +++ b/backend/controller/cronjobs/dal/dal.go @@ -3,13 +3,14 @@ package dal import ( "context" + "fmt" "time" "github.com/jackc/pgx/v5/pgxpool" "github.com/TBD54566975/ftl/backend/controller/cronjobs/sql" + dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/backend/schema" - "github.com/TBD54566975/ftl/db/dalerrs" "github.com/TBD54566975/ftl/internal/model" "github.com/TBD54566975/ftl/internal/slices" ) @@ -38,7 +39,7 @@ func cronJobFromRow(row sql.GetCronJobsRow) model.CronJob { func (d *DAL) GetCronJobs(ctx context.Context) ([]model.CronJob, error) { rows, err := d.db.GetCronJobs(ctx) if err != nil { - return nil, dalerrs.TranslatePGError(err) + return nil, fmt.Errorf("failed to get cron jobs: %w", dalerrs.TranslatePGError(err)) } return slices.Map(rows, cronJobFromRow), nil } @@ -56,7 +57,7 @@ func (d *DAL) StartCronJobs(ctx context.Context, jobs []model.CronJob) (attempte } rows, err := d.db.StartCronJobs(ctx, slices.Map(jobs, func(job model.CronJob) string { return job.Key.String() })) if err != nil { - return nil, dalerrs.TranslatePGError(err) + return nil, fmt.Errorf("failed to start cron jobs: %w", dalerrs.TranslatePGError(err)) } attemptedJobs = []AttemptedCronJob{} @@ -84,7 +85,7 @@ func (d *DAL) StartCronJobs(ctx context.Context, jobs []model.CronJob) (attempte func (d *DAL) EndCronJob(ctx context.Context, job model.CronJob, next time.Time) (model.CronJob, error) { row, err := 
d.db.EndCronJob(ctx, next, job.Key, job.StartTime) if err != nil { - return model.CronJob{}, dalerrs.TranslatePGError(err) + return model.CronJob{}, fmt.Errorf("failed to end cron job: %w", dalerrs.TranslatePGError(err)) } return cronJobFromRow(sql.GetCronJobsRow(row)), nil } @@ -93,7 +94,7 @@ func (d *DAL) EndCronJob(ctx context.Context, job model.CronJob, next time.Time) func (d *DAL) GetStaleCronJobs(ctx context.Context, duration time.Duration) ([]model.CronJob, error) { rows, err := d.db.GetStaleCronJobs(ctx, duration) if err != nil { - return nil, dalerrs.TranslatePGError(err) + return nil, fmt.Errorf("failed to get stale cron jobs: %w", dalerrs.TranslatePGError(err)) } return slices.Map(rows, func(row sql.GetStaleCronJobsRow) model.CronJob { return cronJobFromRow(sql.GetCronJobsRow(row)) diff --git a/cmd/ftl-controller/main.go b/cmd/ftl-controller/main.go index 1af8897d20..00260e19ab 100644 --- a/cmd/ftl-controller/main.go +++ b/cmd/ftl-controller/main.go @@ -15,6 +15,7 @@ import ( "github.com/TBD54566975/ftl" "github.com/TBD54566975/ftl/backend/controller" "github.com/TBD54566975/ftl/backend/controller/dal" + leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/scaling" cf "github.com/TBD54566975/ftl/common/configuration" cfdal "github.com/TBD54566975/ftl/common/configuration/dal" @@ -48,6 +49,7 @@ func main() { // The FTL controller currently only supports DB as a configuration provider/resolver. conn, err := pgxpool.New(ctx, cli.ControllerConfig.DSN) kctx.FatalIfErrorf(err) + ldal := leasesdal.New(conn) dal, err := dal.New(ctx, conn) kctx.FatalIfErrorf(err) @@ -63,7 +65,7 @@ func main() { // The FTL controller currently only supports AWS Secrets Manager as a secrets provider. awsConfig, err := config.LoadDefaultConfig(ctx) kctx.FatalIfErrorf(err) - secretsResolver := cf.NewASM(ctx, secretsmanager.NewFromConfig(awsConfig), cli.ControllerConfig.Advertise, dal) + secretsResolver := cf.NewASM(ctx, secretsmanager.NewFromConfig(awsConfig), cli.ControllerConfig.Advertise, ldal) secretsProviders := []cf.Provider[cf.Secrets]{secretsResolver} sm, err := cf.New[cf.Secrets](ctx, secretsResolver, secretsProviders) kctx.FatalIfErrorf(err) From 5c04dd81fe9485b92889d0eea9758942faf0360c Mon Sep 17 00:00:00 2001 From: Denise Li Date: Mon, 1 Jul 2024 20:36:03 -0400 Subject: [PATCH 03/13] undo leases, add shared sql --- Justfile | 2 +- .../cronjobs/shared/sql/queries.sql | 10 + backend/controller/cronjobs/sql/queries.sql | 11 - backend/controller/dal/async_calls.go | 18 +- backend/controller/dal/dal.go | 40 +- backend/controller/{leases => }/dal/lease.go | 30 +- backend/controller/leases/dal/dal.go | 16 - backend/controller/leases/dal/lease_test.go | 104 ---- backend/controller/leases/sql/conn.go | 21 - backend/controller/leases/sql/db.go | 32 -- backend/controller/leases/sql/models.go | 541 ------------------ backend/controller/leases/sql/querier.go | 23 - backend/controller/leases/sql/queries.sql | 37 -- backend/controller/leases/sql/queries.sql.go | 97 ---- backend/controller/sql/querier.go | 6 + backend/controller/sql/queries.sql.go | 85 +++ sqlc.yaml | 14 +- 17 files changed, 131 insertions(+), 956 deletions(-) create mode 100644 backend/controller/cronjobs/shared/sql/queries.sql rename backend/controller/{leases => }/dal/lease.go (74%) delete mode 100644 backend/controller/leases/dal/dal.go delete mode 100644 backend/controller/leases/dal/lease_test.go delete mode 100644 backend/controller/leases/sql/conn.go delete mode 100644 
backend/controller/leases/sql/db.go delete mode 100644 backend/controller/leases/sql/models.go delete mode 100644 backend/controller/leases/sql/querier.go delete mode 100644 backend/controller/leases/sql/queries.sql delete mode 100644 backend/controller/leases/sql/queries.sql.go diff --git a/Justfile b/Justfile index ab9244fa6d..1f499c4c2c 100644 --- a/Justfile +++ b/Justfile @@ -66,7 +66,7 @@ init-db: # Regenerate SQLC code (requires init-db to be run first) build-sqlc: - @mk backend/controller/sql/{db.go,models.go,querier.go,queries.sql.go} backend/controller/{cronjobs,leases}/sql/{db.go,models.go,querier.go,queries.sql.go} common/configuration/sql/{db.go,models.go,querier.go,queries.sql.go} : backend/controller/sql/queries.sql backend/controller/{cronjobs,leases}/sql/queries.sql common/configuration/sql/queries.sql backend/controller/sql/schema sqlc.yaml -- "just init-db && sqlc generate" + @mk backend/controller/sql/{db.go,models.go,querier.go,queries.sql.go} backend/controller/{cronjobs}/sql/{db.go,models.go,querier.go,queries.sql.go} backend/controller/{cronjobs}/shared/sql/{db.go,models.go,querier.go,queries.sql.go} common/configuration/sql/{db.go,models.go,querier.go,queries.sql.go} : backend/controller/sql/queries.sql backend/controller/{cronjobs}/sql/queries.sql backend/controller/{cronjobs}/shared/sql/queries.sql common/configuration/sql/queries.sql backend/controller/sql/schema sqlc.yaml -- "just init-db && sqlc generate" # Build the ZIP files that are embedded in the FTL release binaries build-zips: build-kt-runtime diff --git a/backend/controller/cronjobs/shared/sql/queries.sql b/backend/controller/cronjobs/shared/sql/queries.sql new file mode 100644 index 0000000000..882e305be9 --- /dev/null +++ b/backend/controller/cronjobs/shared/sql/queries.sql @@ -0,0 +1,10 @@ +-- name: CreateCronJob :exec +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + sqlc.arg('key')::cron_job_key, + (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), + sqlc.arg('module_name')::TEXT, + sqlc.arg('verb')::TEXT, + sqlc.arg('schedule')::TEXT, + sqlc.arg('start_time')::TIMESTAMPTZ, + sqlc.arg('next_execution')::TIMESTAMPTZ); diff --git a/backend/controller/cronjobs/sql/queries.sql b/backend/controller/cronjobs/sql/queries.sql index b16589ff2c..c99d193b6d 100644 --- a/backend/controller/cronjobs/sql/queries.sql +++ b/backend/controller/cronjobs/sql/queries.sql @@ -4,17 +4,6 @@ FROM cron_jobs j INNER JOIN deployments d on j.deployment_id = d.id WHERE d.min_replicas > 0; --- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - sqlc.arg('key')::cron_job_key, - (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), - sqlc.arg('module_name')::TEXT, - sqlc.arg('verb')::TEXT, - sqlc.arg('schedule')::TEXT, - sqlc.arg('start_time')::TIMESTAMPTZ, - sqlc.arg('next_execution')::TIMESTAMPTZ); - -- name: StartCronJobs :many WITH updates AS ( UPDATE cron_jobs diff --git a/backend/controller/dal/async_calls.go b/backend/controller/dal/async_calls.go index b801aaf233..06f3d7a5ce 100644 --- a/backend/controller/dal/async_calls.go +++ b/backend/controller/dal/async_calls.go @@ -9,9 +9,7 @@ import ( "github.com/alecthomas/participle/v2" "github.com/alecthomas/types/either" - "github.com/jackc/pgx/v5/pgxpool" - leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" 
"github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/backend/schema" @@ -72,12 +70,12 @@ func ParseAsyncOrigin(origin string) (AsyncOrigin, error) { } type AsyncCall struct { - *leasesdal.Lease // May be nil - ID int64 - Origin AsyncOrigin - Verb schema.RefKey - Request json.RawMessage - ScheduledAt time.Time + *Lease // May be nil + ID int64 + Origin AsyncOrigin + Verb schema.RefKey + Request json.RawMessage + ScheduledAt time.Time RemainingAttempts int32 Backoff time.Duration @@ -87,7 +85,7 @@ type AsyncCall struct { // AcquireAsyncCall acquires a pending async call to execute. // // Returns ErrNotFound if there are no async calls to acquire. -func (d *DAL) AcquireAsyncCall(ctx context.Context, pool *pgxpool.Pool) (call *AsyncCall, err error) { +func (d *DAL) AcquireAsyncCall(ctx context.Context) (call *AsyncCall, err error) { tx, err := d.Begin(ctx) if err != nil { return nil, fmt.Errorf("failed to begin transaction: %w", err) @@ -108,7 +106,7 @@ func (d *DAL) AcquireAsyncCall(ctx context.Context, pool *pgxpool.Pool) (call *A if err != nil { return nil, fmt.Errorf("failed to parse origin key %q: %w", row.Origin, err) } - lease, _ := leasesdal.New(pool).NewLease(ctx, row.LeaseKey, row.LeaseIdempotencyKey, ttl) + lease, _ := d.newLease(ctx, row.LeaseKey, row.LeaseIdempotencyKey, ttl) return &AsyncCall{ ID: row.AsyncCallID, Verb: row.Verb, diff --git a/backend/controller/dal/dal.go b/backend/controller/dal/dal.go index 71d4b83308..fdea7be20b 100644 --- a/backend/controller/dal/dal.go +++ b/backend/controller/dal/dal.go @@ -13,11 +13,9 @@ import ( "github.com/alecthomas/types/optional" "github.com/alecthomas/types/pubsub" sets "github.com/deckarep/golang-set/v2" - "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "google.golang.org/protobuf/proto" - cronjobssql "github.com/TBD54566975/ftl/backend/controller/cronjobs/sql" "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1" @@ -442,21 +440,14 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem logger := log.FromContext(ctx) // Start the transaction - tx, err := d.db.Conn().Begin(ctx) + tx, err := d.db.Begin(ctx) if err != nil { return model.DeploymentKey{}, fmt.Errorf("could not start transaction: %w", err) } - defer func() { - err := tx.Rollback(ctx) - if err != nil && !errors.Is(err, pgx.ErrTxClosed) { - panic(err) - } - }() - - qtx := sql.New(d.db.Conn()).WithTx(tx) + defer tx.CommitOrRollback(ctx, &err) - existingDeployment, err := d.checkForExistingDeployments(ctx, qtx, moduleSchema, artefacts) + existingDeployment, err := d.checkForExistingDeployments(ctx, tx, moduleSchema, artefacts) if err != nil { return model.DeploymentKey{}, err } else if !existingDeployment.IsZero() { @@ -474,7 +465,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } // TODO(aat): "schema" containing language? 
- _, err = qtx.UpsertModule(ctx, language, moduleSchema.Name) + _, err = tx.UpsertModule(ctx, language, moduleSchema.Name) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to upsert module: %w", dalerrs.TranslatePGError(err)) } @@ -485,7 +476,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem if !ok { continue } - err := qtx.UpsertTopic(ctx, sql.UpsertTopicParams{ + err := tx.UpsertTopic(ctx, sql.UpsertTopicParams{ Topic: model.NewTopicKey(moduleSchema.Name, t.Name), Module: moduleSchema.Name, Name: t.Name, @@ -499,13 +490,13 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem deploymentKey := model.NewDeploymentKey(moduleSchema.Name) // Create the deployment - err = qtx.CreateDeployment(ctx, moduleSchema.Name, schemaBytes, deploymentKey) + err = tx.CreateDeployment(ctx, moduleSchema.Name, schemaBytes, deploymentKey) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to create deployment: %w", dalerrs.TranslatePGError(err)) } uploadedDigests := slices.Map(artefacts, func(in DeploymentArtefact) []byte { return in.Digest[:] }) - artefactDigests, err := qtx.GetArtefactDigests(ctx, uploadedDigests) + artefactDigests, err := tx.GetArtefactDigests(ctx, uploadedDigests) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to get artefact digests: %w", err) } @@ -517,7 +508,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem // Associate the artefacts with the deployment for _, row := range artefactDigests { artefact := artefactsByDigest[sha256.FromBytes(row.Digest)] - err = qtx.AssociateArtefactWithDeployment(ctx, sql.AssociateArtefactWithDeploymentParams{ + err = tx.AssociateArtefactWithDeployment(ctx, sql.AssociateArtefactWithDeploymentParams{ Key: deploymentKey, ArtefactID: row.ID, Executable: artefact.Executable, @@ -529,7 +520,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } for _, ingressRoute := range ingressRoutes { - err = qtx.CreateIngressRoute(ctx, sql.CreateIngressRouteParams{ + err = tx.CreateIngressRoute(ctx, sql.CreateIngressRouteParams{ Key: deploymentKey, Method: ingressRoute.Method, Path: ingressRoute.Path, @@ -541,12 +532,10 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } } - cronjobsqtx := cronjobssql.New(d.db.Conn()).WithTx(tx) - for _, job := range cronJobs { // Start time must be calculated by the caller rather than generated by db // This ensures that nextExecution is after start time, otherwise the job will never be triggered - err := cronjobsqtx.CreateCronJob(ctx, cronjobssql.CreateCronJobParams{ + err := tx.CreateCronJob(ctx, sql.CreateCronJobParams{ Key: job.Key, DeploymentKey: deploymentKey, ModuleName: job.Verb.Module, @@ -560,11 +549,6 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } } - err = tx.Commit(ctx) - if err != nil { - return model.DeploymentKey{}, fmt.Errorf("failed to commit transaction: %w", dalerrs.TranslatePGError(err)) - } - return deploymentKey, nil } @@ -1127,12 +1111,12 @@ func (d *DAL) GetActiveRunners(ctx context.Context) ([]Runner, error) { } // Check if a deployment exists that exactly matches the given artefacts and schema. 
-func (*DAL) checkForExistingDeployments(ctx context.Context, qtx *sql.Queries, moduleSchema *schema.Module, artefacts []DeploymentArtefact) (model.DeploymentKey, error) { +func (*DAL) checkForExistingDeployments(ctx context.Context, tx *sql.Tx, moduleSchema *schema.Module, artefacts []DeploymentArtefact) (model.DeploymentKey, error) { schemaBytes, err := schema.ModuleToBytes(moduleSchema) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to marshal schema: %w", err) } - existing, err := qtx.GetDeploymentsWithArtefacts(ctx, + existing, err := tx.GetDeploymentsWithArtefacts(ctx, sha256esToBytes(slices.Map(artefacts, func(in DeploymentArtefact) sha256.SHA256 { return in.Digest })), schemaBytes, int64(len(artefacts)), diff --git a/backend/controller/leases/dal/lease.go b/backend/controller/dal/lease.go similarity index 74% rename from backend/controller/leases/dal/lease.go rename to backend/controller/dal/lease.go index cf6e4c913c..19b1a7146c 100644 --- a/backend/controller/leases/dal/lease.go +++ b/backend/controller/dal/lease.go @@ -11,7 +11,7 @@ import ( "github.com/google/uuid" "github.com/TBD54566975/ftl/backend/controller/leases" - "github.com/TBD54566975/ftl/backend/controller/leases/sql" + "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/internal/log" ) @@ -102,11 +102,11 @@ func (d *DAL) AcquireLease(ctx context.Context, key leases.Key, ttl time.Duratio } return nil, nil, err } - leaseCtx, lease := d.NewLease(ctx, key, idempotencyKey, ttl) + leaseCtx, lease := d.newLease(ctx, key, idempotencyKey, ttl) return leaseCtx, lease, nil } -func (d *DAL) NewLease(ctx context.Context, key leases.Key, idempotencyKey uuid.UUID, ttl time.Duration) (*Lease, context.Context) { +func (d *DAL) newLease(ctx context.Context, key leases.Key, idempotencyKey uuid.UUID, ttl time.Duration) (*Lease, context.Context) { ctx, cancelCtx := context.WithCancel(ctx) lease := &Lease{ idempotencyKey: idempotencyKey, @@ -119,27 +119,3 @@ func (d *DAL) NewLease(ctx context.Context, key leases.Key, idempotencyKey uuid. go lease.renew(ctx, cancelCtx) return lease, ctx } - -// GetLeaseInfo returns the metadata and expiry time for the lease with the given key. -// -// metadata should be a pointer to the type that metadata should be unmarshaled into. -func (d *DAL) GetLeaseInfo(ctx context.Context, key leases.Key, metadata any) (expiry time.Time, err error) { - l, err := d.db.GetLeaseInfo(ctx, key) - if err != nil { - return expiry, dalerrs.TranslatePGError(err) - } - if err := json.Unmarshal(l.Metadata, metadata); err != nil { - return expiry, fmt.Errorf("could not unmarshal lease metadata: %w", err) - } - return l.ExpiresAt, nil -} - -// ExpireLeases expires (deletes) all leases that have expired. -func (d *DAL) ExpireLeases(ctx context.Context) error { - count, err := d.db.ExpireLeases(ctx) - // TODO: Return and log the actual lease keys? 
- if count > 0 { - log.FromContext(ctx).Warnf("Expired %d leases", count) - } - return dalerrs.TranslatePGError(err) -} diff --git a/backend/controller/leases/dal/dal.go b/backend/controller/leases/dal/dal.go deleted file mode 100644 index 5226024b84..0000000000 --- a/backend/controller/leases/dal/dal.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package dal provides a data abstraction layer for leases -package dal - -import ( - "github.com/jackc/pgx/v5/pgxpool" - - "github.com/TBD54566975/ftl/backend/controller/leases/sql" -) - -type DAL struct { - db sql.DBI -} - -func New(pool *pgxpool.Pool) *DAL { - return &DAL{db: sql.NewDB(pool)} -} diff --git a/backend/controller/leases/dal/lease_test.go b/backend/controller/leases/dal/lease_test.go deleted file mode 100644 index a7fa731f5f..0000000000 --- a/backend/controller/leases/dal/lease_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package dal - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/alecthomas/assert/v2" - "github.com/alecthomas/types/optional" - "github.com/google/uuid" - - "github.com/TBD54566975/ftl/backend/controller/leases" - "github.com/TBD54566975/ftl/backend/controller/sql" - "github.com/TBD54566975/ftl/backend/controller/sql/sqltest" - dalerrs "github.com/TBD54566975/ftl/backend/dal" - "github.com/TBD54566975/ftl/internal/log" -) - -func leaseExists(t *testing.T, conn sql.ConnI, idempotencyKey uuid.UUID, key leases.Key) bool { - t.Helper() - var count int - err := dalerrs.TranslatePGError(conn. - QueryRow(context.Background(), "SELECT COUNT(*) FROM leases WHERE idempotency_key = $1 AND key = $2", idempotencyKey, key). - Scan(&count)) - if errors.Is(err, dalerrs.ErrNotFound) { - return false - } - assert.NoError(t, err) - return count > 0 -} - -func TestLease(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - ctx := log.ContextWithNewDefaultLogger(context.Background()) - conn := sqltest.OpenForTesting(ctx, t) - dal := New(conn) - - // TTL is too short, expect an error - _, _, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*1, optional.None[any]()) - assert.Error(t, err) - - leasei, leaseCtx, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) - assert.NoError(t, err) - - lease := leasei.(*Lease) //nolint:forcetypeassert - - // Try to acquire the same lease again, which should fail. - _, _, err = dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) - assert.IsError(t, err, leases.ErrConflict) - - time.Sleep(time.Second * 6) - - assert.True(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) - - err = lease.Release() - assert.NoError(t, err) - - assert.False(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) - - time.Sleep(time.Second) - assert.Error(t, leaseCtx.Err(), "context should be cancelled after lease was released") -} - -func TestExpireLeases(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - ctx := log.ContextWithNewDefaultLogger(context.Background()) - conn := sqltest.OpenForTesting(ctx, t) - dal := New(conn) - - leasei, _, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) - assert.NoError(t, err) - - lease := leasei.(*Lease) //nolint:forcetypeassert - - err = dal.ExpireLeases(ctx) - assert.NoError(t, err) - - assert.True(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) - - // Pretend that the lease expired. 
- lease.leak = true - err = lease.Release() - assert.NoError(t, err) - - assert.True(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) - - time.Sleep(time.Second * 6) - - err = dal.ExpireLeases(ctx) - assert.NoError(t, err) - - assert.False(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) - - leasei, _, err = dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) - assert.NoError(t, err) - - err = leasei.Release() - assert.NoError(t, err) -} diff --git a/backend/controller/leases/sql/conn.go b/backend/controller/leases/sql/conn.go deleted file mode 100644 index 065487cefa..0000000000 --- a/backend/controller/leases/sql/conn.go +++ /dev/null @@ -1,21 +0,0 @@ -package sql - -type DBI interface { - Querier - Conn() ConnI -} - -type ConnI interface { - DBTX -} - -type DB struct { - conn ConnI - *Queries -} - -func NewDB(conn ConnI) *DB { - return &DB{conn: conn, Queries: New(conn)} -} - -func (d *DB) Conn() ConnI { return d.conn } diff --git a/backend/controller/leases/sql/db.go b/backend/controller/leases/sql/db.go deleted file mode 100644 index c4b45fb311..0000000000 --- a/backend/controller/leases/sql/db.go +++ /dev/null @@ -1,32 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. -// versions: -// sqlc v1.26.0 - -package sql - -import ( - "context" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgconn" -) - -type DBTX interface { - Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) - Query(context.Context, string, ...interface{}) (pgx.Rows, error) - QueryRow(context.Context, string, ...interface{}) pgx.Row -} - -func New(db DBTX) *Queries { - return &Queries{db: db} -} - -type Queries struct { - db DBTX -} - -func (q *Queries) WithTx(tx pgx.Tx) *Queries { - return &Queries{ - db: tx, - } -} diff --git a/backend/controller/leases/sql/models.go b/backend/controller/leases/sql/models.go deleted file mode 100644 index 6d2095f7ba..0000000000 --- a/backend/controller/leases/sql/models.go +++ /dev/null @@ -1,541 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. -// versions: -// sqlc v1.26.0 - -package sql - -import ( - "database/sql/driver" - "encoding/json" - "fmt" - "time" - - "github.com/TBD54566975/ftl/backend/controller/leases" - "github.com/TBD54566975/ftl/backend/schema" - "github.com/TBD54566975/ftl/internal/model" - "github.com/alecthomas/types/optional" - "github.com/google/uuid" -) - -type AsyncCallState string - -const ( - AsyncCallStatePending AsyncCallState = "pending" - AsyncCallStateExecuting AsyncCallState = "executing" - AsyncCallStateSuccess AsyncCallState = "success" - AsyncCallStateError AsyncCallState = "error" -) - -func (e *AsyncCallState) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = AsyncCallState(s) - case string: - *e = AsyncCallState(s) - default: - return fmt.Errorf("unsupported scan type for AsyncCallState: %T", src) - } - return nil -} - -type NullAsyncCallState struct { - AsyncCallState AsyncCallState - Valid bool // Valid is true if AsyncCallState is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullAsyncCallState) Scan(value interface{}) error { - if value == nil { - ns.AsyncCallState, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.AsyncCallState.Scan(value) -} - -// Value implements the driver Valuer interface. 
-func (ns NullAsyncCallState) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.AsyncCallState), nil -} - -type ControllerState string - -const ( - ControllerStateLive ControllerState = "live" - ControllerStateDead ControllerState = "dead" -) - -func (e *ControllerState) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = ControllerState(s) - case string: - *e = ControllerState(s) - default: - return fmt.Errorf("unsupported scan type for ControllerState: %T", src) - } - return nil -} - -type NullControllerState struct { - ControllerState ControllerState - Valid bool // Valid is true if ControllerState is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullControllerState) Scan(value interface{}) error { - if value == nil { - ns.ControllerState, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.ControllerState.Scan(value) -} - -// Value implements the driver Valuer interface. -func (ns NullControllerState) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.ControllerState), nil -} - -type CronJobState string - -const ( - CronJobStateIdle CronJobState = "idle" - CronJobStateExecuting CronJobState = "executing" -) - -func (e *CronJobState) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = CronJobState(s) - case string: - *e = CronJobState(s) - default: - return fmt.Errorf("unsupported scan type for CronJobState: %T", src) - } - return nil -} - -type NullCronJobState struct { - CronJobState CronJobState - Valid bool // Valid is true if CronJobState is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullCronJobState) Scan(value interface{}) error { - if value == nil { - ns.CronJobState, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.CronJobState.Scan(value) -} - -// Value implements the driver Valuer interface. -func (ns NullCronJobState) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.CronJobState), nil -} - -type EventType string - -const ( - EventTypeCall EventType = "call" - EventTypeLog EventType = "log" - EventTypeDeploymentCreated EventType = "deployment_created" - EventTypeDeploymentUpdated EventType = "deployment_updated" -) - -func (e *EventType) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = EventType(s) - case string: - *e = EventType(s) - default: - return fmt.Errorf("unsupported scan type for EventType: %T", src) - } - return nil -} - -type NullEventType struct { - EventType EventType - Valid bool // Valid is true if EventType is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullEventType) Scan(value interface{}) error { - if value == nil { - ns.EventType, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.EventType.Scan(value) -} - -// Value implements the driver Valuer interface. 
-func (ns NullEventType) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.EventType), nil -} - -type FsmStatus string - -const ( - FsmStatusRunning FsmStatus = "running" - FsmStatusCompleted FsmStatus = "completed" - FsmStatusFailed FsmStatus = "failed" -) - -func (e *FsmStatus) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = FsmStatus(s) - case string: - *e = FsmStatus(s) - default: - return fmt.Errorf("unsupported scan type for FsmStatus: %T", src) - } - return nil -} - -type NullFsmStatus struct { - FsmStatus FsmStatus - Valid bool // Valid is true if FsmStatus is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullFsmStatus) Scan(value interface{}) error { - if value == nil { - ns.FsmStatus, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.FsmStatus.Scan(value) -} - -// Value implements the driver Valuer interface. -func (ns NullFsmStatus) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.FsmStatus), nil -} - -type Origin string - -const ( - OriginIngress Origin = "ingress" - OriginCron Origin = "cron" - OriginPubsub Origin = "pubsub" -) - -func (e *Origin) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = Origin(s) - case string: - *e = Origin(s) - default: - return fmt.Errorf("unsupported scan type for Origin: %T", src) - } - return nil -} - -type NullOrigin struct { - Origin Origin - Valid bool // Valid is true if Origin is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullOrigin) Scan(value interface{}) error { - if value == nil { - ns.Origin, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.Origin.Scan(value) -} - -// Value implements the driver Valuer interface. -func (ns NullOrigin) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.Origin), nil -} - -type RunnerState string - -const ( - RunnerStateIdle RunnerState = "idle" - RunnerStateReserved RunnerState = "reserved" - RunnerStateAssigned RunnerState = "assigned" - RunnerStateDead RunnerState = "dead" -) - -func (e *RunnerState) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = RunnerState(s) - case string: - *e = RunnerState(s) - default: - return fmt.Errorf("unsupported scan type for RunnerState: %T", src) - } - return nil -} - -type NullRunnerState struct { - RunnerState RunnerState - Valid bool // Valid is true if RunnerState is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullRunnerState) Scan(value interface{}) error { - if value == nil { - ns.RunnerState, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.RunnerState.Scan(value) -} - -// Value implements the driver Valuer interface. 
-func (ns NullRunnerState) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.RunnerState), nil -} - -type TopicSubscriptionState string - -const ( - TopicSubscriptionStateIdle TopicSubscriptionState = "idle" - TopicSubscriptionStateExecuting TopicSubscriptionState = "executing" -) - -func (e *TopicSubscriptionState) Scan(src interface{}) error { - switch s := src.(type) { - case []byte: - *e = TopicSubscriptionState(s) - case string: - *e = TopicSubscriptionState(s) - default: - return fmt.Errorf("unsupported scan type for TopicSubscriptionState: %T", src) - } - return nil -} - -type NullTopicSubscriptionState struct { - TopicSubscriptionState TopicSubscriptionState - Valid bool // Valid is true if TopicSubscriptionState is not NULL -} - -// Scan implements the Scanner interface. -func (ns *NullTopicSubscriptionState) Scan(value interface{}) error { - if value == nil { - ns.TopicSubscriptionState, ns.Valid = "", false - return nil - } - ns.Valid = true - return ns.TopicSubscriptionState.Scan(value) -} - -// Value implements the driver Valuer interface. -func (ns NullTopicSubscriptionState) Value() (driver.Value, error) { - if !ns.Valid { - return nil, nil - } - return string(ns.TopicSubscriptionState), nil -} - -type Artefact struct { - ID int64 - CreatedAt time.Time - Digest []byte - Content []byte -} - -type AsyncCall struct { - ID int64 - CreatedAt time.Time - LeaseID optional.Option[int64] - Verb schema.RefKey - State AsyncCallState - Origin string - ScheduledAt time.Time - Request []byte - Response []byte - Error optional.Option[string] - RemainingAttempts int32 - Backoff time.Duration - MaxBackoff time.Duration -} - -type Controller struct { - ID int64 - Key model.ControllerKey - Created time.Time - LastSeen time.Time - State ControllerState - Endpoint string -} - -type CronJob struct { - ID int64 - Key model.CronJobKey - DeploymentID int64 - Verb string - Schedule string - StartTime time.Time - NextExecution time.Time - State model.CronJobState - ModuleName string -} - -type Deployment struct { - ID int64 - CreatedAt time.Time - ModuleID int64 - Key model.DeploymentKey - Schema *schema.Module - Labels []byte - MinReplicas int32 -} - -type DeploymentArtefact struct { - ArtefactID int64 - DeploymentID int64 - CreatedAt time.Time - Executable bool - Path string -} - -type Event struct { - ID int64 - TimeStamp time.Time - DeploymentID int64 - RequestID optional.Option[int64] - Type EventType - CustomKey1 optional.Option[string] - CustomKey2 optional.Option[string] - CustomKey3 optional.Option[string] - CustomKey4 optional.Option[string] - Payload json.RawMessage -} - -type FsmInstance struct { - ID int64 - CreatedAt time.Time - Fsm schema.RefKey - Key string - Status FsmStatus - CurrentState optional.Option[schema.RefKey] - DestinationState optional.Option[schema.RefKey] - AsyncCallID optional.Option[int64] -} - -type IngressRoute struct { - Method string - Path string - DeploymentID int64 - Module string - Verb string -} - -type Lease struct { - ID int64 - IdempotencyKey uuid.UUID - Key leases.Key - CreatedAt time.Time - ExpiresAt time.Time - Metadata []byte -} - -type Module struct { - ID int64 - Language string - Name string -} - -type ModuleConfiguration struct { - ID int64 - CreatedAt time.Time - Module optional.Option[string] - Name string - Value []byte -} - -type Request struct { - ID int64 - Origin Origin - Key model.RequestKey - SourceAddr string -} - -type Runner struct { - ID int64 - Key model.RunnerKey - Created time.Time - LastSeen 
time.Time - ReservationTimeout optional.Option[time.Time] - State RunnerState - Endpoint string - ModuleName optional.Option[string] - DeploymentID optional.Option[int64] - Labels []byte -} - -type Topic struct { - ID int64 - Key model.TopicKey - CreatedAt time.Time - ModuleID int64 - Name string - Type string - Head optional.Option[int64] -} - -type TopicEvent struct { - ID int64 - CreatedAt time.Time - Key model.TopicEventKey - TopicID int64 - Payload []byte -} - -type TopicSubscriber struct { - ID int64 - Key model.SubscriberKey - CreatedAt time.Time - TopicSubscriptionsID int64 - DeploymentID int64 - Sink schema.RefKey - RetryAttempts int32 - Backoff time.Duration - MaxBackoff time.Duration -} - -type TopicSubscription struct { - ID int64 - Key model.SubscriptionKey - CreatedAt time.Time - TopicID int64 - ModuleID int64 - DeploymentID int64 - Name string - Cursor optional.Option[int64] - State TopicSubscriptionState -} diff --git a/backend/controller/leases/sql/querier.go b/backend/controller/leases/sql/querier.go deleted file mode 100644 index aed4b3dd8a..0000000000 --- a/backend/controller/leases/sql/querier.go +++ /dev/null @@ -1,23 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. -// versions: -// sqlc v1.26.0 - -package sql - -import ( - "context" - - "github.com/TBD54566975/ftl/backend/controller/leases" - "github.com/google/uuid" - "time" -) - -type Querier interface { - ExpireLeases(ctx context.Context) (int64, error) - GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) - NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) - ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) - RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) -} - -var _ Querier = (*Queries)(nil) diff --git a/backend/controller/leases/sql/queries.sql b/backend/controller/leases/sql/queries.sql deleted file mode 100644 index 60a927022c..0000000000 --- a/backend/controller/leases/sql/queries.sql +++ /dev/null @@ -1,37 +0,0 @@ --- name: NewLease :one -INSERT INTO leases ( - idempotency_key, - key, - expires_at, - metadata -) -VALUES ( - gen_random_uuid(), - @key::lease_key, - (NOW() AT TIME ZONE 'utc') + @ttl::interval, - sqlc.narg('metadata')::JSONB -) -RETURNING idempotency_key; - --- name: RenewLease :one -UPDATE leases -SET expires_at = (NOW() AT TIME ZONE 'utc') + @ttl::interval -WHERE idempotency_key = @idempotency_key AND key = @key::lease_key -RETURNING true; - --- name: ReleaseLease :one -DELETE FROM leases -WHERE idempotency_key = @idempotency_key AND key = @key::lease_key -RETURNING true; - --- name: ExpireLeases :one -WITH expired AS ( - DELETE FROM leases - WHERE expires_at < NOW() AT TIME ZONE 'utc' - RETURNING 1 -) -SELECT COUNT(*) -FROM expired; - --- name: GetLeaseInfo :one -SELECT expires_at, metadata FROM leases WHERE key = @key::lease_key; diff --git a/backend/controller/leases/sql/queries.sql.go b/backend/controller/leases/sql/queries.sql.go deleted file mode 100644 index e73302dcc6..0000000000 --- a/backend/controller/leases/sql/queries.sql.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by sqlc. DO NOT EDIT. 
-// versions: -// sqlc v1.26.0 -// source: queries.sql - -package sql - -import ( - "context" - "time" - - "github.com/TBD54566975/ftl/backend/controller/leases" - "github.com/google/uuid" -) - -const expireLeases = `-- name: ExpireLeases :one -WITH expired AS ( - DELETE FROM leases - WHERE expires_at < NOW() AT TIME ZONE 'utc' - RETURNING 1 -) -SELECT COUNT(*) -FROM expired -` - -func (q *Queries) ExpireLeases(ctx context.Context) (int64, error) { - row := q.db.QueryRow(ctx, expireLeases) - var count int64 - err := row.Scan(&count) - return count, err -} - -const getLeaseInfo = `-- name: GetLeaseInfo :one -SELECT expires_at, metadata FROM leases WHERE key = $1::lease_key -` - -type GetLeaseInfoRow struct { - ExpiresAt time.Time - Metadata []byte -} - -func (q *Queries) GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) { - row := q.db.QueryRow(ctx, getLeaseInfo, key) - var i GetLeaseInfoRow - err := row.Scan(&i.ExpiresAt, &i.Metadata) - return i, err -} - -const newLease = `-- name: NewLease :one -INSERT INTO leases ( - idempotency_key, - key, - expires_at, - metadata -) -VALUES ( - gen_random_uuid(), - $1::lease_key, - (NOW() AT TIME ZONE 'utc') + $2::interval, - $3::JSONB -) -RETURNING idempotency_key -` - -func (q *Queries) NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) { - row := q.db.QueryRow(ctx, newLease, key, ttl, metadata) - var idempotency_key uuid.UUID - err := row.Scan(&idempotency_key) - return idempotency_key, err -} - -const releaseLease = `-- name: ReleaseLease :one -DELETE FROM leases -WHERE idempotency_key = $1 AND key = $2::lease_key -RETURNING true -` - -func (q *Queries) ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { - row := q.db.QueryRow(ctx, releaseLease, idempotencyKey, key) - var column_1 bool - err := row.Scan(&column_1) - return column_1, err -} - -const renewLease = `-- name: RenewLease :one -UPDATE leases -SET expires_at = (NOW() AT TIME ZONE 'utc') + $1::interval -WHERE idempotency_key = $2 AND key = $3::lease_key -RETURNING true -` - -func (q *Queries) RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { - row := q.db.QueryRow(ctx, renewLease, ttl, idempotencyKey, key) - var column_1 bool - err := row.Scan(&column_1) - return column_1, err -} diff --git a/backend/controller/sql/querier.go b/backend/controller/sql/querier.go index 63354c1339..280de07264 100644 --- a/backend/controller/sql/querier.go +++ b/backend/controller/sql/querier.go @@ -8,9 +8,11 @@ import ( "context" "time" + "github.com/TBD54566975/ftl/backend/controller/leases" "github.com/TBD54566975/ftl/backend/schema" "github.com/TBD54566975/ftl/internal/model" "github.com/alecthomas/types/optional" + "github.com/google/uuid" ) type Querier interface { @@ -23,6 +25,7 @@ type Querier interface { // Create a new artefact and return the artefact ID. 
CreateArtefact(ctx context.Context, digest []byte, content []byte) (int64, error) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams) (int64, error) + CreateCronJob(ctx context.Context, arg CreateCronJobParams) error CreateDeployment(ctx context.Context, moduleName string, schema []byte, key model.DeploymentKey) error CreateIngressRoute(ctx context.Context, arg CreateIngressRouteParams) error CreateRequest(ctx context.Context, origin Origin, key model.RequestKey, sourceAddr string) error @@ -82,7 +85,10 @@ type Querier interface { KillStaleControllers(ctx context.Context, timeout time.Duration) (int64, error) KillStaleRunners(ctx context.Context, timeout time.Duration) (int64, error) LoadAsyncCall(ctx context.Context, id int64) (AsyncCall, error) + NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) PublishEventForTopic(ctx context.Context, arg PublishEventForTopicParams) error + ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) + RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) ReplaceDeployment(ctx context.Context, oldDeployment model.DeploymentKey, newDeployment model.DeploymentKey, minReplicas int32) (int64, error) // Find an idle runner and reserve it for the given deployment. ReserveRunner(ctx context.Context, reservationTimeout time.Time, deploymentKey model.DeploymentKey, labels []byte) (Runner, error) diff --git a/backend/controller/sql/queries.sql.go b/backend/controller/sql/queries.sql.go index ff4e2d0fa9..a54e972291 100644 --- a/backend/controller/sql/queries.sql.go +++ b/backend/controller/sql/queries.sql.go @@ -177,6 +177,41 @@ func (q *Queries) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams return id, err } +const createCronJob = `-- name: CreateCronJob :exec +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + $1::cron_job_key, + (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), + $3::TEXT, + $4::TEXT, + $5::TEXT, + $6::TIMESTAMPTZ, + $7::TIMESTAMPTZ) +` + +type CreateCronJobParams struct { + Key model.CronJobKey + DeploymentKey model.DeploymentKey + ModuleName string + Verb string + Schedule string + StartTime time.Time + NextExecution time.Time +} + +func (q *Queries) CreateCronJob(ctx context.Context, arg CreateCronJobParams) error { + _, err := q.db.Exec(ctx, createCronJob, + arg.Key, + arg.DeploymentKey, + arg.ModuleName, + arg.Verb, + arg.Schedule, + arg.StartTime, + arg.NextExecution, + ) + return err +} + const createDeployment = `-- name: CreateDeployment :exec INSERT INTO deployments (module_id, "schema", "key") VALUES ((SELECT id FROM modules WHERE name = $1::TEXT LIMIT 1), $2::BYTEA, $3::deployment_key) @@ -1736,6 +1771,29 @@ func (q *Queries) LoadAsyncCall(ctx context.Context, id int64) (AsyncCall, error return i, err } +const newLease = `-- name: NewLease :one +INSERT INTO leases ( + idempotency_key, + key, + expires_at, + metadata +) +VALUES ( + gen_random_uuid(), + $1::lease_key, + (NOW() AT TIME ZONE 'utc') + $2::interval, + $3::JSONB +) +RETURNING idempotency_key +` + +func (q *Queries) NewLease(ctx context.Context, key leases.Key, ttl time.Duration, metadata []byte) (uuid.UUID, error) { + row := q.db.QueryRow(ctx, newLease, key, ttl, metadata) + var idempotency_key uuid.UUID + err := row.Scan(&idempotency_key) + return idempotency_key, err +} + const publishEventForTopic = `-- name: PublishEventForTopic 
:exec INSERT INTO topic_events ( "key", @@ -1772,6 +1830,33 @@ func (q *Queries) PublishEventForTopic(ctx context.Context, arg PublishEventForT return err } +const releaseLease = `-- name: ReleaseLease :one +DELETE FROM leases +WHERE idempotency_key = $1 AND key = $2::lease_key +RETURNING true +` + +func (q *Queries) ReleaseLease(ctx context.Context, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { + row := q.db.QueryRow(ctx, releaseLease, idempotencyKey, key) + var column_1 bool + err := row.Scan(&column_1) + return column_1, err +} + +const renewLease = `-- name: RenewLease :one +UPDATE leases +SET expires_at = (NOW() AT TIME ZONE 'utc') + $1::interval +WHERE idempotency_key = $2 AND key = $3::lease_key +RETURNING true +` + +func (q *Queries) RenewLease(ctx context.Context, ttl time.Duration, idempotencyKey uuid.UUID, key leases.Key) (bool, error) { + row := q.db.QueryRow(ctx, renewLease, ttl, idempotencyKey, key) + var column_1 bool + err := row.Scan(&column_1) + return column_1, err +} + const replaceDeployment = `-- name: ReplaceDeployment :one WITH update_container AS ( UPDATE deployments AS d diff --git a/sqlc.yaml b/sqlc.yaml index 2b17199bf3..5b64e313de 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -2,7 +2,9 @@ version: "2" sql: - &daldir engine: "postgresql" - queries: "backend/controller/sql/queries.sql" + queries: + - "backend/controller/sql/queries.sql" + - "backend/controller/cronjobs/shared/sql/queries.sql" schema: "backend/controller/sql/schema" database: uri: postgres://localhost:15432/ftl?sslmode=disable&user=postgres&password=secret @@ -139,17 +141,13 @@ sql: # - postgresql-query-too-costly - postgresql-no-seq-scan - <<: *daldir - queries: "backend/controller/cronjobs/sql/queries.sql" + queries: + - "backend/controller/cronjobs/sql/queries.sql" + - "backend/controller/cronjobs/shared/sql/queries.sql" gen: go: <<: *gengo out: "backend/controller/cronjobs/sql" - - <<: *daldir - queries: "backend/controller/leases/sql/queries.sql" - gen: - go: - <<: *gengo - out: "backend/controller/leases/sql" - <<: *daldir queries: "common/configuration/sql/queries.sql" gen: From 0753954d3e2436f12a415abe216a55ea983b1df7 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Mon, 1 Jul 2024 20:43:33 -0400 Subject: [PATCH 04/13] fixes --- backend/controller/controller.go | 7 +++-- backend/controller/dal/fsm.go | 6 ++--- backend/controller/dal/fsm_test.go | 9 +++---- backend/controller/sql/querier.go | 2 ++ backend/controller/sql/queries.sql | 38 +++++++++++++++++++++++++++ backend/controller/sql/queries.sql.go | 33 +++++++++++++++++++++++ cmd/ftl-controller/main.go | 4 +-- 7 files changed, 83 insertions(+), 16 deletions(-) diff --git a/backend/controller/controller.go b/backend/controller/controller.go index 79f6e98137..79efac10a4 100644 --- a/backend/controller/controller.go +++ b/backend/controller/controller.go @@ -39,7 +39,6 @@ import ( "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/ingress" "github.com/TBD54566975/ftl/backend/controller/leases" - leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/pubsub" "github.com/TBD54566975/ftl/backend/controller/scaling" "github.com/TBD54566975/ftl/backend/controller/scaling/localscaling" @@ -223,7 +222,7 @@ func New(ctx context.Context, pool *pgxpool.Pool, config Config, runnerScaling s } svc := &Service{ - tasks: scheduledtask.New(ctx, key, leasesdal.New(pool)), + tasks: scheduledtask.New(ctx, key, db), pool: pool, dal: db, key: key, 
@@ -788,7 +787,7 @@ func (s *Service) AcquireLease(ctx context.Context, stream *connect.BidiStream[f return connect.NewError(connect.CodeInternal, fmt.Errorf("could not receive lease request: %w", err)) } if lease == nil { - lease, _, err = leasesdal.New(s.pool).AcquireLease(ctx, leases.ModuleKey(msg.Module, msg.Key...), msg.Ttl.AsDuration(), optional.None[any]()) + lease, _, err = s.dal.AcquireLease(ctx, leases.ModuleKey(msg.Module, msg.Key...), msg.Ttl.AsDuration(), optional.None[any]()) if err != nil { if errors.Is(err, leases.ErrConflict) { return connect.NewError(connect.CodeResourceExhausted, fmt.Errorf("lease is held: %w", err)) @@ -1403,7 +1402,7 @@ func (s *Service) onAsyncFSMCallCompletion(ctx context.Context, tx *dal.Tx, orig } func (s *Service) expireStaleLeases(ctx context.Context) (time.Duration, error) { - err := leasesdal.New(s.pool).ExpireLeases(ctx) + err := s.dal.ExpireLeases(ctx) if err != nil { return 0, fmt.Errorf("failed to expire leases: %w", err) } diff --git a/backend/controller/dal/fsm.go b/backend/controller/dal/fsm.go index cf87fdf1bd..a88f2c42ec 100644 --- a/backend/controller/dal/fsm.go +++ b/backend/controller/dal/fsm.go @@ -8,10 +8,8 @@ import ( "time" "github.com/alecthomas/types/optional" - "github.com/jackc/pgx/v5/pgxpool" "github.com/TBD54566975/ftl/backend/controller/leases" - leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/backend/schema" @@ -99,8 +97,8 @@ type FSMInstance struct { // AcquireFSMInstance returns an FSM instance, also acquiring a lease on it. // // The lease must be released by the caller. -func (d *DAL) AcquireFSMInstance(ctx context.Context, fsm schema.RefKey, instanceKey string, pool *pgxpool.Pool) (*FSMInstance, error) { - lease, _, err := leasesdal.New(pool).AcquireLease(ctx, leases.SystemKey("fsm_instance", fsm.String(), instanceKey), time.Second*5, optional.None[any]()) +func (d *DAL) AcquireFSMInstance(ctx context.Context, fsm schema.RefKey, instanceKey string) (*FSMInstance, error) { + lease, _, err := d.AcquireLease(ctx, leases.SystemKey("fsm_instance", fsm.String(), instanceKey), time.Second*5, optional.None[any]()) if err != nil { return nil, fmt.Errorf("failed to acquire FSM lease: %w", err) } diff --git a/backend/controller/dal/fsm_test.go b/backend/controller/dal/fsm_test.go index c744f06865..68b2622ff4 100644 --- a/backend/controller/dal/fsm_test.go +++ b/backend/controller/dal/fsm_test.go @@ -8,7 +8,6 @@ import ( "github.com/alecthomas/assert/v2" "github.com/alecthomas/types/either" - leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/sql/sqltest" dalerrs "github.com/TBD54566975/ftl/backend/dal" "github.com/TBD54566975/ftl/backend/schema" @@ -21,7 +20,7 @@ func TestSendFSMEvent(t *testing.T) { dal, err := New(ctx, conn) assert.NoError(t, err) - _, err = dal.AcquireAsyncCall(ctx, conn) + _, err = dal.AcquireAsyncCall(ctx) assert.IsError(t, err, dalerrs.ErrNotFound) ref := schema.RefKey{Module: "module", Name: "verb"} @@ -32,7 +31,7 @@ func TestSendFSMEvent(t *testing.T) { assert.IsError(t, err, dalerrs.ErrConflict) assert.EqualError(t, err, "transition already executing: conflict") - call, err := dal.AcquireAsyncCall(ctx, conn) + call, err := dal.AcquireAsyncCall(ctx) assert.NoError(t, err) t.Cleanup(func() { err := call.Lease.Release() @@ -49,12 +48,12 @@ func TestSendFSMEvent(t *testing.T) { }, Request: 
[]byte(`{}`), } - assert.Equal(t, expectedCall, call, assert.Exclude[*leasesdal.Lease](), assert.Exclude[time.Time]()) + assert.Equal(t, expectedCall, call, assert.Exclude[*Lease](), assert.Exclude[time.Time]()) err = dal.CompleteAsyncCall(ctx, call, either.LeftOf[string]([]byte(`{}`)), func(tx *Tx) error { return nil }) assert.NoError(t, err) actual, err := dal.LoadAsyncCall(ctx, call.ID) assert.NoError(t, err) - assert.Equal(t, call, actual, assert.Exclude[*leasesdal.Lease](), assert.Exclude[time.Time]()) + assert.Equal(t, call, actual, assert.Exclude[*Lease](), assert.Exclude[time.Time]()) } diff --git a/backend/controller/sql/querier.go b/backend/controller/sql/querier.go index 280de07264..02b479b287 100644 --- a/backend/controller/sql/querier.go +++ b/backend/controller/sql/querier.go @@ -32,6 +32,7 @@ type Querier interface { DeleteSubscribers(ctx context.Context, deployment model.DeploymentKey) error DeleteSubscriptions(ctx context.Context, deployment model.DeploymentKey) error DeregisterRunner(ctx context.Context, key model.RunnerKey) (int64, error) + ExpireLeases(ctx context.Context) (int64, error) ExpireRunnerReservations(ctx context.Context) (int64, error) FailAsyncCall(ctx context.Context, error string, iD int64) (bool, error) FailAsyncCallWithRetry(ctx context.Context, arg FailAsyncCallWithRetryParams) (bool, error) @@ -60,6 +61,7 @@ type Querier interface { GetIdleRunners(ctx context.Context, labels []byte, limit int64) ([]Runner, error) // Get the runner endpoints corresponding to the given ingress route. GetIngressRoutes(ctx context.Context, method string) ([]GetIngressRoutesRow, error) + GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) GetModulesByID(ctx context.Context, ids []int64) ([]Module, error) GetNextEventForSubscription(ctx context.Context, consumptionDelay time.Duration, topic model.TopicKey, cursor optional.Option[model.TopicEventKey]) (GetNextEventForSubscriptionRow, error) GetProcessList(ctx context.Context) ([]GetProcessListRow, error) diff --git a/backend/controller/sql/queries.sql b/backend/controller/sql/queries.sql index bef9ebd0a8..3fa72ce9c3 100644 --- a/backend/controller/sql/queries.sql +++ b/backend/controller/sql/queries.sql @@ -402,6 +402,44 @@ INSERT INTO events (deployment_id, request_id, type, VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id; +-- name: NewLease :one +INSERT INTO leases ( + idempotency_key, + key, + expires_at, + metadata +) +VALUES ( + gen_random_uuid(), + @key::lease_key, + (NOW() AT TIME ZONE 'utc') + @ttl::interval, + sqlc.narg('metadata')::JSONB +) +RETURNING idempotency_key; + +-- name: RenewLease :one +UPDATE leases +SET expires_at = (NOW() AT TIME ZONE 'utc') + @ttl::interval +WHERE idempotency_key = @idempotency_key AND key = @key::lease_key +RETURNING true; + +-- name: ReleaseLease :one +DELETE FROM leases +WHERE idempotency_key = @idempotency_key AND key = @key::lease_key +RETURNING true; + +-- name: ExpireLeases :one +WITH expired AS ( + DELETE FROM leases + WHERE expires_at < NOW() AT TIME ZONE 'utc' + RETURNING 1 +) +SELECT COUNT(*) +FROM expired; + +-- name: GetLeaseInfo :one +SELECT expires_at, metadata FROM leases WHERE key = @key::lease_key; + -- name: CreateAsyncCall :one INSERT INTO async_calls (verb, origin, request, remaining_attempts, backoff, max_backoff) VALUES (@verb, @origin, @request, @remaining_attempts, @backoff::interval, @max_backoff::interval) diff --git a/backend/controller/sql/queries.sql.go b/backend/controller/sql/queries.sql.go index a54e972291..7457b817d5 100644 
--- a/backend/controller/sql/queries.sql.go +++ b/backend/controller/sql/queries.sql.go @@ -302,6 +302,23 @@ func (q *Queries) DeregisterRunner(ctx context.Context, key model.RunnerKey) (in return count, err } +const expireLeases = `-- name: ExpireLeases :one +WITH expired AS ( + DELETE FROM leases + WHERE expires_at < NOW() AT TIME ZONE 'utc' + RETURNING 1 +) +SELECT COUNT(*) +FROM expired +` + +func (q *Queries) ExpireLeases(ctx context.Context) (int64, error) { + row := q.db.QueryRow(ctx, expireLeases) + var count int64 + err := row.Scan(&count) + return count, err +} + const expireRunnerReservations = `-- name: ExpireRunnerReservations :one WITH rows AS ( UPDATE runners @@ -1061,6 +1078,22 @@ func (q *Queries) GetIngressRoutes(ctx context.Context, method string) ([]GetIng return items, nil } +const getLeaseInfo = `-- name: GetLeaseInfo :one +SELECT expires_at, metadata FROM leases WHERE key = $1::lease_key +` + +type GetLeaseInfoRow struct { + ExpiresAt time.Time + Metadata []byte +} + +func (q *Queries) GetLeaseInfo(ctx context.Context, key leases.Key) (GetLeaseInfoRow, error) { + row := q.db.QueryRow(ctx, getLeaseInfo, key) + var i GetLeaseInfoRow + err := row.Scan(&i.ExpiresAt, &i.Metadata) + return i, err +} + const getModulesByID = `-- name: GetModulesByID :many SELECT id, language, name FROM modules diff --git a/cmd/ftl-controller/main.go b/cmd/ftl-controller/main.go index 00260e19ab..1af8897d20 100644 --- a/cmd/ftl-controller/main.go +++ b/cmd/ftl-controller/main.go @@ -15,7 +15,6 @@ import ( "github.com/TBD54566975/ftl" "github.com/TBD54566975/ftl/backend/controller" "github.com/TBD54566975/ftl/backend/controller/dal" - leasesdal "github.com/TBD54566975/ftl/backend/controller/leases/dal" "github.com/TBD54566975/ftl/backend/controller/scaling" cf "github.com/TBD54566975/ftl/common/configuration" cfdal "github.com/TBD54566975/ftl/common/configuration/dal" @@ -49,7 +48,6 @@ func main() { // The FTL controller currently only supports DB as a configuration provider/resolver. conn, err := pgxpool.New(ctx, cli.ControllerConfig.DSN) kctx.FatalIfErrorf(err) - ldal := leasesdal.New(conn) dal, err := dal.New(ctx, conn) kctx.FatalIfErrorf(err) @@ -65,7 +63,7 @@ func main() { // The FTL controller currently only supports AWS Secrets Manager as a secrets provider. 
awsConfig, err := config.LoadDefaultConfig(ctx) kctx.FatalIfErrorf(err) - secretsResolver := cf.NewASM(ctx, secretsmanager.NewFromConfig(awsConfig), cli.ControllerConfig.Advertise, ldal) + secretsResolver := cf.NewASM(ctx, secretsmanager.NewFromConfig(awsConfig), cli.ControllerConfig.Advertise, dal) secretsProviders := []cf.Provider[cf.Secrets]{secretsResolver} sm, err := cf.New[cf.Secrets](ctx, secretsResolver, secretsProviders) kctx.FatalIfErrorf(err) From 3998b7ccd9a4a4f8e7b63610eb2c4c2e8cca6bc5 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Mon, 1 Jul 2024 20:49:44 -0400 Subject: [PATCH 05/13] tests and lint --- backend/controller/controller.go | 6 +- backend/controller/dal/lease.go | 24 ++++++ backend/controller/dal/lease_test.go | 106 +++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 3 deletions(-) create mode 100644 backend/controller/dal/lease_test.go diff --git a/backend/controller/controller.go b/backend/controller/controller.go index 79efac10a4..fcbcd8d8f3 100644 --- a/backend/controller/controller.go +++ b/backend/controller/controller.go @@ -825,7 +825,7 @@ func (s *Service) SendFSMEvent(ctx context.Context, req *connect.Request[ftlv1.S } defer tx.CommitOrRollback(ctx, &err) - instance, err := tx.AcquireFSMInstance(ctx, fsmKey, msg.Instance, s.pool) + instance, err := tx.AcquireFSMInstance(ctx, fsmKey, msg.Instance) if err != nil { return nil, connect.NewError(connect.CodeFailedPrecondition, fmt.Errorf("could not acquire fsm instance: %w", err)) } @@ -1286,7 +1286,7 @@ func (s *Service) executeAsyncCalls(ctx context.Context) (time.Duration, error) logger := log.FromContext(ctx) logger.Tracef("Acquiring async call") - call, err := s.dal.AcquireAsyncCall(ctx, s.pool) + call, err := s.dal.AcquireAsyncCall(ctx) if errors.Is(err, dalerrs.ErrNotFound) { logger.Tracef("No async calls to execute") return time.Second * 2, nil @@ -1357,7 +1357,7 @@ func (s *Service) executeAsyncCalls(ctx context.Context) (time.Duration, error) func (s *Service) onAsyncFSMCallCompletion(ctx context.Context, tx *dal.Tx, origin dal.AsyncOriginFSM, failed bool) error { logger := log.FromContext(ctx).Scope(origin.FSM.String()) - instance, err := tx.AcquireFSMInstance(ctx, origin.FSM, origin.Key, s.pool) + instance, err := tx.AcquireFSMInstance(ctx, origin.FSM, origin.Key) if err != nil { return fmt.Errorf("could not acquire lock on FSM instance: %w", err) } diff --git a/backend/controller/dal/lease.go b/backend/controller/dal/lease.go index 19b1a7146c..d29625764c 100644 --- a/backend/controller/dal/lease.go +++ b/backend/controller/dal/lease.go @@ -119,3 +119,27 @@ func (d *DAL) newLease(ctx context.Context, key leases.Key, idempotencyKey uuid. go lease.renew(ctx, cancelCtx) return lease, ctx } + +// GetLeaseInfo returns the metadata and expiry time for the lease with the given key. +// +// metadata should be a pointer to the type that metadata should be unmarshaled into. +func (d *DAL) GetLeaseInfo(ctx context.Context, key leases.Key, metadata any) (expiry time.Time, err error) { + l, err := d.db.GetLeaseInfo(ctx, key) + if err != nil { + return expiry, dalerrs.TranslatePGError(err) + } + if err := json.Unmarshal(l.Metadata, metadata); err != nil { + return expiry, fmt.Errorf("could not unmarshal lease metadata: %w", err) + } + return l.ExpiresAt, nil +} + +// ExpireLeases expires (deletes) all leases that have expired. +func (d *DAL) ExpireLeases(ctx context.Context) error { + count, err := d.db.ExpireLeases(ctx) + // TODO: Return and log the actual lease keys? 
+ if count > 0 { + log.FromContext(ctx).Warnf("Expired %d leases", count) + } + return dalerrs.TranslatePGError(err) +} diff --git a/backend/controller/dal/lease_test.go b/backend/controller/dal/lease_test.go new file mode 100644 index 0000000000..72782a45a9 --- /dev/null +++ b/backend/controller/dal/lease_test.go @@ -0,0 +1,106 @@ +package dal + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/alecthomas/assert/v2" + "github.com/alecthomas/types/optional" + "github.com/google/uuid" + + "github.com/TBD54566975/ftl/backend/controller/leases" + "github.com/TBD54566975/ftl/backend/controller/sql" + "github.com/TBD54566975/ftl/backend/controller/sql/sqltest" + dalerrs "github.com/TBD54566975/ftl/backend/dal" + "github.com/TBD54566975/ftl/internal/log" +) + +func leaseExists(t *testing.T, conn sql.ConnI, idempotencyKey uuid.UUID, key leases.Key) bool { + t.Helper() + var count int + err := dalerrs.TranslatePGError(conn. + QueryRow(context.Background(), "SELECT COUNT(*) FROM leases WHERE idempotency_key = $1 AND key = $2", idempotencyKey, key). + Scan(&count)) + if errors.Is(err, dalerrs.ErrNotFound) { + return false + } + assert.NoError(t, err) + return count > 0 +} + +func TestLease(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx := log.ContextWithNewDefaultLogger(context.Background()) + conn := sqltest.OpenForTesting(ctx, t) + dal, err := New(ctx, conn) + assert.NoError(t, err) + + // TTL is too short, expect an error + _, _, err = dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*1, optional.None[any]()) + assert.Error(t, err) + + leasei, leaseCtx, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) + assert.NoError(t, err) + + lease := leasei.(*Lease) //nolint:forcetypeassert + + // Try to acquire the same lease again, which should fail. + _, _, err = dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) + assert.IsError(t, err, leases.ErrConflict) + + time.Sleep(time.Second * 6) + + assert.True(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) + + err = lease.Release() + assert.NoError(t, err) + + assert.False(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) + + time.Sleep(time.Second) + assert.Error(t, leaseCtx.Err(), "context should be cancelled after lease was released") +} + +func TestExpireLeases(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + ctx := log.ContextWithNewDefaultLogger(context.Background()) + conn := sqltest.OpenForTesting(ctx, t) + dal, err := New(ctx, conn) + assert.NoError(t, err) + + leasei, _, err := dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) + assert.NoError(t, err) + + lease := leasei.(*Lease) //nolint:forcetypeassert + + err = dal.ExpireLeases(ctx) + assert.NoError(t, err) + + assert.True(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) + + // Pretend that the lease expired. 
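
For context on the relocated lease code, the calls exercised by this test follow the usual acquire/release shape. A minimal usage sketch under stated assumptions: the function name, key name, and TTL below are illustrative only and not taken from the patch, and the imports are those already used by the lease code in backend/controller/dal.

    // Sketch only: acquire a lease, hold it while doing work, then release it.
    func holdLease(ctx context.Context, d *dal.DAL) error {
        // AcquireLease returns the lease, a context that is cancelled once the
        // lease is released, and an error (leases.ErrConflict if another
        // controller already holds the key).
        lease, leaseCtx, err := d.AcquireLease(ctx, leases.SystemKey("example"), 10*time.Second, optional.None[any]())
        if err != nil {
            return fmt.Errorf("could not acquire lease: %w", err)
        }
        // Releasing deletes the lease row and cancels leaseCtx.
        defer func() { _ = lease.Release() }()

        // Work guarded by the lease goes here; pass leaseCtx to it so the work
        // observes cancellation when the lease goes away.
        _ = leaseCtx
        return nil
    }
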
+ lease.leak = true + err = lease.Release() + assert.NoError(t, err) + + assert.True(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) + + time.Sleep(time.Second * 6) + + err = dal.ExpireLeases(ctx) + assert.NoError(t, err) + + assert.False(t, leaseExists(t, conn, lease.idempotencyKey, lease.key)) + + leasei, _, err = dal.AcquireLease(ctx, leases.SystemKey("test"), time.Second*5, optional.None[any]()) + assert.NoError(t, err) + + err = leasei.Release() + assert.NoError(t, err) +} From d4f1b7f51989ef9ef96d4c6d46349b190fe07579 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Mon, 1 Jul 2024 22:01:15 -0400 Subject: [PATCH 06/13] convert to psql func --- .../cronjobs/shared/sql/queries.sql | 10 --------- backend/controller/cronjobs/sql/queries.sql | 10 +++++++++ .../controller/cronjobs/sql/queries.sql.go | 17 +++++++-------- backend/controller/sql/queries.sql | 10 +++++++++ backend/controller/sql/queries.sql.go | 17 +++++++-------- backend/controller/sql/schema/001_init.sql | 21 +++++++++++++++++++ sqlc.yaml | 8 ++----- 7 files changed, 59 insertions(+), 34 deletions(-) delete mode 100644 backend/controller/cronjobs/shared/sql/queries.sql diff --git a/backend/controller/cronjobs/shared/sql/queries.sql b/backend/controller/cronjobs/shared/sql/queries.sql deleted file mode 100644 index 882e305be9..0000000000 --- a/backend/controller/cronjobs/shared/sql/queries.sql +++ /dev/null @@ -1,10 +0,0 @@ --- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - sqlc.arg('key')::cron_job_key, - (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), - sqlc.arg('module_name')::TEXT, - sqlc.arg('verb')::TEXT, - sqlc.arg('schedule')::TEXT, - sqlc.arg('start_time')::TIMESTAMPTZ, - sqlc.arg('next_execution')::TIMESTAMPTZ); diff --git a/backend/controller/cronjobs/sql/queries.sql b/backend/controller/cronjobs/sql/queries.sql index c99d193b6d..a1009eddb8 100644 --- a/backend/controller/cronjobs/sql/queries.sql +++ b/backend/controller/cronjobs/sql/queries.sql @@ -1,3 +1,13 @@ +-- name: CreateCronJob :exec +SELECT create_cron_job( + sqlc.arg('key')::cron_job_key, + sqlc.arg('deployment_key')::deployment_key, + sqlc.arg('module_name')::TEXT, + sqlc.arg('verb')::TEXT, + sqlc.arg('schedule')::TEXT, + sqlc.arg('start_time')::TIMESTAMPTZ, + sqlc.arg('next_execution')::TIMESTAMPTZ); + -- name: GetCronJobs :many SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state FROM cron_jobs j diff --git a/backend/controller/cronjobs/sql/queries.sql.go b/backend/controller/cronjobs/sql/queries.sql.go index 5199dc158a..641bbe393a 100644 --- a/backend/controller/cronjobs/sql/queries.sql.go +++ b/backend/controller/cronjobs/sql/queries.sql.go @@ -13,15 +13,14 @@ import ( ) const createCronJob = `-- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - $1::cron_job_key, - (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), - $3::TEXT, - $4::TEXT, - $5::TEXT, - $6::TIMESTAMPTZ, - $7::TIMESTAMPTZ) +SELECT create_cron_job( + $1::cron_job_key, + $2::deployment_key, + $3::TEXT, + $4::TEXT, + $5::TEXT, + $6::TIMESTAMPTZ, + $7::TIMESTAMPTZ) ` type CreateCronJobParams struct { diff --git a/backend/controller/sql/queries.sql b/backend/controller/sql/queries.sql index 3fa72ce9c3..9a28a76d38 100644 --- a/backend/controller/sql/queries.sql +++ 
b/backend/controller/sql/queries.sql @@ -738,3 +738,13 @@ UPDATE topic_subscriptions SET state = 'idle' WHERE name = @name::TEXT AND module_id = (SELECT id FROM module); + +-- name: CreateCronJob :exec +SELECT create_cron_job( + sqlc.arg('key')::cron_job_key, + sqlc.arg('deployment_key')::deployment_key, + sqlc.arg('module_name')::TEXT, + sqlc.arg('verb')::TEXT, + sqlc.arg('schedule')::TEXT, + sqlc.arg('start_time')::TIMESTAMPTZ, + sqlc.arg('next_execution')::TIMESTAMPTZ); diff --git a/backend/controller/sql/queries.sql.go b/backend/controller/sql/queries.sql.go index 7457b817d5..d3e991e378 100644 --- a/backend/controller/sql/queries.sql.go +++ b/backend/controller/sql/queries.sql.go @@ -178,15 +178,14 @@ func (q *Queries) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams } const createCronJob = `-- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - $1::cron_job_key, - (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), - $3::TEXT, - $4::TEXT, - $5::TEXT, - $6::TIMESTAMPTZ, - $7::TIMESTAMPTZ) +SELECT create_cron_job( + $1::cron_job_key, + $2::deployment_key, + $3::TEXT, + $4::TEXT, + $5::TEXT, + $6::TIMESTAMPTZ, + $7::TIMESTAMPTZ) ` type CreateCronJobParams struct { diff --git a/backend/controller/sql/schema/001_init.sql b/backend/controller/sql/schema/001_init.sql index 68b34021b2..117988af4f 100644 --- a/backend/controller/sql/schema/001_init.sql +++ b/backend/controller/sql/schema/001_init.sql @@ -263,6 +263,27 @@ CREATE TABLE cron_jobs CREATE INDEX cron_jobs_executing_start_time_idx ON cron_jobs (start_time) WHERE state = 'executing'; CREATE UNIQUE INDEX cron_jobs_key_idx ON cron_jobs (key); +CREATE OR REPLACE FUNCTION create_cron_job( + k cron_job_key, + d_key deployment_key, + m_name TEXT, + v TEXT, + sch TEXT, + s_time TIMESTAMPTZ, + n_e TIMESTAMPTZ) RETURNS VOID AS $$ +BEGIN + INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + k, + (SELECT id FROM deployments WHERE key = d_key LIMIT 1), + m_name, + v, + sch, + s_time, + n_e); +END; +$$ LANGUAGE plpgsql; + CREATE TYPE event_type AS ENUM ( 'call', 'log', diff --git a/sqlc.yaml b/sqlc.yaml index 5b64e313de..56272e7813 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -2,9 +2,7 @@ version: "2" sql: - &daldir engine: "postgresql" - queries: - - "backend/controller/sql/queries.sql" - - "backend/controller/cronjobs/shared/sql/queries.sql" + queries: "backend/controller/sql/queries.sql" schema: "backend/controller/sql/schema" database: uri: postgres://localhost:15432/ftl?sslmode=disable&user=postgres&password=secret @@ -141,9 +139,7 @@ sql: # - postgresql-query-too-costly - postgresql-no-seq-scan - <<: *daldir - queries: - - "backend/controller/cronjobs/sql/queries.sql" - - "backend/controller/cronjobs/shared/sql/queries.sql" + queries: "backend/controller/cronjobs/sql/queries.sql" gen: go: <<: *gengo From 8a982433dbcee266c5a31ab1263954502759125f Mon Sep 17 00:00:00 2001 From: Denise Li Date: Tue, 2 Jul 2024 16:17:25 -0400 Subject: [PATCH 07/13] revert to shared dirs --- .../cronjobs/shared/sql/queries.sql | 10 +++++++++ backend/controller/cronjobs/sql/queries.sql | 10 --------- .../controller/cronjobs/sql/queries.sql.go | 17 ++++++++------- backend/controller/sql/queries.sql | 10 --------- backend/controller/sql/queries.sql.go | 17 ++++++++------- backend/controller/sql/schema/001_init.sql | 21 ------------------- sqlc.yaml | 8 +++++-- 7 files changed, 34 
insertions(+), 59 deletions(-) create mode 100644 backend/controller/cronjobs/shared/sql/queries.sql diff --git a/backend/controller/cronjobs/shared/sql/queries.sql b/backend/controller/cronjobs/shared/sql/queries.sql new file mode 100644 index 0000000000..882e305be9 --- /dev/null +++ b/backend/controller/cronjobs/shared/sql/queries.sql @@ -0,0 +1,10 @@ +-- name: CreateCronJob :exec +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + sqlc.arg('key')::cron_job_key, + (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), + sqlc.arg('module_name')::TEXT, + sqlc.arg('verb')::TEXT, + sqlc.arg('schedule')::TEXT, + sqlc.arg('start_time')::TIMESTAMPTZ, + sqlc.arg('next_execution')::TIMESTAMPTZ); diff --git a/backend/controller/cronjobs/sql/queries.sql b/backend/controller/cronjobs/sql/queries.sql index a1009eddb8..c99d193b6d 100644 --- a/backend/controller/cronjobs/sql/queries.sql +++ b/backend/controller/cronjobs/sql/queries.sql @@ -1,13 +1,3 @@ --- name: CreateCronJob :exec -SELECT create_cron_job( - sqlc.arg('key')::cron_job_key, - sqlc.arg('deployment_key')::deployment_key, - sqlc.arg('module_name')::TEXT, - sqlc.arg('verb')::TEXT, - sqlc.arg('schedule')::TEXT, - sqlc.arg('start_time')::TIMESTAMPTZ, - sqlc.arg('next_execution')::TIMESTAMPTZ); - -- name: GetCronJobs :many SELECT j.key as key, d.key as deployment_key, j.module_name as module, j.verb, j.schedule, j.start_time, j.next_execution, j.state FROM cron_jobs j diff --git a/backend/controller/cronjobs/sql/queries.sql.go b/backend/controller/cronjobs/sql/queries.sql.go index 641bbe393a..5199dc158a 100644 --- a/backend/controller/cronjobs/sql/queries.sql.go +++ b/backend/controller/cronjobs/sql/queries.sql.go @@ -13,14 +13,15 @@ import ( ) const createCronJob = `-- name: CreateCronJob :exec -SELECT create_cron_job( - $1::cron_job_key, - $2::deployment_key, - $3::TEXT, - $4::TEXT, - $5::TEXT, - $6::TIMESTAMPTZ, - $7::TIMESTAMPTZ) +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + $1::cron_job_key, + (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), + $3::TEXT, + $4::TEXT, + $5::TEXT, + $6::TIMESTAMPTZ, + $7::TIMESTAMPTZ) ` type CreateCronJobParams struct { diff --git a/backend/controller/sql/queries.sql b/backend/controller/sql/queries.sql index 9a28a76d38..3fa72ce9c3 100644 --- a/backend/controller/sql/queries.sql +++ b/backend/controller/sql/queries.sql @@ -738,13 +738,3 @@ UPDATE topic_subscriptions SET state = 'idle' WHERE name = @name::TEXT AND module_id = (SELECT id FROM module); - --- name: CreateCronJob :exec -SELECT create_cron_job( - sqlc.arg('key')::cron_job_key, - sqlc.arg('deployment_key')::deployment_key, - sqlc.arg('module_name')::TEXT, - sqlc.arg('verb')::TEXT, - sqlc.arg('schedule')::TEXT, - sqlc.arg('start_time')::TIMESTAMPTZ, - sqlc.arg('next_execution')::TIMESTAMPTZ); diff --git a/backend/controller/sql/queries.sql.go b/backend/controller/sql/queries.sql.go index d3e991e378..7457b817d5 100644 --- a/backend/controller/sql/queries.sql.go +++ b/backend/controller/sql/queries.sql.go @@ -178,14 +178,15 @@ func (q *Queries) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams } const createCronJob = `-- name: CreateCronJob :exec -SELECT create_cron_job( - $1::cron_job_key, - $2::deployment_key, - $3::TEXT, - $4::TEXT, - $5::TEXT, - $6::TIMESTAMPTZ, - $7::TIMESTAMPTZ) +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, 
start_time, next_execution) + VALUES ( + $1::cron_job_key, + (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), + $3::TEXT, + $4::TEXT, + $5::TEXT, + $6::TIMESTAMPTZ, + $7::TIMESTAMPTZ) ` type CreateCronJobParams struct { diff --git a/backend/controller/sql/schema/001_init.sql b/backend/controller/sql/schema/001_init.sql index 117988af4f..68b34021b2 100644 --- a/backend/controller/sql/schema/001_init.sql +++ b/backend/controller/sql/schema/001_init.sql @@ -263,27 +263,6 @@ CREATE TABLE cron_jobs CREATE INDEX cron_jobs_executing_start_time_idx ON cron_jobs (start_time) WHERE state = 'executing'; CREATE UNIQUE INDEX cron_jobs_key_idx ON cron_jobs (key); -CREATE OR REPLACE FUNCTION create_cron_job( - k cron_job_key, - d_key deployment_key, - m_name TEXT, - v TEXT, - sch TEXT, - s_time TIMESTAMPTZ, - n_e TIMESTAMPTZ) RETURNS VOID AS $$ -BEGIN - INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - k, - (SELECT id FROM deployments WHERE key = d_key LIMIT 1), - m_name, - v, - sch, - s_time, - n_e); -END; -$$ LANGUAGE plpgsql; - CREATE TYPE event_type AS ENUM ( 'call', 'log', diff --git a/sqlc.yaml b/sqlc.yaml index 56272e7813..5b64e313de 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -2,7 +2,9 @@ version: "2" sql: - &daldir engine: "postgresql" - queries: "backend/controller/sql/queries.sql" + queries: + - "backend/controller/sql/queries.sql" + - "backend/controller/cronjobs/shared/sql/queries.sql" schema: "backend/controller/sql/schema" database: uri: postgres://localhost:15432/ftl?sslmode=disable&user=postgres&password=secret @@ -139,7 +141,9 @@ sql: # - postgresql-query-too-costly - postgresql-no-seq-scan - <<: *daldir - queries: "backend/controller/cronjobs/sql/queries.sql" + queries: + - "backend/controller/cronjobs/sql/queries.sql" + - "backend/controller/cronjobs/shared/sql/queries.sql" gen: go: <<: *gengo From b598dc8bf6d61cb182dc6076ba3c407d7b9ea46f Mon Sep 17 00:00:00 2001 From: Denise Li Date: Wed, 3 Jul 2024 15:35:59 -0400 Subject: [PATCH 08/13] remove shared queries because no longer needed, refactor txns --- .../cronjobs/shared/sql/queries.sql | 10 ------ backend/controller/cronjobs/sql/queries.sql | 11 ++++++ backend/controller/dal/dal.go | 24 +++++++------ backend/controller/sql/conn.go | 2 ++ backend/controller/sql/querier.go | 1 - backend/controller/sql/queries.sql.go | 35 ------------------- sqlc.yaml | 8 ++--- 7 files changed, 29 insertions(+), 62 deletions(-) delete mode 100644 backend/controller/cronjobs/shared/sql/queries.sql diff --git a/backend/controller/cronjobs/shared/sql/queries.sql b/backend/controller/cronjobs/shared/sql/queries.sql deleted file mode 100644 index 882e305be9..0000000000 --- a/backend/controller/cronjobs/shared/sql/queries.sql +++ /dev/null @@ -1,10 +0,0 @@ --- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - sqlc.arg('key')::cron_job_key, - (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), - sqlc.arg('module_name')::TEXT, - sqlc.arg('verb')::TEXT, - sqlc.arg('schedule')::TEXT, - sqlc.arg('start_time')::TIMESTAMPTZ, - sqlc.arg('next_execution')::TIMESTAMPTZ); diff --git a/backend/controller/cronjobs/sql/queries.sql b/backend/controller/cronjobs/sql/queries.sql index c99d193b6d..b16589ff2c 100644 --- a/backend/controller/cronjobs/sql/queries.sql +++ b/backend/controller/cronjobs/sql/queries.sql @@ -4,6 +4,17 @@ FROM cron_jobs j INNER JOIN 
deployments d on j.deployment_id = d.id WHERE d.min_replicas > 0; +-- name: CreateCronJob :exec +INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) + VALUES ( + sqlc.arg('key')::cron_job_key, + (SELECT id FROM deployments WHERE key = sqlc.arg('deployment_key')::deployment_key LIMIT 1), + sqlc.arg('module_name')::TEXT, + sqlc.arg('verb')::TEXT, + sqlc.arg('schedule')::TEXT, + sqlc.arg('start_time')::TIMESTAMPTZ, + sqlc.arg('next_execution')::TIMESTAMPTZ); + -- name: StartCronJobs :many WITH updates AS ( UPDATE cron_jobs diff --git a/backend/controller/dal/dal.go b/backend/controller/dal/dal.go index fdea7be20b..2380133466 100644 --- a/backend/controller/dal/dal.go +++ b/backend/controller/dal/dal.go @@ -16,6 +16,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "google.golang.org/protobuf/proto" + cronjobssql "github.com/TBD54566975/ftl/backend/controller/cronjobs/sql" "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1" @@ -439,7 +440,7 @@ type IngressRoutingEntry struct { func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchema *schema.Module, artefacts []DeploymentArtefact, ingressRoutes []IngressRoutingEntry, cronJobs []model.CronJob) (key model.DeploymentKey, err error) { logger := log.FromContext(ctx) - // Start the transaction + // Start the parent transaction tx, err := d.db.Begin(ctx) if err != nil { return model.DeploymentKey{}, fmt.Errorf("could not start transaction: %w", err) @@ -447,7 +448,9 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem defer tx.CommitOrRollback(ctx, &err) - existingDeployment, err := d.checkForExistingDeployments(ctx, tx, moduleSchema, artefacts) + qtx := sql.New(d.db.Conn()).WithTx(tx.Tx()) + + existingDeployment, err := d.checkForExistingDeployments(ctx, qtx, moduleSchema, artefacts) if err != nil { return model.DeploymentKey{}, err } else if !existingDeployment.IsZero() { @@ -465,7 +468,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } // TODO(aat): "schema" containing language? 
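The CreateDeployment hunks in this patch stop issuing queries through the Tx wrapper directly and instead build a sqlc-generated Queries value pinned to the open transaction (qtx := sql.New(d.db.Conn()).WithTx(tx.Tx()), via the Tx() accessor added to conn.go further down). A minimal, self-contained sketch of that shape follows; DBTX, New and WithTx roughly mirror what sqlc emits for pgx, while Queries, CreateItem and the items table are hand-written stand-ins for illustration only, not FTL's generated code:

    package txsketch

    import (
        "context"

        "github.com/jackc/pgx/v5"
        "github.com/jackc/pgx/v5/pgconn"
        "github.com/jackc/pgx/v5/pgxpool"
    )

    // DBTX is (roughly) the interface sqlc generates for pgx: both *pgxpool.Pool
    // and pgx.Tx satisfy it, so the same query methods run with or without a
    // transaction.
    type DBTX interface {
        Exec(ctx context.Context, sql string, args ...any) (pgconn.CommandTag, error)
        Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error)
        QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
    }

    // Queries stands in for a sqlc-generated query set.
    type Queries struct{ db DBTX }

    func New(db DBTX) *Queries { return &Queries{db: db} }

    // WithTx rebinds the query set to a specific transaction.
    func (q *Queries) WithTx(tx pgx.Tx) *Queries { return &Queries{db: tx} }

    // CreateItem is a stand-in for a generated :exec query against a
    // hypothetical items table.
    func (q *Queries) CreateItem(ctx context.Context, name string) error {
        _, err := q.db.Exec(ctx, `INSERT INTO items (name) VALUES ($1)`, name)
        return err
    }

    // createTwoItemsAtomically mirrors the refactor above: begin one pgx
    // transaction, bind the queries to it with WithTx, and let a single
    // commit/rollback cover every statement.
    func createTwoItemsAtomically(ctx context.Context, pool *pgxpool.Pool) (err error) {
        tx, err := pool.Begin(ctx)
        if err != nil {
            return err
        }
        defer func() {
            if err != nil {
                _ = tx.Rollback(ctx)
                return
            }
            err = tx.Commit(ctx)
        }()

        qtx := New(pool).WithTx(tx) // every call on qtx now runs inside tx
        if err = qtx.CreateItem(ctx, "first"); err != nil {
            return err
        }
        return qtx.CreateItem(ctx, "second")
    }

Because both *pgxpool.Pool and pgx.Tx satisfy DBTX, the same generated methods work with or without a transaction; WithTx only swaps the underlying handle.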
- _, err = tx.UpsertModule(ctx, language, moduleSchema.Name) + _, err = qtx.UpsertModule(ctx, language, moduleSchema.Name) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to upsert module: %w", dalerrs.TranslatePGError(err)) } @@ -476,7 +479,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem if !ok { continue } - err := tx.UpsertTopic(ctx, sql.UpsertTopicParams{ + err := qtx.UpsertTopic(ctx, sql.UpsertTopicParams{ Topic: model.NewTopicKey(moduleSchema.Name, t.Name), Module: moduleSchema.Name, Name: t.Name, @@ -490,13 +493,13 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem deploymentKey := model.NewDeploymentKey(moduleSchema.Name) // Create the deployment - err = tx.CreateDeployment(ctx, moduleSchema.Name, schemaBytes, deploymentKey) + err = qtx.CreateDeployment(ctx, moduleSchema.Name, schemaBytes, deploymentKey) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to create deployment: %w", dalerrs.TranslatePGError(err)) } uploadedDigests := slices.Map(artefacts, func(in DeploymentArtefact) []byte { return in.Digest[:] }) - artefactDigests, err := tx.GetArtefactDigests(ctx, uploadedDigests) + artefactDigests, err := qtx.GetArtefactDigests(ctx, uploadedDigests) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to get artefact digests: %w", err) } @@ -508,7 +511,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem // Associate the artefacts with the deployment for _, row := range artefactDigests { artefact := artefactsByDigest[sha256.FromBytes(row.Digest)] - err = tx.AssociateArtefactWithDeployment(ctx, sql.AssociateArtefactWithDeploymentParams{ + err = qtx.AssociateArtefactWithDeployment(ctx, sql.AssociateArtefactWithDeploymentParams{ Key: deploymentKey, ArtefactID: row.ID, Executable: artefact.Executable, @@ -520,7 +523,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } for _, ingressRoute := range ingressRoutes { - err = tx.CreateIngressRoute(ctx, sql.CreateIngressRouteParams{ + err = qtx.CreateIngressRoute(ctx, sql.CreateIngressRouteParams{ Key: deploymentKey, Method: ingressRoute.Method, Path: ingressRoute.Path, @@ -532,10 +535,11 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } } + cronjobsqtx := cronjobssql.New(d.db.Conn()).WithTx(tx.Tx()) for _, job := range cronJobs { // Start time must be calculated by the caller rather than generated by db // This ensures that nextExecution is after start time, otherwise the job will never be triggered - err := tx.CreateCronJob(ctx, sql.CreateCronJobParams{ + err := cronjobsqtx.CreateCronJob(ctx, cronjobssql.CreateCronJobParams{ Key: job.Key, DeploymentKey: deploymentKey, ModuleName: job.Verb.Module, @@ -1111,7 +1115,7 @@ func (d *DAL) GetActiveRunners(ctx context.Context) ([]Runner, error) { } // Check if a deployment exists that exactly matches the given artefacts and schema. 
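The CreateCronJob loop above insists that start_time be computed by the caller so that next_execution can never land before it. A small illustration of that invariant, using github.com/robfig/cron/v3 purely as an example parser (an assumption for this sketch; it is not necessarily the schedule parser FTL uses):

    package cronschedule

    import (
        "fmt"
        "time"

        "github.com/robfig/cron/v3"
    )

    // nextAfterStart computes the first execution strictly after the chosen
    // start time, so that next_execution can never precede start_time when the
    // caller writes both values in the same INSERT.
    func nextAfterStart(schedule string, start time.Time) (time.Time, error) {
        sched, err := cron.ParseStandard(schedule) // five-field cron, e.g. "*/10 * * * *"
        if err != nil {
            return time.Time{}, fmt.Errorf("invalid schedule %q: %w", schedule, err)
        }
        return sched.Next(start), nil
    }

Computing both timestamps in Go before the INSERT, as the comment in the hunk requires, keeps the ordering guarantee independent of the database clock.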
-func (*DAL) checkForExistingDeployments(ctx context.Context, tx *sql.Tx, moduleSchema *schema.Module, artefacts []DeploymentArtefact) (model.DeploymentKey, error) { +func (*DAL) checkForExistingDeployments(ctx context.Context, tx *sql.Queries, moduleSchema *schema.Module, artefacts []DeploymentArtefact) (model.DeploymentKey, error) { schemaBytes, err := schema.ModuleToBytes(moduleSchema) if err != nil { return model.DeploymentKey{}, fmt.Errorf("failed to marshal schema: %w", err) diff --git a/backend/controller/sql/conn.go b/backend/controller/sql/conn.go index d55ff2f20c..3b69110ec9 100644 --- a/backend/controller/sql/conn.go +++ b/backend/controller/sql/conn.go @@ -46,6 +46,8 @@ type Tx struct { func (t *Tx) Conn() ConnI { return t.tx } +func (t *Tx) Tx() pgx.Tx { return t.tx } + func (t *Tx) Begin(ctx context.Context) (*Tx, error) { savepoint := fmt.Sprintf("savepoint_%d", len(t.savepoints)) t.savepoints = append(t.savepoints, savepoint) diff --git a/backend/controller/sql/querier.go b/backend/controller/sql/querier.go index 02b479b287..b8a825dc83 100644 --- a/backend/controller/sql/querier.go +++ b/backend/controller/sql/querier.go @@ -25,7 +25,6 @@ type Querier interface { // Create a new artefact and return the artefact ID. CreateArtefact(ctx context.Context, digest []byte, content []byte) (int64, error) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams) (int64, error) - CreateCronJob(ctx context.Context, arg CreateCronJobParams) error CreateDeployment(ctx context.Context, moduleName string, schema []byte, key model.DeploymentKey) error CreateIngressRoute(ctx context.Context, arg CreateIngressRouteParams) error CreateRequest(ctx context.Context, origin Origin, key model.RequestKey, sourceAddr string) error diff --git a/backend/controller/sql/queries.sql.go b/backend/controller/sql/queries.sql.go index 7457b817d5..8e67b084ce 100644 --- a/backend/controller/sql/queries.sql.go +++ b/backend/controller/sql/queries.sql.go @@ -177,41 +177,6 @@ func (q *Queries) CreateAsyncCall(ctx context.Context, arg CreateAsyncCallParams return id, err } -const createCronJob = `-- name: CreateCronJob :exec -INSERT INTO cron_jobs (key, deployment_id, module_name, verb, schedule, start_time, next_execution) - VALUES ( - $1::cron_job_key, - (SELECT id FROM deployments WHERE key = $2::deployment_key LIMIT 1), - $3::TEXT, - $4::TEXT, - $5::TEXT, - $6::TIMESTAMPTZ, - $7::TIMESTAMPTZ) -` - -type CreateCronJobParams struct { - Key model.CronJobKey - DeploymentKey model.DeploymentKey - ModuleName string - Verb string - Schedule string - StartTime time.Time - NextExecution time.Time -} - -func (q *Queries) CreateCronJob(ctx context.Context, arg CreateCronJobParams) error { - _, err := q.db.Exec(ctx, createCronJob, - arg.Key, - arg.DeploymentKey, - arg.ModuleName, - arg.Verb, - arg.Schedule, - arg.StartTime, - arg.NextExecution, - ) - return err -} - const createDeployment = `-- name: CreateDeployment :exec INSERT INTO deployments (module_id, "schema", "key") VALUES ((SELECT id FROM modules WHERE name = $1::TEXT LIMIT 1), $2::BYTEA, $3::deployment_key) diff --git a/sqlc.yaml b/sqlc.yaml index 5b64e313de..56272e7813 100644 --- a/sqlc.yaml +++ b/sqlc.yaml @@ -2,9 +2,7 @@ version: "2" sql: - &daldir engine: "postgresql" - queries: - - "backend/controller/sql/queries.sql" - - "backend/controller/cronjobs/shared/sql/queries.sql" + queries: "backend/controller/sql/queries.sql" schema: "backend/controller/sql/schema" database: uri: postgres://localhost:15432/ftl?sslmode=disable&user=postgres&password=secret 
@@ -141,9 +139,7 @@ sql: # - postgresql-query-too-costly - postgresql-no-seq-scan - <<: *daldir - queries: - - "backend/controller/cronjobs/sql/queries.sql" - - "backend/controller/cronjobs/shared/sql/queries.sql" + queries: "backend/controller/cronjobs/sql/queries.sql" gen: go: <<: *gengo From 8d21fc75815b45c8952d22628769a0eea4556734 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Wed, 3 Jul 2024 15:37:35 -0400 Subject: [PATCH 09/13] clean justfile --- Justfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Justfile b/Justfile index 1f499c4c2c..36d70b9adb 100644 --- a/Justfile +++ b/Justfile @@ -66,7 +66,7 @@ init-db: # Regenerate SQLC code (requires init-db to be run first) build-sqlc: - @mk backend/controller/sql/{db.go,models.go,querier.go,queries.sql.go} backend/controller/{cronjobs}/sql/{db.go,models.go,querier.go,queries.sql.go} backend/controller/{cronjobs}/shared/sql/{db.go,models.go,querier.go,queries.sql.go} common/configuration/sql/{db.go,models.go,querier.go,queries.sql.go} : backend/controller/sql/queries.sql backend/controller/{cronjobs}/sql/queries.sql backend/controller/{cronjobs}/shared/sql/queries.sql common/configuration/sql/queries.sql backend/controller/sql/schema sqlc.yaml -- "just init-db && sqlc generate" + @mk backend/controller/sql/{db.go,models.go,querier.go,queries.sql.go} backend/controller/{cronjobs}/sql/{db.go,models.go,querier.go,queries.sql.go} common/configuration/sql/{db.go,models.go,querier.go,queries.sql.go} : backend/controller/sql/queries.sql backend/controller/{cronjobs}/sql/queries.sql common/configuration/sql/queries.sql backend/controller/sql/schema sqlc.yaml -- "just init-db && sqlc generate" # Build the ZIP files that are embedded in the FTL release binaries build-zips: build-kt-runtime From 9fd05f144bb0f06a066845f80f623e4e65e69e20 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Wed, 3 Jul 2024 15:57:31 -0400 Subject: [PATCH 10/13] final comments --- backend/controller/controller.go | 5 ++--- backend/controller/cronjobs/cronjobs.go | 5 +++-- cmd/ftl-controller/main.go | 2 +- cmd/ftl/cmd_box_run.go | 12 +----------- cmd/ftl/cmd_serve.go | 12 +----------- 5 files changed, 8 insertions(+), 28 deletions(-) diff --git a/backend/controller/controller.go b/backend/controller/controller.go index fcbcd8d8f3..9e5c30bbca 100644 --- a/backend/controller/controller.go +++ b/backend/controller/controller.go @@ -35,7 +35,6 @@ import ( "github.com/TBD54566975/ftl" "github.com/TBD54566975/ftl/backend/controller/admin" "github.com/TBD54566975/ftl/backend/controller/cronjobs" - cronjobsdal "github.com/TBD54566975/ftl/backend/controller/cronjobs/dal" "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/ingress" "github.com/TBD54566975/ftl/backend/controller/leases" @@ -99,7 +98,7 @@ func (c *Config) SetDefaults() { } // Start the Controller. Blocks until the context is cancelled. 
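Patch 10 here removes the injected *dal.DAL: the callers in cmd/ftl and cmd/ftl-controller stop building a DAL themselves, and cronjobs.New (changed just below) takes the *pgxpool.Pool directly while NewForTesting continues to accept a DAL so tests can inject one. A minimal sketch of that constructor pairing, with stand-in types in place of FTL's real Service, DAL and configuration:

    package ctorsketch

    import "github.com/jackc/pgx/v5/pgxpool"

    // Stand-ins: the real Service, DAL and constructor arguments carry much
    // more state than shown here.
    type DAL struct{ pool *pgxpool.Pool }

    func newDAL(pool *pgxpool.Pool) *DAL { return &DAL{pool: pool} }

    type Service struct{ dal *DAL }

    // New is the production constructor: it derives its DAL from the shared
    // pool, so callers only hand over the pool.
    func New(pool *pgxpool.Pool) *Service {
        return NewForTesting(newDAL(pool))
    }

    // NewForTesting keeps the DAL injectable, so tests can pass a fake or a
    // DAL bound to a throwaway database without constructing a pool here.
    func NewForTesting(dal *DAL) *Service {
        return &Service{dal: dal}
    }

The production path then has exactly one way to assemble its dependencies, and the test seam stays at the DAL boundary rather than at the connection string.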
-func Start(ctx context.Context, config Config, runnerScaling scaling.RunnerScaling, dal *dal.DAL) error { +func Start(ctx context.Context, config Config, runnerScaling scaling.RunnerScaling) error { config.SetDefaults() logger := log.FromContext(ctx) @@ -235,7 +234,7 @@ func New(ctx context.Context, pool *pgxpool.Pool, config Config, runnerScaling s svc.routes.Store(map[string][]dal.Route{}) svc.schema.Store(&schema.Schema{}) - cronSvc := cronjobs.New(ctx, key, svc.config.Advertise.Host, cronjobs.Config{Timeout: config.CronJobTimeout}, cronjobsdal.New(pool), svc.tasks, svc.callWithRequest) + cronSvc := cronjobs.New(ctx, key, svc.config.Advertise.Host, cronjobs.Config{Timeout: config.CronJobTimeout}, pool, svc.tasks, svc.callWithRequest) svc.cronJobs = cronSvc svc.controllerListListeners = append(svc.controllerListListeners, cronSvc) diff --git a/backend/controller/cronjobs/cronjobs.go b/backend/controller/cronjobs/cronjobs.go index 941e933497..0bea7afba7 100644 --- a/backend/controller/cronjobs/cronjobs.go +++ b/backend/controller/cronjobs/cronjobs.go @@ -12,6 +12,7 @@ import ( "github.com/alecthomas/types/optional" "github.com/alecthomas/types/pubsub" "github.com/benbjohnson/clock" + "github.com/jackc/pgx/v5/pgxpool" "github.com/jpillora/backoff" "github.com/serialx/hashring" @@ -95,8 +96,8 @@ type Service struct { hashRingState atomic.Value[*hashRingState] } -func New(ctx context.Context, key model.ControllerKey, requestSource string, config Config, dal DAL, scheduler Scheduler, call ExecuteCallFunc) *Service { - return NewForTesting(ctx, key, requestSource, config, dal, scheduler, call, clock.New()) +func New(ctx context.Context, key model.ControllerKey, requestSource string, config Config, pool *pgxpool.Pool, scheduler Scheduler, call ExecuteCallFunc) *Service { + return NewForTesting(ctx, key, requestSource, config, dal.New(pool), scheduler, call, clock.New()) } func NewForTesting(ctx context.Context, key model.ControllerKey, requestSource string, config Config, dal DAL, scheduler Scheduler, call ExecuteCallFunc, clock clock.Clock) *Service { diff --git a/cmd/ftl-controller/main.go b/cmd/ftl-controller/main.go index 1af8897d20..3ed7b271ac 100644 --- a/cmd/ftl-controller/main.go +++ b/cmd/ftl-controller/main.go @@ -69,6 +69,6 @@ func main() { kctx.FatalIfErrorf(err) ctx = cf.ContextWithSecrets(ctx, sm) - err = controller.Start(ctx, cli.ControllerConfig, scaling.NewK8sScaling(), dal) + err = controller.Start(ctx, cli.ControllerConfig, scaling.NewK8sScaling()) kctx.FatalIfErrorf(err) } diff --git a/cmd/ftl/cmd_box_run.go b/cmd/ftl/cmd_box_run.go index 0f9cb08b52..8b57c88824 100644 --- a/cmd/ftl/cmd_box_run.go +++ b/cmd/ftl/cmd_box_run.go @@ -11,9 +11,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/TBD54566975/ftl/backend/controller" - "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/scaling/localscaling" - "github.com/TBD54566975/ftl/backend/controller/sql/databasetesting" "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1/ftlv1connect" "github.com/TBD54566975/ftl/backend/schema" "github.com/TBD54566975/ftl/buildengine" @@ -34,14 +32,6 @@ type boxRunCmd struct { } func (b *boxRunCmd) Run(ctx context.Context) error { - conn, err := databasetesting.CreateForDevel(ctx, b.DSN, b.Recreate) - if err != nil { - return fmt.Errorf("failed to create database: %w", err) - } - dal, err := dal.New(ctx, conn) - if err != nil { - return fmt.Errorf("failed to create DAL: %w", err) - } config := controller.Config{ Bind: b.Bind, 
IngressBind: b.IngressBind, @@ -63,7 +53,7 @@ func (b *boxRunCmd) Run(ctx context.Context) error { } wg := errgroup.Group{} wg.Go(func() error { - return controller.Start(ctx, config, runnerScaling, dal) + return controller.Start(ctx, config, runnerScaling) }) // Wait for the controller to come up. diff --git a/cmd/ftl/cmd_serve.go b/cmd/ftl/cmd_serve.go index 10a7a10442..9ed29f0d2d 100644 --- a/cmd/ftl/cmd_serve.go +++ b/cmd/ftl/cmd_serve.go @@ -16,11 +16,9 @@ import ( "connectrpc.com/connect" "github.com/alecthomas/kong" "github.com/alecthomas/types/optional" - "github.com/jackc/pgx/v5/pgxpool" "golang.org/x/sync/errgroup" "github.com/TBD54566975/ftl/backend/controller" - "github.com/TBD54566975/ftl/backend/controller/dal" "github.com/TBD54566975/ftl/backend/controller/scaling/localscaling" "github.com/TBD54566975/ftl/backend/controller/sql/databasetesting" ftlv1 "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1" @@ -89,14 +87,6 @@ func (s *serveCmd) run(ctx context.Context, projConfig projectconfig.Config, ini if err != nil { return err } - conn, err := pgxpool.New(ctx, dsn) - if err != nil { - return err - } - dal, err := dal.New(ctx, conn) - if err != nil { - return err - } wg, ctx := errgroup.WithContext(ctx) @@ -133,7 +123,7 @@ func (s *serveCmd) run(ctx context.Context, projConfig projectconfig.Config, ini controllerCtx := log.ContextWithLogger(ctx, logger.Scope(scope)) wg.Go(func() error { - if err := controller.Start(controllerCtx, config, runnerScaling, dal); err != nil { + if err := controller.Start(controllerCtx, config, runnerScaling); err != nil { return fmt.Errorf("controller%d failed: %w", i, err) } return nil From 600388b379bb3e1ef2fbe672f9b6b4a82421d9a2 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Wed, 3 Jul 2024 16:11:02 -0400 Subject: [PATCH 11/13] transact through dal instead of sql --- backend/controller/cronjobs/dal/dal.go | 5 +++++ backend/controller/dal/dal.go | 10 ++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/backend/controller/cronjobs/dal/dal.go b/backend/controller/cronjobs/dal/dal.go index 9408465204..f2834efa76 100644 --- a/backend/controller/cronjobs/dal/dal.go +++ b/backend/controller/cronjobs/dal/dal.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "github.com/TBD54566975/ftl/backend/controller/cronjobs/sql" @@ -23,6 +24,10 @@ func New(pool *pgxpool.Pool) *DAL { return &DAL{db: sql.NewDB(pool)} } +func NewQTx(pool sql.ConnI, tx pgx.Tx) *sql.Queries { + return sql.New(pool).WithTx(tx) +} + func cronJobFromRow(row sql.GetCronJobsRow) model.CronJob { return model.CronJob{ Key: row.Key, diff --git a/backend/controller/dal/dal.go b/backend/controller/dal/dal.go index 2380133466..e1a12ee840 100644 --- a/backend/controller/dal/dal.go +++ b/backend/controller/dal/dal.go @@ -13,9 +13,11 @@ import ( "github.com/alecthomas/types/optional" "github.com/alecthomas/types/pubsub" sets "github.com/deckarep/golang-set/v2" + "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "google.golang.org/protobuf/proto" + cronjobsdal "github.com/TBD54566975/ftl/backend/controller/cronjobs/dal" cronjobssql "github.com/TBD54566975/ftl/backend/controller/cronjobs/sql" "github.com/TBD54566975/ftl/backend/controller/sql" dalerrs "github.com/TBD54566975/ftl/backend/dal" @@ -222,6 +224,10 @@ func New(ctx context.Context, pool *pgxpool.Pool) (*DAL, error) { return dal, nil } +func NewQTx(pool sql.ConnI, tx pgx.Tx) *sql.Queries { + return sql.New(pool).WithTx(tx) +} + type DAL struct { db 
sql.DBI @@ -448,7 +454,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem defer tx.CommitOrRollback(ctx, &err) - qtx := sql.New(d.db.Conn()).WithTx(tx.Tx()) + qtx := NewQTx(d.db.Conn(), tx.Tx()) existingDeployment, err := d.checkForExistingDeployments(ctx, qtx, moduleSchema, artefacts) if err != nil { @@ -535,7 +541,7 @@ func (d *DAL) CreateDeployment(ctx context.Context, language string, moduleSchem } } - cronjobsqtx := cronjobssql.New(d.db.Conn()).WithTx(tx.Tx()) + cronjobsqtx := cronjobsdal.NewQTx(d.db.Conn(), tx.Tx()) for _, job := range cronJobs { // Start time must be calculated by the caller rather than generated by db // This ensures that nextExecution is after start time, otherwise the job will never be triggered From 02987682674822b2ad17b88bfde06f8a00123997 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Wed, 3 Jul 2024 16:55:18 -0400 Subject: [PATCH 12/13] fix TestBox --- cmd/ftl/cmd_box_run.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/ftl/cmd_box_run.go b/cmd/ftl/cmd_box_run.go index 8b57c88824..1fe9d41d54 100644 --- a/cmd/ftl/cmd_box_run.go +++ b/cmd/ftl/cmd_box_run.go @@ -12,6 +12,7 @@ import ( "github.com/TBD54566975/ftl/backend/controller" "github.com/TBD54566975/ftl/backend/controller/scaling/localscaling" + "github.com/TBD54566975/ftl/backend/controller/sql/databasetesting" "github.com/TBD54566975/ftl/backend/protos/xyz/block/ftl/v1/ftlv1connect" "github.com/TBD54566975/ftl/backend/schema" "github.com/TBD54566975/ftl/buildengine" @@ -32,6 +33,14 @@ type boxRunCmd struct { } func (b *boxRunCmd) Run(ctx context.Context) error { + _, err := databasetesting.CreateForDevel(ctx, b.DSN, b.Recreate) + if err != nil { + return fmt.Errorf("failed to create database: %w", err) + } + //_, err = dal.New(ctx, conn) + /*if err != nil { + return fmt.Errorf("failed to create DAL: %w", err) + }*/ config := controller.Config{ Bind: b.Bind, IngressBind: b.IngressBind, From 79ad6c0a69247d3ccfb7586eadced451073ef113 Mon Sep 17 00:00:00 2001 From: Denise Li Date: Wed, 3 Jul 2024 16:55:51 -0400 Subject: [PATCH 13/13] rm comment --- cmd/ftl/cmd_box_run.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cmd/ftl/cmd_box_run.go b/cmd/ftl/cmd_box_run.go index 1fe9d41d54..88ff19c507 100644 --- a/cmd/ftl/cmd_box_run.go +++ b/cmd/ftl/cmd_box_run.go @@ -37,10 +37,6 @@ func (b *boxRunCmd) Run(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to create database: %w", err) } - //_, err = dal.New(ctx, conn) - /*if err != nil { - return fmt.Errorf("failed to create DAL: %w", err) - }*/ config := controller.Config{ Bind: b.Bind, IngressBind: b.IngressBind,
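Patch 11 above adds the NewQTx helpers so that callers obtain transaction-bound query handles through the DAL packages rather than constructing them from the generated sql packages directly. As a sketch of how that composes, the following hypothetical method is not part of the patch series; it assumes it would live in backend/controller/dal next to CreateDeployment and reuse that file's imports. It binds the controller and cron-job query sets to one pgx transaction so writes to both commit or roll back together:

    // Hypothetical helper (illustration only): run fn with controller and
    // cron-job query sets bound to the same transaction.
    func (d *DAL) inBothQuerySets(ctx context.Context, fn func(q *sql.Queries, cq *cronjobssql.Queries) error) (err error) {
        tx, err := d.db.Begin(ctx)
        if err != nil {
            return fmt.Errorf("could not start transaction: %w", err)
        }
        defer tx.CommitOrRollback(ctx, &err)

        q := NewQTx(d.db.Conn(), tx.Tx())              // controller queries on this tx
        cq := cronjobsdal.NewQTx(d.db.Conn(), tx.Tx()) // cron-job queries on the same tx
        return fn(q, cq)
    }

CreateDeployment in patch 11 does this inline; a helper of this shape would only earn its keep if more multi-package transactions appear.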