From 241d9198585e3b75926bacbce81cbb086d694481 Mon Sep 17 00:00:00 2001
From: sanderegg <35365065+sanderegg@users.noreply.github.com>
Date: Tue, 27 Jun 2023 17:28:42 +0200
Subject: [PATCH 1/3] remove services_limitations

---
 ...2b5c2466605_remove_services_limitations.py | 97 ++++++++++++++++++
 .../models/services_limitations.py            | 98 -------------------
 2 files changed, 97 insertions(+), 98 deletions(-)
 create mode 100644 packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py
 delete mode 100644 packages/postgres-database/src/simcore_postgres_database/models/services_limitations.py

diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py
new file mode 100644
index 00000000000..f1392536e7c
--- /dev/null
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py
@@ -0,0 +1,97 @@
+"""remove services_limitations
+
+Revision ID: 52b5c2466605
+Revises: 38fe651b4196
+Create Date: 2023-06-27 15:24:13.207340+00:00
+
+"""
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "52b5c2466605"
+down_revision = "38fe651b4196"
+branch_labels = None
+depends_on = None
+
+
+modified_timestamp_trigger = sa.DDL(
+    """
+DROP TRIGGER IF EXISTS trigger_auto_update on services_limitations;
+CREATE TRIGGER trigger_auto_update
+BEFORE INSERT OR UPDATE ON services_limitations
+FOR EACH ROW EXECUTE PROCEDURE services_limitations_auto_update_modified();
+    """
+)
+
+update_modified_timestamp_procedure = sa.DDL(
+    """
+CREATE OR REPLACE FUNCTION services_limitations_auto_update_modified()
+RETURNS TRIGGER AS $$
+BEGIN
+    NEW.modified := current_timestamp;
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+    """
+)
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index("idx_unique_gid_cluster_id_null", table_name="services_limitations")
+    op.drop_table("services_limitations")
+    # ### end Alembic commands ###
+    # custom
+    op.execute(f"DROP FUNCTION services_limitations_auto_update_modified();")
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table(
+        "services_limitations",
+        sa.Column("gid", sa.BIGINT(), autoincrement=False, nullable=False),
+        sa.Column("cluster_id", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("ram", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("cpu", sa.NUMERIC(), autoincrement=False, nullable=True),
+        sa.Column("vram", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("gpu", sa.INTEGER(), autoincrement=False, nullable=True),
+        sa.Column(
+            "created",
+            postgresql.TIMESTAMP(timezone=True),
+            server_default=sa.text("now()"),
+            autoincrement=False,
+            nullable=False,
+        ),
+        sa.Column(
+            "modified",
+            postgresql.TIMESTAMP(timezone=True),
+            server_default=sa.text("now()"),
+            autoincrement=False,
+            nullable=False,
+        ),
+        sa.ForeignKeyConstraint(
+            ["cluster_id"],
+            ["clusters.id"],
+            name="fk_services_limitations_to_clusters_id",
+            onupdate="CASCADE",
+            ondelete="CASCADE",
+        ),
+        sa.ForeignKeyConstraint(
+            ["gid"],
+            ["groups.gid"],
+            name="fk_services_limitations_to_groups_gid",
+            onupdate="CASCADE",
+            ondelete="CASCADE",
+        ),
+        sa.UniqueConstraint("gid", "cluster_id", name="gid_cluster_id_uniqueness"),
+    )
+    op.create_index(
+        "idx_unique_gid_cluster_id_null", "services_limitations", ["gid"], unique=False
+    )
+    # ### end Alembic commands ###
+
+    # custom
+    op.execute(update_modified_timestamp_procedure)
+    op.execute(modified_timestamp_trigger)
diff --git a/packages/postgres-database/src/simcore_postgres_database/models/services_limitations.py b/packages/postgres-database/src/simcore_postgres_database/models/services_limitations.py
deleted file mode 100644
index b06d9a858ac..00000000000
--- a/packages/postgres-database/src/simcore_postgres_database/models/services_limitations.py
+++ /dev/null
@@ -1,98 +0,0 @@
-""" Services limitations table
-
-    - Maximum allowable resources (RAM, CPU, VRAM, GPU) per group
-    - Limitations can be scoped to a cluster; otherwise they apply to the default one
-"""
-
-
-from typing import Final
-
-import sqlalchemy as sa
-
-from ._common import (
-    column_created_datetime,
-    column_modified_datetime,
-    register_modified_datetime_auto_update_trigger,
-)
-from .base import metadata
-from .clusters import clusters
-from .groups import groups
-
-_TABLE_NAME = "services_limitations"
-USE_DEFAULTS: Final = None
-UNLIMITED: Final[int] = -999
-
-services_limitations = sa.Table(
-    _TABLE_NAME,
-    metadata,
-    sa.Column(
-        "gid",
-        sa.BigInteger,
-        sa.ForeignKey(
-            groups.c.gid,
-            onupdate="CASCADE",
-            ondelete="CASCADE",
-            name=f"fk_{_TABLE_NAME}_to_groups_gid",
-        ),
-        nullable=False,
-        doc="Group unique ID",
-    ),
-    sa.Column(
-        "cluster_id",
-        sa.BigInteger(),
-        sa.ForeignKey(
-            clusters.c.id,
-            name=f"fk_{_TABLE_NAME}_to_clusters_id",
-            onupdate="CASCADE",
-            ondelete="CASCADE",
-        ),
-        nullable=True,
-        doc="The cluster id with which these limitations are associated; if NULL or 0, the default cluster is used",
-    ),
-    sa.Column(
-        "ram",
-        sa.BigInteger,
-        nullable=True,
-        doc="defines this group's maximum allowable RAM used per service "
-        "(None means use defaults, <0 means no limits)",
-    ),
-    sa.Column(
-        "cpu",
-        sa.Numeric,
-        nullable=True,
-        doc="defines this group's maximum allowable CPUs used per service "
-        "(None means use defaults, <0 means no limits)",
-    ),
-    sa.Column(
-        "vram",
-        sa.BigInteger,
-        nullable=True,
-        doc="defines this group's maximum allowable VRAM used per service "
-        "(None means use defaults, <0 means no limits)",
-    ),
-    sa.Column(
-        "gpu",
-        sa.Integer,
-        nullable=True,
-        doc="defines this group's maximum allowable GPUs used per service "
-        "(None means use defaults, <0 means no limits)",
-    ),
-    # TIME STAMPS ----
-    column_created_datetime(timezone=True),
-    column_modified_datetime(timezone=True),
-    sa.UniqueConstraint(
-        "gid",
-        "cluster_id",
-        name="gid_cluster_id_uniqueness",
-    ),
-    # prevents having multiple entries with NULL cluster (postgres < 15 treats NULL as always different)
-    sa.Index(
-        "idx_unique_gid_cluster_id_null",
-        "gid",
-        unique=True,
-        postgresql_where=sa.text("cluster_id IS NULL"),
-    ),
-)
-
-
-register_modified_datetime_auto_update_trigger(services_limitations)

From 8edbae70dc04f1dad99a3609390841f0edf9979b Mon Sep 17 00:00:00 2001
From: sanderegg <35365065+sanderegg@users.noreply.github.com>
Date: Thu, 29 Jun 2023 08:18:52 +0200
Subject: [PATCH 2/3] sonar

---
 .../versions/52b5c2466605_remove_services_limitations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py
index f1392536e7c..403f571d0fa 100644
--- a/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py
+++ b/packages/postgres-database/src/simcore_postgres_database/migration/versions/52b5c2466605_remove_services_limitations.py
@@ -44,7 +44,7 @@ def upgrade():
     op.drop_table("services_limitations")
     # ### end Alembic commands ###
     # custom
-    op.execute(f"DROP FUNCTION services_limitations_auto_update_modified();")
+    op.execute("DROP FUNCTION services_limitations_auto_update_modified();")
 
 
 def downgrade():
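
Note on the downgrade path in PATCH 1/3: the auto-generated op.create_index()
call restores idx_unique_gid_cluster_id_null as a plain non-unique index,
whereas the model it recreates declares a partial *unique* index. Without the
partial uniqueness, two rows with the same gid and a NULL cluster_id are both
accepted, since PostgreSQL (before the NULLS NOT DISTINCT option added in
version 15) treats NULLs as pairwise distinct in unique indexes. A minimal
sketch of the spelling the downgrade would need, assuming the same Alembic
operation context (illustrative only, not part of the patches):

    import sqlalchemy as sa
    from alembic import op

    # recreate the index exactly as models/services_limitations.py defined it:
    # unique, but only over rows whose cluster_id IS NULL, so that at most one
    # "default cluster" limitation can exist per group
    op.create_index(
        "idx_unique_gid_cluster_id_null",
        "services_limitations",
        ["gid"],
        unique=True,
        postgresql_where=sa.text("cluster_id IS NULL"),
    )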

From 8f0d57e96473670ca924a4c301d8d8c046d018b8 Mon Sep 17 00:00:00 2001
From: sanderegg <35365065+sanderegg@users.noreply.github.com>
Date: Thu, 29 Jun 2023 09:04:03 +0200
Subject: [PATCH 3/3] remove

---
 .../utils_services_limitations.py            | 155 ---------
 .../tests/test_utils_services_limitations.py | 299 ------------------
 2 files changed, 454 deletions(-)
 delete mode 100644 packages/postgres-database/src/simcore_postgres_database/utils_services_limitations.py
 delete mode 100644 packages/postgres-database/tests/test_utils_services_limitations.py

diff --git a/packages/postgres-database/src/simcore_postgres_database/utils_services_limitations.py b/packages/postgres-database/src/simcore_postgres_database/utils_services_limitations.py
deleted file mode 100644
index e8bfe6efb05..00000000000
--- a/packages/postgres-database/src/simcore_postgres_database/utils_services_limitations.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import datetime
-from dataclasses import asdict, dataclass
-
-import aiopg.sa
-import psycopg2
-import psycopg2.errors
-import sqlalchemy as sa
-from sqlalchemy import literal_column
-
-from .models.groups import GroupType, groups, user_to_groups
-from .models.services_limitations import services_limitations
-
-
-#
-# Errors
-#
-class BaseServicesLimitationsError(Exception):
-    ...
-
-
-class ServiceLimitationsOperationNotAllowed(BaseServicesLimitationsError):
-    ...
-
-
-class ServiceLimitationsOperationNotFound(BaseServicesLimitationsError):
-    ...
-
-
-@dataclass(frozen=True, slots=True, kw_only=True)
-class ServiceLimitationsCreate:
-    gid: int
-    cluster_id: int | None
-    ram: int | None
-    cpu: float | None
-    vram: int | None
-    gpu: int | None
-
-
-@dataclass(frozen=True, slots=True, kw_only=True)
-class ServiceLimitations(ServiceLimitationsCreate):
-    created: datetime.datetime
-    modified: datetime.datetime
-
-
-@dataclass(frozen=True, slots=True, kw_only=True)
-class ServicesLimitationsRepo:
-    user_id: int
-
-    @staticmethod
-    async def create(
-        conn: aiopg.sa.SAConnection, *, new_limits: ServiceLimitationsCreate
-    ) -> ServiceLimitations:
-        try:
-            insert_stmt = (
-                services_limitations.insert()
-                .values(**asdict(new_limits))
-                .returning(literal_column("*"))
-            )
-            result = await conn.execute(insert_stmt)
-            created_entry = await result.first()
-            assert created_entry  # nosec
-            return ServiceLimitations(**dict(created_entry.items()))
-        except psycopg2.errors.UniqueViolation as exc:
-            raise ServiceLimitationsOperationNotAllowed(
-                f"Service limitations for ({new_limits.gid=}, {new_limits.cluster_id=}) already exist"
-            ) from exc
-
-    @staticmethod
-    async def get(
-        conn: aiopg.sa.SAConnection, *, gid: int, cluster_id: int | None
-    ) -> ServiceLimitations:
-        result = await conn.execute(
-            sa.select(services_limitations).where(
-                (services_limitations.c.gid == gid)
-                & (services_limitations.c.cluster_id == cluster_id)
-            )
-        )
-        received_entry = await result.first()
-        if not received_entry:
-            raise ServiceLimitationsOperationNotFound(
-                f"Service limitations for ({gid=}, {cluster_id=}) do not exist"
-            )
-        assert received_entry  # nosec
-        return ServiceLimitations(**dict(received_entry.items()))
-
-    @staticmethod
-    async def update(
-        conn: aiopg.sa.SAConnection, *, gid: int, cluster_id: int | None, **values
-    ) -> ServiceLimitations:
-        update_stmt = (
-            services_limitations.update()
-            .values(**values)
-            .where(
-                (services_limitations.c.gid == gid)
-                & (services_limitations.c.cluster_id == cluster_id)
-            )
-            .returning(literal_column("*"))
-        )
-        result = await conn.execute(update_stmt)
-        updated_entry = await result.first()
-        if not updated_entry:
-            raise ServiceLimitationsOperationNotFound(
-                f"Service limitations for ({gid=}, {cluster_id=}) do not exist"
-            )
-        assert updated_entry  # nosec
-        return ServiceLimitations(**dict(updated_entry.items()))
-
-    @staticmethod
-    async def delete(
-        conn: aiopg.sa.SAConnection, *, gid: int, cluster_id: int | None
-    ) -> None:
-        await conn.execute(
-            sa.delete(services_limitations).where(
-                (services_limitations.c.gid == gid)
-                & (services_limitations.c.cluster_id == cluster_id)
-            )
-        )
-
-    def _join_user_groups_service_limitations(
-        self,
-        cluster_id: int | None,
-    ):
-        j = user_to_groups.join(
-            services_limitations,
-            (user_to_groups.c.uid == self.user_id)
-            & (user_to_groups.c.gid == services_limitations.c.gid)
-            & (services_limitations.c.cluster_id == cluster_id),
-        ).join(groups)
-        return j
-
-    async def list_for_user(
-        self, conn: aiopg.sa.SAConnection, *, cluster_id: int | None
-    ) -> list[ServiceLimitations]:
-        select_stmt = sa.select(services_limitations, groups.c.type).select_from(
-            self._join_user_groups_service_limitations(cluster_id)
-        )
-        group_to_limits: dict[tuple[int, GroupType], ServiceLimitations] = {
-            (
-                row[services_limitations.c.gid],
-                row[groups.c.type],
-            ): ServiceLimitations(**{k: v for k, v in row.items() if k != "type"})
-            async for row in conn.execute(select_stmt)
-        }
-
-        possibly_everyone_limit = []
-        standard_limits = []
-        for (_, group_type), limit in group_to_limits.items():
-            match group_type:
-                case GroupType.STANDARD:
-                    standard_limits.append(limit)
-                case GroupType.EVERYONE:
-                    possibly_everyone_limit.append(limit)
-                case GroupType.PRIMARY:
-                    return [limit]
-        return standard_limits or possibly_everyone_limit
diff --git a/packages/postgres-database/tests/test_utils_services_limitations.py b/packages/postgres-database/tests/test_utils_services_limitations.py
deleted file mode 100644
index e2d2de01a52..00000000000
--- a/packages/postgres-database/tests/test_utils_services_limitations.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# pylint: disable=redefined-outer-name
-# pylint: disable=unused-argument
-# pylint: disable=unused-variable
-# pylint: disable=too-many-arguments
-from dataclasses import fields
-from typing import Awaitable, Callable
-
-import pytest
-from aiopg.sa.connection import SAConnection
-from aiopg.sa.result import RowProxy
-from faker import Faker
-from simcore_postgres_database.models.groups import user_to_groups
-from simcore_postgres_database.models.services_limitations import (
-    UNLIMITED,
-    USE_DEFAULTS,
-)
-from simcore_postgres_database.utils_services_limitations import (
-    ServiceLimitationsCreate,
-    ServiceLimitationsOperationNotAllowed,
-    ServiceLimitationsOperationNotFound,
-    ServicesLimitationsRepo,
-)
-
-
-@pytest.fixture
-def default_service_limitations() -> (
-    Callable[[int, int | None], ServiceLimitationsCreate]
-):
-    def _creator(gid: int, cluster_id: int | None) -> ServiceLimitationsCreate:
-        return ServiceLimitationsCreate(
-            gid=gid,
-            cluster_id=cluster_id,
-            ram=USE_DEFAULTS,
-            cpu=USE_DEFAULTS,
-            vram=USE_DEFAULTS,
-            gpu=USE_DEFAULTS,
-        )
-
-    return _creator
-
-
-@pytest.fixture
-def unlimited_service_limitations() -> (
-    Callable[[int, int | None], ServiceLimitationsCreate]
-):
-    def _creator(gid: int, cluster_id: int | None) -> ServiceLimitationsCreate:
-        return ServiceLimitationsCreate(
-            gid=gid,
-            cluster_id=cluster_id,
-            ram=UNLIMITED,
-            cpu=UNLIMITED,
-            vram=UNLIMITED,
-            gpu=UNLIMITED,
-        )
-
-    return _creator
-
-
-@pytest.fixture
-def random_service_limitations(
-    faker: Faker,
-) -> Callable[[int, int | None], ServiceLimitationsCreate]:
-    def _creator(gid: int, cluster_id: int | None) -> ServiceLimitationsCreate:
-        return ServiceLimitationsCreate(
-            gid=gid,
-            cluster_id=cluster_id,
-            ram=faker.pyint(),
-            cpu=faker.pydecimal(),
-            vram=faker.pyint(),
-            gpu=faker.pyint(),
-        )
-
-    return _creator
-
-
-@pytest.fixture(
-    params=(
-        "default_service_limitations",
-        "unlimited_service_limitations",
-        "random_service_limitations",
-    )
-)
-def service_limitations(
-    request: pytest.FixtureRequest,
-    default_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-    unlimited_service_limitations: Callable[
-        [int, int | None], ServiceLimitationsCreate
-    ],
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-) -> Callable[[int, int | None], ServiceLimitationsCreate]:
-    return {
-        "default_service_limitations": default_service_limitations,
-        "unlimited_service_limitations": unlimited_service_limitations,
-        "random_service_limitations": random_service_limitations,
-    }[request.param]
-
-
-async def test_create_service_limitation(
-    connection: SAConnection,
-    service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    input_limit = service_limitations(1, None)
-    created_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=input_limit
-    )
-    assert created_limit
-    for field in fields(ServiceLimitationsCreate):
-        assert getattr(created_limit, field.name) == getattr(input_limit, field.name)
-    assert created_limit.created == created_limit.modified
-
-
-async def test_multiple_same_group_limitations_on_same_cluster_fail(
-    connection: SAConnection,
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    created_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, None)
-    )
-    assert created_limit
-
-    # doing it again shall raise
-    with pytest.raises(ServiceLimitationsOperationNotAllowed):
-        await ServicesLimitationsRepo.create(
-            connection, new_limits=random_service_limitations(1, None)
-        )
-
-
-async def test_multiple_same_group_limitations_on_different_clusters_succeed(
-    connection: SAConnection,
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-    create_fake_cluster: Callable[..., Awaitable[int]],
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    created_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, None)
-    )
-    assert created_limit
-
-    cluster_id = await create_fake_cluster(owner=1)
-    created_limit_on_other_cluster = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, cluster_id)
-    )
-    assert created_limit_on_other_cluster
-
-
-async def test_multiple_same_group_limitations_on_same_cluster_different_groups_succeed(
-    connection: SAConnection,
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-    create_fake_group: Callable[..., Awaitable[RowProxy]],
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    created_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, None)
-    )
-    assert created_limit
-    group = await create_fake_group(connection)
-    created_limit_for_new_group = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(group.gid, None)
-    )
-    assert created_limit_for_new_group
-
-
-async def test_modified_timestamp_auto_updates_with_changes(
-    connection: SAConnection,
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    created_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, None)
-    )
-    assert created_limit
-    assert created_limit.ram is not None
-    # modify the limit
-    updated_limit = await ServicesLimitationsRepo.update(
-        connection, gid=1, cluster_id=None, ram=created_limit.ram + 25
-    )
-    assert updated_limit
-    assert updated_limit.ram is not None
-    assert created_limit.ram == (updated_limit.ram - 25)
-    assert updated_limit.modified > created_limit.modified
-    assert updated_limit.created == created_limit.created
-
-
-async def test_update_services_limitations_raises_if_not_found(
-    connection: SAConnection,
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    with pytest.raises(ServiceLimitationsOperationNotFound):
-        await ServicesLimitationsRepo.update(connection, gid=1, cluster_id=None, ram=25)
-
-
-async def test_get_services_limitations(
-    connection: SAConnection,
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    created_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, None)
-    )
-    assert created_limit
-
-    received_limit = await ServicesLimitationsRepo.get(
-        connection, gid=1, cluster_id=None
-    )
-    assert received_limit == created_limit
-
-
-async def test_get_services_limitations_raises_if_not_found(
-    connection: SAConnection,
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    with pytest.raises(ServiceLimitationsOperationNotFound):
-        await ServicesLimitationsRepo.get(connection, gid=1, cluster_id=None)
-
-
-async def test_delete_services_limitations(
-    connection: SAConnection,
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-):
-    # NOTE: this test works because the everyone group (gid=1) exists
-    created_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, None)
-    )
-    assert created_limit
-    received_limit = await ServicesLimitationsRepo.get(
-        connection, gid=1, cluster_id=None
-    )
-    assert received_limit == created_limit
-    # now delete and verify
-    await ServicesLimitationsRepo.delete(connection, gid=1, cluster_id=None)
-    with pytest.raises(ServiceLimitationsOperationNotFound):
-        await ServicesLimitationsRepo.get(connection, gid=1, cluster_id=None)
-
-
-async def test_list_service_limitations_for_user(
-    connection: SAConnection,
-    create_fake_user: Callable[..., Awaitable[RowProxy]],
-    create_fake_group: Callable[..., Awaitable[RowProxy]],
-    random_service_limitations: Callable[[int, int | None], ServiceLimitationsCreate],
-):
-    group1 = await create_fake_group(connection)
-    user = await create_fake_user(connection, group1)
-    repo = ServicesLimitationsRepo(user_id=user.id)
-    list_limits = await repo.list_for_user(connection, cluster_id=None)
-    assert list_limits is not None
-    assert len(list_limits) == 0
-
-    # NOTE: this test works because the everyone group (gid=1) exists
-    # here we now have 1 service limitation set for the everyone group
-    everyone_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(1, None)
-    )
-    assert everyone_limit
-    list_limits = await repo.list_for_user(connection, cluster_id=None)
-    assert list_limits is not None
-    assert len(list_limits) == 1
-    assert list_limits[0] == everyone_limit
-
-    # add a limit on the group of the user
-    group_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(group1.gid, None)
-    )
-    assert group_limit
-    list_limits = await repo.list_for_user(connection, cluster_id=None)
-    assert list_limits is not None
-    assert len(list_limits) == 1
-    assert all(limit in list_limits for limit in [group_limit])
-
-    # create a second group, but do not add the user to it yet
-    group2 = await create_fake_group(connection)
-    group2_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(group2.gid, None)
-    )
-    assert group2_limit
-    list_limits = await repo.list_for_user(connection, cluster_id=None)
-    assert list_limits is not None
-    assert len(list_limits) == 1
-    assert all(limit in list_limits for limit in [group_limit])
-
-    # now add the user to it, we should now see 2 groups in the listing
-    await connection.execute(
-        user_to_groups.insert().values(uid=user.id, gid=group2.gid)
-    )
-    list_limits = await repo.list_for_user(connection, cluster_id=None)
-    assert list_limits is not None
-    assert len(list_limits) == 2
-    assert all(limit in list_limits for limit in [group_limit, group2_limit])
-
-    # add a limit on the primary group
-    user_limit = await ServicesLimitationsRepo.create(
-        connection, new_limits=random_service_limitations(user.primary_gid, None)
-    )
-    assert user_limit
-    list_limits = await repo.list_for_user(connection, cluster_id=None)
-    assert list_limits is not None
-    assert len(list_limits) == 1
-    assert all(limit in list_limits for limit in [user_limit])
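
For reference, the group-precedence rule that list_for_user() in PATCH 3/3
implemented (and that the tests above exercise): a limitation attached to the
user's PRIMARY group wins outright, otherwise all STANDARD-group limitations
apply, and the EVERYONE-group limitation is only a fallback. A self-contained
sketch of that rule in isolation; the resolve() helper and the local GroupType
enum are illustrative stand-ins, not part of the removed code:

    from enum import Enum, auto


    class GroupType(Enum):
        # stand-in for simcore_postgres_database.models.groups.GroupType
        PRIMARY = auto()
        STANDARD = auto()
        EVERYONE = auto()


    def resolve(found: list[tuple[GroupType, str]]) -> list[str]:
        # mirrors the match-statement of list_for_user(): the first PRIMARY
        # hit wins, else all STANDARD limits, else whatever EVERYONE defined
        possibly_everyone_limit: list[str] = []
        standard_limits: list[str] = []
        for group_type, limit in found:
            match group_type:
                case GroupType.PRIMARY:
                    return [limit]
                case GroupType.STANDARD:
                    standard_limits.append(limit)
                case GroupType.EVERYONE:
                    possibly_everyone_limit.append(limit)
        return standard_limits or possibly_everyone_limit


    assert resolve([(GroupType.EVERYONE, "e"), (GroupType.STANDARD, "s")]) == ["s"]
    assert resolve([(GroupType.STANDARD, "s"), (GroupType.PRIMARY, "p")]) == ["p"]
    assert resolve([(GroupType.EVERYONE, "e")]) == ["e"]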