diff --git a/.github/actions/build-and-test-branch/action.yml b/.github/actions/build-and-test-branch/action.yml
index fb94574f5d2..b48cec1eaba 100644
--- a/.github/actions/build-and-test-branch/action.yml
+++ b/.github/actions/build-and-test-branch/action.yml
@@ -33,7 +33,7 @@ runs:
     - name: Ensure frontend configuration files exist
       run: |
-        python manage.py check --tag=compatibility
+        python manage.py check
       shell: bash
 
     - name: Install Arches applications
@@ -74,7 +74,7 @@
     - name: Check for missing migrations
      run: |
-        python manage.py makemigrations --check --skip-checks
+        python manage.py makemigrations --check
       shell: bash
 
     - name: Ensure previous Python coverage data is erased
diff --git a/arches/app/const.py b/arches/app/const.py
index e155c61df78..e355a436846 100644
--- a/arches/app/const.py
+++ b/arches/app/const.py
@@ -4,6 +4,7 @@
 IntegrityCheckDescriptions = {
     1005: "Nodes with ontologies found in graphs without ontologies",
     1012: "Node Groups without matching nodes",
+    1014: "Publication missing for language",
 }
 
 
@@ -11,6 +12,7 @@ class IntegrityCheck(Enum):
     NODE_HAS_ONTOLOGY_GRAPH_DOES_NOT = 1005
     NODELESS_NODE_GROUP = 1012
+    PUBLICATION_MISSING_FOR_LANGUAGE = 1014
 
     def __str__(self):
         return IntegrityCheckDescriptions[self.value]
diff --git a/arches/app/etl_modules/branch_excel_importer.py b/arches/app/etl_modules/branch_excel_importer.py
index ca811a22750..7899d0c32d9 100644
--- a/arches/app/etl_modules/branch_excel_importer.py
+++ b/arches/app/etl_modules/branch_excel_importer.py
@@ -205,8 +205,9 @@ def process_worksheet(self, worksheet, cursor, node_lookup, nodegroup_lookup):
                     tile_value_json, passes_validation = self.create_tile_value(
                         cell_values, data_node_lookup, node_lookup, row_details, cursor
                     )
+                    sortorder = 0
                     cursor.execute(
-                        """INSERT INTO load_staging (nodegroupid, legacyid, resourceid, tileid, parenttileid, value, loadid, nodegroup_depth, source_description, passes_validation, operation) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
+                        """INSERT INTO load_staging (nodegroupid, legacyid, resourceid, tileid, parenttileid, value, loadid, nodegroup_depth, source_description, passes_validation, operation, sortorder) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
                         (
                             row_details["nodegroup_id"],
                             legacyid,
@@ -221,6 +222,7 @@ def process_worksheet(self, worksheet, cursor, node_lookup, nodegroup_lookup):
                             ),  # source_description
                             passes_validation,
                             operation,
+                            sortorder,
                         ),
                     )
                 except KeyError:
diff --git a/arches/app/etl_modules/bulk_edit_concept.py b/arches/app/etl_modules/bulk_edit_concept.py
index 8a33701efec..86237a6fffd 100644
--- a/arches/app/etl_modules/bulk_edit_concept.py
+++ b/arches/app/etl_modules/bulk_edit_concept.py
@@ -393,8 +393,8 @@ def stage_data(
         try:
             sql = (
                 """
-                INSERT INTO load_staging (value, tileid, nodegroupid, parenttileid, resourceid, loadid, nodegroup_depth, source_description, operation, passes_validation)
-                (SELECT tiledata, tileid, nodegroupid, parenttileid, resourceinstanceid, %(load_id)s, 0, 'bulk_edit', 'update', true
+                INSERT INTO load_staging (value, tileid, nodegroupid, parenttileid, resourceid, loadid, nodegroup_depth, source_description, operation, passes_validation, sortorder)
+                (SELECT tiledata, tileid, nodegroupid, parenttileid, resourceinstanceid, %(load_id)s, 0, 'bulk_edit', 'update', true, sortorder
                 FROM tiles
                 WHERE nodegroupid in (SELECT nodegroupid FROM nodes WHERE nodeid = %(node_id)s)
                 AND tiledata -> %(node_id)s ? %(old_id)s
diff --git a/arches/app/etl_modules/import_single_csv.py b/arches/app/etl_modules/import_single_csv.py
index d2bdb2ebf24..145358c9fad 100644
--- a/arches/app/etl_modules/import_single_csv.py
+++ b/arches/app/etl_modules/import_single_csv.py
@@ -555,8 +555,9 @@ def populate_staging_table(
                     nodegroup_depth,
                     source_description,
                     operation,
-                    passes_validation
-                ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
+                    passes_validation,
+                    sortorder
+                ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
                 (
                     nodegroup,
                     legacyid,
@@ -568,6 +569,7 @@ def populate_staging_table(
                     csv_file_name,
                     "insert",
                     passes_validation,
+                    0,
                 ),
             )
diff --git a/arches/app/etl_modules/tile_excel_importer.py b/arches/app/etl_modules/tile_excel_importer.py
index a9feb06ef49..a124c6a02f9 100644
--- a/arches/app/etl_modules/tile_excel_importer.py
+++ b/arches/app/etl_modules/tile_excel_importer.py
@@ -81,7 +81,6 @@ def run_load_task_async(self, request):
 
     def create_tile_value(
         self,
-        cell_values,
         data_node_lookup,
         node_lookup,
         nodegroup_alias,
@@ -176,6 +175,7 @@ def process_worksheet(self, worksheet, cursor, node_lookup, nodegroup_lookup):
                 raise ValueError(_("All rows must have a valid resource id"))
 
             node_values = cell_values[3:-3]
+            sortorder = cell_values[-3] if cell_values[-3] else 0
             try:
                 row_count += 1
                 row_details = dict(zip(data_node_lookup[nodegroup_alias], node_values))
@@ -194,7 +194,6 @@ def process_worksheet(self, worksheet, cursor, node_lookup, nodegroup_lookup):
                 )
                 legacyid, resourceid = self.set_legacy_id(resourceid)
                 tile_value_json, passes_validation = self.create_tile_value(
-                    cell_values,
                     data_node_lookup,
                     node_lookup,
                     nodegroup_alias,
@@ -214,7 +213,7 @@ def process_worksheet(self, worksheet, cursor, node_lookup, nodegroup_lookup):
                 if TileModel.objects.filter(pk=tileid).exists():
                     operation = "update"
                 cursor.execute(
-                    """INSERT INTO load_staging (nodegroupid, legacyid, resourceid, tileid, parenttileid, value, loadid, nodegroup_depth, source_description, passes_validation, operation) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
+                    """INSERT INTO load_staging (nodegroupid, legacyid, resourceid, tileid, parenttileid, value, loadid, nodegroup_depth, source_description, passes_validation, operation, sortorder) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
                     (
                         row_details["nodegroup_id"],
                         legacyid,
@@ -229,6 +228,7 @@ def process_worksheet(self, worksheet, cursor, node_lookup, nodegroup_lookup):
                         ),  # source_description
                         passes_validation,
                         operation,
+                        sortorder,
                     ),
                 )
             except KeyError:
diff --git a/arches/app/media/js/utils/create-vue-application.js b/arches/app/media/js/utils/create-vue-application.js
index 2f1620adc48..d51990ce1b5 100644
--- a/arches/app/media/js/utils/create-vue-application.js
+++ b/arches/app/media/js/utils/create-vue-application.js
@@ -13,7 +13,7 @@ import { createGettext } from "vue3-gettext";
 import arches from 'arches';
 import { DEFAULT_THEME } from "@/arches/themes/default.ts";
 
-export default async function createVueApplication(vueComponent, themeConfiguration) {
+export default async function createVueApplication(vueComponent, themeConfiguration = DEFAULT_THEME) {
     /**
      * This wrapper allows us to maintain a level of control inside arches-core
      * over Vue apps. For instance this allows us to abstract i18n setup/config
@@ -40,8 +40,19 @@ export default async function createVueApplication(vueComponent, themeConfigurat
     });
 
     const app = createApp(vueComponent);
+    const darkModeClass = themeConfiguration.theme.options.darkModeSelector.substring(1);
+    const darkModeStorageKey = `arches.${darkModeClass}`;
 
-    app.use(PrimeVue, themeConfiguration || DEFAULT_THEME);
+    const darkModeToggleState = localStorage.getItem(darkModeStorageKey);
+    if (
+        darkModeToggleState === "true" ||
+        (darkModeToggleState === null &&
+            window.matchMedia("(prefers-color-scheme: dark)").matches)
+    ) {
+        document.documentElement.classList.add(darkModeClass);
+    }
+
+    app.use(PrimeVue, themeConfiguration);
     app.use(gettext);
     app.use(ConfirmationService);
     app.use(DialogService);
diff --git a/arches/app/models/graph.py b/arches/app/models/graph.py
index a531ee953de..b14d593ded8 100644
--- a/arches/app/models/graph.py
+++ b/arches/app/models/graph.py
@@ -1075,6 +1075,7 @@ def flatten_tree(tree, node_id_list=[]):
         copy_of_self.root = root_node
         copy_of_self.name = root_node.name
         copy_of_self.isresource = False
+        copy_of_self.resource_instance_lifecycle = None
         copy_of_self.subtitle = ""
         copy_of_self.description = ""
         copy_of_self.author = ""
diff --git a/arches/app/models/migrations/11408_loadstaging_sortorder.py b/arches/app/models/migrations/11408_loadstaging_sortorder.py
new file mode 100644
index 00000000000..9420764f28c
--- /dev/null
+++ b/arches/app/models/migrations/11408_loadstaging_sortorder.py
@@ -0,0 +1,409 @@
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("models", "10437_node_alias_not_null"),
+    ]
+
+    update_arches_staging_to_tile = """
+        CREATE OR REPLACE FUNCTION public.__arches_staging_to_tile(
+            load_id uuid)
+            RETURNS boolean
+            LANGUAGE 'plpgsql'
+            COST 100
+            VOLATILE PARALLEL UNSAFE
+        AS $BODY$
+        DECLARE
+            status boolean;
+            staged_value jsonb;
+            tile_data jsonb;
+            old_data jsonb;
+            passed boolean;
+            source text;
+            op text;
+            selected_resource text;
+            graph_id uuid;
+            instance_id uuid;
+            legacy_id text;
+            file_id uuid;
+            tile_id uuid;
+            tile_id_tree uuid;
+            parent_id uuid;
+            nodegroup_id uuid;
+            sort_order integer;
+            resource_instance_lifecycle_state_uuid uuid;
+            _file jsonb;
+            _key text;
+            _value text;
+            tile_data_value jsonb;
+            resource_object jsonb;
+            resource_object_array jsonb;
+        BEGIN
+            FOR staged_value, instance_id, legacy_id, tile_id, parent_id, nodegroup_id, passed, graph_id, source, op, resource_instance_lifecycle_state_uuid, sort_order IN
+                (
+                    SELECT value, resourceid, legacyid, tileid, parenttileid, ls.nodegroupid, passes_validation, n.graphid, source_description, operation, rils.id, ls.sortorder
+                    FROM load_staging ls
+                    INNER JOIN (SELECT DISTINCT nodegroupid, graphid FROM nodes) n
+                        ON ls.nodegroupid = n.nodegroupid
+                    INNER JOIN (SELECT graphid, resource_instance_lifecycle_id FROM graphs) g
+                        ON g.graphid = n.graphid
+                    INNER JOIN (SELECT id, resource_instance_lifecycle_id FROM resource_instance_lifecycle_states WHERE is_initial_state = true) rils
+                        ON g.resource_instance_lifecycle_id = rils.resource_instance_lifecycle_id
+                    WHERE loadid = load_id
+                    ORDER BY nodegroup_depth ASC
+                )
+            LOOP
+                IF passed THEN
+                    SELECT resourceinstanceid FROM resource_instances INTO selected_resource WHERE resourceinstanceid = instance_id;
+                    -- create a resource first if the resource is not yet created
+                    IF NOT FOUND THEN
+                        INSERT INTO resource_instances(resourceinstanceid, graphid, legacyid, createdtime, resource_instance_lifecycle_state_id)
+                        VALUES (instance_id, graph_id, legacy_id, now(), resource_instance_lifecycle_state_uuid);
+                        -- create resource instance edit log
+                        INSERT INTO edit_log (resourceclassid, resourceinstanceid, edittype, timestamp, note, transactionid)
+                        VALUES (graph_id, instance_id, 'create', now(), 'loaded from staging_table', load_id);
+                    END IF;
+
+                    -- create a tile one by one
+                    tile_data := '{}'::jsonb;
+                    FOR _key, _value IN SELECT * FROM jsonb_each_text(staged_value)
+                    LOOP
+                        tile_data_value = _value::jsonb -> 'value';
+                        IF (_value::jsonb ->> 'datatype') in ('resource-instance-list', 'resource-instance') AND tile_data_value <> null THEN
+                            resource_object_array = '[]'::jsonb;
+                            FOR resource_object IN SELECT * FROM jsonb_array_elements(tile_data_value) LOOP
+                                resource_object = jsonb_set(resource_object, '{resourceXresourceId}', to_jsonb(uuid_generate_v1mc()));
+                                resource_object_array = resource_object_array || resource_object;
+                            END LOOP;
+                            tile_data_value = resource_object_array;
+                        END IF;
+                        tile_data = jsonb_set(tile_data, format('{"%s"}', _key)::text[], coalesce(tile_data_value, 'null'));
+                    END LOOP;
+
+                    IF op = 'update' THEN
+                        SELECT tiledata FROM tiles INTO old_data WHERE resourceinstanceid = instance_id AND tileid = tile_id;
+                        IF NOT FOUND THEN
+                            INSERT INTO tiles(tileid, tiledata, nodegroupid, parenttileid, resourceinstanceid, sortorder)
+                            VALUES (tile_id, tile_data, nodegroup_id, parent_id, instance_id, sort_order);
+                            INSERT INTO edit_log (resourceclassid, resourceinstanceid, nodegroupid, tileinstanceid, edittype, newvalue, timestamp, note, transactionid)
+                            VALUES (graph_id, instance_id, nodegroup_id, tile_id, 'tile create', tile_data::jsonb, now(), 'loaded from staging_table', load_id);
+                        ELSE
+                            UPDATE tiles
+                            SET tiledata = tile_data, sortorder = sort_order
+                            WHERE tileid = tile_id;
+                            INSERT INTO edit_log (resourceclassid, resourceinstanceid, nodegroupid, tileinstanceid, edittype, newvalue, oldvalue, timestamp, note, transactionid)
+                            VALUES (graph_id, instance_id, nodegroup_id, tile_id, 'tile edit', tile_data::jsonb, old_data, now(), 'loaded from staging_table', load_id);
+                        END IF;
+                    ELSIF op = 'insert' THEN
+                        INSERT INTO tiles(tileid, tiledata, nodegroupid, parenttileid, resourceinstanceid, sortorder)
+                        VALUES (tile_id, tile_data, nodegroup_id, parent_id, instance_id, sort_order);
+                        INSERT INTO edit_log (resourceclassid, resourceinstanceid, nodegroupid, tileinstanceid, edittype, newvalue, timestamp, note, transactionid)
+                        VALUES (graph_id, instance_id, nodegroup_id, tile_id, 'tile create', tile_data::jsonb, now(), 'loaded from staging_table', load_id);
+                    END IF;
+                END IF;
+            END LOOP;
+            FOR staged_value, tile_id IN
+                (
+                    SELECT value, tileid
+                    FROM load_staging
+                    WHERE loadid = load_id
+                )
+            LOOP
+                FOR _key, _value IN SELECT * FROM jsonb_each_text(staged_value)
+                LOOP
+                    CASE
+                        WHEN (_value::jsonb ->> 'datatype') = 'file-list' THEN
+                            FOR _file IN SELECT * FROM jsonb_array_elements(_value::jsonb -> 'value') LOOP
+                                file_id = _file ->> 'file_id';
+                                UPDATE files SET tileid = tile_id WHERE fileid = file_id::uuid;
+                            END LOOP;
+                        WHEN (_value::jsonb ->> 'datatype') in ('resource-instance-list', 'resource-instance') THEN
+                            PERFORM __arches_refresh_tile_resource_relationships(tile_id);
+                        ELSE
+                    END CASE;
+                END LOOP;
+            END LOOP;
+            UPDATE load_event SET (load_end_time, complete, successful) = (now(), true, true) WHERE loadid = load_id;
+            PERFORM refresh_transaction_geojson_geometries(load_id);
+            SELECT successful INTO status FROM load_event WHERE loadid = load_id;
+            RETURN status;
+        END;
+        $BODY$;
+    """
+
+    reverse_arches_staging_to_tile = """
+        CREATE OR REPLACE FUNCTION public.__arches_staging_to_tile(
+            load_id uuid)
+            RETURNS boolean
+            LANGUAGE 'plpgsql'
+            COST 100
+            VOLATILE PARALLEL UNSAFE
+        AS $BODY$
+        DECLARE
+            status boolean;
+            staged_value jsonb;
+            tile_data jsonb;
+            old_data jsonb;
+            passed boolean;
+            source text;
+            op text;
+            selected_resource text;
+            graph_id uuid;
+            instance_id uuid;
+            legacy_id text;
+            file_id uuid;
+            tile_id uuid;
+            tile_id_tree uuid;
+            parent_id uuid;
+            nodegroup_id uuid;
+            resource_instance_lifecycle_state_uuid uuid;
+            _file jsonb;
+            _key text;
+            _value text;
+            tile_data_value jsonb;
+            resource_object jsonb;
+            resource_object_array jsonb;
+        BEGIN
+            FOR staged_value, instance_id, legacy_id, tile_id, parent_id, nodegroup_id, passed, graph_id, source, op, resource_instance_lifecycle_state_uuid IN
+                (
+                    SELECT value, resourceid, legacyid, tileid, parenttileid, ls.nodegroupid, passes_validation, n.graphid, source_description, operation, rils.id
+                    FROM load_staging ls
+                    INNER JOIN (SELECT DISTINCT nodegroupid, graphid FROM nodes) n
+                        ON ls.nodegroupid = n.nodegroupid
+                    INNER JOIN (SELECT graphid, resource_instance_lifecycle_id FROM graphs) g
+                        ON g.graphid = n.graphid
+                    INNER JOIN (SELECT id, resource_instance_lifecycle_id FROM resource_instance_lifecycle_states WHERE is_initial_state = true) rils
+                        ON g.resource_instance_lifecycle_id = rils.resource_instance_lifecycle_id
+                    WHERE loadid = load_id
+                    ORDER BY nodegroup_depth ASC
+                )
+            LOOP
+                IF passed THEN
+                    SELECT resourceinstanceid FROM resource_instances INTO selected_resource WHERE resourceinstanceid = instance_id;
+                    -- create a resource first if the resource is not yet created
+                    IF NOT FOUND THEN
+                        INSERT INTO resource_instances(resourceinstanceid, graphid, legacyid, createdtime, resource_instance_lifecycle_state_id)
+                        VALUES (instance_id, graph_id, legacy_id, now(), resource_instance_lifecycle_state_uuid);
+                        -- create resource instance edit log
+                        INSERT INTO edit_log (resourceclassid, resourceinstanceid, edittype, timestamp, note, transactionid)
+                        VALUES (graph_id, instance_id, 'create', now(), 'loaded from staging_table', load_id);
+                    END IF;
+
+                    -- create a tile one by one
+                    tile_data := '{}'::jsonb;
+                    FOR _key, _value IN SELECT * FROM jsonb_each_text(staged_value)
+                    LOOP
+                        tile_data_value = _value::jsonb -> 'value';
+                        IF (_value::jsonb ->> 'datatype') in ('resource-instance-list', 'resource-instance') AND tile_data_value <> null THEN
+                            resource_object_array = '[]'::jsonb;
+                            FOR resource_object IN SELECT * FROM jsonb_array_elements(tile_data_value) LOOP
+                                resource_object = jsonb_set(resource_object, '{resourceXresourceId}', to_jsonb(uuid_generate_v1mc()));
+                                resource_object_array = resource_object_array || resource_object;
+                            END LOOP;
+                            tile_data_value = resource_object_array;
+                        END IF;
+                        tile_data = jsonb_set(tile_data, format('{"%s"}', _key)::text[], coalesce(tile_data_value, 'null'));
+                    END LOOP;
+
+                    IF op = 'update' THEN
+                        SELECT tiledata FROM tiles INTO old_data WHERE resourceinstanceid = instance_id AND tileid = tile_id;
+                        IF NOT FOUND THEN
+                            INSERT INTO tiles(tileid, tiledata, nodegroupid, parenttileid, resourceinstanceid)
+                            VALUES (tile_id, tile_data, nodegroup_id, parent_id, instance_id);
+                            INSERT INTO edit_log (resourceclassid, resourceinstanceid, nodegroupid, tileinstanceid, edittype, newvalue, timestamp, note, transactionid)
+                            VALUES (graph_id, instance_id, nodegroup_id, tile_id, 'tile create', tile_data::jsonb, now(), 'loaded from staging_table', load_id);
+                        ELSE
+                            UPDATE tiles
+                            SET tiledata = tile_data
+                            WHERE tileid = tile_id;
+                            INSERT INTO edit_log (resourceclassid, resourceinstanceid, nodegroupid, tileinstanceid, edittype, newvalue, oldvalue, timestamp, note, transactionid)
+                            VALUES (graph_id, instance_id, nodegroup_id, tile_id, 'tile edit', tile_data::jsonb, old_data, now(), 'loaded from staging_table', load_id);
+                        END IF;
+                    ELSIF op = 'insert' THEN
+                        INSERT INTO tiles(tileid, tiledata, nodegroupid, parenttileid, resourceinstanceid)
+                        VALUES (tile_id, tile_data, nodegroup_id, parent_id, instance_id);
+                        INSERT INTO edit_log (resourceclassid, resourceinstanceid, nodegroupid, tileinstanceid, edittype, newvalue, timestamp, note, transactionid)
+                        VALUES (graph_id, instance_id, nodegroup_id, tile_id, 'tile create', tile_data::jsonb, now(), 'loaded from staging_table', load_id);
+                    END IF;
+                END IF;
+            END LOOP;
+            FOR staged_value, tile_id IN
+                (
+                    SELECT value, tileid
+                    FROM load_staging
+                    WHERE loadid = load_id
+                )
+            LOOP
+                FOR _key, _value IN SELECT * FROM jsonb_each_text(staged_value)
+                LOOP
+                    CASE
+                        WHEN (_value::jsonb ->> 'datatype') = 'file-list' THEN
+                            FOR _file IN SELECT * FROM jsonb_array_elements(_value::jsonb -> 'value') LOOP
+                                file_id = _file ->> 'file_id';
+                                UPDATE files SET tileid = tile_id WHERE fileid = file_id::uuid;
+                            END LOOP;
+                        WHEN (_value::jsonb ->> 'datatype') in ('resource-instance-list', 'resource-instance') THEN
+                            PERFORM __arches_refresh_tile_resource_relationships(tile_id);
+                        ELSE
+                    END CASE;
+                END LOOP;
+            END LOOP;
+            UPDATE load_event SET (load_end_time, complete, successful) = (now(), true, true) WHERE loadid = load_id;
+            PERFORM refresh_transaction_geojson_geometries(load_id);
+            SELECT successful INTO status FROM load_event WHERE loadid = load_id;
+            RETURN status;
+        END;
+        $BODY$;
+    """
+
+    update_arches_stage_string_data_for_bulk_edit = """
+        CREATE OR REPLACE FUNCTION public.__arches_stage_string_data_for_bulk_edit(
+            load_id uuid,
+            graph_id uuid,
+            node_id uuid,
+            module_id uuid,
+            resourceinstance_ids uuid[],
+            operation text,
+            old_text text,
+            new_text text,
+            language_code text,
+            case_insensitive boolean,
+            update_limit integer)
+            RETURNS void
+            LANGUAGE 'plpgsql'
+            COST 100
+            VOLATILE PARALLEL UNSAFE
+        AS $BODY$
+        DECLARE
+            tile_id uuid;
+            tile_data jsonb;
+            nodegroup_id uuid;
+            parenttile_id uuid;
+            resourceinstance_id uuid;
+            text_replacing_like text;
+        BEGIN
+            INSERT INTO load_staging (tileid, value, nodegroupid, parenttileid, resourceid, loadid, nodegroup_depth, source_description, operation, passes_validation, sortorder)
+            SELECT DISTINCT t.tileid, t.tiledata, t.nodegroupid, t.parenttileid, t.resourceinstanceid, load_id, 0, 'bulk_edit', 'update', true, t.sortorder
+            FROM tiles t, nodes n
+            WHERE t.nodegroupid = n.nodegroupid
+            AND CASE
+                WHEN graph_id IS NULL THEN true
+                ELSE n.graphid = graph_id
+            END
+            AND CASE
+                WHEN node_id IS NULL THEN n.datatype = 'string'
+                ELSE n.nodeid = node_id
+            END
+            AND CASE
+                WHEN resourceinstance_ids IS NULL THEN true
+                ELSE t.resourceinstanceid = ANY(resourceinstance_ids)
+            END
+            AND CASE operation
+                WHEN 'trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'capitalize' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> INITCAP(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'capitalize_trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(INITCAP(t.tiledata -> nodeid::text -> language_code ->> 'value'))
+                WHEN 'upper' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> UPPER(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'upper_trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(UPPER(t.tiledata -> nodeid::text -> language_code ->> 'value'))
+                WHEN 'lower' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> LOWER(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'lower_trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(LOWER(t.tiledata -> nodeid::text -> language_code ->> 'value'))
+                WHEN 'replace_i' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> REGEXP_REPLACE(t.tiledata -> nodeid::text -> language_code ->> 'value', old_text, new_text, 'gi')
+                WHEN 'replace' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> REGEXP_REPLACE(t.tiledata -> nodeid::text -> language_code ->> 'value', old_text, new_text, 'g')
+            END
+            LIMIT update_limit;
+        END;
+        $BODY$;
+    """
+
+    reverse_arches_stage_string_data_for_bulk_edit = """
+        CREATE OR REPLACE FUNCTION public.__arches_stage_string_data_for_bulk_edit(
+            load_id uuid,
+            graph_id uuid,
+            node_id uuid,
+            module_id uuid,
+            resourceinstance_ids uuid[],
+            operation text,
+            old_text text,
+            new_text text,
+            language_code text,
+            case_insensitive boolean,
+            update_limit integer)
+            RETURNS void
+            LANGUAGE 'plpgsql'
+            COST 100
+            VOLATILE PARALLEL UNSAFE
+        AS $BODY$
+        DECLARE
+            tile_id uuid;
+            tile_data jsonb;
+            nodegroup_id uuid;
+            parenttile_id uuid;
+            resourceinstance_id uuid;
+            text_replacing_like text;
+        BEGIN
+            INSERT INTO load_staging (tileid, value, nodegroupid, parenttileid, resourceid, loadid, nodegroup_depth, source_description, operation, passes_validation)
+            SELECT DISTINCT t.tileid, t.tiledata, t.nodegroupid, t.parenttileid, t.resourceinstanceid, load_id, 0, 'bulk_edit', 'update', true
+            FROM tiles t, nodes n
+            WHERE t.nodegroupid = n.nodegroupid
+            AND CASE
+                WHEN graph_id IS NULL THEN true
+                ELSE n.graphid = graph_id
+            END
+            AND CASE
+                WHEN node_id IS NULL THEN n.datatype = 'string'
+                ELSE n.nodeid = node_id
+            END
+            AND CASE
+                WHEN resourceinstance_ids IS NULL THEN true
+                ELSE t.resourceinstanceid = ANY(resourceinstance_ids)
+            END
+            AND CASE operation
+                WHEN 'trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'capitalize' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> INITCAP(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'capitalize_trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(INITCAP(t.tiledata -> nodeid::text -> language_code ->> 'value'))
+                WHEN 'upper' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> UPPER(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'upper_trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(UPPER(t.tiledata -> nodeid::text -> language_code ->> 'value'))
+                WHEN 'lower' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> LOWER(t.tiledata -> nodeid::text -> language_code ->> 'value')
+                WHEN 'lower_trim' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> TRIM(LOWER(t.tiledata -> nodeid::text -> language_code ->> 'value'))
+                WHEN 'replace_i' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> REGEXP_REPLACE(t.tiledata -> nodeid::text -> language_code ->> 'value', old_text, new_text, 'gi')
+                WHEN 'replace' THEN
+                    t.tiledata -> nodeid::text -> language_code ->> 'value' <> REGEXP_REPLACE(t.tiledata -> nodeid::text -> language_code ->> 'value', old_text, new_text, 'g')
+            END
+            LIMIT update_limit;
+        END;
+        $BODY$;
+    """
+
+    operations = [
+        migrations.AddField(
+            model_name="loadstaging",
+            name="sortorder",
+            field=models.IntegerField(default=0),
+        ),
+        migrations.RunSQL(
+            update_arches_staging_to_tile,
+            reverse_arches_staging_to_tile,
+        ),
+        migrations.RunSQL(
+            update_arches_stage_string_data_for_bulk_edit,
+            reverse_arches_stage_string_data_for_bulk_edit,
+        ),
+    ]
diff --git a/arches/app/models/models.py b/arches/app/models/models.py
index dcd7004292c..ed885b73aec 100644
--- a/arches/app/models/models.py
+++ b/arches/app/models/models.py
@@ -613,39 +613,6 @@ def save(self, *args, **kwargs):
 
         super(GraphModel, self).save(*args, **kwargs)
 
-    @classmethod
-    def check(cls, **kwargs):
-        errors = super().check(**kwargs)
-        errors.extend(cls._check_publication_in_every_language())
-        return errors
-
-    @classmethod
-    def _check_publication_in_every_language(cls):
-        errors = []
-        system_languages = {lang[0] for lang in settings.LANGUAGES}
-
-        for graph in (
-            cls.objects.filter(publication__isnull=False)
-            .select_related("publication")
-            .prefetch_related("publication__publishedgraph_set")
-        ):
-            languages_with_a_publication = {
-                published_graph.language_id
-                for published_graph in graph.publication.publishedgraph_set.all()
-            }
-            missing_languages = system_languages - languages_with_a_publication
-            if missing_languages:
-                errors.append(
-                    checks.Error(
-                        "This graph is not published in all enabled languages.",
-                        hint="Run python manage.py graph publish --update",
-                        obj=graph,
-                        id="arches.E004",  # TODO: enum in arches 8
-                    )
-                )
-
-        return errors
-
     def __str__(self):
         return str(self.name)
 
@@ -2200,6 +2167,7 @@ class LoadStaging(models.Model):
     legacyid = models.TextField(blank=True, null=True)
     resourceid = models.UUIDField(serialize=False, blank=True, null=True)
     tileid = models.UUIDField(serialize=False, blank=True, null=True)
+    sortorder = models.IntegerField(blank=False, null=False, default=0)
     parenttileid = models.UUIDField(serialize=False, blank=True, null=True)
     passes_validation = models.BooleanField(blank=True, null=True)
     nodegroup_depth = models.IntegerField(default=1)
diff --git a/arches/app/views/api/__init__.py b/arches/app/views/api/__init__.py
index 84bfac18465..376007eb811 100644
--- a/arches/app/views/api/__init__.py
+++ b/arches/app/views/api/__init__.py
@@ -137,9 +137,8 @@ def get(self, request):
         localized_strings = {}
         for lang_file in language_file_path:
             try:
-                localized_strings = (
-                    json.load(open(lang_file))[user_language] | localized_strings
-                )
+                with open(lang_file, "r", encoding="utf-8") as f:
+                    localized_strings = json.load(f)[user_language] | localized_strings
             except FileNotFoundError:
                 pass
diff --git a/arches/install/arches-templates/.github/actions/build-and-test-branch/action.yml b/arches/install/arches-templates/.github/actions/build-and-test-branch/action.yml
index f8e4c02a4f0..41921f283f8 100644
--- a/arches/install/arches-templates/.github/actions/build-and-test-branch/action.yml
+++ b/arches/install/arches-templates/.github/actions/build-and-test-branch/action.yml
@@ -33,7 +33,7 @@ runs:
     - name: Ensure frontend configuration files exist
       run: |
-        python manage.py check --tag=compatibility
+        python manage.py check
       shell: bash
 
     - name: Install Arches applications
@@ -74,7 +74,7 @@
     - name: Check for missing migrations
       run: |
-        python manage.py makemigrations --check --skip-checks
+        python manage.py makemigrations --check
       shell: bash
 
     - name: Ensure previous Python coverage data is erased
diff --git a/arches/management/commands/graph.py b/arches/management/commands/graph.py
index f77dfe4fa33..933f70312b7 100644
--- a/arches/management/commands/graph.py
+++ b/arches/management/commands/graph.py
@@ -23,14 +23,7 @@
 
 class Command(BaseCommand):
-    """
-    Commands for adding arches test users
-
-    """
-
-    # Silence system checks since this command is the cure for
-    # one of the system checks (arches.E004)
-    requires_system_checks = []
+    """Commands for updating graphs."""
 
     def add_arguments(self, parser):
         parser.add_argument(
diff --git a/arches/management/commands/packages.py b/arches/management/commands/packages.py
index ac8da0bac7b..6c86a3af35a 100644
--- a/arches/management/commands/packages.py
+++ b/arches/management/commands/packages.py
@@ -73,9 +73,6 @@ class Command(BaseCommand):
     """
 
-    # Silence system checks: if run with -db, should always succeed.
-    requires_system_checks = []
-
     def add_arguments(self, parser):
         parser.add_argument(
             "-o",
diff --git a/arches/management/commands/setup_db.py b/arches/management/commands/setup_db.py
index 0b0df08d791..dcc6006ebdf 100644
--- a/arches/management/commands/setup_db.py
+++ b/arches/management/commands/setup_db.py
@@ -36,9 +36,6 @@ class Command(BaseCommand):
     during development.
     """
 
-    # Silence system checks: this command should always succeed.
-    requires_system_checks = []
-
     def add_arguments(self, parser):
         parser.add_argument(
diff --git a/arches/management/commands/updateproject.py b/arches/management/commands/updateproject.py
index 33c4a1f93b9..e0c4956cae7 100644
--- a/arches/management/commands/updateproject.py
+++ b/arches/management/commands/updateproject.py
@@ -51,30 +51,3 @@ def update_to_v8(self):
             os.remove(declarations_test_file_path)
 
         self.stdout.write("Done!")
-
-        # Update certain lines in GitHub Actions workflows.
- self.stdout.write("Updating GitHub Actions...") - action_path = os.path.join( - settings.APP_ROOT, - "..", - ".github", - "actions", - "build-and-test-branch", - "action.yml", - ) - if os.path.exists(action_path): - first_find = "python manage.py check\n" - first_replace = "python manage.py check --tag=compatibility\n" - second_find = "python manage.py makemigrations --check\n" - second_replace = "python manage.py makemigrations --check --skip-checks\n" - with open(action_path, "r") as f: - content = f.readlines() - for i, line in enumerate(content): - if line.endswith(first_find): - content[i] = line.replace(first_find, first_replace) - elif line.endswith(second_find): - content[i] = line.replace(second_find, second_replace) - with open(action_path, "w") as f: - f.writelines(content) - - self.stdout.write("Done!") diff --git a/arches/management/commands/validate.py b/arches/management/commands/validate.py index 8801997fee2..e821b6ece35 100644 --- a/arches/management/commands/validate.py +++ b/arches/management/commands/validate.py @@ -18,12 +18,15 @@ from datetime import datetime +from django.conf import settings +from django.contrib.postgres.aggregates import ArrayAgg +from django.core.management import BaseCommand, CommandError, call_command +from django.db import transaction +from django.db.models import Exists, OuterRef, Q, Subquery + from arches import __version__ from arches.app.const import IntegrityCheck from arches.app.models import models -from django.core.management.base import BaseCommand, CommandError -from django.db import transaction -from django.db.models import Exists, OuterRef # Command modes FIX = "fix" @@ -31,6 +34,7 @@ # Fix actions DELETE_QUERYSET = "delete queryset" +UPDATE_GRAPH_PUBLICATIONS = "manage.py graph --update" class Command(BaseCommand): @@ -116,6 +120,36 @@ def handle(self, *args, **options): ), fix_action=DELETE_QUERYSET, ) + self.check_integrity( + check=IntegrityCheck.PUBLICATION_MISSING_FOR_LANGUAGE, # 1014 + queryset=( + models.GraphModel.objects.filter( + isresource=True, + publication__isnull=False, + source_identifier=None, + ) + .annotate( + publications_in_system_languages=ArrayAgg( + Subquery( + models.PublishedGraph.objects.filter( + pk=OuterRef("publication__publishedgraph"), + ) + .values("language") + .distinct() + ), + filter=Q( + publication__publishedgraph__language__in=[ + lang[0] for lang in settings.LANGUAGES + ] + ), + ) + ) + .filter( + publications_in_system_languages__len__lt=len(settings.LANGUAGES) + ) + ), + fix_action=UPDATE_GRAPH_PUBLICATIONS, + ) def check_integrity(self, check, queryset, fix_action): # 500 not set as a default earlier: None distinguishes whether verbose output implied @@ -146,10 +180,24 @@ def check_integrity(self, check, queryset, fix_action): elif queryset.exists(): fix_status = self.style.ERROR("No") # until actually fixed below # Perform fix action - if fix_action is DELETE_QUERYSET: + if fix_action == DELETE_QUERYSET: with transaction.atomic(): queryset.delete() fix_status = self.style.SUCCESS("Yes") + elif fix_action == UPDATE_GRAPH_PUBLICATIONS: + call_command( + "graph", + "publish", + "--update", + "-g", + ",".join( + str(pk) for pk in queryset.values_list("pk", flat=True) + ), + verbosity=self.options["verbosity"], + stdout=self.stdout, + stderr=self.stderr, + ) + fix_status = self.style.SUCCESS("Yes") else: raise NotImplementedError else: diff --git a/releases/8.0.0.md b/releases/8.0.0.md index cab180e5173..48649f418f0 100644 --- a/releases/8.0.0.md +++ b/releases/8.0.0.md @@ -19,15 +19,16 @@ 
Arches 8.0.0 Release Notes ### Additional highlights - Add session-based REST APIs for login, logout [#11261](https://github.com/archesproject/arches/issues/11261) -- Add system check advising next action when enabling additional languages without updating graphs [#10079](https://github.com/archesproject/arches/issues/10079) - Auth views now filter out passwords from error reports when running in production [#11652](https://github.com/archesproject/arches/issues/11652) - Improve handling of longer model names [#11317](https://github.com/archesproject/arches/issues/11317) - Support more expressive plugin URLs [#11320](https://github.com/archesproject/arches/issues/11320) - Make node aliases not nullable [#10437](https://github.com/archesproject/arches/issues/10437) - Concepts API no longer responds with empty body for error conditions [#11519](https://github.com/archesproject/arches/issues/11519) - Removes sample index from new projects, updates test coverage behavior [#11591](https://github.com/archesproject/arches/issues/11519) +- Add system dark mode detection for Vue apps [#11624](https://github.com/archesproject/arches/issues/11624) - Make number datatype node values searchable in the main search [#11619](https://github.com/archesproject/arches/issues/11619) - Prevent navigation to a new browser tab when clicking Manage link in index.htm [#11635](https://github.com/archesproject/arches/issues/11635) +- Add support for tile sort order to the bulk data manager [#11638](https://github.com/archesproject/arches/pull/11638) ### Dependency changes ``` @@ -107,6 +108,12 @@ JavaScript: 4. Update your Graph publications and Resource instances to point to the newly published Graphs by running `python manage.py graph publish --update -ui` +1. Also consider running validation checks: + ``` + python manage.py validate + python manage.py validate --fix [error] + ``` + 5. 
Within your project with your Python 3 virtual environment activated: ``` python manage.py es reindex_database diff --git a/tests/bulkdata/tile_excel_tests.py b/tests/bulkdata/tile_excel_tests.py index 267a60ebdcc..75f9899ada5 100644 --- a/tests/bulkdata/tile_excel_tests.py +++ b/tests/bulkdata/tile_excel_tests.py @@ -23,6 +23,7 @@ from django.core.management import call_command from django.test import TransactionTestCase +from arches.app.models.models import TileModel from arches.app.utils.betterJSONSerializer import JSONDeserializer from arches.app.utils.data_management.resource_graphs.importer import ( import_graph as ResourceGraphImporter, @@ -57,3 +58,7 @@ def test_cli(self): ) call_command("etl", "tile-excel-importer", source=excel_file_path, stdout=out) self.assertIn("succeeded", out.getvalue()) + + new_tiles = TileModel.objects.all() + self.assertEqual(new_tiles.count(), 6) + self.assertEqual(new_tiles.filter(sortorder=1).count(), 2) diff --git a/tests/fixtures/data/uploadedfiles/tile_excel_test.xlsx b/tests/fixtures/data/uploadedfiles/tile_excel_test.xlsx index a06af945698..5f167143b03 100644 Binary files a/tests/fixtures/data/uploadedfiles/tile_excel_test.xlsx and b/tests/fixtures/data/uploadedfiles/tile_excel_test.xlsx differ diff --git a/tests/views/graph_manager_tests.py b/tests/views/graph_manager_tests.py index 00129b869aa..fd858aba148 100644 --- a/tests/views/graph_manager_tests.py +++ b/tests/views/graph_manager_tests.py @@ -157,7 +157,7 @@ def setUpTestData(cls): } Edge.objects.create(**edges_dict).save() - graph = Graph.new() + graph = Graph.new(is_resource=True) graph.ontology_id = "e6e8db47-2ccf-11e6-927e-b8f6b115d7dd" graph.root.ontologyclass = "http://www.cidoc-crm.org/cidoc-crm/E1_CRM_Entity" graph.name = "TEST GRAPH"
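A note on the `sortorder` plumbing in the ETL importers above: the tile-excel path reads the value from `cell_values[-3]` (falling back to 0 when the cell is empty), while the branch-excel and single-CSV paths stage a constant 0. Below is a minimal sketch of that extraction and the parameter position it feeds; the sample row layout and the `staged_sortorder` helper are illustrative assumptions, not Arches code:

```python
# Sketch (not Arches code) of how a sort order is derived from a worksheet row
# and bound into the load_staging INSERT. The assumed row layout -- three
# trailing bookkeeping cells, with sortorder third from the end -- mirrors the
# cell_values[3:-3] / cell_values[-3] indexing in tile_excel_importer.py.

LOAD_STAGING_INSERT = (
    "INSERT INTO load_staging (nodegroupid, legacyid, resourceid, tileid, "
    "parenttileid, value, loadid, nodegroup_depth, source_description, "
    "passes_validation, operation, sortorder) "
    "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
)


def staged_sortorder(cell_values):
    """Fall back to 0 when the sortorder cell is empty, None, or 0."""
    return cell_values[-3] if cell_values[-3] else 0


row = ["resource-1", "tile-1", None, "some value", 2, None, None]
assert staged_sortorder(row) == 2
assert staged_sortorder(["r", "t", None, "v", None, None, None]) == 0
```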
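The rewritten `__arches_staging_to_tile` threads the staged sort order through all three tile paths: a plain `insert`, an `update` that falls back to an insert when the tile does not yet exist, and an in-place update of `tiledata` plus `sortorder`. A small in-memory paraphrase of just that branch logic, in Python rather than PL/pgSQL (a hypothetical model for clarity, not the shipped function):

```python
# Paraphrase of the upsert branching the migration adds: 'insert' always
# creates the tile with the staged sortorder; 'update' rewrites tiledata and
# sortorder in place, or falls back to an insert if the tile is missing.

tiles = {}  # tileid -> {"tiledata": ..., "sortorder": ...}


def staging_to_tile(op, tile_id, tile_data, sort_order):
    if op == "update" and tile_id in tiles:
        tiles[tile_id].update(tiledata=tile_data, sortorder=sort_order)
    else:  # 'insert', or 'update' for a tile that does not exist yet
        tiles[tile_id] = {"tiledata": tile_data, "sortorder": sort_order}


staging_to_tile("insert", "t1", {"node": "a"}, 1)
staging_to_tile("update", "t1", {"node": "b"}, 2)  # rewrites in place
staging_to_tile("update", "t2", {"node": "c"}, 0)  # falls back to insert
assert tiles["t1"] == {"tiledata": {"node": "b"}, "sortorder": 2}
assert "t2" in tiles
```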
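The create-vue-application.js change bootstraps dark mode before the Vue app mounts: an explicit `"true"`/`"false"` string stored under `arches.<darkModeClass>` in localStorage wins, and only when nothing is stored does the OS-level `prefers-color-scheme` media query decide. The same precedence expressed as a pure function (a sketch for clarity, not shipped code):

```python
# Truth table for the dark-mode bootstrapping in create-vue-application.js,
# modeled in Python: stored_toggle is the localStorage string ("true"/"false")
# or None when no preference has been saved.

def should_enable_dark_mode(stored_toggle, system_prefers_dark):
    return stored_toggle == "true" or (
        stored_toggle is None and system_prefers_dark
    )


assert should_enable_dark_mode("true", False) is True
assert should_enable_dark_mode("false", True) is False  # explicit opt-out wins
assert should_enable_dark_mode(None, True) is True      # follow the OS
assert should_enable_dark_mode(None, False) is False
```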
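On the const.py change: integrity checks pair an `Enum` member with a parallel descriptions dict keyed by the same code, so `str(check)` yields the text that `manage.py validate` prints beside the code. A self-contained replica of that pattern with the new 1014 entry:

```python
# Replica of the const.py pattern: codes in an Enum, human-readable text in a
# dict keyed by the same codes, joined through __str__.
from enum import Enum

IntegrityCheckDescriptions = {
    1005: "Nodes with ontologies found in graphs without ontologies",
    1012: "Node Groups without matching nodes",
    1014: "Publication missing for language",
}


class IntegrityCheck(Enum):
    NODE_HAS_ONTOLOGY_GRAPH_DOES_NOT = 1005
    NODELESS_NODE_GROUP = 1012
    PUBLICATION_MISSING_FOR_LANGUAGE = 1014

    def __str__(self):
        return IntegrityCheckDescriptions[self.value]


assert IntegrityCheck.PUBLICATION_MISSING_FOR_LANGUAGE.value == 1014
assert str(IntegrityCheck.PUBLICATION_MISSING_FOR_LANGUAGE) == (
    "Publication missing for language"
)
```

Per the validate.py change above, `python manage.py validate --fix 1014` resolves flagged graphs by invoking `graph publish --update -g <pks>` through `call_command`, replacing the removed `arches.E004` system check.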
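The i18n API change keeps the accumulated dict on the right-hand side of the `|` union inside the loop, and Python's dict union lets the right operand win on key collisions, so strings from files earlier in `language_file_path` keep precedence over later ones. A standalone demonstration of that ordering, with sample data standing in for the real language files:

```python
# Why the operand order in `json.load(f)[user_language] | localized_strings`
# matters: the right operand of dict-union wins on duplicate keys, so the
# already-accumulated (earlier-file) values take precedence.

first_file = {"greeting": "Hello", "farewell": "Goodbye"}
second_file = {"greeting": "Howdy", "welcome": "Welcome"}

localized_strings = {}
for strings in (first_file, second_file):  # stands in for json.load(f)[lang]
    localized_strings = strings | localized_strings

assert localized_strings == {
    "greeting": "Hello",  # the first file won the conflict
    "farewell": "Goodbye",
    "welcome": "Welcome",
}
```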