From 5c9e88b146df541a8c9f3274042f391dfb53dce3 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Thu, 26 Jan 2023 17:27:39 +0100 Subject: [PATCH 001/175] Show new publisher from menu --- openpype/hosts/maya/api/menu.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 67109e99585..8cf8899825f 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -67,12 +67,6 @@ def deferred(): cmds.menuItem(divider=True) - # Create default items - cmds.menuItem( - "Create...", - command=lambda *args: host_tools.show_creator(parent=parent_widget) - ) - cmds.menuItem( "Load...", command=lambda *args: host_tools.show_loader( @@ -83,7 +77,7 @@ def deferred(): cmds.menuItem( "Publish...", - command=lambda *args: host_tools.show_publish( + command=lambda *args: host_tools.show_publisher( parent=parent_widget ), image=pyblish_icon From d05f904857e1152a278395ef0b5d99f9ed3be12e Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:03:19 +0100 Subject: [PATCH 002/175] Implement draft for new publisher in Maya --- openpype/hosts/maya/api/lib.py | 52 +++++++++ openpype/hosts/maya/api/pipeline.py | 9 +- openpype/hosts/maya/api/plugin.py | 175 +++++++++++++++++++++++++++- 3 files changed, 234 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 25842a47769..9ebaef7cb8d 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -32,6 +32,7 @@ load_container, registered_host, ) +from openpype.lib import NumberDef from openpype.pipeline.context_tools import get_current_project_asset from .commands import reset_frame_range @@ -315,6 +316,57 @@ def collect_animation_data(fps=False): return data +def collect_animation_defs(fps=False): + """Get the basic animation attribute defintions for the publisher. 
+ + Returns: + OrderedDict + + """ + + # get scene values as defaults + start = cmds.playbackOptions(query=True, animationStartTime=True) + end = cmds.playbackOptions(query=True, animationEndTime=True) + + # build attributes + defs = [ + NumberDef("frameStart", + label="Frame Start", + default=start, + decimals=0), + NumberDef("frameEnd", + label="Frame End", + default=end, + decimals=0), + NumberDef("handleStart", + label="Handle Start", + default=0, + decimals=0), + NumberDef("handleEnd", + label="Handle End", + default=0, + decimals=0), + NumberDef("step", + label="Step size", + tooltip="A smaller step size means more samples and larger " + "output files.\n" + "A 1.0 step size is a single sample every frame.\n" + "A 0.5 step size is two samples per frame.\n" + "A 0.2 step size is five samples per frame.", + default=1.0, + decimals=3), + ] + + if fps: + current_fps = mel.eval('currentTimeUnitToFPS()') + fps_def = NumberDef( + "fps", label="FPS", default=current_fps, decimals=5 + ) + defs.append(fps_def) + + return defs + + def imprint(node, data): """Write `data` to `node` as userDefined attributes diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index 3798170671e..50f08810316 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -13,6 +13,7 @@ HostBase, IWorkfileHost, ILoadHost, + IPublishHost, HostDirmap, ) from openpype.tools.utils import host_tools @@ -63,7 +64,7 @@ AVALON_CONTAINERS = ":AVALON_CONTAINERS" -class MayaHost(HostBase, IWorkfileHost, ILoadHost): +class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): name = "maya" def __init__(self): @@ -146,6 +147,12 @@ def maintained_selection(self): with lib.maintained_selection(): yield + def get_context_data(self): + return {} + + def update_context_data(self, data, changes): + return + def _register_callbacks(self): for handler, event in self._op_events.copy().items(): if event is None: diff --git 
a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 82df85a8be9..efb4b945d93 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -1,4 +1,10 @@ import os +import sys +import json +from abc import ( + ABCMeta +) +import six from maya import cmds @@ -6,17 +12,36 @@ from openpype.lib import Logger from openpype.pipeline import ( - LegacyCreator, LoaderPlugin, get_representation_path, AVALON_CONTAINER_ID, Anatomy, + CreatorError, + LegacyCreator, + Creator as NewCreator, + CreatedInstance ) +from openpype.lib import BoolDef +from .lib import imprint, read, lsattr + from openpype.settings import get_project_settings from .pipeline import containerise from . import lib +CREATOR_INSTANCE_ATTRS = { + "id", "asset", "subset", "task", "variant", "family", "instance_id", + "creator_attributes", "publish_attributes", "active" +} + + +def _get_attr(node, attr, default=None): + """Helper to get attribute which allows attribute to not exist.""" + if not cmds.attributeQuery(attr, node=node, exists=True): + return default + return cmds.getAttr("{}.{}".format(node, attr)) + + def get_reference_node(members, log=None): """Get the reference node from the container members Args: @@ -98,6 +123,154 @@ def process(self): return instance +#@six.add_metaclass(ABCMeta) +class MayaCreator(NewCreator): + + def create(self, subset_name, instance_data, pre_create_data): + + members = list() + if pre_create_data.get("use_selection"): + members = cmds.ls(selection=True) + + with lib.undo_chunk(): + instance_node = cmds.sets(members, name=subset_name) + instance_data["instance_node"] = instance_node + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self) + self._add_instance_to_context(instance) + + self.imprint_instance_node(instance_node, + data=instance.data_to_store()) + return instance + + def collect_instances(self): + self.cache_subsets(self.collection_shared_data) + cached_subsets = 
self.collection_shared_data["maya_cached_subsets"] + for node in cached_subsets.get(self.identifier, []): + node_data = self.read_instance_node(node) + + # Explicitly re-parse the node name + node_data["instance_node"] = node + + created_instance = CreatedInstance.from_existing(node_data, self) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + data = created_inst.data_to_store() + node = data.get("instance_node") + + self.imprint_instance_node(node, data) + + def imprint_instance_node(self, node, data): + + # We never store the instance_node as value on the node since + # it's the node name itself + data.pop("instance_node", None) + + # We store creator attributes at the root level and assume they + # will not clash in names with `subset`, `task`, etc. and other + # default names. This is just so these attributes in many cases + # are still editable in the maya UI by artists. + data.update(data.pop("creator_attributes", {})) + + # We know the "publish_attributes" will be complex data of + # settings per plugins, we'll store this as a flattened json structure + publish_attributes = json.dumps(data.get("publish_attributes", {})) + data.pop("publish_attributes", None) # pop to move to end of dict + data["publish_attributes"] = publish_attributes + + # Kill any existing attributes just we can imprint cleanly again + for attr in data.keys(): + if cmds.attributeQuery(attr, node=node, exists=True): + cmds.deleteAttr("{}.{}".format(node, attr)) + + return imprint(node, data) + + def read_instance_node(self, node): + node_data = read(node) + + # Move the relevant attributes into "creator_attributes" that + # we flattened originally + node_data["creator_attributes"] = {} + for key, value in node_data.items(): + if key not in CREATOR_INSTANCE_ATTRS: + node_data["creator_attributes"][key] = value + + publish_attributes = node_data.get("publish_attributes") + if publish_attributes: + 
node_data["publish_attributes"] = json.loads(publish_attributes) + + return node_data + + def remove_instances(self, instances): + """Remove specified instance from the scene. + + This is only removing `id` parameter so instance is no longer + instance, because it might contain valuable data for artist. + + """ + for instance in instances: + node = instance.data.get("instance_node") + if node: + cmds.delete(node) + + self._remove_instance_from_context(instance) + + def get_pre_create_attr_defs(self): + return [ + BoolDef("use_selection", label="Use selection") + ] + + @staticmethod + def cache_subsets(shared_data): + """Cache instances for Creators to shared data. + + Create `maya_cached_subsets` key when needed in shared data and + fill it with all collected instances from the scene under its + respective creator identifiers. + + If legacy instances are detected in the scene, create + `maya_cached_legacy_subsets` there and fill it with + all legacy subsets under family as a key. + + Args: + Dict[str, Any]: Shared data. + + Return: + Dict[str, Any]: Shared data dictionary. 
+ + """ + if shared_data.get("maya_cached_subsets") is None: + cache = dict() + cache_legacy = dict() + + for node in cmds.ls(type='objectSet'): + + if _get_attr(node, attr="id") != "pyblish.avalon.instance": + continue + + creator_id = _get_attr(node, attr="creator_identifier") + if creator_id is not None: + # creator instance + cache.setdefault(creator_id, []).append(node) + else: + # legacy instance + family = _get_attr(node, attr="family") + if family is None: + # must be a broken instance + continue + + cache_legacy.setdefault(family, []).append(node) + + shared_data["maya_cached_subsets"] = cache + shared_data["maya_cached_legacy_subsets"] = cache_legacy + return shared_data + + class Loader(LoaderPlugin): hosts = ["maya"] From 82631a15c6c48cd211f16be17c28491e2a4bd2ae Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:03:40 +0100 Subject: [PATCH 003/175] Maya: Refactor Create Pointcache to new publish --- .../maya/plugins/create/create_pointcache.py | 71 +++++++++++++------ 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index cdec140ea8c..9883dec2665 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -2,38 +2,65 @@ lib, plugin ) +from openpype.lib import ( + BoolDef, + TextDef +) -class CreatePointCache(plugin.Creator): +class CreatePointCache(plugin.MayaCreator): """Alembic pointcache for animated data""" - name = "pointcache" - label = "Point Cache" + identifier = "io.openpype.creators.maya.pointcache" + label = "Pointcache" family = "pointcache" icon = "gears" - write_color_sets = False - write_face_sets = False - - def __init__(self, *args, **kwargs): - super(CreatePointCache, self).__init__(*args, **kwargs) - # Add animation data - self.data.update(lib.collect_animation_data()) + def get_instance_attr_defs(self): - # Vertex 
colors with the geometry. - self.data["writeColorSets"] = self.write_color_sets - # Vertex colors with the geometry. - self.data["writeFaceSets"] = self.write_face_sets - self.data["renderableOnly"] = False # Only renderable visible shapes - self.data["visibleOnly"] = False # only nodes that are visible - self.data["includeParentHierarchy"] = False # Include parent groups - self.data["worldSpace"] = True # Default to exporting world-space - self.data["refresh"] = False # Default to suspend refresh. + defs = lib.collect_animation_defs() - # Add options for custom attributes - self.data["attr"] = "" - self.data["attrPrefix"] = "" + defs.extend([ + BoolDef("writeColorSets", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=False), + BoolDef("writeFaceSets", + label="Write face sets", + tooltip="Write face sets with the geometry", + default=False), + BoolDef("renderableOnly", + label="Renderable Only", + tooltip="Only export renderable visible shapes", + default=False), + BoolDef("visibleOnly", + label="Visible Only", + tooltip="Only export dag objects visible during " + "frame range", + default=False), + BoolDef("includeParentHierarchy", + label="Include Parent Hierarchy", + default=False), + BoolDef("worldSpace", + label="World-Space Export", + default=True), + BoolDef("refresh", + label="Refresh viewport during export", + default=False), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1, prefix2") + ]) + # TODO: Implement these on a Deadline plug-in instead? + """ # Default to not send to farm. 
self.data["farm"] = False self.data["priority"] = 50 + """ + + return defs From 81ffcc80771d9aae90f0e974e5c0b14abd8403c1 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:05:18 +0100 Subject: [PATCH 004/175] Add tooltip --- openpype/hosts/maya/plugins/create/create_pointcache.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index 9883dec2665..9713ff0aec0 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -40,6 +40,8 @@ def get_instance_attr_defs(self): default=False), BoolDef("includeParentHierarchy", label="Include Parent Hierarchy", + tooltip="Whether to include parent hierarchy of nodes in " + "the publish instance", default=False), BoolDef("worldSpace", label="World-Space Export", From b448c46b0cc54d5efcdc3b7e0b19e6ce657ea279 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:07:59 +0100 Subject: [PATCH 005/175] Maya: Refactor Create Model to new publish --- .../hosts/maya/plugins/create/create_model.py | 43 +++++++++++++------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_model.py b/openpype/hosts/maya/plugins/create/create_model.py index 520e962f74f..30f1a822814 100644 --- a/openpype/hosts/maya/plugins/create/create_model.py +++ b/openpype/hosts/maya/plugins/create/create_model.py @@ -1,26 +1,43 @@ from openpype.hosts.maya.api import plugin +from openpype.lib import ( + BoolDef, + TextDef +) -class CreateModel(plugin.Creator): +class CreateModel(plugin.MayaCreator): """Polygonal static geometry""" - name = "modelMain" + identifier = "io.openpype.creators.maya.model" label = "Model" family = "model" icon = "cube" defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"] + write_color_sets = False write_face_sets = False - def __init__(self, *args, **kwargs): - 
super(CreateModel, self).__init__(*args, **kwargs) - - # Vertex colors with the geometry - self.data["writeColorSets"] = self.write_color_sets - self.data["writeFaceSets"] = self.write_face_sets - # Include attributes by attribute name or prefix - self.data["attr"] = "" - self.data["attrPrefix"] = "" + def get_instance_attr_defs(self): - # Whether to include parent hierarchy of nodes in the instance - self.data["includeParentHierarchy"] = False + return [ + BoolDef("writeColorSets", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=self.write_color_sets), + BoolDef("writeFaceSets", + label="Write face sets", + tooltip="Write face sets with the geometry", + default=self.write_face_sets), + BoolDef("includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip="Whether to include parent hierarchy of nodes in " + "the publish instance", + default=False), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1, prefix2") + ] From 5be796cbb7585116c55a49ef5346d565f2aa19bd Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:08:54 +0100 Subject: [PATCH 006/175] Maya: Refactor Create Maya Scene to new publish --- openpype/hosts/maya/plugins/create/create_mayaascii.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/create_mayaascii.py b/openpype/hosts/maya/plugins/create/create_mayaascii.py index f54f2df812d..b61c97aebf1 100644 --- a/openpype/hosts/maya/plugins/create/create_mayaascii.py +++ b/openpype/hosts/maya/plugins/create/create_mayaascii.py @@ -1,9 +1,10 @@ from openpype.hosts.maya.api import plugin -class CreateMayaScene(plugin.Creator): +class CreateMayaScene(plugin.MayaCreator): """Raw Maya Scene file export""" + identifier = "io.openpype.creators.maya.mayascene" name = "mayaScene" label = "Maya Scene" family = "mayaScene" 
From ef4b5d119d954ff35b3a570285861cfa040535f2 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:09:13 +0100 Subject: [PATCH 007/175] Rename file to match plugin name --- .../plugins/create/{create_mayaascii.py => create_mayascene.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename openpype/hosts/maya/plugins/create/{create_mayaascii.py => create_mayascene.py} (100%) diff --git a/openpype/hosts/maya/plugins/create/create_mayaascii.py b/openpype/hosts/maya/plugins/create/create_mayascene.py similarity index 100% rename from openpype/hosts/maya/plugins/create/create_mayaascii.py rename to openpype/hosts/maya/plugins/create/create_mayascene.py From 783bd8622e47f66a3252bcbb78ab09eba114e3d3 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:19:54 +0100 Subject: [PATCH 008/175] Maya: Refactor Create Look to new publisher --- .../hosts/maya/plugins/create/create_look.py | 47 ++++++++++++++----- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_look.py b/openpype/hosts/maya/plugins/create/create_look.py index 44e439fe1f9..4d675cc6f5f 100644 --- a/openpype/hosts/maya/plugins/create/create_look.py +++ b/openpype/hosts/maya/plugins/create/create_look.py @@ -1,27 +1,48 @@ from openpype.hosts.maya.api import ( - lib, - plugin + plugin, + lib +) +from openpype.lib import ( + BoolDef, + TextDef ) -class CreateLook(plugin.Creator): +class CreateLook(plugin.MayaCreator): """Shader connections defining shape look""" + identifier = "io.openpype.creators.maya.look" name = "look" label = "Look" family = "look" icon = "paint-brush" - make_tx = True - def __init__(self, *args, **kwargs): - super(CreateLook, self).__init__(*args, **kwargs) + make_tx = True - self.data["renderlayer"] = lib.get_current_renderlayer() + def get_instance_attr_defs(self): - # Whether to automatically convert the textures to .tx upon publish. 
- self.data["maketx"] = self.make_tx + return [ + # TODO: This value should actually get set on create! + TextDef("renderLayer", + # TODO: Bug: Hidden attribute's label is still shown in UI? + hidden=True, + default=lib.get_current_renderlayer(), + label="Renderlayer", + tooltip="Renderlayer to extract the look from"), + BoolDef("maketx", + label="MakeTX", + tooltip="Whether to generate .tx files for your textures", + default=self.make_tx), + BoolDef("forceCopy", + label="Force Copy", + tooltip="Enable users to force a copy instead of hardlink." + "\nNote: On Windows copy is always forced due to " + "bugs in windows' implementation of hardlinks.", + default=False) + ] - # Enable users to force a copy. - # - on Windows is "forceCopy" always changed to `True` because of - # windows implementation of hardlinks - self.data["forceCopy"] = False + def get_pre_create_attr_defs(self): + # Show same attributes on create but include use selection + defs = super(CreateLook, self).get_pre_create_attr_defs() + defs.extend(self.get_instance_attr_defs()) + return defs From b67018a2a42f5cbf2f47a23391d8fc4a4fc8c8ec Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:20:34 +0100 Subject: [PATCH 009/175] Default use selection to True --- openpype/hosts/maya/api/plugin.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index efb4b945d93..e7e7101ce74 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -222,7 +222,9 @@ def remove_instances(self, instances): def get_pre_create_attr_defs(self): return [ - BoolDef("use_selection", label="Use selection") + BoolDef("use_selection", + label="Use selection", + default=True) ] @staticmethod From 8179ad43d8e4a122bb595d4c62c9d68e50e2e51f Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:22:10 +0100 Subject: [PATCH 010/175] Maya: Refactor Create Layout to new publisher --- 
.../hosts/maya/plugins/create/create_layout.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_layout.py b/openpype/hosts/maya/plugins/create/create_layout.py index 1768a3d49ef..b382e94be4d 100644 --- a/openpype/hosts/maya/plugins/create/create_layout.py +++ b/openpype/hosts/maya/plugins/create/create_layout.py @@ -1,16 +1,19 @@ from openpype.hosts.maya.api import plugin +from openpype.lib import BoolDef -class CreateLayout(plugin.Creator): +class CreateLayout(plugin.MayaCreator): """A grouped package of loaded content""" - name = "layoutMain" + identifier = "io.openpype.creators.maya.layout" label = "Layout" family = "layout" icon = "cubes" - def __init__(self, *args, **kwargs): - super(CreateLayout, self).__init__(*args, **kwargs) - # enable this when you want to - # publish group of loaded asset - self.data["groupLoadedAssets"] = False + def get_instance_attr_defs(self): + + return [ + BoolDef("groupLoadedAssets", + label="Group Loaded Assets", + default=False) + ] From c2319079aa5c2dc974b68ed179a42792b8713d61 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:28:54 +0100 Subject: [PATCH 011/175] Maya: Refactor Create Camera + Camera Rig to new publisher --- .../maya/plugins/create/create_camera.py | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_camera.py b/openpype/hosts/maya/plugins/create/create_camera.py index 8b2c8810368..ee4fbb0771e 100644 --- a/openpype/hosts/maya/plugins/create/create_camera.py +++ b/openpype/hosts/maya/plugins/create/create_camera.py @@ -2,33 +2,35 @@ lib, plugin ) +from openpype.lib import BoolDef -class CreateCamera(plugin.Creator): +class CreateCamera(plugin.MayaCreator): """Single baked camera""" - name = "cameraMain" + identifier = "io.openpype.creators.maya.camera" label = "Camera" family = "camera" icon = "video-camera" - def __init__(self, *args, 
**kwargs): - super(CreateCamera, self).__init__(*args, **kwargs) + def get_instance_attr_defs(self): - # get basic animation data : start / end / handles / steps - animation_data = lib.collect_animation_data() - for key, value in animation_data.items(): - self.data[key] = value + defs = lib.collect_animation_defs() - # Bake to world space by default, when this is False it will also - # include the parent hierarchy in the baked results - self.data['bakeToWorldSpace'] = True + defs.extend([ + BoolDef("bakeToWorldSpace", + label="Bake to World-Space", + tooltip="Bake to World-Space (for mayaScene export only!)", + default=True), + ]) + return defs -class CreateCameraRig(plugin.Creator): + +class CreateCameraRig(plugin.MayaCreator): """Complex hierarchy with camera.""" - name = "camerarigMain" + identifier = "io.openpype.creators.maya.camerarig" label = "Camera Rig" family = "camerarig" icon = "video-camera" From 3f87417b697b2aa8eb230d8f07e93bae3377f4e8 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:29:22 +0100 Subject: [PATCH 012/175] Maya: Refactor Create Assembly to new publisher --- openpype/hosts/maya/plugins/create/create_assembly.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_assembly.py b/openpype/hosts/maya/plugins/create/create_assembly.py index ff5e1d45c48..813fe4da04a 100644 --- a/openpype/hosts/maya/plugins/create/create_assembly.py +++ b/openpype/hosts/maya/plugins/create/create_assembly.py @@ -1,10 +1,10 @@ from openpype.hosts.maya.api import plugin -class CreateAssembly(plugin.Creator): +class CreateAssembly(plugin.MayaCreator): """A grouped package of loaded content""" - name = "assembly" + identifier = "io.openpype.creators.maya.assembly" label = "Assembly" family = "assembly" icon = "cubes" From 9b180bc8d6f879736221690c57431d2ab5772461 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:32:52 +0100 Subject: [PATCH 013/175] Remove name 
attribute --- openpype/hosts/maya/plugins/create/create_look.py | 1 - 1 file changed, 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/create_look.py b/openpype/hosts/maya/plugins/create/create_look.py index 4d675cc6f5f..0f960e5ff56 100644 --- a/openpype/hosts/maya/plugins/create/create_look.py +++ b/openpype/hosts/maya/plugins/create/create_look.py @@ -12,7 +12,6 @@ class CreateLook(plugin.MayaCreator): """Shader connections defining shape look""" identifier = "io.openpype.creators.maya.look" - name = "look" label = "Look" family = "look" icon = "paint-brush" From e57b7aaf1750c204d19b2bfd21800ca47032e4fd Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:38:32 +0100 Subject: [PATCH 014/175] Maya: Refactor Create Animation to new publisher --- .../maya/plugins/create/create_animation.py | 80 ++++++++++++------- 1 file changed, 53 insertions(+), 27 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py index e54c12315c7..c04180a55aa 100644 --- a/openpype/hosts/maya/plugins/create/create_animation.py +++ b/openpype/hosts/maya/plugins/create/create_animation.py @@ -2,48 +2,74 @@ lib, plugin ) +from openpype.lib import ( + BoolDef, + TextDef +) -class CreateAnimation(plugin.Creator): +class CreateAnimation(plugin.MayaCreator): """Animation output for character rigs""" - name = "animationDefault" + identifier = "io.openpype.creators.maya.animation" label = "Animation" family = "animation" icon = "male" + write_color_sets = False write_face_sets = False - def __init__(self, *args, **kwargs): - super(CreateAnimation, self).__init__(*args, **kwargs) - - # create an ordered dict with the existing data first - - # get basic animation data : start / end / handles / steps - for key, value in lib.collect_animation_data().items(): - self.data[key] = value - - # Write vertex colors with the geometry. 
- self.data["writeColorSets"] = self.write_color_sets - self.data["writeFaceSets"] = self.write_face_sets - - # Include only renderable visible shapes. - # Skips locators and empty transforms - self.data["renderableOnly"] = False + # TODO: Would be great if we could visually hide this from the creator + # by default but do allow to generate it through code. - # Include only nodes that are visible at least once during the - # frame range. - self.data["visibleOnly"] = False + def get_instance_attr_defs(self): - # Include the groups above the out_SET content - self.data["includeParentHierarchy"] = False # Include parent groups + defs = lib.collect_animation_defs() - # Default to exporting world-space - self.data["worldSpace"] = True + defs.extend([ + BoolDef("writeColorSets", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=self.write_color_sets), + BoolDef("writeFaceSets", + label="Write face sets", + tooltip="Write face sets with the geometry", + default=self.write_face_sets), + BoolDef("writeNormals", + label="Write normals", + tooltip="Write normals with the deforming geometry", + default=True), + BoolDef("renderableOnly", + label="Renderable Only", + tooltip="Only export renderable visible shapes", + default=False), + BoolDef("visibleOnly", + label="Visible Only", + tooltip="Only export dag objects visible during " + "frame range", + default=False), + BoolDef("includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip="Whether to include parent hierarchy of nodes in " + "the publish instance", + default=False), + BoolDef("worldSpace", + label="World-Space Export", + default=True), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1, prefix2") + ]) + # TODO: Implement these on a Deadline plug-in instead? + """ # Default to not send to farm. 
self.data["farm"] = False self.data["priority"] = 50 + """ - # Default to write normals. - self.data["writeNormals"] = True + return defs From fadf3a111a964409c722229d7d48fa08562280c1 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:46:22 +0100 Subject: [PATCH 015/175] Maya: Refactor Create Rig to new publisher --- .../hosts/maya/plugins/create/create_rig.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_rig.py b/openpype/hosts/maya/plugins/create/create_rig.py index 8032e5fbbd9..04104cb7cba 100644 --- a/openpype/hosts/maya/plugins/create/create_rig.py +++ b/openpype/hosts/maya/plugins/create/create_rig.py @@ -1,25 +1,25 @@ from maya import cmds -from openpype.hosts.maya.api import ( - lib, - plugin -) +from openpype.hosts.maya.api import plugin -class CreateRig(plugin.Creator): +class CreateRig(plugin.MayaCreator): """Artist-friendly rig with controls to direct motion""" - name = "rigDefault" + identifier = "io.openpype.creators.maya.rig" label = "Rig" family = "rig" icon = "wheelchair" - def process(self): + def create(self, subset_name, instance_data, pre_create_data): - with lib.undo_chunk(): - instance = super(CreateRig, self).process() + instance = super(CreateRig, self).create(subset_name, + instance_data, + pre_create_data) - self.log.info("Creating Rig instance set up ...") - controls = cmds.sets(name="controls_SET", empty=True) - pointcache = cmds.sets(name="out_SET", empty=True) - cmds.sets([controls, pointcache], forceElement=instance) + instance_node = instance.get("instance_node") + + self.log.info("Creating Rig instance set up ...") + controls = cmds.sets(name="controls_SET", empty=True) + pointcache = cmds.sets(name="out_SET", empty=True) + cmds.sets([controls, pointcache], forceElement=instance_node) From ccb4e956aef1adffeb42b5f4714a1192a049d975 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:48:43 +0100 Subject: [PATCH 
016/175] Maya: Refactor Create Setdress to new publisher --- .../hosts/maya/plugins/create/create_setdress.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_setdress.py b/openpype/hosts/maya/plugins/create/create_setdress.py index 4246183fdbf..594a3dc46de 100644 --- a/openpype/hosts/maya/plugins/create/create_setdress.py +++ b/openpype/hosts/maya/plugins/create/create_setdress.py @@ -1,16 +1,19 @@ from openpype.hosts.maya.api import plugin +from openpype.lib import BoolDef -class CreateSetDress(plugin.Creator): +class CreateSetDress(plugin.MayaCreator): """A grouped package of loaded content""" - name = "setdressMain" + identifier = "io.openpype.creators.maya.setdress" label = "Set Dress" family = "setdress" icon = "cubes" defaults = ["Main", "Anim"] - def __init__(self, *args, **kwargs): - super(CreateSetDress, self).__init__(*args, **kwargs) - - self.data["exactSetMembersOnly"] = True + def get_instance_attr_defs(self): + return [ + BoolDef("exactSetMembersOnly", + label="Exact Set Members Only", + default=True) + ] From 406884d31a17507f4f7211f177433b04cec57c13 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 00:52:46 +0100 Subject: [PATCH 017/175] Maya: Refactor Create Xgen to new publisher --- openpype/hosts/maya/plugins/create/create_xgen.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_xgen.py b/openpype/hosts/maya/plugins/create/create_xgen.py index 8672c06a1e0..c1cde9959af 100644 --- a/openpype/hosts/maya/plugins/create/create_xgen.py +++ b/openpype/hosts/maya/plugins/create/create_xgen.py @@ -1,10 +1,10 @@ from openpype.hosts.maya.api import plugin -class CreateXgen(plugin.Creator): +class CreateXgen(plugin.MayaCreator): """Xgen interactive export""" - name = "xgen" + identifier = "io.openpype.creators.maya.xgen" label = "Xgen Interactive" family = "xgen" icon = "pagelines" From 
c9aa319396f822fa4a5755162bb476fa1505926e Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 01:02:12 +0100 Subject: [PATCH 018/175] Maya: Refactor Create Yeti Rig + Yeti Cache to new publisher --- .../maya/plugins/create/create_yeti_cache.py | 34 ++++++++++++------- .../maya/plugins/create/create_yeti_rig.py | 12 ++++--- 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_yeti_cache.py b/openpype/hosts/maya/plugins/create/create_yeti_cache.py index e8c3203f21f..395aa62325a 100644 --- a/openpype/hosts/maya/plugins/create/create_yeti_cache.py +++ b/openpype/hosts/maya/plugins/create/create_yeti_cache.py @@ -1,15 +1,14 @@ -from collections import OrderedDict - from openpype.hosts.maya.api import ( lib, plugin ) +from openpype.lib import NumberDef -class CreateYetiCache(plugin.Creator): +class CreateYetiCache(plugin.MayaCreator): """Output for procedural plugin nodes of Yeti """ - name = "yetiDefault" + identifier = "io.openpype.creators.maya.yeticache" label = "Yeti Cache" family = "yeticache" icon = "pagelines" @@ -17,14 +16,23 @@ class CreateYetiCache(plugin.Creator): def __init__(self, *args, **kwargs): super(CreateYetiCache, self).__init__(*args, **kwargs) - self.data["preroll"] = 0 + defs = [ + NumberDef("preroll", + label="Preroll", + minimum=0, + default=0, + decimals=0) + ] # Add animation data without step and handles - anim_data = lib.collect_animation_data() - anim_data.pop("step") - anim_data.pop("handleStart") - anim_data.pop("handleEnd") - self.data.update(anim_data) - - # Add samples - self.data["samples"] = 3 + defs.extend(lib.collect_animation_defs()) + remove = {"step", "handleStart", "handleEnd"} + defs = [attr_def for attr_def in defs if attr_def.key not in remove] + + # Add samples after frame range + defs.append( + NumberDef("samples", + label="Samples", + default=3, + decimals=0) + ) diff --git a/openpype/hosts/maya/plugins/create/create_yeti_rig.py 
b/openpype/hosts/maya/plugins/create/create_yeti_rig.py index 7abe2988cd5..445bcf46d87 100644 --- a/openpype/hosts/maya/plugins/create/create_yeti_rig.py +++ b/openpype/hosts/maya/plugins/create/create_yeti_rig.py @@ -6,18 +6,22 @@ ) -class CreateYetiRig(plugin.Creator): +class CreateYetiRig(plugin.MayaCreator): """Output for procedural plugin nodes ( Yeti / XGen / etc)""" + identifier = "io.openpype.creators.maya.yetirig" label = "Yeti Rig" family = "yetiRig" icon = "usb" - def process(self): + def create(self, subset_name, instance_data, pre_create_data): with lib.undo_chunk(): - instance = super(CreateYetiRig, self).process() + instance = super(CreateYetiRig, self).create(subset_name, + instance_data, + pre_create_data) + instance_node = instance.get("instance_node") self.log.info("Creating Rig instance set up ...") input_meshes = cmds.sets(name="input_SET", empty=True) - cmds.sets(input_meshes, forceElement=instance) + cmds.sets(input_meshes, forceElement=instance_node) From ae2a1b22c4c2e969d6fb3392f5a0e643d267073d Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 01:05:10 +0100 Subject: [PATCH 019/175] Fix creator identifier --- openpype/hosts/maya/api/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index e7e7101ce74..6f61d10d3fe 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -31,7 +31,7 @@ CREATOR_INSTANCE_ATTRS = { "id", "asset", "subset", "task", "variant", "family", "instance_id", - "creator_attributes", "publish_attributes", "active" + "creator_identifier", "creator_attributes", "publish_attributes", "active" } From fa6f813ce140b4fba965d6dc6cc636fb8703966f Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 01:06:01 +0100 Subject: [PATCH 020/175] Fix accidental comment --- openpype/hosts/maya/api/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 6f61d10d3fe..7fed7f04ab7 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -123,7 +123,7 @@ def process(self): return instance -#@six.add_metaclass(ABCMeta) +@six.add_metaclass(ABCMeta) class MayaCreator(NewCreator): def create(self, subset_name, instance_data, pre_create_data): From 4ed04c5baf94403b1f37c1eeaa7a38c6716ef29a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 01:06:10 +0100 Subject: [PATCH 021/175] Remove unused imports --- openpype/hosts/maya/api/plugin.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 7fed7f04ab7..cfe4000746d 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -1,5 +1,4 @@ import os -import sys import json from abc import ( ABCMeta @@ -16,7 +15,6 @@ get_representation_path, AVALON_CONTAINER_ID, Anatomy, - CreatorError, LegacyCreator, Creator as NewCreator, CreatedInstance From 448d520043a8bd2b77a4c481bb86839dd21bd451 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 01:16:47 +0100 Subject: [PATCH 022/175] Ignore new style instances --- openpype/hosts/maya/plugins/publish/collect_instances.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/openpype/hosts/maya/plugins/publish/collect_instances.py b/openpype/hosts/maya/plugins/publish/collect_instances.py index 6c6819f0a24..167ac1cbca0 100644 --- a/openpype/hosts/maya/plugins/publish/collect_instances.py +++ b/openpype/hosts/maya/plugins/publish/collect_instances.py @@ -77,6 +77,12 @@ def process(self, context): context.data['objectsets'] = objectset for objset in objectset: + if cmds.attributeQuery("creator_identifier", + node=objset, + exists=True): + # Ignore new style instances + continue + if not cmds.attributeQuery("id", node=objset, exists=True): continue From b114b064f3f459f14d1912f2b786c35403f37f39 Mon 
Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 01:17:00 +0100 Subject: [PATCH 023/175] Collect relevant data for the new style instances --- .../plugins/publish/collect_new_instances.py | 123 ++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 openpype/hosts/maya/plugins/publish/collect_new_instances.py diff --git a/openpype/hosts/maya/plugins/publish/collect_new_instances.py b/openpype/hosts/maya/plugins/publish/collect_new_instances.py new file mode 100644 index 00000000000..f06f92678e1 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_new_instances.py @@ -0,0 +1,123 @@ +from maya import cmds +import maya.api.OpenMaya as om + +import pyblish.api + + +def get_all_children(nodes): + """Return all children of `nodes` including each instanced child. + Using maya.cmds.listRelatives(allDescendents=True) includes only the first + instance. As such, this function acts as an optimal replacement with a + focus on a fast query. + + """ + + sel = om.MSelectionList() + traversed = set() + iterator = om.MItDag(om.MItDag.kDepthFirst) + for node in nodes: + + if node in traversed: + # Ignore if already processed as a child + # before + continue + + sel.clear() + sel.add(node) + dag = sel.getDagPath(0) + + iterator.reset(dag) + # ignore self + iterator.next() # noqa: B305 + while not iterator.isDone(): + + path = iterator.fullPathName() + + if path in traversed: + iterator.prune() + iterator.next() # noqa: B305 + continue + + traversed.add(path) + iterator.next() # noqa: B305 + + return list(traversed) + + +class CollectNewInstances(pyblish.api.InstancePlugin): + """Gather members for instances and pre-defined attribute + + This collector takes into account assets that are associated with + an objectSet and marked with a unique identifier; + + Identifier: + id (str): "pyblish.avalon.instance" + + Limitations: + - Does not take into account nodes connected to those + within an objectSet. 
Extractors are assumed to export + with history preserved, but this limits what they will + be able to achieve and the amount of data available + to validators. An additional collector could also + append this input data into the instance, as we do + for `pype.rig` with collect_history. + + """ + + label = "Collect New Instance Data" + order = pyblish.api.CollectorOrder + hosts = ["maya"] + + def process(self, instance): + + objset = instance.data.get("instance_node") + if not objset: + self.log.debug("Instance has no `instance_node` data") + + # TODO: We might not want to do this in the future + # Merge creator attributes into instance.data just so backwards compatible + # code still runs as expected + creator_attributes = instance.data.get("creator_attributes", {}) + if creator_attributes: + instance.data.update(creator_attributes) + + members = cmds.sets(objset, query=True) + if members is None: + self.log.warning("Skipped empty instance: \"%s\" " % objset) + return + + # Collect members + members = cmds.ls(members, long=True) or [] + + dag_members = cmds.ls(members, type="dagNode", long=True) + children = get_all_children(dag_members) + children = cmds.ls(children, noIntermediate=True, long=True) + parents = [] + if creator_attributes.get("includeParentHierarchy", True): + # If `includeParentHierarchy` then include the parents + # so they will also be picked up in the instance by validators + parents = self.get_all_parents(members) + members_hierarchy = list(set(members + children + parents)) + + instance[:] = members_hierarchy + + # Store the exact members of the object set + instance.data["setMembers"] = members + + def get_all_parents(self, nodes): + """Get all parents by using string operations (optimization) + + Args: + nodes (list): the nodes which are found in the objectSet + + Returns: + list + """ + + parents = [] + for node in nodes: + splitted = node.split("|") + items = ["|".join(splitted[0:i]) for i in range(2, len(splitted))] + parents.extend(items)
+ + return list(set(parents)) From e9d7f6607b1c2e6a778127066bea17afe685ddc0 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 01:25:49 +0100 Subject: [PATCH 024/175] Generate frameStartHandle and frameEndHandle accordingly --- .../maya/plugins/publish/collect_new_instances.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/openpype/hosts/maya/plugins/publish/collect_new_instances.py b/openpype/hosts/maya/plugins/publish/collect_new_instances.py index f06f92678e1..6166a4878d6 100644 --- a/openpype/hosts/maya/plugins/publish/collect_new_instances.py +++ b/openpype/hosts/maya/plugins/publish/collect_new_instances.py @@ -104,6 +104,17 @@ def process(self, instance): # Store the exact members of the object set instance.data["setMembers"] = members + # TODO: This might make more sense as a separate collector + # Collect frameStartHandle and frameEndHandle if frames present + if "frameStart" in instance.data: + handle_start = instance.data.get("handleStart", 0) + frame_start_handle = instance.data["frameStart"] - handle_start + instance.data["frameStartHandle"] = frame_start_handle + if "frameEnd" in instance.data: + handle_end = instance.data.get("handleEnd", 0) + frame_end_handle = instance.data["frameEnd"] + handle_end + instance.data["frameEndHandle"] = frame_end_handle + def get_all_parents(self, nodes): """Get all parents by using string operations (optimization) From 082b6736eb09a64eb65880fae7e8d5f18c6367c4 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 02:02:59 +0100 Subject: [PATCH 025/175] Remove unused import --- openpype/hosts/maya/api/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index cfe4000746d..e943d279202 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -20,7 +20,7 @@ CreatedInstance ) from openpype.lib import BoolDef -from .lib import imprint, read, lsattr +from 
.lib import imprint, read from openpype.settings import get_project_settings from .pipeline import containerise From 2d574996c38e7990eb66c1dd4a98d8be30f5703c Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 02:03:21 +0100 Subject: [PATCH 026/175] Fix indentation --- openpype/hosts/maya/api/lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 9ebaef7cb8d..703e7564fee 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -360,7 +360,7 @@ def collect_animation_defs(fps=False): if fps: current_fps = mel.eval('currentTimeUnitToFPS()') fps_def = NumberDef( - "fps", label="FPS", default=current_fps, decimals=5 + "fps", label="FPS", default=current_fps, decimals=5 ) defs.append(fps_def) From 011c1f748cc0fe2b74d844c34051edb1079ec3f9 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 10:22:48 +0100 Subject: [PATCH 027/175] Maya: Refactor Create Ass to new publisher --- .../hosts/maya/plugins/create/create_ass.py | 100 ++++++++++++------ 1 file changed, 68 insertions(+), 32 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_ass.py b/openpype/hosts/maya/plugins/create/create_ass.py index 935a068ca5a..dd1b98ea7bd 100644 --- a/openpype/hosts/maya/plugins/create/create_ass.py +++ b/openpype/hosts/maya/plugins/create/create_ass.py @@ -2,17 +2,20 @@ lib, plugin ) - -from maya import cmds +from openpype.lib import ( + NumberDef, + BoolDef +) -class CreateAss(plugin.Creator): +class CreateAss(plugin.MayaCreator): """Arnold Scene Source""" - name = "ass" + identifier = "io.openpype.creators.maya.ass" label = "Arnold Scene Source" family = "ass" icon = "cube" + expandProcedurals = False motionBlur = True motionBlurKeys = 2 @@ -28,39 +31,72 @@ class CreateAss(plugin.Creator): maskColor_manager = False maskOperator = False - def __init__(self, *args, **kwargs): - super(CreateAss, self).__init__(*args, **kwargs) + def 
get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() - # Add animation data - self.data.update(lib.collect_animation_data()) + defs.extend([ + BoolDef("expandProcedural", + label="Expand Procedural", + default=self.expandProcedurals), + BoolDef("motionBlur", + label="Motion Blur", + default=self.motionBlur), + NumberDef("motionBlurKeys", + label="Motion Blur Keys", + decimals=0, + default=self.motionBlurKeys), + NumberDef("motionBlurLength", + label="Motion Blur Length", + decimals=3, + default=self.motionBlurLength), - self.data["expandProcedurals"] = self.expandProcedurals - self.data["motionBlur"] = self.motionBlur - self.data["motionBlurKeys"] = self.motionBlurKeys - self.data["motionBlurLength"] = self.motionBlurLength + # Masks + BoolDef("maskOptions", + label="Mask Options", + default=self.maskOptions), + BoolDef("maskCamera", + label="Mask Camera", + default=self.maskCamera), + BoolDef("maskLight", + label="Mask Light", + default=self.maskLight), + BoolDef("maskShape", + label="Mask Shape", + default=self.maskShape), + BoolDef("maskShader", + label="Mask Shader", + default=self.maskShader), + BoolDef("maskOverride", + label="Mask Override", + default=self.maskOverride), + BoolDef("maskDriver", + label="Mask Driver", + default=self.maskDriver), + BoolDef("maskFilter", + label="Mask Filter", + default=self.maskFilter), + BoolDef("maskColor_manager", + label="Mask Color Manager", + default=self.maskColor_manager), + BoolDef("maskOperator", + label="Mask Operator", + default=self.maskOperator), + ]) - # Masks - self.data["maskOptions"] = self.maskOptions - self.data["maskCamera"] = self.maskCamera - self.data["maskLight"] = self.maskLight - self.data["maskShape"] = self.maskShape - self.data["maskShader"] = self.maskShader - self.data["maskOverride"] = self.maskOverride - self.data["maskDriver"] = self.maskDriver - self.data["maskFilter"] = self.maskFilter - self.data["maskColor_manager"] = self.maskColor_manager - self.data["maskOperator"] = 
self.maskOperator + return defs - def process(self): - instance = super(CreateAss, self).process() + def create(self, subset_name, instance_data, pre_create_data): - nodes = [] + from maya import cmds - if (self.options or {}).get("useSelection"): - nodes = cmds.ls(selection=True) + instance = super(CreateAss, self).create(subset_name, + instance_data, + pre_create_data) - cmds.sets(nodes, rm=instance) + instance_node = instance.get("instance_node") - assContent = cmds.sets(name="content_SET") - assProxy = cmds.sets(name="proxy_SET", empty=True) - cmds.sets([assContent, assProxy], forceElement=instance) + self.log.info("Creating ass instance set up ...") + content = cmds.sets(name="content_SET", empty=True) + proxy = cmds.sets(name="proxy_SET", empty=True) + cmds.sets([content, proxy], forceElement=instance_node) From 5225578f5ef80edad1ce342aa949a569e52f3065 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 10:35:26 +0100 Subject: [PATCH 028/175] Maya: Refactor Create Render Setup to new publisher --- .../maya/plugins/create/create_rendersetup.py | 61 ++++++------------- 1 file changed, 19 insertions(+), 42 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_rendersetup.py b/openpype/hosts/maya/plugins/create/create_rendersetup.py index 494f90d87b2..e8a93ac388c 100644 --- a/openpype/hosts/maya/plugins/create/create_rendersetup.py +++ b/openpype/hosts/maya/plugins/create/create_rendersetup.py @@ -1,55 +1,32 @@ -from openpype.hosts.maya.api import ( - lib, - plugin -) -from maya import cmds +from openpype.hosts.maya.api import plugin +from openpype.pipeline import CreatorError -class CreateRenderSetup(plugin.Creator): +class CreateRenderSetup(plugin.MayaCreator): """Create rendersetup template json data""" + identifier = "io.openpype.creators.maya.rendersetup" name = "rendersetup" label = "Render Setup Preset" family = "rendersetup" icon = "tablet" - def __init__(self, *args, **kwargs): - super(CreateRenderSetup, self).__init__(*args, 
**kwargs) + def get_pre_create_attr_defs(self): + # Do not show the "use_selection" setting from parent class + return [] - # here we can pre-create renderSetup layers, possibly utlizing - # settings for it. + def create(self, subset_name, instance_data, pre_create_data): - # _____ - # / __\__ - # | / __\__ - # | | / \ - # | | | | - # \__| | | - # \__| | - # \_____/ + existing_instance = None + for instance in self.create_context.instances: + if instance.family == self.family: + existing_instance = instance + break - # from pype.api import get_project_settings - # import maya.app.renderSetup.model.renderSetup as renderSetup - # settings = get_project_settings(os.environ['AVALON_PROJECT']) - # layer = settings['maya']['create']['renderSetup']["layer"] + if existing_instance: + raise CreatorError("A RenderSetup instance already exists - only " + "one can be configured.") - # rs = renderSetup.instance() - # rs.createRenderLayer(layer) - - self.options = {"useSelection": False} # Force no content - - def process(self): - exists = cmds.ls(self.name) - assert len(exists) <= 1, ( - "More than one renderglobal exists, this is a bug" - ) - - if exists: - return cmds.warning("%s already exists." % exists[0]) - - with lib.undo_chunk(): - instance = super(CreateRenderSetup, self).process() - - self.data["renderSetup"] = "42" - null = cmds.sets(name="null_SET", empty=True) - cmds.sets([null], forceElement=instance) + super(CreateRenderSetup, self).create(subset_name, + instance_data, + pre_create_data) From 946ad153d2b80331ebf4609cff3197e661e0392d Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 10:43:18 +0100 Subject: [PATCH 029/175] Maya: Refactor Create Redshift Proxy to new publisher + Don't use a separate proxyFrameStart, proxyEndStart - just use the frame data that's there. 
--- .../plugins/create/create_redshift_proxy.py | 20 ++++++++++--------- .../plugins/publish/extract_redshift_proxy.py | 20 +++++++++++-------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_redshift_proxy.py b/openpype/hosts/maya/plugins/create/create_redshift_proxy.py index 419a8d99d44..2490738e8fb 100644 --- a/openpype/hosts/maya/plugins/create/create_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/create/create_redshift_proxy.py @@ -2,22 +2,24 @@ """Creator of Redshift proxy subset types.""" from openpype.hosts.maya.api import plugin, lib +from openpype.lib import BoolDef -class CreateRedshiftProxy(plugin.Creator): +class CreateRedshiftProxy(plugin.MayaCreator): """Create instance of Redshift Proxy subset.""" - name = "redshiftproxy" + identifier = "io.openpype.creators.maya.redshiftproxy" label = "Redshift Proxy" family = "redshiftproxy" icon = "gears" - def __init__(self, *args, **kwargs): - super(CreateRedshiftProxy, self).__init__(*args, **kwargs) + def get_instance_attr_defs(self): - animation_data = lib.collect_animation_data() + defs = [ + BoolDef("animation", + label="Export animation", + default=False) + ] - self.data["animation"] = False - self.data["proxyFrameStart"] = animation_data["frameStart"] - self.data["proxyFrameEnd"] = animation_data["frameEnd"] - self.data["proxyFrameStep"] = animation_data["step"] + defs.extend(lib.collect_animation_defs()) + return defs diff --git a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py index 43772756355..834b335fc57 100644 --- a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py @@ -29,15 +29,21 @@ def process(self, instance): if not anim_on: # Remove animation information because it is not required for # non-animated subsets - instance.data.pop("proxyFrameStart", None) - 
instance.data.pop("proxyFrameEnd", None) + keys = ["frameStart", + "frameEnd", + "handleStart", + "handleEnd", + "frameStartHandle", + "frameEndHandle"] + for key in keys: + instance.data.pop(key, None) else: - start_frame = instance.data["proxyFrameStart"] - end_frame = instance.data["proxyFrameEnd"] + start_frame = instance.data["frameStartHandle"] + end_frame = instance.data["frameEndHandle"] rs_options = "{}startFrame={};endFrame={};frameStep={};".format( rs_options, start_frame, - end_frame, instance.data["proxyFrameStep"] + end_frame, instance.data["step"] ) root, ext = os.path.splitext(file_path) @@ -48,7 +54,7 @@ def process(self, instance): for frame in range( int(start_frame), int(end_frame) + 1, - int(instance.data["proxyFrameStep"]), + int(instance.data["step"]), )] # vertex_colors = instance.data.get("vertexColors", False) @@ -74,8 +80,6 @@ def process(self, instance): 'files': repr_files, "stagingDir": staging_dir, } - if anim_on: - representation["frameStart"] = instance.data["proxyFrameStart"] instance.data["representations"].append(representation) self.log.info("Extracted instance '%s' to: %s" From aa22a7cb6b437d3007deba19139619c6ea38e02a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 10:55:48 +0100 Subject: [PATCH 030/175] Maya: Refactor Create Review to new publisher --- .../maya/plugins/create/create_review.py | 89 +++++++++++-------- 1 file changed, 54 insertions(+), 35 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index ba51ffa0093..9b10a07af1a 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -1,45 +1,64 @@ -from collections import OrderedDict from openpype.hosts.maya.api import ( lib, plugin ) +from openpype.lib import ( + BoolDef, + NumberDef, + EnumDef +) + +TRANSPARENCIES = [ + "preset", + "simple", + "object sorting", + "weighted average", + "depth peeling", 
+ "alpha cut" +] -class CreateReview(plugin.Creator): - """Single baked camera""" +class CreateReview(plugin.MayaCreator): + """Playblast reviewable""" - name = "reviewDefault" + identifier = "io.openpype.creators.maya.review" label = "Review" family = "review" icon = "video-camera" - keepImages = False - isolate = False - imagePlane = True - Width = 0 - Height = 0 - transparency = [ - "preset", - "simple", - "object sorting", - "weighted average", - "depth peeling", - "alpha cut" - ] - - def __init__(self, *args, **kwargs): - super(CreateReview, self).__init__(*args, **kwargs) - - # get basic animation data : start / end / handles / steps - data = OrderedDict(**self.data) - animation_data = lib.collect_animation_data(fps=True) - for key, value in animation_data.items(): - data[key] = value - - data["review_width"] = self.Width - data["review_height"] = self.Height - data["isolate"] = self.isolate - data["keepImages"] = self.keepImages - data["imagePlane"] = self.imagePlane - data["transparency"] = self.transparency - - self.data = data + + def get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() + + defs.extend([ + NumberDef("review_width", + label="Review width", + tooltip="A value of zero will use the asset resolution.", + decimals=0, + minimum=0, + default=0), + NumberDef("review_height", + label="Review height", + tooltip="A value of zero will use the asset resolution.", + decimals=0, + minimum=0, + default=0), + BoolDef("keepImages", + label="Keep Images", + tooltip="Whether to also publish along the image sequence " + "next to the video reviewable.", + default=False), + BoolDef("isolate", + label="Isolate render members of instance", + tooltip="When enabled only the members of the instance " + "will be included in the playblast review.", + default=False), + BoolDef("imagePlane", + label="Show Image Plane", + default=True), + EnumDef("transparency", + label="Transparency", + items={key: key for key in TRANSPARENCIES}) + ]) + + return defs 
From 23f0cb21ca7ab5527df56b7a0b1c560a5372885f Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 11:38:07 +0100 Subject: [PATCH 031/175] Make Validate Frame Range optional in new publisher --- .../hosts/maya/plugins/publish/validate_frame_range.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_frame_range.py b/openpype/hosts/maya/plugins/publish/validate_frame_range.py index d86925184ed..1b32d795c50 100644 --- a/openpype/hosts/maya/plugins/publish/validate_frame_range.py +++ b/openpype/hosts/maya/plugins/publish/validate_frame_range.py @@ -4,6 +4,7 @@ from openpype.pipeline.publish import ( RepairAction, ValidateContentsOrder, + OptionalPyblishPluginMixin ) from openpype.hosts.maya.api.lib_rendersetup import ( get_attr_overrides, @@ -12,7 +13,8 @@ from maya.app.renderSetup.model.override import AbsOverride -class ValidateFrameRange(pyblish.api.InstancePlugin): +class ValidateFrameRange(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validates the frame ranges. 
This is an optional validator checking if the frame range on instance @@ -39,6 +41,9 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): exclude_families = [] def process(self, instance): + if not self.is_active(instance.data): + return + context = instance.context if instance.data.get("tileRendering"): self.log.info(( From 69bc82edca385a0b823c4909c617bd72857b4a44 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:08:26 +0100 Subject: [PATCH 032/175] Refactor many maya publish plug-ins to be optional in new publisher --- .../maya/plugins/publish/extract_import_reference.py | 7 ++++++- openpype/hosts/maya/plugins/publish/extract_model.py | 6 +++++- .../hosts/maya/plugins/publish/extract_xgen_cache.py | 5 ++++- .../hosts/maya/plugins/publish/validate_attributes.py | 6 +++++- .../hosts/maya/plugins/publish/validate_color_sets.py | 6 +++++- .../maya/plugins/publish/validate_cycle_error.py | 11 +++++++++-- .../maya/plugins/publish/validate_mesh_has_uv.py | 10 ++++++++-- .../plugins/publish/validate_mesh_non_zero_edge.py | 11 +++++++++-- .../plugins/publish/validate_mesh_normals_unlocked.py | 6 +++++- .../plugins/publish/validate_mesh_overlapping_uvs.py | 10 ++++++++-- .../plugins/publish/validate_mesh_single_uv_set.py | 6 +++++- .../maya/plugins/publish/validate_mesh_uv_set_map1.py | 6 +++++- .../hosts/maya/plugins/publish/validate_model_name.py | 11 +++++++++-- .../maya/plugins/publish/validate_mvlook_contents.py | 11 +++++++++-- .../maya/plugins/publish/validate_no_animation.py | 10 ++++++++-- .../maya/plugins/publish/validate_no_unknown_nodes.py | 10 ++++++++-- .../maya/plugins/publish/validate_node_ids_related.py | 11 +++++++++-- .../maya/plugins/publish/validate_shader_name.py | 10 ++++++++-- .../plugins/publish/validate_shape_default_names.py | 6 +++++- .../publish/validate_transform_naming_suffix.py | 11 +++++++++-- .../publish/validate_unreal_staticmesh_naming.py | 11 +++++++++-- .../maya/plugins/publish/validate_unreal_up_axis.py | 7 
++++++- 22 files changed, 154 insertions(+), 34 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/extract_import_reference.py b/openpype/hosts/maya/plugins/publish/extract_import_reference.py index 51c82dde924..5ca100ac002 100644 --- a/openpype/hosts/maya/plugins/publish/extract_import_reference.py +++ b/openpype/hosts/maya/plugins/publish/extract_import_reference.py @@ -8,10 +8,12 @@ from openpype.lib import run_subprocess from openpype.pipeline import publish +from openpype.pipeline.publish import OptionalPyblishPluginMixin from openpype.hosts.maya.api import lib -class ExtractImportReference(publish.Extractor): +class ExtractImportReference(publish.Extractor, + OptionalPyblishPluginMixin): """ Extract the scene with imported reference. @@ -32,6 +34,9 @@ def apply_settings(cls, project_setting, system_settings): cls.active = project_setting["deadline"]["publish"]["MayaSubmitDeadline"]["import_reference"] # noqa def process(self, instance): + if not self.is_active(instance.data): + return + ext_mapping = ( instance.context.data["project_settings"]["maya"]["ext_mapping"] ) diff --git a/openpype/hosts/maya/plugins/publish/extract_model.py b/openpype/hosts/maya/plugins/publish/extract_model.py index 7c8c3a29813..d19c4db5fd9 100644 --- a/openpype/hosts/maya/plugins/publish/extract_model.py +++ b/openpype/hosts/maya/plugins/publish/extract_model.py @@ -8,7 +8,8 @@ from openpype.hosts.maya.api import lib -class ExtractModel(publish.Extractor): +class ExtractModel(publish.Extractor, + publish.OptionalPyblishPluginMixin): """Extract as Model (Maya Scene). 
Only extracts contents based on the original "setMembers" data to ensure @@ -31,6 +32,9 @@ class ExtractModel(publish.Extractor): def process(self, instance): """Plugin entry point.""" + if not self.is_active(instance.data): + return + ext_mapping = ( instance.context.data["project_settings"]["maya"]["ext_mapping"] ) diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py index 77350f343e4..6dbfa8b1f91 100644 --- a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py +++ b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py @@ -9,7 +9,8 @@ ) -class ExtractXgenCache(publish.Extractor): +class ExtractXgenCache(publish.Extractor, + publish.OptionalPyblishPluginMixin): """Produce an alembic of just xgen interactive groom """ @@ -20,6 +21,8 @@ class ExtractXgenCache(publish.Extractor): optional = True def process(self, instance): + if not self.is_active(instance.data): + return # Collect the out set nodes out_descriptions = [node for node in instance diff --git a/openpype/hosts/maya/plugins/publish/validate_attributes.py b/openpype/hosts/maya/plugins/publish/validate_attributes.py index 136c38bc1df..2ce4c94efe2 100644 --- a/openpype/hosts/maya/plugins/publish/validate_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_attributes.py @@ -4,10 +4,12 @@ from openpype.pipeline.publish import ( RepairContextAction, ValidateContentsOrder, + OptionalPyblishPluginMixin ) -class ValidateAttributes(pyblish.api.ContextPlugin): +class ValidateAttributes(pyblish.api.ContextPlugin, + OptionalPyblishPluginMixin): """Ensure attributes are consistent. Attributes to validate and their values comes from the @@ -29,6 +31,8 @@ class ValidateAttributes(pyblish.api.ContextPlugin): def process(self, context): # Check for preset existence. 
+ if not self.is_active(context.data): + return if not self.attributes: return diff --git a/openpype/hosts/maya/plugins/publish/validate_color_sets.py b/openpype/hosts/maya/plugins/publish/validate_color_sets.py index 905417bafaf..2ce266f5559 100644 --- a/openpype/hosts/maya/plugins/publish/validate_color_sets.py +++ b/openpype/hosts/maya/plugins/publish/validate_color_sets.py @@ -5,10 +5,12 @@ from openpype.pipeline.publish import ( RepairAction, ValidateMeshOrder, + OptionalPyblishPluginMixin ) -class ValidateColorSets(pyblish.api.Validator): +class ValidateColorSets(pyblish.api.Validator, + OptionalPyblishPluginMixin): """Validate all meshes in the instance have unlocked normals These can be removed manually through: @@ -41,6 +43,8 @@ def get_invalid(cls, instance): def process(self, instance): """Raise invalid when any of the meshes have ColorSets""" + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) diff --git a/openpype/hosts/maya/plugins/publish/validate_cycle_error.py b/openpype/hosts/maya/plugins/publish/validate_cycle_error.py index 210ee4127cb..f7d6658c6e3 100644 --- a/openpype/hosts/maya/plugins/publish/validate_cycle_error.py +++ b/openpype/hosts/maya/plugins/publish/validate_cycle_error.py @@ -4,10 +4,14 @@ import openpype.hosts.maya.api.action from openpype.hosts.maya.api.lib import maintained_selection -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) -class ValidateCycleError(pyblish.api.InstancePlugin): +class ValidateCycleError(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate nodes produce no cycle errors.""" order = ValidateContentsOrder + 0.05 @@ -18,6 +22,9 @@ class ValidateCycleError(pyblish.api.InstancePlugin): optional = True def process(self, instance): + if not self.is_active(instance.data): + return + invalid = self.get_invalid(instance) if invalid: raise RuntimeError("Nodes 
produce a cycle error: %s" % invalid) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py index 36a0da7a59b..12090e5d56e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py @@ -4,7 +4,10 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateMeshOrder +from openpype.pipeline.publish import ( + ValidateMeshOrder, + OptionalPyblishPluginMixin +) def len_flattened(components): @@ -36,7 +39,8 @@ def len_flattened(components): return n -class ValidateMeshHasUVs(pyblish.api.InstancePlugin): +class ValidateMeshHasUVs(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate the current mesh has UVs. It validates whether the current UV set has non-zero UVs and @@ -88,6 +92,8 @@ def get_invalid(cls, instance): return invalid def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py index 0ef2716559c..1f04652afde 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py @@ -3,10 +3,14 @@ import pyblish.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ValidateMeshOrder +from openpype.pipeline.publish import ( + ValidateMeshOrder, + OptionalPyblishPluginMixin +) -class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): +class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate meshes don't have edges with a zero length. Based on Maya's polyCleanup 'Edges with zero length'. 
@@ -51,6 +55,9 @@ def get_invalid(cls, instance): def process(self, instance): """Process all meshes""" + if not self.is_active(instance.data): + return + invalid = self.get_invalid(instance) if invalid: raise RuntimeError("Meshes found with zero " diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py index c8892a8e59b..6ff384dc537 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py @@ -6,10 +6,12 @@ from openpype.pipeline.publish import ( RepairAction, ValidateMeshOrder, + OptionalPyblishPluginMixin ) -class ValidateMeshNormalsUnlocked(pyblish.api.Validator): +class ValidateMeshNormalsUnlocked(pyblish.api.Validator, + OptionalPyblishPluginMixin): """Validate all meshes in the instance have unlocked normals These can be unlocked manually through: @@ -49,6 +51,8 @@ def get_invalid(cls, instance): def process(self, instance): """Raise invalid when any of the meshes have locked normals""" + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py index be7324a68f0..15c27b8ebfd 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py @@ -5,7 +5,10 @@ import pymel.core as pm from six.moves import xrange -from openpype.pipeline.publish import ValidateMeshOrder +from openpype.pipeline.publish import ( + ValidateMeshOrder, + OptionalPyblishPluginMixin +) class GetOverlappingUVs(object): @@ -225,7 +228,8 @@ def _getOverlapUVFaces(self, meshName): return faces -class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin): +class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin, + 
OptionalPyblishPluginMixin): """ Validate the current mesh overlapping UVs. It validates whether the current UVs are overlapping or not. @@ -274,6 +278,8 @@ def get_invalid(cls, instance, compute=False): return invalid def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance, compute=True) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py index 6ca8c06ba51..477efd506df 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py @@ -6,10 +6,12 @@ from openpype.pipeline.publish import ( RepairAction, ValidateMeshOrder, + OptionalPyblishPluginMixin ) -class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin): +class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Warn on multiple UV sets existing for each polygon mesh. On versions prior to Maya 2017 this will force no multiple uv sets because @@ -49,6 +51,8 @@ def get_invalid(instance): def process(self, instance): """Process all the nodes in the instance 'objectSet'""" + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py b/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py index 40ddb916cae..116fecbcba9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_uv_set_map1.py @@ -5,10 +5,12 @@ from openpype.pipeline.publish import ( RepairAction, ValidateMeshOrder, + OptionalPyblishPluginMixin ) -class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin): +class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate model's default set exists and is named 'map1'. 
In Maya meshes by default have a uv set named "map1" that cannot be @@ -48,6 +50,8 @@ def get_invalid(instance): def process(self, instance): """Process all the nodes in the instance 'objectSet'""" + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/openpype/hosts/maya/plugins/publish/validate_model_name.py index 2dec9ba267d..44d8521419c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_name.py @@ -6,7 +6,10 @@ import pyblish.api from openpype.pipeline import legacy_io -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) import openpype.hosts.maya.api.action from openpype.hosts.maya.api.shader_definition_editor import ( DEFINITION_FILENAME) @@ -14,7 +17,8 @@ import gridfs -class ValidateModelName(pyblish.api.InstancePlugin): +class ValidateModelName(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate name of model starts with (somename)_###_(materialID)_GEO @@ -145,6 +149,9 @@ def is_group(group_name): def process(self, instance): """Plugin entry point.""" + if not self.is_active(instance.data): + return + invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py index e583c1edbab..2242550846d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py @@ -1,14 +1,18 @@ import os import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) COLOUR_SPACES = ['sRGB', 'linear', 
'auto'] MIPMAP_EXTENSIONS = ['tdl'] -class ValidateMvLookContents(pyblish.api.InstancePlugin): +class ValidateMvLookContents(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): order = ValidateContentsOrder families = ['mvLook'] hosts = ['maya'] @@ -23,6 +27,9 @@ class ValidateMvLookContents(pyblish.api.InstancePlugin): enforced_intents = ['-', 'Final'] def process(self, instance): + if not self.is_active(instance.data): + return + intent = instance.context.data['intent']['value'] publishMipMap = instance.data["publishMipMap"] enforced = True diff --git a/openpype/hosts/maya/plugins/publish/validate_no_animation.py b/openpype/hosts/maya/plugins/publish/validate_no_animation.py index 2e7cafe4abd..853554bb4b1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_animation.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_animation.py @@ -2,10 +2,14 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) -class ValidateNoAnimation(pyblish.api.Validator): +class ValidateNoAnimation(pyblish.api.Validator, + OptionalPyblishPluginMixin): """Ensure no keyframes on nodes in the Instance. 
Even though a Model would extract without animCurves correctly this avoids @@ -22,6 +26,8 @@ class ValidateNoAnimation(pyblish.api.Validator): actions = [openpype.hosts.maya.api.action.SelectInvalidAction] def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py b/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py index 2cfdc281282..b18bbd85920 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_unknown_nodes.py @@ -2,10 +2,14 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) -class ValidateNoUnknownNodes(pyblish.api.InstancePlugin): +class ValidateNoUnknownNodes(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Checks to see if there are any unknown nodes in the instance. 
This often happens if nodes from plug-ins are used but are not available @@ -29,6 +33,8 @@ def get_invalid(instance): def process(self, instance): """Process all the nodes in the instance""" + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py index f901dc58c43..941fe4ee98a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_related.py @@ -1,11 +1,15 @@ import pyblish.api -from openpype.pipeline.publish import ValidatePipelineOrder +from openpype.pipeline.publish import ( + ValidatePipelineOrder, + OptionalPyblishPluginMixin +) import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib -class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): +class ValidateNodeIDsRelated(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate nodes have a related Colorbleed Id to the instance.data[asset] """ @@ -23,6 +27,9 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): def process(self, instance): """Process all nodes in instance (including hierarchy)""" + if not self.is_active(instance.data): + return + # Ensure all nodes have a cbId invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_shader_name.py b/openpype/hosts/maya/plugins/publish/validate_shader_name.py index b3e51f011d2..ee269304dd8 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shader_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_shader_name.py @@ -4,10 +4,14 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) -class 
ValidateShaderName(pyblish.api.InstancePlugin): +class ValidateShaderName(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate shader name assigned. It should be _<*>_SHD @@ -23,6 +27,8 @@ class ValidateShaderName(pyblish.api.InstancePlugin): # The default connections to check def process(self, instance): + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py index 651c6bcec9a..fc763ef0c3a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py +++ b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py @@ -8,6 +8,7 @@ from openpype.pipeline.publish import ( ValidateContentsOrder, RepairAction, + OptionalPyblishPluginMixin ) @@ -15,7 +16,8 @@ def short_name(node): return node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] -class ValidateShapeDefaultNames(pyblish.api.InstancePlugin): +class ValidateShapeDefaultNames(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validates that Shape names are using Maya's default format. 
When you create a new polygon cube Maya will name the transform @@ -79,6 +81,8 @@ def get_invalid(cls, instance): def process(self, instance): """Process all the shape nodes in the instance""" + if not self.is_active(instance.data): + return invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py index 65551c8d5e6..d19f1efbc1a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py @@ -5,10 +5,14 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) -class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): +class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validates transform suffix based on the type of its children shapes. Suffices must be: @@ -112,6 +116,9 @@ def process(self, instance): instance (:class:`pyblish.api.Instance`): published instance. 
""" + if not self.is_active(instance.data): + return + invalid = self.get_invalid(instance) if invalid: valid = self.get_table_for_invalid() diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py index 1425190b824..dc4c2300af8 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -7,10 +7,14 @@ import openpype.hosts.maya.api.action from openpype.pipeline import legacy_io from openpype.settings import get_project_settings -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + OptionalPyblishPluginMixin +) -class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin): +class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin): """Validate name of Unreal Static Mesh Unreals naming convention states that staticMesh should start with `SM` @@ -131,6 +135,9 @@ def get_invalid(cls, instance): return invalid def process(self, instance): + if not self.is_active(instance.data): + return + if not self.validate_mesh and not self.validate_collision: self.log.info("Validation of both mesh and collision names" "is disabled.") diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py index dd699735d96..a420dcb9003 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_up_axis.py @@ -6,10 +6,12 @@ from openpype.pipeline.publish import ( ValidateContentsOrder, RepairAction, + OptionalPyblishPluginMixin ) -class ValidateUnrealUpAxis(pyblish.api.ContextPlugin): +class ValidateUnrealUpAxis(pyblish.api.ContextPlugin, + OptionalPyblishPluginMixin): """Validate if Z is set as up axis in Maya""" optional = 
True @@ -21,6 +23,9 @@ class ValidateUnrealUpAxis(pyblish.api.ContextPlugin): actions = [RepairAction] def process(self, context): + if not self.is_active(context.data): + return + assert cmds.upAxis(q=True, axis=True) == "z", ( "Invalid axis set as up axis" ) From fc03bebd92bac0c2052c1c1e5bf5bd33f36ae081 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:11:17 +0100 Subject: [PATCH 033/175] Maya: Refactor Create Proxy Alembic to new publisher --- .../maya/plugins/create/create_proxy_abc.py | 57 ++++++++++++------- 1 file changed, 36 insertions(+), 21 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_proxy_abc.py b/openpype/hosts/maya/plugins/create/create_proxy_abc.py index 2946f7b5302..d89470ebee9 100644 --- a/openpype/hosts/maya/plugins/create/create_proxy_abc.py +++ b/openpype/hosts/maya/plugins/create/create_proxy_abc.py @@ -2,34 +2,49 @@ lib, plugin ) +from openpype.lib import ( + BoolDef, + TextDef +) -class CreateProxyAlembic(plugin.Creator): +class CreateProxyAlembic(plugin.MayaCreator): """Proxy Alembic for animated data""" - name = "proxyAbcMain" + identifier = "io.openpype.creators.maya.proxyabc" label = "Proxy Alembic" family = "proxyAbc" icon = "gears" write_color_sets = False write_face_sets = False - def __init__(self, *args, **kwargs): - super(CreateProxyAlembic, self).__init__(*args, **kwargs) - - # Add animation data - self.data.update(lib.collect_animation_data()) - - # Vertex colors with the geometry. - self.data["writeColorSets"] = self.write_color_sets - # Vertex colors with the geometry. 
- self.data["writeFaceSets"] = self.write_face_sets - # Default to exporting world-space - self.data["worldSpace"] = True - - # name suffix for the bounding box - self.data["nameSuffix"] = "_BBox" - - # Add options for custom attributes - self.data["attr"] = "" - self.data["attrPrefix"] = "" + def get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() + + defs.extend([ + BoolDef("writeColorSets", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=self.write_color_sets), + BoolDef("writeFaceSets", + label="Write face sets", + tooltip="Write face sets with the geometry", + default=self.write_face_sets), + BoolDef("worldSpace", + label="World-Space Export", + default=True), + TextDef("nameSuffix", + label="Name Suffix for Bounding Box", + default="_BBox", + placeholder="_BBox"), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1, prefix2") + ]) + + return defs From cb23a1d2c128491716cf2b20ee7c53701493068f Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:23:17 +0100 Subject: [PATCH 034/175] Validate Model Content - Raise PublishValidationError --- .../maya/plugins/publish/validate_model_content.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_model_content.py b/openpype/hosts/maya/plugins/publish/validate_model_content.py index 723346a2852..ab9303f49d5 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_content.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_content.py @@ -3,7 +3,10 @@ import pyblish.api import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError +) class 
ValidateModelContent(pyblish.api.InstancePlugin): @@ -97,4 +100,7 @@ def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Model content is invalid. See log.") + raise PublishValidationError( + title="Model content is invalid", + message="See log for more details" + ) From e546a4e5b5691067a65c20dce58a929039066ae0 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:23:46 +0100 Subject: [PATCH 035/175] Make it so that you can select instance with select action if its empty --- openpype/hosts/maya/plugins/publish/validate_model_content.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_model_content.py b/openpype/hosts/maya/plugins/publish/validate_model_content.py index ab9303f49d5..9ba458a4160 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_content.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_content.py @@ -31,7 +31,7 @@ def get_invalid(cls, instance): content_instance = instance.data.get("setMembers", None) if not content_instance: cls.log.error("Instance has no nodes!") - return True + return [instance.data["name"]] # All children will be included in the extracted export so we also # validate *all* descendents of the set members and we skip any From a54c8b2e84e7adee82be0ee46448ff61c7dcac1a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:24:03 +0100 Subject: [PATCH 036/175] Validate Instance Has Members - Raise PublishValidationError --- .../publish/validate_instance_has_members.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py index 4870f27bff6..7423764934b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py +++ b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py @@ 
-1,6 +1,9 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError +) class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): @@ -15,7 +18,7 @@ class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): def get_invalid(cls, instance): invalid = list() - if not instance.data["setMembers"]: + if not instance.data.get("setMembers"): objectset_name = instance.data['name'] invalid.append(objectset_name) @@ -25,4 +28,9 @@ def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Empty instances found: {0}".format(invalid)) + # Invalid will always be a single entry, we log the single name + name = invalid[0] + raise PublishValidationError( + title="Empty instance", + message="Instance '{0}' is empty".format(name) + ) From 46f3f7a6245e36fa77bf277d298cd90473ca6b13 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:38:27 +0100 Subject: [PATCH 037/175] Raise PublishValidationError for better report --- .../plugins/publish/validate_mesh_has_uv.py | 16 +++++++++++--- .../validate_transform_naming_suffix.py | 21 +++++++++++++------ 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py index 12090e5d56e..f2d407cfa19 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py @@ -6,7 +6,8 @@ import openpype.hosts.maya.api.action from openpype.pipeline.publish import ( ValidateMeshOrder, - OptionalPyblishPluginMixin + OptionalPyblishPluginMixin, + PublishValidationError ) @@ -97,5 +98,14 @@ def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Meshes found in instance without " - "valid UVs: 
{0}".format(invalid)) + + names = "
".join( + " - {}".format(node) for node in invalid + ) + + raise PublishValidationError( + title="Mesh has no UVs", + message="Model meshes are required to have UVs.

" + "Meshes detected with invalid or missing UVs:
" + "{0}".format(names) + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py index d19f1efbc1a..b0d7904224c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py @@ -7,7 +7,8 @@ import openpype.hosts.maya.api.action from openpype.pipeline.publish import ( ValidateContentsOrder, - OptionalPyblishPluginMixin + OptionalPyblishPluginMixin, + PublishValidationError ) @@ -53,8 +54,8 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin, def get_table_for_invalid(cls): ss = [] for k, v in cls.SUFFIX_NAMING_TABLE.items(): - ss.append(" - {}: {}".format(k, ", ".join(v))) - return "\n".join(ss) + ss.append(" - {}: {}".format(k, ", ".join(v))) + return "
".join(ss) @staticmethod def is_valid_name(node_name, shape_type, @@ -122,6 +123,14 @@ def process(self, instance): invalid = self.get_invalid(instance) if invalid: valid = self.get_table_for_invalid() - raise ValueError("Incorrectly named geometry " - "transforms: {0}, accepted suffixes are: " - "\n{1}".format(invalid, valid)) + + names = "
".join( + " - {}".format(node) for node in invalid + ) + valid = valid.replace("\n", "
") + + raise PublishValidationError( + title="Invalid naming suffix", + message="Valid suffixes are:
{0}

" + "Incorrectly named geometry transforms:
{1}" + "".format(valid, names)) From 299074508094308cc67d3beb6945b5e2be8d6f17 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:44:24 +0100 Subject: [PATCH 038/175] Improve validation title --- openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py index f2d407cfa19..3d346ac71db 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py @@ -104,7 +104,7 @@ def process(self, instance): ) raise PublishValidationError( - title="Mesh has no UVs", + title="Mesh has missing UVs", message="Model meshes are required to have UVs.

" "Meshes detected with invalid or missing UVs:
" "{0}".format(names) From 2446082d87c1e4ed1fdc99e3b4eef0eb502946d9 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 13:44:36 +0100 Subject: [PATCH 039/175] Raise PublishValidationError for better report --- .../plugins/publish/validate_transform_zero.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py index da569195e8c..004cedeaf67 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py @@ -3,7 +3,10 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError +) class ValidateTransformZero(pyblish.api.Validator): @@ -64,5 +67,14 @@ def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise ValueError("Nodes found with transform " - "values: {0}".format(invalid)) + + names = "
".join( + " - {}".format(node) for node in invalid + ) + + raise PublishValidationError( + title="Transform Zero", + message="The model publish allows no transformations. You must" + " freeze transformations to continue.

" + "Nodes found with transform values: " + "{0}".format(names)) From e1970eeb11ae4a713114463cbfe01823bc145345 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 14:58:55 +0100 Subject: [PATCH 040/175] Maya: Refactor Create Multiverse Look to new publisher --- .../plugins/create/create_multiverse_look.py | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_look.py b/openpype/hosts/maya/plugins/create/create_multiverse_look.py index f47c88a93b4..b89df856c70 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_look.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_look.py @@ -1,15 +1,27 @@ from openpype.hosts.maya.api import plugin +from openpype.lib import ( + BoolDef, + EnumDef +) -class CreateMultiverseLook(plugin.Creator): +class CreateMultiverseLook(plugin.MayaCreator): """Create Multiverse Look""" - name = "mvLook" + identifier = "io.openpype.creators.maya.mvlook" label = "Multiverse Look" family = "mvLook" icon = "cubes" - def __init__(self, *args, **kwargs): - super(CreateMultiverseLook, self).__init__(*args, **kwargs) - self.data["fileFormat"] = ["usda", "usd"] - self.data["publishMipMap"] = True + def get_instance_attr_defs(self): + + return [ + EnumDef("fileFormat", + label="File Format", + tooltip="USD export file format", + items={key: key for key in ["usda", "usd"]}, + default="usda"), + BoolDef("publishMipMap", + label="Publish MipMap", + default=True), + ] From d4a9e1cd3ec30b54f0379b9a6290fbd294d566b1 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 15:03:57 +0100 Subject: [PATCH 041/175] Maya: Refactor Create VRay Proxy to new publisher --- .../maya/plugins/create/create_vrayproxy.py | 37 ++++++++++++++----- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_vrayproxy.py b/openpype/hosts/maya/plugins/create/create_vrayproxy.py index 
5c0365b495b..5fb5c1e15bc 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayproxy.py +++ b/openpype/hosts/maya/plugins/create/create_vrayproxy.py @@ -1,20 +1,37 @@ -from openpype.hosts.maya.api import plugin +from openpype.hosts.maya.api import ( + plugin, + lib +) +from openpype.lib import BoolDef -class CreateVrayProxy(plugin.Creator): +class CreateVrayProxy(plugin.MayaCreator): """Alembic pointcache for animated data""" - name = "vrayproxy" + identifier = "io.openpype.creators.maya.vrayproxy" label = "VRay Proxy" family = "vrayproxy" icon = "gears" - def __init__(self, *args, **kwargs): - super(CreateVrayProxy, self).__init__(*args, **kwargs) + def get_instance_attr_defs(self): - self.data["animation"] = False - self.data["frameStart"] = 1 - self.data["frameEnd"] = 1 + defs = [ + BoolDef("animation", + label="Export Animation", + default=False) + ] - # Write vertex colors - self.data["vertexColors"] = False + # Add time range attributes but remove some attributes + # which this instance actually doesn't use + defs.extend(lib.collect_animation_defs()) + remove = {"handleStart", "handleEnd", "step"} + defs = [attr_def for attr_def in defs if attr_def.key not in remove] + + defs.extend([ + BoolDef("vertexColors", + label="Write vertex colors", + tooltip="Write vertex colors with the geometry", + default=False), + ]) + + return defs From aae89c42e43db0b3579fae82a4c8b4abb57ac19d Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 15:44:10 +0100 Subject: [PATCH 042/175] Add nice new report --- .../publish/help/validate_node_ids.xml | 29 +++++++++++++++++++ .../maya/plugins/publish/validate_node_ids.py | 15 ++++++++-- 2 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 openpype/hosts/maya/plugins/publish/help/validate_node_ids.xml diff --git a/openpype/hosts/maya/plugins/publish/help/validate_node_ids.xml b/openpype/hosts/maya/plugins/publish/help/validate_node_ids.xml new file mode 100644 index 00000000000..2ef4bc95c2c --- 
/dev/null +++ b/openpype/hosts/maya/plugins/publish/help/validate_node_ids.xml @@ -0,0 +1,29 @@ + + + +Missing node ids +## Nodes found with missing `cbId` + +Nodes were detected in your scene which are missing required `cbId` +attributes for identification further in the pipeline. + +### How to repair? + +The node ids are auto-generated on scene save, and thus the easiest fix is to +save your scene again. + +After that restart publishing with Reload button. + + +### Invalid nodes + +{nodes} + + +### How could this happen? + +This often happens if you've generated new nodes but haven't saved your scene +after creating the new nodes. + + + diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids.py b/openpype/hosts/maya/plugins/publish/validate_node_ids.py index 796f4c8d766..0c7d6470143 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids.py @@ -1,6 +1,9 @@ import pyblish.api -from openpype.pipeline.publish import ValidatePipelineOrder +from openpype.pipeline.publish import ( + ValidatePipelineOrder, + PublishXmlValidationError +) import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -34,8 +37,14 @@ def process(self, instance): # Ensure all nodes have a cbId invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Nodes found without " - "IDs: {0}".format(invalid)) + names = "\n".join( + "- {}".format(node) for node in invalid + ) + raise PublishXmlValidationError( + plugin=self, + message="Nodes found without IDs: {}".format(invalid), + formatting_data={"nodes": names} + ) @classmethod def get_invalid(cls, instance): From f423bc6294a64c4202d17cf8773f2c88c00723ed Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 16:11:51 +0100 Subject: [PATCH 043/175] Maya: Refactor Create Unreal Static Mesh to new publisher --- .../create/create_unreal_staticmesh.py | 96 ++++++++++++------- 1 file changed, 64 insertions(+), 32 deletions(-) 
diff --git a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py index 44cbee05024..3f96d91a54e 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py +++ b/openpype/hosts/maya/plugins/create/create_unreal_staticmesh.py @@ -1,58 +1,90 @@ # -*- coding: utf-8 -*- """Creator for Unreal Static Meshes.""" from openpype.hosts.maya.api import plugin, lib -from openpype.settings import get_project_settings -from openpype.pipeline import legacy_io from maya import cmds # noqa -class CreateUnrealStaticMesh(plugin.Creator): +class CreateUnrealStaticMesh(plugin.MayaCreator): """Unreal Static Meshes with collisions.""" - name = "staticMeshMain" + + identifier = "io.openpype.creators.maya.unrealstaticmesh" label = "Unreal - Static Mesh" family = "staticMesh" icon = "cube" dynamic_subset_keys = ["asset"] - def __init__(self, *args, **kwargs): - """Constructor.""" - super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs) - self._project_settings = get_project_settings( - legacy_io.Session["AVALON_PROJECT"]) + # Defined in settings + collision_prefixes = [] + + def apply_settings(self, project_settings, system_settings): + """Apply project settings to creator""" + settings = project_settings["maya"]["create"]["CreateUnrealStaticMesh"] + self.collision_prefixes = settings["collision_prefixes"] - @classmethod def get_dynamic_data( - cls, variant, task_name, asset_id, project_name, host_name + self, variant, task_name, asset_doc, project_name, host_name, instance ): - dynamic_data = super(CreateUnrealStaticMesh, cls).get_dynamic_data( - variant, task_name, asset_id, project_name, host_name + """ + The default subset name templates for Unreal include {asset} and thus + we should pass that along as dynamic data. 
+ """ + dynamic_data = super(CreateUnrealStaticMesh, self).get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name, instance ) - dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET") + dynamic_data["asset"] = asset_doc["name"] return dynamic_data - def process(self): - self.name = "{}_{}".format(self.family, self.name) + def create(self, subset_name, instance_data, pre_create_data): + with lib.undo_chunk(): - instance = super(CreateUnrealStaticMesh, self).process() - content = cmds.sets(instance, query=True) + instance = super(CreateUnrealStaticMesh, self).create( + subset_name, instance_data, pre_create_data) + instance_node = instance.get("instance_node") + + # We reorganize the geometry that was originally added into the + # set into either 'collision_SET' or 'geometry_SET' based on the + # collision_prefixes from project settings + members = cmds.sets(instance_node, query=True) + cmds.sets(clear=instance_node) - # empty set and process its former content - cmds.sets(content, rm=instance) geometry_set = cmds.sets(name="geometry_SET", empty=True) collisions_set = cmds.sets(name="collisions_SET", empty=True) - cmds.sets([geometry_set, collisions_set], forceElement=instance) + cmds.sets([geometry_set, collisions_set], + forceElement=instance_node) - members = cmds.ls(content, long=True) or [] + members = cmds.ls(members, long=True) or [] children = cmds.listRelatives(members, allDescendents=True, fullPath=True) or [] - children = cmds.ls(children, type="transform") - for node in children: - if cmds.listRelatives(node, type="shape"): - if [ - n for n in self.collision_prefixes - if node.startswith(n) - ]: - cmds.sets(node, forceElement=collisions_set) - else: - cmds.sets(node, forceElement=geometry_set) + transforms = cmds.ls(members + children, type="transform") + for transform in transforms: + + if not cmds.listRelatives(transform, + type="shape", + noIntermediate=True): + # Exclude all transforms that have no direct shapes + continue + 
+ if self.has_collision_prefix(transform): + cmds.sets(transform, forceElement=collisions_set) + else: + cmds.sets(transform, forceElement=geometry_set) + + def has_collision_prefix(self, node_path): + """Return whether node name of path matches collision prefix. + + If the node name matches the collision prefix we add it to the + `collisions_SET` instead of the `geometry_SET`. + + Args: + node_path (str): Maya node path. + + Returns: + bool: Whether the node should be considered a collision mesh. + + """ + node_name = node_path.rsplit("|", 1)[-1] + for prefix in self.collision_prefixes: + if node_name.startswith(prefix): + return True + return False From f0ac3b48bde514ecead6e94e2f7522066d263b4a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 17:53:56 +0100 Subject: [PATCH 044/175] Maya: Refactor Create Unreal Skeletal Mesh to new publisher --- .../create/create_unreal_skeletalmesh.py | 110 ++++++++++++------ 1 file changed, 72 insertions(+), 38 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py index 6e72bf5324f..b53c03b078c 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py +++ b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py @@ -1,47 +1,63 @@ # -*- coding: utf-8 -*- -"""Creator for Unreal Skeletal Meshes.""" +"""Creator for Unreal Skeletal Meshes.""" from openpype.hosts.maya.api import plugin, lib -from openpype.pipeline import legacy_io +from openpype.lib import ( + BoolDef, + TextDef +) + from maya import cmds # noqa -class CreateUnrealSkeletalMesh(plugin.Creator): +class CreateUnrealSkeletalMesh(plugin.MayaCreator): """Unreal Static Meshes with collisions.""" - name = "staticMeshMain" + + identifier = "io.openpype.creators.maya.unrealskeletalmesh" label = "Unreal - Skeletal Mesh" family = "skeletalMesh" icon = "thumbs-up" dynamic_subset_keys = ["asset"] - joint_hints = [] + # Defined in
settings + joint_hints = set() - def __init__(self, *args, **kwargs): - """Constructor.""" - super(CreateUnrealSkeletalMesh, self).__init__(*args, **kwargs) + def apply_settings(self, project_settings, system_settings): + """Apply project settings to creator""" + settings = ( + project_settings["maya"]["create"]["CreateUnrealSkeletalMesh"] + ) + self.joint_hints = set(settings.get("joint_hints", [])) - @classmethod def get_dynamic_data( - cls, variant, task_name, asset_id, project_name, host_name + self, variant, task_name, asset_doc, project_name, host_name, instance ): - dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data( - variant, task_name, asset_id, project_name, host_name + """ + The default subset name templates for Unreal include {asset} and thus + we should pass that along as dynamic data. + """ + dynamic_data = super(CreateUnrealSkeletalMesh, self).get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name, instance ) - dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET") + dynamic_data["asset"] = asset_doc["name"] return dynamic_data - def process(self): - self.name = "{}_{}".format(self.family, self.name) + def create(self, subset_name, instance_data, pre_create_data): + with lib.undo_chunk(): - instance = super(CreateUnrealSkeletalMesh, self).process() - content = cmds.sets(instance, query=True) + instance = super(CreateUnrealSkeletalMesh, self).create( + subset_name, instance_data, pre_create_data) + instance_node = instance.get("instance_node") + + # We reorganize the geometry that was originally added into the + # set into either 'joints_SET' or 'geometry_SET' based on the + # joint_hints from project settings + members = cmds.sets(instance_node, query=True) + cmds.sets(clear=instance_node) - # empty set and process its former content - cmds.sets(content, rm=instance) geometry_set = cmds.sets(name="geometry_SET", empty=True) joints_set = cmds.sets(name="joints_SET", empty=True) - cmds.sets([geometry_set, 
joints_set], forceElement=instance) - members = cmds.ls(content) or [] + cmds.sets([geometry_set, joints_set], forceElement=instance_node) for node in members: if node in self.joint_hints: @@ -49,20 +65,38 @@ def process(self): else: cmds.sets(node, forceElement=geometry_set) - # Add animation data - self.data.update(lib.collect_animation_data()) - - # Only renderable visible shapes - self.data["renderableOnly"] = False - # only nodes that are visible - self.data["visibleOnly"] = False - # Include parent groups - self.data["includeParentHierarchy"] = False - # Default to exporting world-space - self.data["worldSpace"] = True - # Default to suspend refresh. - self.data["refresh"] = False - - # Add options for custom attributes - self.data["attr"] = "" - self.data["attrPrefix"] = "" + def get_instance_attr_defs(self): + + defs = lib.collect_animation_defs() + + defs.extend([ + BoolDef("renderableOnly", + label="Renderable Only", + tooltip="Only export renderable visible shapes", + default=False), + BoolDef("visibleOnly", + label="Visible Only", + tooltip="Only export dag objects visible during " + "frame range", + default=False), + BoolDef("includeParentHierarchy", + label="Include Parent Hierarchy", + tooltip="Whether to include parent hierarchy of nodes in " + "the publish instance", + default=False), + BoolDef("worldSpace", + label="World-Space Export", + default=True), + BoolDef("refresh", + label="Refresh viewport during export", + default=False), + TextDef("attr", + label="Custom Attributes", + default="", + placeholder="attr1, attr2"), + TextDef("attrPrefix", + label="Custom Attributes Prefix", + placeholder="prefix1, prefix2") + ]) + + return defs From 6133730516a698a9650a50d4d448a2ae6f42a38a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 18:18:43 +0100 Subject: [PATCH 045/175] Maya: Refactor Create Multiverse USD to new publisher --- .../plugins/create/create_multiverse_usd.py | 166 +++++++++++++----- 1 file changed, 124 insertions(+), 42 
deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py index 8cd76b5f406..205e251deb3 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py @@ -1,53 +1,135 @@ from openpype.hosts.maya.api import plugin, lib +from openpype.lib import ( + BoolDef, + NumberDef, + TextDef, + EnumDef +) -class CreateMultiverseUsd(plugin.Creator): +class CreateMultiverseUsd(plugin.MayaCreator): """Create Multiverse USD Asset""" - name = "mvUsdMain" + identifier = "io.openpype.creators.maya.mvusdasset" label = "Multiverse USD Asset" family = "usd" icon = "cubes" - def __init__(self, *args, **kwargs): - super(CreateMultiverseUsd, self).__init__(*args, **kwargs) + def get_instance_attr_defs(self): - # Add animation data first, since it maintains order. - self.data.update(lib.collect_animation_data(True)) + defs = lib.collect_animation_defs(fps=True) + defs.extend([ + EnumDef("fileFormat", + label="File format", + items={key: key for key in ["usd", "usda", "usdz"]}, + default="usd"), + BoolDef("stripNamespaces", + label="Strip Namespaces", + default=True), + BoolDef("mergeTransformAndShape", + label="Merge Transform and Shape", + default=False), + BoolDef("writeAncestors", + label="Write Ancestors", + default=True), + BoolDef("flattenParentXforms", + label="Flatten Parent Xforms", + default=False), + BoolDef("writeSparseOverrides", + label="Write Sparse Overrides", + default=False), + BoolDef("useMetaPrimPath", + label="Use Meta Prim Path", + default=False), + TextDef("customRootPath", + label="Custom Root Path", + default=''), + TextDef("customAttributes", + label="Custom Attributes", + tooltip="Comma-separated list of attribute names", + default=''), + TextDef("nodeTypesToIgnore", + label="Node Types to Ignore", + tooltip="Comma-separated list of node types to be ignored", + default=''), + BoolDef("writeMeshes", + 
label="Write Meshes", + default=True), + BoolDef("writeCurves", + label="Write Curves", + default=True), + BoolDef("writeParticles", + label="Write Particles", + default=True), + BoolDef("writeCameras", + label="Write Cameras", + default=False), + BoolDef("writeLights", + label="Write Lights", + default=False), + BoolDef("writeJoints", + label="Write Joins", + default=False), + BoolDef("writeCollections", + label="Write Collections", + default=False), + BoolDef("writePositions", + label="Write Positions", + default=True), + BoolDef("writeNormals", + label="Write Normals", + default=True), + BoolDef("writeUVs", + label="Write UVs", + default=True), + BoolDef("writeColorSets", + label="Write Color Sets", + default=False), + BoolDef("writeTangents", + label="Write Tangents", + default=False), + BoolDef("writeRefPositions", + label="Write Ref Positions", + default=True), + BoolDef("writeBlendShapes", + label="Write BlendShapes", + default=False), + BoolDef("writeDisplayColor", + label="Write Display Color", + default=True), + BoolDef("writeSkinWeights", + label="Write Skin Weights", + default=False), + BoolDef("writeMaterialAssignment", + label="Write Material Assignment", + default=False), + BoolDef("writeHardwareShader", + label="Write Hardware Shader", + default=False), + BoolDef("writeShadingNetworks", + label="Write Shading Networks", + default=False), + BoolDef("writeTransformMatrix", + label="Write Transform Matrix", + default=True), + BoolDef("writeUsdAttributes", + label="Write USD Attributes", + default=True), + BoolDef("writeInstancesAsReferences", + label="Write Instances as References", + default=False), + BoolDef("timeVaryingTopology", + label="Time Varying Topology", + default=False), + TextDef("customMaterialNamespace", + label="Custom Material Namespace", + default=''), + NumberDef("numTimeSamples", + label="Num Time Samples", + default=1), + NumberDef("timeSamplesSpan", + label="Time Samples Span", + default=0.0), + ]) - self.data["fileFormat"] = 
["usd", "usda", "usdz"] - self.data["stripNamespaces"] = True - self.data["mergeTransformAndShape"] = False - self.data["writeAncestors"] = True - self.data["flattenParentXforms"] = False - self.data["writeSparseOverrides"] = False - self.data["useMetaPrimPath"] = False - self.data["customRootPath"] = '' - self.data["customAttributes"] = '' - self.data["nodeTypesToIgnore"] = '' - self.data["writeMeshes"] = True - self.data["writeCurves"] = True - self.data["writeParticles"] = True - self.data["writeCameras"] = False - self.data["writeLights"] = False - self.data["writeJoints"] = False - self.data["writeCollections"] = False - self.data["writePositions"] = True - self.data["writeNormals"] = True - self.data["writeUVs"] = True - self.data["writeColorSets"] = False - self.data["writeTangents"] = False - self.data["writeRefPositions"] = True - self.data["writeBlendShapes"] = False - self.data["writeDisplayColor"] = True - self.data["writeSkinWeights"] = False - self.data["writeMaterialAssignment"] = False - self.data["writeHardwareShader"] = False - self.data["writeShadingNetworks"] = False - self.data["writeTransformMatrix"] = True - self.data["writeUsdAttributes"] = True - self.data["writeInstancesAsReferences"] = False - self.data["timeVaryingTopology"] = False - self.data["customMaterialNamespace"] = '' - self.data["numTimeSamples"] = 1 - self.data["timeSamplesSpan"] = 0.0 + return defs From 62e4120603d7303f4ef1428ea636f825257cf585 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 18:21:17 +0100 Subject: [PATCH 046/175] Maya: Refactor Create Multiverse USD Composition to new publisher --- .../create/create_multiverse_usd_comp.py | 52 +++++++++++++------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py index ed466a80688..b55931138fd 100644 --- 
a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py @@ -1,26 +1,48 @@ from openpype.hosts.maya.api import plugin, lib +from openpype.lib import ( + BoolDef, + NumberDef, + EnumDef +) -class CreateMultiverseUsdComp(plugin.Creator): +class CreateMultiverseUsdComp(plugin.MayaCreator): """Create Multiverse USD Composition""" - name = "mvUsdCompositionMain" + identifier = "io.openpype.creators.maya.mvusdcomposition" label = "Multiverse USD Composition" family = "mvUsdComposition" icon = "cubes" - def __init__(self, *args, **kwargs): - super(CreateMultiverseUsdComp, self).__init__(*args, **kwargs) + def get_instance_attr_defs(self): - # Add animation data first, since it maintains order. - self.data.update(lib.collect_animation_data(True)) + defs = lib.collect_animation_defs(fps=True) + defs.extend([ + EnumDef("fileFormat", + label="File format", + items={key: key for key in ["usd", "usda"]}, + default="usd"), + BoolDef("stripNamespaces", + label="Strip Namespaces", + default=False), + BoolDef("mergeTransformAndShape", + label="Merge Transform and Shape", + default=False), + BoolDef("flattenContent", + label="Flatten Content", + default=False), + BoolDef("writeAsCompoundLayers", + label="Write As Compound Layers", + default=False), + BoolDef("writePendingOverrides", + label="Write Pending Overrides", + default=False), + NumberDef("numTimeSamples", + label="Num Time Samples", + default=1), + NumberDef("timeSamplesSpan", + label="Time Samples Span", + default=0.0), + ]) - # Order of `fileFormat` must match extract_multiverse_usd_comp.py - self.data["fileFormat"] = ["usda", "usd"] - self.data["stripNamespaces"] = False - self.data["mergeTransformAndShape"] = False - self.data["flattenContent"] = False - self.data["writeAsCompoundLayers"] = False - self.data["writePendingOverrides"] = False - self.data["numTimeSamples"] = 1 - self.data["timeSamplesSpan"] = 0.0 + return defs From 
3e7fb48aa870f521062f6aa0313cb6320def07b6 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 18:25:35 +0100 Subject: [PATCH 047/175] Maya: Refactor Create Multiverse USD Override to new publisher --- .../create/create_multiverse_usd_over.py | 67 +++++++++++++------ 1 file changed, 48 insertions(+), 19 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py index 06e22df2952..4775fa0d9b1 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py @@ -1,30 +1,59 @@ from openpype.hosts.maya.api import plugin, lib +from openpype.lib import ( + BoolDef, + NumberDef, + EnumDef +) class CreateMultiverseUsdOver(plugin.Creator): """Create Multiverse USD Override""" - name = "mvUsdOverrideMain" + identifier = "io.openpype.creators.maya.mvusdoverride" label = "Multiverse USD Override" family = "mvUsdOverride" icon = "cubes" - def __init__(self, *args, **kwargs): - super(CreateMultiverseUsdOver, self).__init__(*args, **kwargs) + def get_instance_attr_defs(self): + defs = lib.collect_animation_defs(fps=True) + defs.extend([ + EnumDef("fileFormat", + label="File format", + items={key: key for key in ["usd", "usda"]}, + default="usd"), + BoolDef("writeAll", + label="Write All", + default=False), + BoolDef("writeTransforms", + label="Write Transforms", + default=True), + BoolDef("writeVisibility", + label="Write Visibility", + default=True), + BoolDef("writeAttributes", + label="Write Attributes", + default=True), + BoolDef("writeMaterials", + label="Write Materials", + default=True), + BoolDef("writeVariants", + label="Write Variants", + default=True), + BoolDef("writeVariantsDefinition", + label="Write Variants Definition", + default=True), + BoolDef("writeActiveState", + label="Write Active State", + default=True), + BoolDef("writeNamespaces", + label="Write Namespaces", + 
default=False), + NumberDef("numTimeSamples", + label="Num Time Samples", + default=1), + NumberDef("timeSamplesSpan", + label="Time Samples Span", + default=0.0), + ]) - # Add animation data first, since it maintains order. - self.data.update(lib.collect_animation_data(True)) - - # Order of `fileFormat` must match extract_multiverse_usd_over.py - self.data["fileFormat"] = ["usda", "usd"] - self.data["writeAll"] = False - self.data["writeTransforms"] = True - self.data["writeVisibility"] = True - self.data["writeAttributes"] = True - self.data["writeMaterials"] = True - self.data["writeVariants"] = True - self.data["writeVariantsDefinition"] = True - self.data["writeActiveState"] = True - self.data["writeNamespaces"] = False - self.data["numTimeSamples"] = 1 - self.data["timeSamplesSpan"] = 0.0 + return defs From 99a7b6453ce7065e47da489b111d5ba39b13339a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 18:25:57 +0100 Subject: [PATCH 048/175] Fix typo --- openpype/hosts/maya/plugins/create/create_multiverse_usd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py index 205e251deb3..b65c2234688 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py @@ -68,7 +68,7 @@ def get_instance_attr_defs(self): label="Write Lights", default=False), BoolDef("writeJoints", - label="Write Joins", + label="Write Joints", default=False), BoolDef("writeCollections", label="Write Collections", From 15131095d9cdc45222c34ba07d2aae4e0e2521cd Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 27 Jan 2023 20:31:59 +0100 Subject: [PATCH 049/175] Remove `name` attribute --- openpype/hosts/maya/plugins/create/create_rendersetup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/create_rendersetup.py 
b/openpype/hosts/maya/plugins/create/create_rendersetup.py index e8a93ac388c..dd64a0a8421 100644 --- a/openpype/hosts/maya/plugins/create/create_rendersetup.py +++ b/openpype/hosts/maya/plugins/create/create_rendersetup.py @@ -6,7 +6,6 @@ class CreateRenderSetup(plugin.MayaCreator): """Create rendersetup template json data""" identifier = "io.openpype.creators.maya.rendersetup" - name = "rendersetup" label = "Render Setup Preset" family = "rendersetup" icon = "tablet" From 8fd44f646eb13048bfb5db28a8c8155d28fa0df9 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 22:57:44 +0100 Subject: [PATCH 050/175] Allow to customize deadline pool in new publisher --- .../deadline/plugins/publish/collect_pools.py | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py index 48130848d51..f6f7f7905b3 100644 --- a/openpype/modules/deadline/plugins/publish/collect_pools.py +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -4,8 +4,12 @@ """ import pyblish.api +from openpype.lib import TextDef +from openpype.pipeline.publish import OpenPypePyblishPluginMixin -class CollectDeadlinePools(pyblish.api.InstancePlugin): + +class CollectDeadlinePools(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): """Collect pools from instance if present, from Setting otherwise.""" order = pyblish.api.CollectorOrder + 0.420 @@ -15,9 +19,35 @@ class CollectDeadlinePools(pyblish.api.InstancePlugin): primary_pool = None secondary_pool = None + @classmethod + def apply_settings(cls, project_settings, system_settings): + # deadline.publish.CollectDeadlinePools + settings = project_settings["deadline"]["publish"]["CollectDeadlinePools"] # noqa + cls.primary_pool = settings.get("primary_pool", None) + cls.secondary_pool = settings.get("secondary_pool", None) + def process(self, instance): if not 
instance.data.get("primaryPool"): instance.data["primaryPool"] = self.primary_pool or "none" if not instance.data.get("secondaryPool"): instance.data["secondaryPool"] = self.secondary_pool or "none" + + def get_attribute_defs(self): + + # TODO: Preferably this would be an enum for the user + # but the Deadline server URL can be dynamic and + # can be set per render instance. Since get_attribute_defs + # can't be dynamic unfortunately EnumDef isn't possible (yet?) + # pool_names = self.deadline_module.get_deadline_pools(deadline_url, + # self.log) + # secondary_pool_names = ["-"] + pool_names + + return [ + TextDef("primaryPool", + label="Primary Pool", + default=self.primary_pool), + TextDef("secondaryPool", + label="Secondary Pool", + default=self.secondary_pool) + ] From 3db4c9154c4163ba4473b31fed42074333a10a55 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 22:58:25 +0100 Subject: [PATCH 051/175] Move `publishJobState` setting to Submit Publish Job --- .../plugins/publish/submit_publish_job.py | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 7e39a644a29..f8a75fd8be5 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -18,6 +18,8 @@ get_representation_path, legacy_io, ) +from openpype.pipeline.publish import OpenPypePyblishPluginMixin +from openpype.lib import EnumDef from openpype.tests.lib import is_in_tests from openpype.pipeline.farm.patterning import match_aov_pattern @@ -78,7 +80,8 @@ def get_resource_files(resources, frame_range=None): return list(res_collection) -class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): +class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): """Process Job submitted on farm. 
These jobs are dependent on a deadline or muster job @@ -265,6 +268,12 @@ def _submit_deadline_post_job(self, instance, job, instances): priority = self.deadline_priority or instance.data.get("priority", 50) + instance_settings = self.get_attr_values_from_data(instance.data) + initial_status = instance_settings.get("publishJobState", "Active") + # TODO: Remove this backwards compatibility of `suspend_publish` + if instance.data.get("suspend_publish"): + initial_status = "Suspended" + args = [ "--headless", 'publish', @@ -288,6 +297,7 @@ def _submit_deadline_post_job(self, instance, job, instances): "Department": self.deadline_department, "ChunkSize": self.deadline_chunk_size, "Priority": priority, + "InitialStatus": initial_status, "Group": self.deadline_group, "Pool": instance.data.get("primaryPool"), @@ -320,9 +330,6 @@ def _submit_deadline_post_job(self, instance, job, instances): else: payload["JobInfo"]["JobDependency0"] = job["_id"] - if instance.data.get("suspend_publish"): - payload["JobInfo"]["InitialStatus"] = "Suspended" - for index, (key_, value_) in enumerate(environment.items()): payload["JobInfo"].update( { @@ -1152,3 +1159,12 @@ def _get_publish_folder(self, anatomy, template_data, publish_folder = os.path.dirname(file_path) return publish_folder + + @classmethod + def get_attribute_defs(cls): + return [ + EnumDef("publishJobState", + label="Publish Job State", + items=["Active", "Suspended"], + default="Active") + ] From e5f39117cb533e4cbbe826b83b9a12bea5a20865 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 22:58:35 +0100 Subject: [PATCH 052/175] Fix docstring typos --- .../modules/deadline/plugins/publish/submit_publish_job.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index f8a75fd8be5..b9135ca9a24 100644 --- 
a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -87,10 +87,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, These jobs are dependent on a deadline or muster job submission prior to this plug-in. - - In case of Deadline, it creates dependend job on farm publishing + - In case of Deadline, it creates dependent job on farm publishing rendered image sequence. - - In case of Muster, there is no need for such thing as dependend job, + - In case of Muster, there is no need for such thing as dependent job, post action will be executed and rendered sequence will be published. Options in instance.data: From 3b92cb6574a9dc0be3bc2b91ef36c8f30746d47f Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 22:59:52 +0100 Subject: [PATCH 053/175] Add priority, framesPerTask, machineList attribute defs via AbstractSubmitDeadline --- .../deadline/abstract_submit_deadline.py | 39 ++++++++++++++++++- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 648eb77007b..8d29cc53a33 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -19,7 +19,13 @@ import pyblish.api from openpype.pipeline.publish import ( AbstractMetaInstancePlugin, - KnownPublishError + KnownPublishError, + OpenPypePyblishPluginMixin +) +from openpype.lib import ( + NumberDef, + TextDef, + EnumDef ) JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) @@ -395,14 +401,17 @@ def update(self, data): @six.add_metaclass(AbstractMetaInstancePlugin) -class AbstractSubmitDeadline(pyblish.api.InstancePlugin): +class AbstractSubmitDeadline(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): """Class abstracting access to Deadline.""" label = "Submit to Deadline" order = 
pyblish.api.IntegratorOrder + 0.1 + import_reference = False use_published = True asset_dependencies = False + default_priority = 50 def __init__(self, *args, **kwargs): super(AbstractSubmitDeadline, self).__init__(*args, **kwargs) @@ -667,3 +676,29 @@ def _get_workfile_instance(context): "Workfile (scene) must be published along") return i + + @classmethod + def get_attribute_defs(cls): + return [ + NumberDef("priority", + label="Priority", + default=cls.default_priority, + decimals=0), + NumberDef("framesPerTask", + label="Frames Per Task", + default=1, + decimals=0, + minimum=1, + maximum=1000), + TextDef("machineList", + label="Machine List", + default="", + placeholder="machine1,machine2"), + EnumDef("whitelist", + label="Machine List (Allow/Deny)", + items={ + True: "Allow List", + False: "Deny List", + }, + default="Deny List") + ] From c2f643935d507c257abf518c428c4411db142fa2 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:00:32 +0100 Subject: [PATCH 054/175] Maya Deadline submit use project settings + allow tile priority to be set --- .../plugins/publish/submit_maya_deadline.py | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 070d4eab18b..b611bcd4f20 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -31,6 +31,7 @@ from maya import cmds from openpype.pipeline import legacy_io +from openpype.lib import NumberDef from openpype.hosts.maya.api.lib_rendersettings import RenderSettings from openpype.hosts.maya.api.lib import get_attr_in_layer @@ -105,6 +106,21 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): pluginInfo = {} group = "none" + @classmethod + def apply_settings(cls, project_settings, system_settings): + settings = 
project_settings["deadline"]["publish"]["MayaSubmitDeadline"] # noqa + + # Take some defaults from settings + cls.asset_dependencies = settings.get("asset_dependencies", + cls.asset_dependencies) + cls.import_reference = settings.get("import_reference", + cls.import_reference) + cls.use_published = settings.get("use_published", cls.use_published) + cls.priority = settings.get("priority", cls.priority) + cls.tile_priority = settings.get("tile_priority", cls.tile_priority) + cls.limit = settings.get("limit", cls.limit) + cls.group = settings.get("group", cls.group) + def get_job_info(self): job_info = DeadlineJobInfo(Plugin="MayaBatch") @@ -748,6 +764,19 @@ def _iter_expected_files(exp): for file in exp: yield file + @classmethod + def get_attribute_defs(cls): + defs = super(MayaSubmitDeadline, cls).get_attribute_defs() + + defs.extend([ + NumberDef("tile_priority", + label="Tile Assembler Priority", + decimals=0, + default=cls.tile_priority) + ]) + + return defs + def _format_tiles( filename, index, tiles_x, tiles_y, From 8f8136f5228b5e227c4314a97c23682f83b3d44a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:07:39 +0100 Subject: [PATCH 055/175] Use attribute def values for pools --- .../modules/deadline/plugins/publish/collect_pools.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py index f6f7f7905b3..b5a09c68835 100644 --- a/openpype/modules/deadline/plugins/publish/collect_pools.py +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -27,11 +27,18 @@ def apply_settings(cls, project_settings, system_settings): cls.secondary_pool = settings.get("secondary_pool", None) def process(self, instance): + + settings = self.get_attr_values_from_data(instance.data) + if not instance.data.get("primaryPool"): - instance.data["primaryPool"] = self.primary_pool or "none" + 
instance.data["primaryPool"] = ( + settings.get("primaryPool") or self.primary_pool or "none" + ) if not instance.data.get("secondaryPool"): - instance.data["secondaryPool"] = self.secondary_pool or "none" + instance.data["secondaryPool"] = ( + settings.get("secondaryPool") or self.secondary_pool or "none" + ) def get_attribute_defs(self): From b134b7f55f1e6942adbdafee5d7805d8f6411764 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:20:24 +0100 Subject: [PATCH 056/175] Better variable name --- openpype/modules/deadline/plugins/publish/collect_pools.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py index b5a09c68835..1d8399f7bd5 100644 --- a/openpype/modules/deadline/plugins/publish/collect_pools.py +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -28,16 +28,16 @@ def apply_settings(cls, project_settings, system_settings): def process(self, instance): - settings = self.get_attr_values_from_data(instance.data) + attr_values = self.get_attr_values_from_data(instance.data) if not instance.data.get("primaryPool"): instance.data["primaryPool"] = ( - settings.get("primaryPool") or self.primary_pool or "none" + attr_values.get("primaryPool") or self.primary_pool or "none" ) if not instance.data.get("secondaryPool"): instance.data["secondaryPool"] = ( - settings.get("secondaryPool") or self.secondary_pool or "none" + attr_values.get("secondaryPool") or self.secondary_pool or "none" # noqa ) def get_attribute_defs(self): From a89beb8d6fa269af01c1335af9802f593ad31460 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:20:35 +0100 Subject: [PATCH 057/175] Use attribute value --- .../deadline/plugins/publish/submit_maya_deadline.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py 
b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index b611bcd4f20..135a8c59396 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -426,8 +426,10 @@ def _tile_render(self, payload): assembly_job_info.Name += " - Tile Assembly Job" assembly_job_info.Frames = 1 assembly_job_info.MachineLimit = 1 - assembly_job_info.Priority = instance.data.get("tile_priority", - self.tile_priority) + + attr_values = self.get_attr_values_from_data(instance.data) + assembly_job_info.Priority = attr_values.get("tile_priority", + self.tile_priority) assembly_plugin_info = { "CleanupTiles": 1, From f3033feeac74470305b814a48df39e2e78950893 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:25:16 +0100 Subject: [PATCH 058/175] Resolve todo --- .../publish/submit_maya_remote_publish_deadline.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index bab6591c7f3..d02ad9d5ec7 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -5,7 +5,6 @@ from maya import cmds from openpype.pipeline import legacy_io, PublishXmlValidationError -from openpype.settings import get_project_settings from openpype.tests.lib import is_in_tests import pyblish.api @@ -36,12 +35,10 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): def process(self, instance): project_name = instance.context.data["projectName"] - # TODO settings can be received from 'context.data["project_settings"]' - settings = get_project_settings(project_name) + # use setting for publish job on farm, no reason to have it separately - deadline_publish_job_sett = 
(settings["deadline"] - ["publish"] - ["ProcessSubmittedJobOnFarm"]) + project_settings = instance.context.data["project_settings"] + deadline_publish_job_sett = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"] # noqa # Ensure no errors so far if not (all(result["success"] From 99b9f77c991bc0d39feb1528a9c6658185bae8c6 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:44:53 +0100 Subject: [PATCH 059/175] Refactor maya submit remote publish to abstract submit deadline class --- .../submit_maya_remote_publish_deadline.py | 120 +++++++++--------- 1 file changed, 59 insertions(+), 61 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index d02ad9d5ec7..1f7d48b0d9c 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -1,16 +1,33 @@ import os -import requests +import attr from datetime import datetime from maya import cmds from openpype.pipeline import legacy_io, PublishXmlValidationError from openpype.tests.lib import is_in_tests +from openpype_modules.deadline import abstract_submit_deadline +from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo import pyblish.api -class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): +@attr.s +class MayaPluginInfo(object): + Build = attr.ib(default=None) # Don't force build + StrictErrorChecking = attr.ib(default=True) + + SceneFile = attr.ib(default=None) # Input scene + Version = attr.ib(default=None) # Mandatory for Deadline + ProjectPath = attr.ib(default=None) + + ScriptJob = attr.ib(default=True) + ScriptFilename = attr.ib(default=None) + + +class MayaSubmitRemotePublishDeadline( + abstract_submit_deadline.AbstractSubmitDeadline + ): """Submit Maya scene to perform a local publish in 
Deadline. Publishing in Deadline can be helpful for scenes that publish very slow. @@ -34,11 +51,6 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): targets = ["local"] def process(self, instance): - project_name = instance.context.data["projectName"] - - # use setting for publish job on farm, no reason to have it separately - project_settings = instance.context.data["project_settings"] - deadline_publish_job_sett = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"] # noqa # Ensure no errors so far if not (all(result["success"] @@ -50,52 +62,37 @@ def process(self, instance): "Skipping submission..") return + super(MayaSubmitRemotePublishDeadline, self).process(instance) + + def get_job_info(self): + instance = self._instance + context = instance.context + + project_name = instance.context.data["projectName"] scene = instance.context.data["currentFile"] scenename = os.path.basename(scene) job_name = "{scene} [PUBLISH]".format(scene=scenename) batch_name = "{code} - {scene}".format(code=project_name, scene=scenename) + if is_in_tests(): batch_name += datetime.now().strftime("%d%m%Y%H%M%S") - # Generate the payload for Deadline submission - payload = { - "JobInfo": { - "Plugin": "MayaBatch", - "BatchName": batch_name, - "Name": job_name, - "UserName": instance.context.data["user"], - "Comment": instance.context.data.get("comment", ""), - # "InitialStatus": state - "Department": deadline_publish_job_sett["deadline_department"], - "ChunkSize": deadline_publish_job_sett["deadline_chunk_size"], - "Priority": deadline_publish_job_sett["deadline_priority"], - "Group": deadline_publish_job_sett["deadline_group"], - "Pool": deadline_publish_job_sett["deadline_pool"], - }, - "PluginInfo": { - - "Build": None, # Don't force build - "StrictErrorChecking": True, - "ScriptJob": True, - - # Inputs - "SceneFile": scene, - "ScriptFilename": "{OPENPYPE_REPOS_ROOT}/openpype/scripts/remote_publish.py", # noqa - - # Mandatory for Deadline - 
"Version": cmds.about(version=True), - - # Resolve relative references - "ProjectPath": cmds.workspace(query=True, - rootDirectory=True), - - }, - - # Mandatory for Deadline, may be empty - "AuxFiles": [] - } + job_info = DeadlineJobInfo(Plugin="MayaBatch") + job_info.BatchName = batch_name + job_info.Name = job_name + job_info.UserName = context.data.get("user") + job_info.Comment = context.data.get("comment", "") + + # use setting for publish job on farm, no reason to have it separately + project_settings = context.data["project_settings"] + deadline_publish_job_sett = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"] # noqa + job_info.Department = deadline_publish_job_sett["deadline_department"] + job_info.ChunkSize = deadline_publish_job_sett["deadline_chunk_size"] + job_info.Priority = deadline_publish_job_sett["deadline_priority"] + job_info.Group = deadline_publish_job_sett["deadline_group"] + job_info.Pool = deadline_publish_job_sett["deadline_pool"] # Include critical environment variables with submission + api.Session keys = [ @@ -118,20 +115,21 @@ def process(self, instance): environment["OPENPYPE_PUBLISH_SUBSET"] = instance.data["subset"] environment["OPENPYPE_REMOTE_PUBLISH"] = "1" - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) - - self.log.info("Submitting Deadline job ...") - deadline_url = instance.context.data["defaultDeadline"] - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - deadline_url = instance.data.get("deadlineUrl") - assert deadline_url, "Requires Deadline Webservice URL" - url = "{}/api/jobs".format(deadline_url) - response = requests.post(url, json=payload, timeout=10) - if not response.ok: - raise Exception(response.text) + for key, value in environment.items(): + job_info.EnvironmentKeyValue[key] = value + + def get_plugin_info(self): + + instance = 
self._instance + context = instance.context + + scene = context.data["currentFile"] + + plugin_info = MayaPluginInfo() + plugin_info.SceneFile = scene + plugin_info.ScriptFilename = "{OPENPYPE_REPOS_ROOT}/openpype/scripts/remote_publish.py" # noqa + plugin_info.Version = cmds.about(version=True) + plugin_info.ProjectPath = cmds.workspace(query=True, + rootDirectory=True) + + return attr.asdict(plugin_info) From 5ed2751bd2d62ee8516eb82b0b6dc344a749a67d Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:45:25 +0100 Subject: [PATCH 060/175] Cosmetics --- .../plugins/publish/submit_maya_remote_publish_deadline.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index 1f7d48b0d9c..ce75e45a9fd 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -26,8 +26,7 @@ class MayaPluginInfo(object): class MayaSubmitRemotePublishDeadline( - abstract_submit_deadline.AbstractSubmitDeadline - ): + abstract_submit_deadline.AbstractSubmitDeadline): """Submit Maya scene to perform a local publish in Deadline. Publishing in Deadline can be helpful for scenes that publish very slow. 
From a6ca5cb5dae5f1fedfe109893881d6f62fda3759 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:46:42 +0100 Subject: [PATCH 061/175] Cosmetics --- .../plugins/publish/submit_maya_remote_publish_deadline.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index ce75e45a9fd..024537e53fa 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -93,7 +93,7 @@ def get_job_info(self): job_info.Group = deadline_publish_job_sett["deadline_group"] job_info.Pool = deadline_publish_job_sett["deadline_pool"] - # Include critical environment variables with submission + api.Session + # Include critical environment variables with submission + Session keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", @@ -119,10 +119,7 @@ def get_job_info(self): def get_plugin_info(self): - instance = self._instance - context = instance.context - - scene = context.data["currentFile"] + scene = self._instance.context.data["currentFile"] plugin_info = MayaPluginInfo() plugin_info.SceneFile = scene From 1598ad81ef430c7691e18e664c84193d72b74c8d Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 29 Jan 2023 23:59:20 +0100 Subject: [PATCH 062/175] Fix get_attribute_defs --- openpype/modules/deadline/plugins/publish/collect_pools.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py index 1d8399f7bd5..f4f588bf31a 100644 --- a/openpype/modules/deadline/plugins/publish/collect_pools.py +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -40,7 +40,8 @@ def process(self, instance): attr_values.get("secondaryPool") or 
self.secondary_pool or "none" # noqa ) - def get_attribute_defs(self): + @classmethod + def get_attribute_defs(cls): # TODO: Preferably this would be an enum for the user # but the Deadline server URL can be dynamic and @@ -53,8 +54,8 @@ def get_attribute_defs(self): return [ TextDef("primaryPool", label="Primary Pool", - default=self.primary_pool), + default=cls.primary_pool), TextDef("secondaryPool", label="Secondary Pool", - default=self.secondary_pool) + default=cls.secondary_pool) ] From 7e41b4449850b386eeca1767f9c58b6362f2c594 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 00:08:57 +0100 Subject: [PATCH 063/175] EARLY WIP - Refactor Create Render to new style publisher --- .../maya/plugins/create/create_render.py | 548 +++++++----------- 1 file changed, 208 insertions(+), 340 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 83751494427..72182458ddf 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -1,29 +1,44 @@ # -*- coding: utf-8 -*- """Create ``Render`` instance in Maya.""" -import json -import os - -import appdirs -import requests - from maya import cmds from maya.app.renderSetup.model import renderSetup -from openpype.settings import ( - get_system_settings, - get_project_settings, -) -from openpype.lib import requests_get -from openpype.modules import ModulesManager -from openpype.pipeline import legacy_io from openpype.hosts.maya.api import ( lib, lib_rendersettings, plugin ) +from openpype.lib import ( + BoolDef, + NumberDef +) + +from openpype.pipeline import legacy_io +from openpype.pipeline.create import ( + CreatorError, + HiddenCreator, + CreatedInstance +) + + +def ensure_namespace(namespace): + """Make sure the namespace exists. + + Args: + namespace (str): The preferred namespace name. 
+ Returns: + str: The generated or existing namespace -class CreateRender(plugin.Creator): + """ + exists = cmds.namespace(exists=namespace) + if exists: + return namespace + else: + return cmds.namespace(add=namespace) + + +class CreateRender(plugin.MayaCreator): """Create *render* instance. Render instances are not actually published, they hold options for @@ -34,20 +49,9 @@ class CreateRender(plugin.Creator): farm. Instance has following attributes:: - - primaryPool (list of str): Primary list of slave machine pool to use. - secondaryPool (list of str): Optional secondary list of slave pools. - suspendPublishJob (bool): Suspend the job after it is submitted. extendFrames (bool): Use already existing frames from previous version to extend current render. overrideExistingFrame (bool): Overwrite already existing frames. - priority (int): Submitted job priority - framesPerTask (int): How many frames per task to render. This is - basically job division on render farm. - whitelist (list of str): White list of slave machines - machineList (list of str): Specific list of slave machines to use - useMayaBatch (bool): Use Maya batch mode to render as opposite to - Maya interactive mode. This consumes different licenses. vrscene (bool): Submit as ``vrscene`` file for standalone V-Ray renderer. ass (bool): Submit as ``ass`` file for standalone Arnold renderer. 
@@ -60,341 +64,205 @@ class CreateRender(plugin.Creator): """ + identifier = "io.openpype.creators.maya.render" label = "Render" family = "rendering" icon = "eye" - _token = None - _user = None - _password = None - _project_settings = None + render_settings = {} + + @classmethod + def apply_settings(cls, project_settings, system_settings): + cls.render_settings = project_settings["maya"]["RenderSettings"] - def __init__(self, *args, **kwargs): - """Constructor.""" - super(CreateRender, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): - # Defaults - self._project_settings = get_project_settings( - legacy_io.Session["AVALON_PROJECT"]) - if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]: # noqa + # Only allow a single render instance to exist + nodes = lib.lsattr("creator_identifier", self.identifier) + if nodes: + raise CreatorError("A Render instance already exists - only " + "one can be configured.") + + # Apply default project render settings on create + if self.render_settings.get("apply_render_settings"): lib_rendersettings.RenderSettings().set_default_renderer_settings() - # Deadline-only - manager = ModulesManager() - deadline_settings = get_system_settings()["modules"]["deadline"] - if not deadline_settings["enabled"]: - self.deadline_servers = {} - return - self.deadline_module = manager.modules_by_name["deadline"] - try: - default_servers = deadline_settings["deadline_urls"] - project_servers = ( - self._project_settings["deadline"]["deadline_servers"] - ) - self.deadline_servers = { - k: default_servers[k] - for k in project_servers - if k in default_servers - } + with lib.undo_chunk(): + instance = super(CreateRender, self).create(subset_name, + instance_data, + pre_create_data) + # We never want to SHOW the instance in the UI since the parent + # class already adds it after creation let's directly remove it. 
+ self._remove_instance_from_context(instance) - if not self.deadline_servers: - self.deadline_servers = default_servers + # TODO: Now make it so that RenderLayerCreator 'collect' + # automatically gets triggered to directly see renderlayers - except AttributeError: - # Handle situation were we had only one url for deadline. - # get default deadline webservice url from deadline module - self.deadline_servers = self.deadline_module.deadline_urls + return instance - def process(self): - """Entry point.""" - exists = cmds.ls(self.name) - if exists: - cmds.warning("%s already exists." % exists[0]) - return + def collect_instances(self): + # We never show this instance in the publish UI + return - use_selection = self.options.get("useSelection") - with lib.undo_chunk(): - self._create_render_settings() - self.instance = super(CreateRender, self).process() - # create namespace with instance - index = 1 - namespace_name = "_{}".format(str(self.instance)) - try: - cmds.namespace(rm=namespace_name) - except RuntimeError: - # namespace is not empty, so we leave it untouched - pass - - while cmds.namespace(exists=namespace_name): - namespace_name = "_{}{}".format(str(self.instance), index) - index += 1 - - namespace = cmds.namespace(add=namespace_name) - - # add Deadline server selection list - if self.deadline_servers: - cmds.scriptJob( - attributeChange=[ - "{}.deadlineServers".format(self.instance), - self._deadline_webservice_changed - ]) - - cmds.setAttr("{}.machineList".format(self.instance), lock=True) - rs = renderSetup.instance() - layers = rs.getRenderLayers() - if use_selection: - self.log.info("Processing existing layers") - sets = [] - for layer in layers: - self.log.info(" - creating set for {}:{}".format( - namespace, layer.name())) - render_set = cmds.sets( - n="{}:{}".format(namespace, layer.name())) - sets.append(render_set) - cmds.sets(sets, forceElement=self.instance) - - # if no render layers are present, create default one with - # asterisk selector - if 
not layers: - render_layer = rs.createRenderLayer('Main') - collection = render_layer.createCollection("defaultCollection") - collection.getSelector().setPattern('*') - - return self.instance - - def _deadline_webservice_changed(self): - """Refresh Deadline server dependent options.""" - # get selected server - webservice = self.deadline_servers[ - self.server_aliases[ - cmds.getAttr("{}.deadlineServers".format(self.instance)) - ] - ] - pools = self.deadline_module.get_deadline_pools(webservice, self.log) - cmds.deleteAttr("{}.primaryPool".format(self.instance)) - cmds.deleteAttr("{}.secondaryPool".format(self.instance)) - - pool_setting = (self._project_settings["deadline"] - ["publish"] - ["CollectDeadlinePools"]) - - primary_pool = pool_setting["primary_pool"] - sorted_pools = self._set_default_pool(list(pools), primary_pool) - cmds.addAttr(self.instance, longName="primaryPool", - attributeType="enum", - enumName=":".join(sorted_pools)) - - pools = ["-"] + pools - secondary_pool = pool_setting["secondary_pool"] - sorted_pools = self._set_default_pool(list(pools), secondary_pool) - cmds.addAttr("{}.secondaryPool".format(self.instance), - attributeType="enum", - enumName=":".join(sorted_pools)) - - def _create_render_settings(self): - """Create instance settings.""" - # get pools (slave machines of the render farm) - pool_names = [] - default_priority = 50 - - self.data["suspendPublishJob"] = False - self.data["review"] = True - self.data["extendFrames"] = False - self.data["overrideExistingFrame"] = True - # self.data["useLegacyRenderLayers"] = True - self.data["priority"] = default_priority - self.data["tile_priority"] = default_priority - self.data["framesPerTask"] = 1 - self.data["whitelist"] = False - self.data["machineList"] = "" - self.data["useMayaBatch"] = False - self.data["tileRendering"] = False - self.data["tilesX"] = 2 - self.data["tilesY"] = 2 - self.data["convertToScanline"] = False - self.data["useReferencedAovs"] = False - 
self.data["renderSetupIncludeLights"] = ( - self._project_settings.get( - "maya", {}).get( - "RenderSettings", {}).get( - "enable_all_lights", False) - ) - # Disable for now as this feature is not working yet - # self.data["assScene"] = False + def get_pre_create_attr_defs(self): + # Do not show the "use_selection" setting from parent class + return [] - system_settings = get_system_settings()["modules"] - deadline_enabled = system_settings["deadline"]["enabled"] - muster_enabled = system_settings["muster"]["enabled"] - muster_url = system_settings["muster"]["MUSTER_REST_URL"] +class RenderlayerCreator(HiddenCreator, plugin.MayaCreatorBase): + """Create and manges renderlayer subset per renderLayer in workfile. - if deadline_enabled and muster_enabled: - self.log.error( - "Both Deadline and Muster are enabled. " "Cannot support both." - ) - raise RuntimeError("Both Deadline and Muster are enabled") - - if deadline_enabled: - self.server_aliases = list(self.deadline_servers.keys()) - self.data["deadlineServers"] = self.server_aliases - - try: - deadline_url = self.deadline_servers["default"] - except KeyError: - # if 'default' server is not between selected, - # use first one for initial list of pools. 
- deadline_url = next(iter(self.deadline_servers.values())) - # Uses function to get pool machines from the assigned deadline - # url in settings - pool_names = self.deadline_module.get_deadline_pools(deadline_url, - self.log) - maya_submit_dl = self._project_settings.get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}) - priority = maya_submit_dl.get("priority", default_priority) - self.data["priority"] = priority - - tile_priority = maya_submit_dl.get("tile_priority", - default_priority) - self.data["tile_priority"] = tile_priority - - pool_setting = (self._project_settings["deadline"] - ["publish"] - ["CollectDeadlinePools"]) - primary_pool = pool_setting["primary_pool"] - self.data["primaryPool"] = self._set_default_pool(pool_names, - primary_pool) - # We add a string "-" to allow the user to not - # set any secondary pools - pool_names = ["-"] + pool_names - secondary_pool = pool_setting["secondary_pool"] - self.data["secondaryPool"] = self._set_default_pool(pool_names, - secondary_pool) - - if muster_enabled: - self.log.info(">>> Loading Muster credentials ...") - self._load_credentials() - self.log.info(">>> Getting pools ...") - pools = [] - try: - pools = self._get_muster_pools() - except requests.exceptions.HTTPError as e: - if e.startswith("401"): - self.log.warning("access token expired") - self._show_login() - raise RuntimeError("Access token expired") - except requests.exceptions.ConnectionError: - self.log.error("Cannot connect to Muster API endpoint.") - raise RuntimeError("Cannot connect to {}".format(muster_url)) - for pool in pools: - self.log.info(" - pool: {}".format(pool["name"])) - pool_names.append(pool["name"]) - - self.options = {"useSelection": False} # Force no content - - def _set_default_pool(self, pool_names, pool_value): - """Reorder pool names, default should come first""" - if pool_value and pool_value in pool_names: - pool_names.remove(pool_value) - pool_names = [pool_value] + pool_names - return 
pool_names - - def _load_credentials(self): - """Load Muster credentials. - - Load Muster credentials from file and set ``MUSTER_USER``, - ``MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from settings. - - Raises: - RuntimeError: If loaded credentials are invalid. - AttributeError: If ``MUSTER_REST_URL`` is not set. + This does no do ANYTHING until a CreateRender subset exists in the + scene, created by the CreateRender creator. - """ - app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype")) - file_name = "muster_cred.json" - fpath = os.path.join(app_dir, file_name) - file = open(fpath, "r") - muster_json = json.load(file) - self._token = muster_json.get("token", None) - if not self._token: - self._show_login() - raise RuntimeError("Invalid access token for Muster") - file.close() - self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL") - if not self.MUSTER_REST_URL: - raise AttributeError("Muster REST API url not set") - - def _get_muster_pools(self): - """Get render pools from Muster. - - Raises: - Exception: If pool list cannot be obtained from Muster. + """ - """ - params = {"authToken": self._token} - api_entry = "/api/pools/list" - response = requests_get(self.MUSTER_REST_URL + api_entry, - params=params) - if response.status_code != 200: - if response.status_code == 401: - self.log.warning("Authentication token expired.") - self._show_login() - else: - self.log.error( - ("Cannot get pools from " - "Muster: {}").format(response.status_code) - ) - raise Exception("Cannot get pools from Muster") - try: - pools = response.json()["ResponseData"]["pools"] - except ValueError as e: - self.log.error("Invalid response from Muster server {}".format(e)) - raise Exception("Invalid response from Muster server") - - return pools - - def _show_login(self): - # authentication token expired so we need to login to Muster - # again to get it. We use Pype API call to show login window. 
- api_url = "{}/muster/show_login".format( - os.environ["OPENPYPE_WEBSERVER_URL"]) - self.log.debug(api_url) - login_response = requests_get(api_url, timeout=1) - if login_response.status_code != 200: - self.log.error("Cannot show login form to Muster") - raise Exception("Cannot show login form to Muster") - - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. + identifier = "io.openpype.creators.maya.renderlayer" + family = "renderlayer" + label = "Renderlayer" + + render_settings = {} + + @classmethod + def apply_settings(cls, project_settings, system_settings): + cls.render_settings = project_settings["maya"]["RenderSettings"] + + def create(self, instance_data, source_data): + # Make sure an instance exists per renderlayer in the scene + + # create namespace with instance + #namespace_name = "_{}".format(subset_name) + #namespace = ensure_namespace(namespace_name) + + # Pre-process any existing layers + # TODO: Document why we're processing the layers explicitly? 
+ + # self.log.info("Processing existing layers") + # sets = [] + # for layer in layers: + # set_name = "{}:{}".format(namespace, layer.name()) + # self.log.info(" - creating set for {}".format(set_name)) + # render_set = cmds.sets(name=set_name, empty=True) + # sets.append(render_set) + # + # cmds.sets(sets, forceElement=instance_node) + # + # # if no render layers are present, create default one with + # # asterisk selector + # if not layers: + # render_layer = rs.createRenderLayer('Main') + # collection = render_layer.createCollection("defaultCollection") + # collection.getSelector().setPattern('*') + return + + def collect_instances(self): + + # We only collect if a CreateRender instance exists + create_render_exists = any( + self.iter_subset_nodes(identifier=CreateRender.identifier) + ) + if not create_render_exists: + return - """ - if "verify" not in kwargs: - kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - return requests.post(*args, **kwargs) + rs = renderSetup.instance() + layers = rs.getRenderLayers() + for layer in layers: + subset_name = "render" + layer.name() - def _requests_get(self, *args, **kwargs): - """Wrap request get method. + instance_data = { + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"], + "variant": layer.name(), + } + + instance = CreatedInstance( + family=self.family, + subset_name=subset_name, + data=instance_data, + creator=self + ) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + # We only generate the persisting layer data into the scene once + # we save with the UI on e.g. 
validate or publish + # TODO: Implement this behavior for data persistence + + # for instance, changes in update_list.items(): + # instance_node = instance.data.get("instance_node") + # if not instance_node: + # layer = instance.data.get("layer") + # instance_node = self._create_layer_instance_node(layer) + # + # self.imprint_instance_node(instance_node, + # data=instance.data_to_store()) + pass - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. + def remove_instances(self, instances): + """Remove specified instance from the scene. - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. + This is only removing `id` parameter so instance is no longer + instance, because it might contain valuable data for artist. 
""" - if "verify" not in kwargs: - kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - return requests.get(*args, **kwargs) + for instance in instances: + node = instance.data.get("instance_node") + if node: + cmds.delete(node) + + self._remove_instance_from_context(instance) + + def get_instance_attr_defs(self): + """Create instance settings.""" + + return [ + BoolDef("review", + label="Review", + tooltip="Mark as reviewable", + default=True), + BoolDef("extendFrames", + label="Extend Frames", + tooltip="Extends the frames on top of the previous " + "publish.\nIf the previous was 1001-1050 and you " + "would now submit 1020-1070 only the new frames " + "1051-1070 would be rendered and published " + "together with the previously rendered frames.\n" + "If 'overrideExistingFrame' is enabled it *will* " + "render any existing frames.", + default=False), + BoolDef("overrideExistingFrame", + label="Override Existing Frame", + tooltip="Mark as reviewable", + default=True), + + # TODO: Should these move to submit_maya_deadline plugin? 
+ # Tile rendering + BoolDef("tileRendering", + label="Enable tiled rendering", + default=False), + NumberDef("tilesX", + label="Tiles X", + default=2, + minimum=1, + decimals=0), + NumberDef("tilesY", + label="Tiles Y", + default=2, + minimum=1, + decimals=0), + + # Additional settings + BoolDef("convertToScanline", + label="Convert to Scanline", + tooltip="Convert the output images to scanline images", + default=False), + BoolDef("useReferencedAovs", + label="Use Referenced AOVs", + tooltip="Consider the AOVs from referenced scenes as well", + default=False), + + BoolDef("renderSetupIncludeLights", + label="Render Setup Include Lights", + default=self.render_settings.get("enable_all_lights", + False)) + ] From e0aaf4dfecdad626d1917d0da58ae1873d7e0108 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 00:10:12 +0100 Subject: [PATCH 064/175] Fix cosmetics --- .../hosts/maya/plugins/create/create_render.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 72182458ddf..ae2d7b54f7e 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -130,13 +130,13 @@ def apply_settings(cls, project_settings, system_settings): def create(self, instance_data, source_data): # Make sure an instance exists per renderlayer in the scene - # create namespace with instance - #namespace_name = "_{}".format(subset_name) - #namespace = ensure_namespace(namespace_name) - - # Pre-process any existing layers - # TODO: Document why we're processing the layers explicitly? - + # # create namespace with instance + # namespace_name = "_{}".format(subset_name) + # namespace = ensure_namespace(namespace_name) + # + # # Pre-process any existing layers + # # TODO: Document why we're processing the layers explicitly? 
+ # # self.log.info("Processing existing layers") # sets = [] # for layer in layers: From fded63f00e02a52974edc43a825acd7daf91ff88 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 00:15:46 +0100 Subject: [PATCH 065/175] Correct docstring --- .../maya/plugins/create/create_render.py | 25 ++++++------------- 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index ae2d7b54f7e..851e3dba901 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -41,23 +41,12 @@ def ensure_namespace(namespace): class CreateRender(plugin.MayaCreator): """Create *render* instance. - Render instances are not actually published, they hold options for - collecting of render data. It render instance is present, it will trigger - collection of render layers, AOVs, cameras for either direct submission - to render farm or export as various standalone formats (like V-Rays - ``vrscenes`` or Arnolds ``ass`` files) and then submitting them to render - farm. - - Instance has following attributes:: - extendFrames (bool): Use already existing frames from previous version - to extend current render. - overrideExistingFrame (bool): Overwrite already existing frames. - vrscene (bool): Submit as ``vrscene`` file for standalone V-Ray - renderer. - ass (bool): Submit as ``ass`` file for standalone Arnold renderer. - tileRendering (bool): Instance is set to tile rendering mode. We - won't submit actual render, but we'll make publish job to wait - for Tile Assembly job done and then publish. + This render instance is not visible in the UI as an instance nor does + it by itself publish. Instead, whenever this is created the + CreateRenderlayer creator collects the active scene's actual renderlayers + as individual instances to submit for publishing. 
+ + This Creator is solely to SHOW in the "Create" of the new publisher. See Also: https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup @@ -109,7 +98,7 @@ def get_pre_create_attr_defs(self): return [] -class RenderlayerCreator(HiddenCreator, plugin.MayaCreatorBase): +class CreateRenderlayer(HiddenCreator, plugin.MayaCreatorBase): """Create and manges renderlayer subset per renderLayer in workfile. This does no do ANYTHING until a CreateRender subset exists in the From 82b6f88c8bf29976d31c748464af83a14880bee3 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 00:24:22 +0100 Subject: [PATCH 066/175] Add icon --- openpype/hosts/maya/plugins/create/create_render.py | 1 + 1 file changed, 1 insertion(+) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 851e3dba901..7e8af79ee64 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -109,6 +109,7 @@ class CreateRenderlayer(HiddenCreator, plugin.MayaCreatorBase): identifier = "io.openpype.creators.maya.renderlayer" family = "renderlayer" label = "Renderlayer" + icon = "eye" render_settings = {} From b7e9240ecabbdfb1b92ce13cd4b29661a80dca77 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 00:35:32 +0100 Subject: [PATCH 067/175] Simplify Create Render logic --- .../maya/plugins/create/create_render.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 7e8af79ee64..64cd1fadba6 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -16,6 +16,7 @@ from openpype.pipeline import legacy_io from openpype.pipeline.create import ( CreatorError, + Creator, HiddenCreator, CreatedInstance ) @@ -38,7 +39,7 @@ def 
ensure_namespace(namespace): return cmds.namespace(add=namespace) -class CreateRender(plugin.MayaCreator): +class CreateRender(Creator): """Create *render* instance. This render instance is not visible in the UI as an instance nor does @@ -67,7 +68,7 @@ def apply_settings(cls, project_settings, system_settings): def create(self, subset_name, instance_data, pre_create_data): # Only allow a single render instance to exist - nodes = lib.lsattr("creator_identifier", self.identifier) + nodes = lib.lsattr("pre_creator_identifier", self.identifier) if nodes: raise CreatorError("A Render instance already exists - only " "one can be configured.") @@ -76,26 +77,28 @@ def create(self, subset_name, instance_data, pre_create_data): if self.render_settings.get("apply_render_settings"): lib_rendersettings.RenderSettings().set_default_renderer_settings() + # TODO: Create default 'Main' renderlayer if no renderlayers exist yet + with lib.undo_chunk(): - instance = super(CreateRender, self).create(subset_name, - instance_data, - pre_create_data) - # We never want to SHOW the instance in the UI since the parent - # class already adds it after creation let's directly remove it. 
- self._remove_instance_from_context(instance) + node = cmds.sets(empty=True, name=subset_name) + lib.imprint(node, data={ + "pre_creator_identifier": self.identifier + }) # TODO: Now make it so that RenderLayerCreator 'collect' # automatically gets triggered to directly see renderlayers - return instance + return def collect_instances(self): # We never show this instance in the publish UI return - def get_pre_create_attr_defs(self): - # Do not show the "use_selection" setting from parent class - return [] + def update_instances(self, update_list): + return + + def remove_instances(self, instances): + return class CreateRenderlayer(HiddenCreator, plugin.MayaCreatorBase): @@ -148,10 +151,7 @@ def create(self, instance_data, source_data): def collect_instances(self): # We only collect if a CreateRender instance exists - create_render_exists = any( - self.iter_subset_nodes(identifier=CreateRender.identifier) - ) - if not create_render_exists: + if not lib.lsattr("pre_creator_identifier", CreateRender.identifier): return rs = renderSetup.instance() From a37bb77f8d19f14483baf8082b9edb447c2acc0a Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 09:45:38 +0100 Subject: [PATCH 068/175] Support RenderLayer removals --- .../maya/plugins/create/create_render.py | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 64cd1fadba6..cfd0798abae 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -195,12 +195,22 @@ def remove_instances(self, instances): instance, because it might contain valuable data for artist. 
""" - for instance in instances: - node = instance.data.get("instance_node") - if node: - cmds.delete(node) + # Instead of removing the single instance or renderlayers we instead + # remove the CreateRender node this creator relies on to decide whether + # it should collect anything at all. + nodes = lib.lsattr("pre_creator_identifier", CreateRender.identifier) + if nodes: + cmds.delete(nodes) + + # Remove ALL of the instances even if only one gets deleted + for instance in list(self.create_context.instances): + if instance.get("creator_identifier") == self.identifier: + self._remove_instance_from_context(instance) - self._remove_instance_from_context(instance) + # TODO: Remove the stored settings per renderlayer too? + # node = instance.data.get("instance_node") + # if node: + # cmds.delete(node) def get_instance_attr_defs(self): """Create instance settings.""" From 2ac496572670eb28f72bb7485e21172eb94051df Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 10:09:25 +0100 Subject: [PATCH 069/175] Auto create main layer, directly refresh renderlayers on CreateRender --- .../maya/plugins/create/create_render.py | 66 ++++++++++--------- 1 file changed, 35 insertions(+), 31 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index cfd0798abae..f86acb03269 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -77,7 +77,13 @@ def create(self, subset_name, instance_data, pre_create_data): if self.render_settings.get("apply_render_settings"): lib_rendersettings.RenderSettings().set_default_renderer_settings() - # TODO: Create default 'Main' renderlayer if no renderlayers exist yet + # if no render layers are present, create default one with + # asterisk selector + rs = renderSetup.instance() + if not rs.getRenderLayers(): + render_layer = rs.createRenderLayer('Main') + collection = 
render_layer.createCollection("defaultCollection") + collection.getSelector().setPattern('*') with lib.undo_chunk(): node = cmds.sets(empty=True, name=subset_name) @@ -85,10 +91,14 @@ def create(self, subset_name, instance_data, pre_create_data): "pre_creator_identifier": self.identifier }) - # TODO: Now make it so that RenderLayerCreator 'collect' - # automatically gets triggered to directly see renderlayers - - return + # By RenderLayerCreator.create we make it so that the renderlayer + # instances directly appear even though it just collects scene + # renderlayers. This doesn't actually 'create' any scene contents. + self.create_context.create( + CreateRenderlayer.identifier, + instance_data={}, + source_data=instance_data + ) def collect_instances(self): # We never show this instance in the publish UI @@ -121,32 +131,10 @@ def apply_settings(cls, project_settings, system_settings): cls.render_settings = project_settings["maya"]["RenderSettings"] def create(self, instance_data, source_data): - # Make sure an instance exists per renderlayer in the scene - - # # create namespace with instance - # namespace_name = "_{}".format(subset_name) - # namespace = ensure_namespace(namespace_name) - # - # # Pre-process any existing layers - # # TODO: Document why we're processing the layers explicitly? 
- # - # self.log.info("Processing existing layers") - # sets = [] - # for layer in layers: - # set_name = "{}:{}".format(namespace, layer.name()) - # self.log.info(" - creating set for {}".format(set_name)) - # render_set = cmds.sets(name=set_name, empty=True) - # sets.append(render_set) - # - # cmds.sets(sets, forceElement=instance_node) - # - # # if no render layers are present, create default one with - # # asterisk selector - # if not layers: - # render_layer = rs.createRenderLayer('Main') - # collection = render_layer.createCollection("defaultCollection") - # collection.getSelector().setPattern('*') - return + # A Renderlayer is never explicitly created using the create method. + # Instead, renderlayers from the scene are collected. Thus "create" + # would only ever be called to say, 'hey, please refresh collect' + self.collect_instances() def collect_instances(self): @@ -178,6 +166,22 @@ def update_instances(self, update_list): # we save with the UI on e.g. validate or publish # TODO: Implement this behavior for data persistence + # # create namespace with instance + # namespace_name = "_{}".format(subset_name) + # namespace = ensure_namespace(namespace_name) + # + # # Pre-process any existing layers + # self.log.info("Processing existing layers") + # sets = [] + # for layer in layers: + # set_name = "{}:{}".format(namespace, layer.name()) + # self.log.info(" - creating set for {}".format(set_name)) + # render_set = cmds.sets(name=set_name, empty=True) + # sets.append(render_set) + # + # cmds.sets(sets, forceElement=instance_node) + # + # for instance, changes in update_list.items(): # instance_node = instance.data.get("instance_node") # if not instance_node: From 627e71de761b14bfe821fc4639449e87fb7bd809 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 10:27:51 +0100 Subject: [PATCH 070/175] Be more explicit about what settings we're using in the Creator --- openpype/hosts/maya/plugins/create/create_render.py | 9 +++++---- 1 file changed, 5 
insertions(+), 4 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index f86acb03269..a119aff8a93 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -124,11 +124,13 @@ class CreateRenderlayer(HiddenCreator, plugin.MayaCreatorBase): label = "Renderlayer" icon = "eye" - render_settings = {} + enable_all_lights = False @classmethod def apply_settings(cls, project_settings, system_settings): - cls.render_settings = project_settings["maya"]["RenderSettings"] + render_settings = project_settings["maya"]["RenderSettings"] + cls.enable_all_lights = render_settings.get("enable_all_lights", + cls.enable_all_lights) def create(self, instance_data, source_data): # A Renderlayer is never explicitly created using the create method. @@ -267,6 +269,5 @@ def get_instance_attr_defs(self): BoolDef("renderSetupIncludeLights", label="Render Setup Include Lights", - default=self.render_settings.get("enable_all_lights", - False)) + default=self.enable_all_lights) ] From c895b096687b25e5e2232029ddf7a22081254cc4 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:15:36 +0100 Subject: [PATCH 071/175] Implement data persistence for renderlayers --- openpype/hosts/maya/api/plugin.py | 156 +++++++++--------- .../maya/plugins/create/create_render.py | 122 +++++++++----- 2 files changed, 161 insertions(+), 117 deletions(-) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index e943d279202..16000d7f03a 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -122,46 +122,52 @@ def process(self): @six.add_metaclass(ABCMeta) -class MayaCreator(NewCreator): +class MayaCreatorBase(object): - def create(self, subset_name, instance_data, pre_create_data): + @staticmethod + def cache_subsets(shared_data): + """Cache instances for Creators to shared data. 
- members = list() - if pre_create_data.get("use_selection"): - members = cmds.ls(selection=True) + Create `maya_cached_subsets` key when needed in shared data and + fill it with all collected instances from the scene under its + respective creator identifiers. - with lib.undo_chunk(): - instance_node = cmds.sets(members, name=subset_name) - instance_data["instance_node"] = instance_node - instance = CreatedInstance( - self.family, - subset_name, - instance_data, - self) - self._add_instance_to_context(instance) + If legacy instances are detected in the scene, create + `maya_cached_legacy_subsets` there and fill it with + all legacy subsets under family as a key. - self.imprint_instance_node(instance_node, - data=instance.data_to_store()) - return instance + Args: + Dict[str, Any]: Shared data. - def collect_instances(self): - self.cache_subsets(self.collection_shared_data) - cached_subsets = self.collection_shared_data["maya_cached_subsets"] - for node in cached_subsets.get(self.identifier, []): - node_data = self.read_instance_node(node) + Return: + Dict[str, Any]: Shared data dictionary. 
- # Explicitly re-parse the node name - node_data["instance_node"] = node + """ + if shared_data.get("maya_cached_subsets") is None: + cache = dict() + cache_legacy = dict() - created_instance = CreatedInstance.from_existing(node_data, self) - self._add_instance_to_context(created_instance) + for node in cmds.ls(type='objectSet'): - def update_instances(self, update_list): - for created_inst, _changes in update_list: - data = created_inst.data_to_store() - node = data.get("instance_node") + if _get_attr(node, attr="id") != "pyblish.avalon.instance": + continue - self.imprint_instance_node(node, data) + creator_id = _get_attr(node, attr="creator_identifier") + if creator_id is not None: + # creator instance + cache.setdefault(creator_id, []).append(node) + else: + # legacy instance + family = _get_attr(node, attr="family") + if family is None: + # must be a broken instance + continue + + cache_legacy.setdefault(family, []).append(node) + + shared_data["maya_cached_subsets"] = cache + shared_data["maya_cached_legacy_subsets"] = cache_legacy + return shared_data def imprint_instance_node(self, node, data): @@ -202,8 +208,51 @@ def read_instance_node(self, node): if publish_attributes: node_data["publish_attributes"] = json.loads(publish_attributes) + # Explicitly re-parse the node name + node_data["instance_node"] = node + return node_data + +@six.add_metaclass(ABCMeta) +class MayaCreator(NewCreator, MayaCreatorBase): + + def create(self, subset_name, instance_data, pre_create_data): + + members = list() + if pre_create_data.get("use_selection"): + members = cmds.ls(selection=True) + + with lib.undo_chunk(): + instance_node = cmds.sets(members, name=subset_name) + instance_data["instance_node"] = instance_node + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self) + self._add_instance_to_context(instance) + + self.imprint_instance_node(instance_node, + data=instance.data_to_store()) + return instance + + def collect_instances(self): + 
self.cache_subsets(self.collection_shared_data) + cached_subsets = self.collection_shared_data["maya_cached_subsets"] + for node in cached_subsets.get(self.identifier, []): + node_data = self.read_instance_node(node) + + created_instance = CreatedInstance.from_existing(node_data, self) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + data = created_inst.data_to_store() + node = data.get("instance_node") + + self.imprint_instance_node(node, data) + def remove_instances(self, instances): """Remove specified instance from the scene. @@ -225,51 +274,6 @@ def get_pre_create_attr_defs(self): default=True) ] - @staticmethod - def cache_subsets(shared_data): - """Cache instances for Creators to shared data. - - Create `maya_cached_subsets` key when needed in shared data and - fill it with all collected instances from the scene under its - respective creator identifiers. - - If legacy instances are detected in the scene, create - `maya_cached_legacy_subsets` there and fill it with - all legacy subsets under family as a key. - - Args: - Dict[str, Any]: Shared data. - - Return: - Dict[str, Any]: Shared data dictionary. 
- - """ - if shared_data.get("maya_cached_subsets") is None: - cache = dict() - cache_legacy = dict() - - for node in cmds.ls(type='objectSet'): - - if _get_attr(node, attr="id") != "pyblish.avalon.instance": - continue - - creator_id = _get_attr(node, attr="creator_identifier") - if creator_id is not None: - # creator instance - cache.setdefault(creator_id, []).append(node) - else: - # legacy instance - family = _get_attr(node, attr="family") - if family is None: - # must be a broken instance - continue - - cache_legacy.setdefault(family, []).append(node) - - shared_data["maya_cached_subsets"] = cache - shared_data["maya_cached_legacy_subsets"] = cache_legacy - return shared_data - class Loader(LoaderPlugin): hosts = ["maya"] diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index a119aff8a93..8565b4b212e 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -147,52 +147,92 @@ def collect_instances(self): rs = renderSetup.instance() layers = rs.getRenderLayers() for layer in layers: - subset_name = "render" + layer.name() - - instance_data = { - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"], - "variant": layer.name(), - } - - instance = CreatedInstance( - family=self.family, - subset_name=subset_name, - data=instance_data, - creator=self - ) + layer_instance_node = self.find_layer_instance_node(layer) + if layer_instance_node: + data = self.read_instance_node(layer_instance_node) + instance = CreatedInstance.from_existing(data, creator=self) + else: + subset_name = "render" + layer.name() + + instance_data = { + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"], + "variant": layer.name(), + } + + instance = CreatedInstance( + family=self.family, + subset_name=subset_name, + data=instance_data, + creator=self + ) + instance.transient_data["layer"] = 
layer self._add_instance_to_context(instance) + def find_layer_instance_node(self, layer): + connected_sets = cmds.listConnections( + "{}.message".format(layer.name()), + source=False, + destination=True, + type="objectSet" + ) or [] + + for node in connected_sets: + if not cmds.attributeQuery("creator_identifier", + node=node, + exists=True): + continue + + creator_identifier = cmds.getAttr(node + ".creator_identifier") + if creator_identifier == self.identifier: + print(f"Found node: {node}") + return node + + def _create_layer_instance_node(self, layer): + + # We only collect if a CreateRender instance exists + create_render_sets = lib.lsattr("pre_creator_identifier", + CreateRender.identifier) + if not create_render_sets: + raise CreatorError("Creating a renderlayer instance node is not " + "allowed if no 'CreateRender' instance exists") + create_render_set = create_render_sets[0] + + namespace = "_renderingMain" + namespace = ensure_namespace(namespace) + + name = "{}:{}".format(namespace, layer.name()) + render_set = cmds.sets(name=name, empty=True) + + # Keep an active link with the renderlayer so we can retrieve it + # later by a physical maya connection instead of relying on the layer + # name to still exist + cmds.addAttr(render_set, longName="_renderLayer", at="message") + cmds.connectAttr(layer.name() + ".message", + render_set + "._renderLayer", force=True) + + # Add the set to the 'CreateRender' set. + cmds.sets(render_set, forceElement=create_render_set) + + return render_set + def update_instances(self, update_list): # We only generate the persisting layer data into the scene once # we save with the UI on e.g. 
validate or publish - # TODO: Implement this behavior for data persistence - - # # create namespace with instance - # namespace_name = "_{}".format(subset_name) - # namespace = ensure_namespace(namespace_name) - # - # # Pre-process any existing layers - # self.log.info("Processing existing layers") - # sets = [] - # for layer in layers: - # set_name = "{}:{}".format(namespace, layer.name()) - # self.log.info(" - creating set for {}".format(set_name)) - # render_set = cmds.sets(name=set_name, empty=True) - # sets.append(render_set) - # - # cmds.sets(sets, forceElement=instance_node) - # - - # for instance, changes in update_list.items(): - # instance_node = instance.data.get("instance_node") - # if not instance_node: - # layer = instance.data.get("layer") - # instance_node = self._create_layer_instance_node(layer) - # - # self.imprint_instance_node(instance_node, - # data=instance.data_to_store()) - pass + for instance, changes in update_list: + instance_node = instance.data.get("instance_node") + + # Ensure a node to persist the data to exists + if not instance_node: + layer = instance.transient_data["layer"] + instance_node = self._create_layer_instance_node(layer) + instance.data["instance_node"] = instance_node + else: + # TODO: Keep name in sync with the actual renderlayer? + pass + + self.imprint_instance_node(instance_node, + data=instance.data_to_store()) def remove_instances(self, instances): """Remove specified instance from the scene. 
From 70c453552af68dc5c7b692bb7bb588baf363726b Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:18:50 +0100 Subject: [PATCH 072/175] Delete the renderlayer instance node on remove --- openpype/hosts/maya/plugins/create/create_render.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 8565b4b212e..e23873b1887 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -253,10 +253,10 @@ def remove_instances(self, instances): if instance.get("creator_identifier") == self.identifier: self._remove_instance_from_context(instance) - # TODO: Remove the stored settings per renderlayer too? - # node = instance.data.get("instance_node") - # if node: - # cmds.delete(node) + # Remove the stored settings per renderlayer too + node = instance.data.get("instance_node") + if node and cmds.objExists(node): + cmds.delete(node) def get_instance_attr_defs(self): """Create instance settings.""" From b8e5fc21b27bd6e557d9f66f7c37a9379674dd07 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:25:12 +0100 Subject: [PATCH 073/175] Fix hound --- openpype/hosts/maya/plugins/create/create_render.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index e23873b1887..be6a6869b73 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -219,7 +219,7 @@ def _create_layer_instance_node(self, layer): def update_instances(self, update_list): # We only generate the persisting layer data into the scene once # we save with the UI on e.g. 
validate or publish - for instance, changes in update_list: + for instance, _changes in update_list: instance_node = instance.data.get("instance_node") # Ensure a node to persist the data to exists From 5b858921505ed1d52e8f69a1b4e0a92a3c508fa0 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:32:01 +0100 Subject: [PATCH 074/175] Do collect data for empty sets --- .../plugins/publish/collect_new_instances.py | 37 +++++++++---------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/collect_new_instances.py b/openpype/hosts/maya/plugins/publish/collect_new_instances.py index 6166a4878d6..ee02d3efc05 100644 --- a/openpype/hosts/maya/plugins/publish/collect_new_instances.py +++ b/openpype/hosts/maya/plugins/publish/collect_new_instances.py @@ -81,25 +81,24 @@ def process(self, instance): if creator_attributes: instance.data.update(creator_attributes) - members = cmds.sets(objset, query=True) - if members is None: - self.log.warning("Skipped empty instance: \"%s\" " % objset) - return - - # Collect members - members = cmds.ls(members, long=True) or [] - - dag_members = cmds.ls(members, type="dagNode", long=True) - children = get_all_children(dag_members) - children = cmds.ls(children, noIntermediate=True, long=True) - parents = [] - if creator_attributes.get("includeParentHierarchy", True): - # If `includeParentHierarchy` then include the parents - # so they will also be picked up in the instance by validators - parents = self.get_all_parents(members) - members_hierarchy = list(set(members + children + parents)) - - instance[:] = members_hierarchy + members = cmds.sets(objset, query=True) or [] + if not members: + self.log.warning("Empty instance: \"%s\" " % objset) + else: + # Collect members + members = cmds.ls(members, long=True) or [] + + dag_members = cmds.ls(members, type="dagNode", long=True) + children = get_all_children(dag_members) + children = cmds.ls(children, noIntermediate=True, 
long=True) + parents = [] + if creator_attributes.get("includeParentHierarchy", True): + # If `includeParentHierarchy` then include the parents + # so they will also be picked up in the instance by validators + parents = self.get_all_parents(members) + members_hierarchy = list(set(members + children + parents)) + + instance[:] = members_hierarchy # Store the exact members of the object set instance.data["setMembers"] = members From ad5cb6b1631e40927c3f0a861394234079be04a2 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:32:22 +0100 Subject: [PATCH 075/175] Do not prefix renderLayer with _ --- openpype/hosts/maya/plugins/create/create_render.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index be6a6869b73..0015eb89b28 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -207,9 +207,9 @@ def _create_layer_instance_node(self, layer): # Keep an active link with the renderlayer so we can retrieve it # later by a physical maya connection instead of relying on the layer # name to still exist - cmds.addAttr(render_set, longName="_renderLayer", at="message") + cmds.addAttr(render_set, longName="renderLayer", at="message") cmds.connectAttr(layer.name() + ".message", - render_set + "._renderLayer", force=True) + render_set + ".renderLayer", force=True) # Add the set to the 'CreateRender' set. 
cmds.sets(render_set, forceElement=create_render_set) From 63456c5e8259a043f5cab8479aa9289424fc5044 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:34:39 +0100 Subject: [PATCH 076/175] Store lowercase `renderlayer` --- openpype/hosts/maya/plugins/create/create_render.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 0015eb89b28..7ee3dcf0f0c 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -207,9 +207,9 @@ def _create_layer_instance_node(self, layer): # Keep an active link with the renderlayer so we can retrieve it # later by a physical maya connection instead of relying on the layer # name to still exist - cmds.addAttr(render_set, longName="renderLayer", at="message") + cmds.addAttr(render_set, longName="renderlayer", at="message") cmds.connectAttr(layer.name() + ".message", - render_set + ".renderLayer", force=True) + render_set + ".renderlayer", force=True) # Add the set to the 'CreateRender' set. 
cmds.sets(render_set, forceElement=create_render_set) From 16987eac6d6b48d73a3e6d3444bdcb6de642ec7d Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:35:17 +0100 Subject: [PATCH 077/175] Refactor how layer/renderlayer is retrieved from instance --- openpype/hosts/maya/plugins/publish/collect_inputs.py | 2 +- .../hosts/maya/plugins/publish/collect_render_layer_aovs.py | 2 +- .../hosts/maya/plugins/publish/collect_renderable_camera.py | 2 +- openpype/hosts/maya/plugins/publish/submit_maya_muster.py | 2 +- openpype/hosts/maya/plugins/publish/validate_frame_range.py | 2 +- .../hosts/maya/plugins/publish/validate_rendersettings.py | 2 +- .../plugins/publish/validate_vray_distributed_rendering.py | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/collect_inputs.py b/openpype/hosts/maya/plugins/publish/collect_inputs.py index 470fceffc99..71d3a789a4c 100644 --- a/openpype/hosts/maya/plugins/publish/collect_inputs.py +++ b/openpype/hosts/maya/plugins/publish/collect_inputs.py @@ -174,7 +174,7 @@ def _collect_renderlayer_inputs(self, scene_containers, instance): """Collects inputs from nodes in renderlayer, incl. 
shaders + camera""" # Get the renderlayer - renderlayer = instance.data.get("setMembers") + renderlayer = instance.data.get("renderlayer") if renderlayer == "defaultRenderLayer": # Assume all loaded containers in the scene are inputs diff --git a/openpype/hosts/maya/plugins/publish/collect_render_layer_aovs.py b/openpype/hosts/maya/plugins/publish/collect_render_layer_aovs.py index 9666499c42d..c3dc31ead9d 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render_layer_aovs.py +++ b/openpype/hosts/maya/plugins/publish/collect_render_layer_aovs.py @@ -50,7 +50,7 @@ def process(self, instance): result = [] # Collect all AOVs / Render Elements - layer = instance.data["setMembers"] + layer = instance.data["renderlayer"] node_type = rp_node_types[renderer] render_elements = cmds.ls(type=node_type) diff --git a/openpype/hosts/maya/plugins/publish/collect_renderable_camera.py b/openpype/hosts/maya/plugins/publish/collect_renderable_camera.py index 93a37d86932..d1c3cf3b2c0 100644 --- a/openpype/hosts/maya/plugins/publish/collect_renderable_camera.py +++ b/openpype/hosts/maya/plugins/publish/collect_renderable_camera.py @@ -19,7 +19,7 @@ def process(self, instance): if "vrayscene_layer" in instance.data.get("families", []): layer = instance.data.get("layer") else: - layer = instance.data["setMembers"] + layer = instance.data["renderlayer"] self.log.info("layer: {}".format(layer)) cameras = cmds.ls(type="camera", long=True) diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index 1a6463fb9de..3b4a3987dd4 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -288,7 +288,7 @@ def process(self, instance): comment = context.data.get("comment", "") scene = os.path.splitext(filename)[0] dirname = os.path.join(workspace, "renders") - renderlayer = instance.data['setMembers'] # rs_beauty + renderlayer = 
instance.data['renderlayer'] # rs_beauty renderlayer_name = instance.data['subset'] # beauty renderglobals = instance.data["renderGlobals"] # legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] diff --git a/openpype/hosts/maya/plugins/publish/validate_frame_range.py b/openpype/hosts/maya/plugins/publish/validate_frame_range.py index 1b32d795c50..57357221ff3 100644 --- a/openpype/hosts/maya/plugins/publish/validate_frame_range.py +++ b/openpype/hosts/maya/plugins/publish/validate_frame_range.py @@ -140,7 +140,7 @@ def repair(cls, instance): def repair_renderlayer(cls, instance): """Apply frame range in render settings""" - layer = instance.data["setMembers"] + layer = instance.data["renderlayer"] context = instance.context start_attr = "defaultRenderGlobals.startFrame" diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index 94e26335936..bf25e1030d0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -106,7 +106,7 @@ def get_invalid(cls, instance): multipart = False renderer = instance.data['renderer'] - layer = instance.data['setMembers'] + layer = instance.data['renderlayer'] cameras = instance.data.get("cameras", []) # Get the node attributes for current renderer diff --git a/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py b/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py index 366f3bd10e3..ff5f8c96fe9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py +++ b/openpype/hosts/maya/plugins/publish/validate_vray_distributed_rendering.py @@ -36,7 +36,7 @@ def process(self, instance): vray_settings = cmds.ls("vraySettings", type="VRaySettingsNode") assert vray_settings, "Please ensure a VRay Settings Node is present" - renderlayer = instance.data['setMembers'] + renderlayer = 
instance.data['renderlayer'] if not lib.get_attr_in_layer(self.enabled_attr, layer=renderlayer): # If not distributed rendering enabled, ignore.. @@ -51,7 +51,7 @@ def process(self, instance): @classmethod def repair(cls, instance): - renderlayer = instance.data.get("setMembers") + renderlayer = instance.data.get("renderlayer") with lib.renderlayer(renderlayer): cls.log.info("Enabling Distributed Rendering " "ignore in batch mode..") From fa9361fc39b043a08c6db65f69122792f14cd5b1 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:37:02 +0100 Subject: [PATCH 078/175] Move Muster plug-in to muster module --- .../muster}/plugins/publish/validate_muster_connection.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename openpype/{hosts/maya => modules/muster}/plugins/publish/validate_muster_connection.py (100%) diff --git a/openpype/hosts/maya/plugins/publish/validate_muster_connection.py b/openpype/modules/muster/plugins/publish/validate_muster_connection.py similarity index 100% rename from openpype/hosts/maya/plugins/publish/validate_muster_connection.py rename to openpype/modules/muster/plugins/publish/validate_muster_connection.py From 2bd2fdace1108e34f22fcdfe06ddfdb48f7694f1 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 11:38:54 +0100 Subject: [PATCH 079/175] Do not explicitly try to set `deadlineUrl` from a render instance in Collect Render --- openpype/hosts/maya/plugins/publish/collect_render.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index b1ad3ca58ed..cfa26409f4e 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -321,13 +321,6 @@ def process(self, context): ) } - # Collect Deadline url if Deadline module is enabled - deadline_settings = ( - context.data["system_settings"]["modules"]["deadline"] - ) - if 
deadline_settings["enabled"]: - data["deadlineUrl"] = render_instance.data.get("deadlineUrl") - if self.sync_workfile_version: data["version"] = context.data["version"] From 9345123190f4a41c4704fc9979de1ffb0c040d64 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 12:09:28 +0100 Subject: [PATCH 080/175] Fix default --- openpype/modules/deadline/abstract_submit_deadline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 8d29cc53a33..7a71035d491 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -700,5 +700,5 @@ def get_attribute_defs(cls): True: "Allow List", False: "Deny List", }, - default="Deny List") + default=False) ] From b9e4360ea78653dca8e5095dc0e3622babc530dc Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 12:37:11 +0100 Subject: [PATCH 081/175] Fix imprint for Create Render --- openpype/hosts/maya/plugins/create/create_render.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 7ee3dcf0f0c..8672ddba216 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -234,6 +234,18 @@ def update_instances(self, update_list): self.imprint_instance_node(instance_node, data=instance.data_to_store()) + def imprint_instance_node(self, node, data): + # Do not ever try to update the `renderlayer` since it'll try + # to remove the attribute and recreate it but fail to keep it a + # message attribute link. We only ever imprint that on the initial + # node creation. 
+ # TODO: Improve how this is handled + data.pop("renderlayer", None) + data.get("creator_attributes", {}).pop("renderlayer", None) + + return super(CreateRenderlayer, self).imprint_instance_node(node, + data=data) + def remove_instances(self, instances): """Remove specified instance from the scene. From ccb03dd56650a03f63cbc879a0aa04dfd7296328 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 12:37:22 +0100 Subject: [PATCH 082/175] Code cosmetics/clarity --- openpype/hosts/maya/plugins/create/create_render.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 8672ddba216..086b77ebb6a 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -152,20 +152,22 @@ def collect_instances(self): data = self.read_instance_node(layer_instance_node) instance = CreatedInstance.from_existing(data, creator=self) else: + # No existing scene instance node for this layer. Note that + # this instance will not have the `instance_node` data yet + # until it's been saved/persisted at least once. 
subset_name = "render" + layer.name() - instance_data = { "asset": legacy_io.Session["AVALON_ASSET"], "task": legacy_io.Session["AVALON_TASK"], "variant": layer.name(), } - instance = CreatedInstance( family=self.family, subset_name=subset_name, data=instance_data, creator=self ) + instance.transient_data["layer"] = layer self._add_instance_to_context(instance) @@ -206,7 +208,7 @@ def _create_layer_instance_node(self, layer): # Keep an active link with the renderlayer so we can retrieve it # later by a physical maya connection instead of relying on the layer - # name to still exist + # name cmds.addAttr(render_set, longName="renderlayer", at="message") cmds.connectAttr(layer.name() + ".message", render_set + ".renderlayer", force=True) @@ -222,7 +224,7 @@ def update_instances(self, update_list): for instance, _changes in update_list: instance_node = instance.data.get("instance_node") - # Ensure a node to persist the data to exists + # Ensure a node exists to persist the data to if not instance_node: layer = instance.transient_data["layer"] instance_node = self._create_layer_instance_node(layer) From e0067d98519401145f20ef112586c301f5d52cde Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 23:47:28 +0100 Subject: [PATCH 083/175] Don't use separate Creator for CreateRenderlayer logic --- .../maya/plugins/create/create_render.py | 84 +++++-------------- 1 file changed, 21 insertions(+), 63 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 086b77ebb6a..a63d4113d17 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -17,7 +17,6 @@ from openpype.pipeline.create import ( CreatorError, Creator, - HiddenCreator, CreatedInstance ) @@ -39,33 +38,33 @@ def ensure_namespace(namespace): return cmds.namespace(add=namespace) -class CreateRender(Creator): - """Create *render* instance. 
+class CreateRenderlayer(Creator, plugin.MayaCreatorBase): + """Create and manages renderlayer subset per renderLayer in workfile. - This render instance is not visible in the UI as an instance nor does - it by itself publish. Instead, whenever this is created the - CreateRenderlayer creator collects the active scene's actual renderlayers - as individual instances to submit for publishing. - - This Creator is solely to SHOW in the "Create" of the new publisher. - - See Also: - https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup + This generates a single node in the scene which tells the Creator to if + it exists collect Maya rendersetup renderlayers as individual instances. + As such, triggering create doesn't actually create the instance node per + layer but only the node which tells the Creator it may now collect + the renderlayers. """ - identifier = "io.openpype.creators.maya.render" + identifier = "io.openpype.creators.maya.renderlayer" + family = "renderlayer" label = "Render" - family = "rendering" icon = "eye" render_settings = {} + singleton_node_name = "renderingMain" @classmethod def apply_settings(cls, project_settings, system_settings): cls.render_settings = project_settings["maya"]["RenderSettings"] def create(self, subset_name, instance_data, pre_create_data): + # A Renderlayer is never explicitly created using the create method. + # Instead, renderlayers from the scene are collected. 
Thus "create" + # would only ever be called to say, 'hey, please refresh collect' # Only allow a single render instance to exist nodes = lib.lsattr("pre_creator_identifier", self.identifier) @@ -86,7 +85,7 @@ def create(self, subset_name, instance_data, pre_create_data): collection.getSelector().setPattern('*') with lib.undo_chunk(): - node = cmds.sets(empty=True, name=subset_name) + node = cmds.sets(empty=True, name=self.singleton_node_name) lib.imprint(node, data={ "pre_creator_identifier": self.identifier }) @@ -94,54 +93,12 @@ def create(self, subset_name, instance_data, pre_create_data): # By RenderLayerCreator.create we make it so that the renderlayer # instances directly appear even though it just collects scene # renderlayers. This doesn't actually 'create' any scene contents. - self.create_context.create( - CreateRenderlayer.identifier, - instance_data={}, - source_data=instance_data - ) - - def collect_instances(self): - # We never show this instance in the publish UI - return - - def update_instances(self, update_list): - return - - def remove_instances(self, instances): - return - - -class CreateRenderlayer(HiddenCreator, plugin.MayaCreatorBase): - """Create and manges renderlayer subset per renderLayer in workfile. - - This does no do ANYTHING until a CreateRender subset exists in the - scene, created by the CreateRender creator. - - """ - - identifier = "io.openpype.creators.maya.renderlayer" - family = "renderlayer" - label = "Renderlayer" - icon = "eye" - - enable_all_lights = False - - @classmethod - def apply_settings(cls, project_settings, system_settings): - render_settings = project_settings["maya"]["RenderSettings"] - cls.enable_all_lights = render_settings.get("enable_all_lights", - cls.enable_all_lights) - - def create(self, instance_data, source_data): - # A Renderlayer is never explicitly created using the create method. - # Instead, renderlayers from the scene are collected. 
Thus "create" - # would only ever be called to say, 'hey, please refresh collect' - self.collect_instances() + self.collect_instances() def collect_instances(self): # We only collect if a CreateRender instance exists - if not lib.lsattr("pre_creator_identifier", CreateRender.identifier): + if not lib.lsattr("pre_creator_identifier", self.identifier): return rs = renderSetup.instance() @@ -194,13 +151,13 @@ def _create_layer_instance_node(self, layer): # We only collect if a CreateRender instance exists create_render_sets = lib.lsattr("pre_creator_identifier", - CreateRender.identifier) + self.identifier) if not create_render_sets: raise CreatorError("Creating a renderlayer instance node is not " "allowed if no 'CreateRender' instance exists") create_render_set = create_render_sets[0] - namespace = "_renderingMain" + namespace = "_{}".format(self.singleton_node_name) namespace = ensure_namespace(namespace) name = "{}:{}".format(namespace, layer.name()) @@ -258,7 +215,7 @@ def remove_instances(self, instances): # Instead of removing the single instance or renderlayers we instead # remove the CreateRender node this creator relies on to decide whether # it should collect anything at all. 
- nodes = lib.lsattr("pre_creator_identifier", CreateRender.identifier) + nodes = lib.lsattr("pre_creator_identifier", self.identifier) if nodes: cmds.delete(nodes) @@ -323,5 +280,6 @@ def get_instance_attr_defs(self): BoolDef("renderSetupIncludeLights", label="Render Setup Include Lights", - default=self.enable_all_lights) + default=self.render_settings.get("enable_all_lights", + False)) ] From 5aa05e0bf09891beb0fc02f4c674dd39e0cc97e5 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Mon, 30 Jan 2023 23:54:15 +0100 Subject: [PATCH 084/175] Implement dedicated function to get the singleton render instance node --- .../maya/plugins/create/create_render.py | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index a63d4113d17..ee125029b1a 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -57,6 +57,11 @@ class CreateRenderlayer(Creator, plugin.MayaCreatorBase): render_settings = {} singleton_node_name = "renderingMain" + def _get_singleton_node(self, return_all=False): + nodes = lib.lsattr("pre_creator_identifier", self.identifier) + if nodes: + return nodes if return_all else nodes[0] + @classmethod def apply_settings(cls, project_settings, system_settings): cls.render_settings = project_settings["maya"]["RenderSettings"] @@ -67,8 +72,7 @@ def create(self, subset_name, instance_data, pre_create_data): # would only ever be called to say, 'hey, please refresh collect' # Only allow a single render instance to exist - nodes = lib.lsattr("pre_creator_identifier", self.identifier) - if nodes: + if not self._get_singleton_node(): raise CreatorError("A Render instance already exists - only " "one can be configured.") @@ -97,8 +101,8 @@ def create(self, subset_name, instance_data, pre_create_data): def collect_instances(self): - # We only collect if a 
CreateRender instance exists - if not lib.lsattr("pre_creator_identifier", self.identifier): + # We only collect if the global render instance exists + if not self._get_singleton_node(): return rs = renderSetup.instance() @@ -112,6 +116,7 @@ def collect_instances(self): # No existing scene instance node for this layer. Note that # this instance will not have the `instance_node` data yet # until it's been saved/persisted at least once. + # TODO: Correctly define the subset name using templates subset_name = "render" + layer.name() instance_data = { "asset": legacy_io.Session["AVALON_ASSET"], @@ -150,12 +155,10 @@ def find_layer_instance_node(self, layer): def _create_layer_instance_node(self, layer): # We only collect if a CreateRender instance exists - create_render_sets = lib.lsattr("pre_creator_identifier", - self.identifier) - if not create_render_sets: + create_render_set = self._get_singleton_node() + if not create_render_set: raise CreatorError("Creating a renderlayer instance node is not " "allowed if no 'CreateRender' instance exists") - create_render_set = create_render_sets[0] namespace = "_{}".format(self.singleton_node_name) namespace = ensure_namespace(namespace) @@ -215,11 +218,11 @@ def remove_instances(self, instances): # Instead of removing the single instance or renderlayers we instead # remove the CreateRender node this creator relies on to decide whether # it should collect anything at all. 
- nodes = lib.lsattr("pre_creator_identifier", self.identifier) + nodes = self._get_singleton_node(return_all=True) if nodes: cmds.delete(nodes) - # Remove ALL of the instances even if only one gets deleted + # Remove ALL the instances even if only one gets deleted for instance in list(self.create_context.instances): if instance.get("creator_identifier") == self.identifier: self._remove_instance_from_context(instance) From b998ac012744876313f85daf345be2ab01bc190c Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Tue, 31 Jan 2023 14:01:14 +0100 Subject: [PATCH 085/175] Refactor Validate Maya Units to show report in new publisher --- .../publish/help/validate_maya_units.xml | 21 ++++++ .../plugins/publish/validate_maya_units.py | 70 +++++++++++++++---- 2 files changed, 76 insertions(+), 15 deletions(-) create mode 100644 openpype/hosts/maya/plugins/publish/help/validate_maya_units.xml diff --git a/openpype/hosts/maya/plugins/publish/help/validate_maya_units.xml b/openpype/hosts/maya/plugins/publish/help/validate_maya_units.xml new file mode 100644 index 00000000000..40169b28f96 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/help/validate_maya_units.xml @@ -0,0 +1,21 @@ + + + +Maya scene units +## Invalid maya scene units + +Detected invalid maya scene units: + +{issues} + + + +### How to repair? + +You can automatically repair the scene units by clicking the Repair action on +the right. + +After that restart publishing with Reload button. 
+ + + diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index 5698d795ff5..f4bd6031a4b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -8,6 +8,7 @@ from openpype.pipeline.publish import ( RepairContextAction, ValidateSceneOrder, + PublishXmlValidationError ) @@ -31,6 +32,29 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): validate_fps = True + nice_message_format = ( + "- {setting} must be {required_value}. " + "Your scene is set to {current_value}" + ) + log_message_format = ( + "Maya scene {setting} must be '{required_value}'. " + "Current value is '{current_value}'." + ) + + def apply_settings(self, project_settings, system_settings): + """Apply project settings to creator""" + settings = ( + project_settings["maya"]["publish"]["ValidateMayaUnits"] + ) + + self.validate_linear_units = settings.get("validate_linear_units", + self.validate_linear_units) + self.linear_units = settings.get("linear_units", self.linear_units) + self.validate_angular_units = settings.get("validate_angular_units", + self.validate_angular_units) + self.angular_units = settings.get("angular_units", self.angular_units) + self.validate_fps = settings.get("validate_fps", self.validate_fps) + def process(self, context): # Collected units @@ -45,15 +69,14 @@ def process(self, context): # now flooring the value? 
fps = float_round(context.data.get('fps'), 2, ceil) - # TODO repace query with using 'context.data["assetEntity"]' - asset_doc = get_current_project_asset() + asset_doc = context.data["assetEntity"] asset_fps = asset_doc["data"]["fps"] self.log.info('Units (linear): {0}'.format(linearunits)) self.log.info('Units (angular): {0}'.format(angularunits)) self.log.info('Units (time): {0} FPS'.format(fps)) - valid = True + invalid = [] # Check if units are correct if ( @@ -61,26 +84,43 @@ def process(self, context): and linearunits and linearunits != self.linear_units ): - self.log.error("Scene linear units must be {}".format( - self.linear_units)) - valid = False + invalid.append({ + "setting": "Linear units", + "required_value": self.linear_units, + "current_value": linearunits + }) if ( self.validate_angular_units and angularunits and angularunits != self.angular_units ): - self.log.error("Scene angular units must be {}".format( - self.angular_units)) - valid = False + invalid.append({ + "setting": "Angular units", + "required_value": self.angular_units, + "current_value": angularunits + }) if self.validate_fps and fps and fps != asset_fps: - self.log.error( - "Scene must be {} FPS (now is {})".format(asset_fps, fps)) - valid = False - - if not valid: - raise RuntimeError("Invalid units set.") + invalid.append({ + "setting": "FPS", + "required_value": asset_fps, + "current_value": fps + }) + + if invalid: + + issues = [] + for data in invalid: + self.log.error(self.log_message_format.format(**data)) + issues.append(self.nice_message_format.format(**data)) + issues = "\n".join(issues) + + raise PublishXmlValidationError( + plugin=self, + message="Invalid maya scene units", + formatting_data={"issues": issues} + ) @classmethod def repair(cls, context): From 7985720806ca76da6d49864713d21c346a3cd817 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Tue, 31 Jan 2023 23:52:30 +0100 Subject: [PATCH 086/175] Get and update context data for maya using `maya.cmds.fileInfo` --- 
openpype/hosts/maya/api/pipeline.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index 50f08810316..30e6679e371 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -1,3 +1,5 @@ +import json +import base64 import os import errno import logging @@ -148,10 +150,16 @@ def maintained_selection(self): yield def get_context_data(self): - return {} + data = cmds.fileInfo("OpenPypeContext", query=True) + if not data: + return {} + decoded = base64.b64decode(data).decode("utf-8") + return json.loads(decoded) def update_context_data(self, data, changes): - return + json_str = json.dumps(data) + encoded = base64.b64encode(json_str.encode("utf-8")) + return cmds.fileInfo("OpenPypeContext", encoded) def _register_callbacks(self): for handler, event in self._op_events.copy().items(): From 1f1ef6b45916802eb7f14375caa855813edfbd9b Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Wed, 1 Feb 2023 00:20:37 +0100 Subject: [PATCH 087/175] Create workfile using auto creator --- .../maya/plugins/create/create_workfile.py | 90 +++++++++++++++++++ .../plugins/publish/collect_current_file.py | 17 ++++ .../maya/plugins/publish/collect_workfile.py | 37 ++------ 3 files changed, 115 insertions(+), 29 deletions(-) create mode 100644 openpype/hosts/maya/plugins/create/create_workfile.py create mode 100644 openpype/hosts/maya/plugins/publish/collect_current_file.py diff --git a/openpype/hosts/maya/plugins/create/create_workfile.py b/openpype/hosts/maya/plugins/create/create_workfile.py new file mode 100644 index 00000000000..cf1de7876e2 --- /dev/null +++ b/openpype/hosts/maya/plugins/create/create_workfile.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating workfiles.""" +from openpype.pipeline import CreatedInstance, AutoCreator +from openpype.pipeline import legacy_io +from openpype.client import get_asset_by_name +from 
openpype.hosts.maya.api import plugin +from maya import cmds + + +class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator): + """Workfile auto-creator.""" + identifier = "io.openpype.creators.maya.workfile" + label = "Workfile" + family = "workfile" + icon = "document" + + default_variant = "Main" + + def create(self): + + print("Create...") + variant = self.default_variant + current_instance = next( + ( + instance for instance in self.create_context.instances + if instance.creator_identifier == self.identifier + ), None) + + project_name = self.project_name + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + + if current_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + data.update( + self.get_dynamic_data( + variant, task_name, asset_doc, + project_name, host_name, current_instance) + ) + self.log.info("Auto-creating workfile instance...") + current_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(current_instance) + elif ( + current_instance["asset"] != asset_name + or current_instance["task"] != task_name + ): + # Update instance context if is not the same + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + current_instance["asset"] = asset_name + current_instance["task"] = task_name + current_instance["subset"] = subset_name + + def collect_instances(self): + self.cache_subsets(self.collection_shared_data) + cached_subsets = self.collection_shared_data["maya_cached_subsets"] + for node in cached_subsets.get(self.identifier, []): + node_data = self.read_instance_node(node) + + created_instance = 
CreatedInstance.from_existing(node_data, self) + self._add_instance_to_context(created_instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + data = created_inst.data_to_store() + node = data.get("instance_node") + if not node: + node = self.create_node() + created_inst["instance_node"] = node + data = created_inst.data_to_store() + + self.imprint_instance_node(node, data) + + def create_node(self): + node = cmds.sets(empty=True, name="workfileMain") + cmds.setAttr(node + ".hiddenInOutliner", True) + return node diff --git a/openpype/hosts/maya/plugins/publish/collect_current_file.py b/openpype/hosts/maya/plugins/publish/collect_current_file.py new file mode 100644 index 00000000000..e777a209d45 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_current_file.py @@ -0,0 +1,17 @@ + +import pyblish.api + +from maya import cmds + + +class CollectCurrentFile(pyblish.api.ContextPlugin): + """Inject the current working file.""" + + order = pyblish.api.CollectorOrder - 0.4 + label = "Maya Current File" + hosts = ['maya'] + families = ["workfile"] + + def process(self, context): + """Inject the current working file""" + context.data['currentFile'] = cmds.file(query=True, sceneName=True) diff --git a/openpype/hosts/maya/plugins/publish/collect_workfile.py b/openpype/hosts/maya/plugins/publish/collect_workfile.py index 12d86869ea4..e2b64f1ebd8 100644 --- a/openpype/hosts/maya/plugins/publish/collect_workfile.py +++ b/openpype/hosts/maya/plugins/publish/collect_workfile.py @@ -1,46 +1,30 @@ import os import pyblish.api -from maya import cmds -from openpype.pipeline import legacy_io - -class CollectWorkfile(pyblish.api.ContextPlugin): - """Inject the current working file into context""" +class CollectWorkfileData(pyblish.api.InstancePlugin): + """Inject data into Workfile instance""" order = pyblish.api.CollectorOrder - 0.01 label = "Maya Workfile" hosts = ['maya'] + families = ["workfile"] - def process(self, 
context): + def process(self, instance): """Inject the current working file""" - current_file = cmds.file(query=True, sceneName=True) - context.data['currentFile'] = current_file + context = instance.context + current_file = instance.context.data['currentFile'] folder, file = os.path.split(current_file) filename, ext = os.path.splitext(file) - task = legacy_io.Session["AVALON_TASK"] - - data = {} - - # create instance - instance = context.create_instance(name=filename) - subset = 'workfile' + task.capitalize() - - data.update({ - "subset": subset, - "asset": os.getenv("AVALON_ASSET", None), - "label": subset, - "publish": True, - "family": 'workfile', - "families": ['workfile'], + data = { # noqa "setMembers": [current_file], "frameStart": context.data['frameStart'], "frameEnd": context.data['frameEnd'], "handleStart": context.data['handleStart'], "handleEnd": context.data['handleEnd'] - }) + } data['representations'] = [{ 'name': ext.lstrip("."), @@ -50,8 +34,3 @@ def process(self, context): }] instance.data.update(data) - - self.log.info('Collected instance: {}'.format(file)) - self.log.info('Scene path: {}'.format(current_file)) - self.log.info('staging Dir: {}'.format(folder)) - self.log.info('subset: {}'.format(subset)) From 2b1eeea602999fd83f33055ac56d92f035e8c341 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Wed, 1 Feb 2023 01:00:51 +0100 Subject: [PATCH 088/175] Allow to convert legacy instances in Maya --- .../maya/plugins/create/convert_legacy.py | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 openpype/hosts/maya/plugins/create/convert_legacy.py diff --git a/openpype/hosts/maya/plugins/create/convert_legacy.py b/openpype/hosts/maya/plugins/create/convert_legacy.py new file mode 100644 index 00000000000..9082f6e2374 --- /dev/null +++ b/openpype/hosts/maya/plugins/create/convert_legacy.py @@ -0,0 +1,70 @@ +from openpype.pipeline import legacy_io +from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin 
+from openpype.hosts.maya.api import plugin + + +class LegacyConverted(SubsetConvertorPlugin, + plugin.MayaCreatorBase): + """Find and convert any legacy subsets in the scene. + + This Convertor will find all legacy subsets in the scene and will + transform them to the current system. Since the old subsets doesn't + retain any information about their original creators, the only mapping + we can do is based on their families. + + Its limitation is that you can have multiple creators creating subset + of the same family and there is no way to handle it. This code should + nevertheless cover all creators that came with OpenPype. + + """ + identifier = "io.openpype.creators.maya.legacy" + + def find_instances(self): + + self.cache_subsets(self.collection_shared_data) + legacy = self.collection_shared_data.get("maya_cached_legacy_subsets") + if not legacy: + return + + self.add_convertor_item("Convert legacy instances") + + def convert(self): + self.remove_convertor_item() + + # We can't use the collected shared data cache here + # we re-query it here directly to convert all found. + cache = {} + self.cache_subsets(cache) + legacy = cache.get("maya_cached_legacy_subsets") + if not legacy: + return + + # From all current new style manual creators find the mapping + # from family to identifier + family_to_id = {} + for identifier, creator in self.create_context.manual_creators.items(): + family = getattr(creator, "family") + if not family: + continue + + if family in family_to_id: + # We have a clash of family -> identifier. Multiple + # new style creators use the same family + self.log.warning("Clash on family->identifier: " + "{}".format(identifier)) + family_to_id[family] = identifier + + # We also embed the current 'task' into the instance since legacy + # instances didn't store that data on the instances. The old style + # logic was thus to be live to the current task to begin with. 
+ data = dict() + data["task"] = legacy_io.Session.get("AVALON_TASK") + for family, instance_nodes in legacy.items(): + if family in family_to_id: + # We only imprint the creator identifier for it to identify + # as the new style creator + creator_id = family_to_id[family] + data["creator_identifier"] = creator_id + for instance_node in instance_nodes: + self.imprint_instance_node(instance_node, + data=data.copy()) From c9d354960dfbcf038bd6543fde5d6c6b7568bba7 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Wed, 1 Feb 2023 01:04:33 +0100 Subject: [PATCH 089/175] Allow workfile + renderlayer to be empty --- .../maya/plugins/publish/validate_instance_has_members.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py index 7423764934b..c78c9c6e2b9 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py +++ b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py @@ -17,6 +17,10 @@ class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance): + # Allow renderlayer and workfile to be empty + if instance.data.get("family") in {"workfile", "renderlayer"}: + return + invalid = list() if not instance.data.get("setMembers"): objectset_name = instance.data['name'] From b931183eca5a11e7792770efd018476a27d58283 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Wed, 1 Feb 2023 01:05:16 +0100 Subject: [PATCH 090/175] Make `getattr` call safer --- openpype/hosts/maya/plugins/create/convert_legacy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/convert_legacy.py b/openpype/hosts/maya/plugins/create/convert_legacy.py index 9082f6e2374..d93e2638ace 100644 --- a/openpype/hosts/maya/plugins/create/convert_legacy.py +++ b/openpype/hosts/maya/plugins/create/convert_legacy.py @@ -43,7 +43,7 @@ def 
convert(self): # from family to identifier family_to_id = {} for identifier, creator in self.create_context.manual_creators.items(): - family = getattr(creator, "family") + family = getattr(creator, "family", None) if not family: continue From 4f633aa5a40fc33182762870d71fdcd5896085cb Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Wed, 1 Feb 2023 01:12:17 +0100 Subject: [PATCH 091/175] Ignore `cbId` attribute on read --- openpype/hosts/maya/api/plugin.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 16000d7f03a..d9c833b38b7 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -197,6 +197,10 @@ def imprint_instance_node(self, node, data): def read_instance_node(self, node): node_data = read(node) + # Never care about a cbId attribute on the object set + # being read as 'data' + node_data.pop("cbId", None) + # Move the relevant attributes into "creator_attributes" that # we flattened originally node_data["creator_attributes"] = {} From feacb8f2c89dc3c4e23d339f8b3858f4f8712ac9 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Wed, 1 Feb 2023 01:17:02 +0100 Subject: [PATCH 092/175] Just pass flat list as items for the EnumDef if key == label --- openpype/hosts/maya/plugins/create/create_multiverse_look.py | 2 +- openpype/hosts/maya/plugins/create/create_multiverse_usd.py | 2 +- .../hosts/maya/plugins/create/create_multiverse_usd_comp.py | 2 +- .../hosts/maya/plugins/create/create_multiverse_usd_over.py | 2 +- openpype/hosts/maya/plugins/create/create_review.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_look.py b/openpype/hosts/maya/plugins/create/create_multiverse_look.py index b89df856c70..f27eb57fc1f 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_look.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_look.py @@ -19,7 +19,7 @@ def 
get_instance_attr_defs(self): EnumDef("fileFormat", label="File Format", tooltip="USD export file format", - items={key: key for key in ["usda", "usd"]}, + items=["usda", "usd"], default="usda"), BoolDef("publishMipMap", label="Publish MipMap", diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py index b65c2234688..0b0ad3bccbd 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd.py @@ -21,7 +21,7 @@ def get_instance_attr_defs(self): defs.extend([ EnumDef("fileFormat", label="File format", - items={key: key for key in ["usd", "usda", "usdz"]}, + items=["usd", "usda", "usdz"], default="usd"), BoolDef("stripNamespaces", label="Strip Namespaces", diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py index b55931138fd..66ddd83eda6 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_comp.py @@ -20,7 +20,7 @@ def get_instance_attr_defs(self): defs.extend([ EnumDef("fileFormat", label="File format", - items={key: key for key in ["usd", "usda"]}, + items=["usd", "usda"], default="usd"), BoolDef("stripNamespaces", label="Strip Namespaces", diff --git a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py index 4775fa0d9b1..e1534dd68ce 100644 --- a/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py +++ b/openpype/hosts/maya/plugins/create/create_multiverse_usd_over.py @@ -19,7 +19,7 @@ def get_instance_attr_defs(self): defs.extend([ EnumDef("fileFormat", label="File format", - items={key: key for key in ["usd", "usda"]}, + items=["usd", "usda"], default="usd"), BoolDef("writeAll", label="Write All", diff --git 
a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index 9b10a07af1a..babd56e82a9 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -58,7 +58,7 @@ def get_instance_attr_defs(self): default=True), EnumDef("transparency", label="Transparency", - items={key: key for key in TRANSPARENCIES}) + items=TRANSPARENCIES) ]) return defs From b86287cd91992a02260caac2418e1362b8a2b777 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 3 Mar 2023 15:53:09 +0100 Subject: [PATCH 093/175] Fix apply settings --- .../maya/plugins/publish/validate_maya_units.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index f4bd6031a4b..e8f910b12f7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -41,19 +41,20 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): "Current value is '{current_value}'." 
) - def apply_settings(self, project_settings, system_settings): + @classmethod + def apply_settings(cls, project_settings, system_settings): """Apply project settings to creator""" settings = ( project_settings["maya"]["publish"]["ValidateMayaUnits"] ) - self.validate_linear_units = settings.get("validate_linear_units", - self.validate_linear_units) - self.linear_units = settings.get("linear_units", self.linear_units) - self.validate_angular_units = settings.get("validate_angular_units", - self.validate_angular_units) - self.angular_units = settings.get("angular_units", self.angular_units) - self.validate_fps = settings.get("validate_fps", self.validate_fps) + cls.validate_linear_units = settings.get("validate_linear_units", + cls.validate_linear_units) + cls.linear_units = settings.get("linear_units", cls.linear_units) + cls.validate_angular_units = settings.get("validate_angular_units", + cls.validate_angular_units) + cls.angular_units = settings.get("angular_units", cls.angular_units) + cls.validate_fps = settings.get("validate_fps", cls.validate_fps) def process(self, context): From bd482673310e18ab9dcd6ab9fd20e39f8bc4e597 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Fri, 3 Mar 2023 15:56:55 +0100 Subject: [PATCH 094/175] Cosmetics --- openpype/hosts/maya/plugins/publish/validate_maya_units.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index e8f910b12f7..c14abb081ab 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -49,10 +49,10 @@ def apply_settings(cls, project_settings, system_settings): ) cls.validate_linear_units = settings.get("validate_linear_units", - cls.validate_linear_units) + cls.validate_linear_units) cls.linear_units = settings.get("linear_units", cls.linear_units) cls.validate_angular_units = 
settings.get("validate_angular_units", - cls.validate_angular_units) + cls.validate_angular_units) cls.angular_units = settings.get("angular_units", cls.angular_units) cls.validate_fps = settings.get("validate_fps", cls.validate_fps) From ccbd0c057d702cced1a884ca8cc7e722114f37e4 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 10:39:53 +0100 Subject: [PATCH 095/175] Shush hound --- openpype/hosts/maya/plugins/create/create_arnold_scene_source.py | 1 - 1 file changed, 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py index 42f3c495340..024f1fd91d3 100644 --- a/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py +++ b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -8,7 +8,6 @@ ) - class CreateArnoldSceneSource(plugin.MayaCreator): """Arnold Scene Source""" From ac4bf97338a652b8c644fc1d805cdc1cc84e45f0 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 10:42:57 +0100 Subject: [PATCH 096/175] Add back separate 'Create...' 
menu entry --- openpype/hosts/maya/api/menu.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index e90d1097ba2..645d6f5a1c2 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -66,6 +66,14 @@ def deferred(): cmds.menuItem(divider=True) + cmds.menuItem( + "Create...", + command=lambda *args: host_tools.show_publisher( + parent=parent_widget, + tab="create" + ) + ) + cmds.menuItem( "Load...", command=lambda *args: host_tools.show_loader( @@ -77,7 +85,8 @@ def deferred(): cmds.menuItem( "Publish...", command=lambda *args: host_tools.show_publisher( - parent=parent_widget + parent=parent_widget, + tab="publish" ), image=pyblish_icon ) From 5a8e87bcd6a7e313ee4de9e9c3136a518cf601f9 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 10:49:44 +0100 Subject: [PATCH 097/175] Fix getting file info data for context --- openpype/hosts/maya/api/pipeline.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index f9e9aaf9523..e64e3b8eef5 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -153,6 +153,8 @@ def get_context_data(self): data = cmds.fileInfo("OpenPypeContext", query=True) if not data: return {} + + data = data[0] # Maya seems to return a list decoded = base64.b64decode(data).decode("utf-8") return json.loads(decoded) From 80a8706714eb9229e7be3b135bbd349d1d3414b6 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 12:48:47 +0100 Subject: [PATCH 098/175] Add strict error checking instance attribute definition --- .../plugins/publish/submit_maya_deadline.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 
ef7f020d11d..19aff9be1d2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -31,8 +31,10 @@ from maya import cmds from openpype.pipeline import legacy_io -from openpype.lib import NumberDef - +from openpype.lib import ( + BoolDef, + NumberDef +) from openpype.hosts.maya.api.lib_rendersettings import RenderSettings from openpype.hosts.maya.api.lib import get_attr_in_layer @@ -107,6 +109,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): jobInfo = {} pluginInfo = {} group = "none" + strict_error_checking = True @classmethod def apply_settings(cls, project_settings, system_settings): @@ -122,6 +125,8 @@ def apply_settings(cls, project_settings, system_settings): cls.tile_priority = settings.get("tile_priority", cls.tile_priority) cls.limit = settings.get("limit", cls.limit) cls.group = settings.get("group", cls.group) + cls.strict_error_checking = settings.get("strict_error_checking", + cls.strict_error_checking) def get_job_info(self): job_info = DeadlineJobInfo(Plugin="MayaBatch") @@ -242,7 +247,7 @@ def get_plugin_info(self): if rs_include_lights not in {"1", "0", True, False}: rs_include_lights = default_rs_include_lights strict_error_checking = instance.data.get("strict_error_checking", - True) + self.strict_error_checking) plugin_info = MayaPluginInfo( SceneFile=self.scene_path, Version=cmds.about(version=True), @@ -795,7 +800,11 @@ def get_attribute_defs(cls): NumberDef("tile_priority", label="Tile Assembler Priority", decimals=0, - default=cls.tile_priority) + default=cls.tile_priority), + BoolDef("strict_error_checking", + label="Strict Error Checking", + default=cls.strict_error_checking), + ]) return defs From 479e0bdf2237b0212db8ebcaeeff92cc54f89981 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 12:49:29 +0100 Subject: [PATCH 099/175] Fix if statement --- openpype/hosts/maya/plugins/create/create_render.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index ee125029b1a..9fd98de80dc 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -72,7 +72,7 @@ def create(self, subset_name, instance_data, pre_create_data): # would only ever be called to say, 'hey, please refresh collect' # Only allow a single render instance to exist - if not self._get_singleton_node(): + if self._get_singleton_node(): raise CreatorError("A Render instance already exists - only " "one can be configured.") From 14969a82cda9ebdcb420e2c4f30a11a29a3ed607 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 12:59:11 +0100 Subject: [PATCH 100/175] Remove file that was removed in `develop` but remained in merge conflict --- openpype/hosts/maya/plugins/publish/extract_xgen_cache.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 openpype/hosts/maya/plugins/publish/extract_xgen_cache.py diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py deleted file mode 100644 index e69de29bb2d..00000000000 From 9976d7bc95b9af3e954c1cd7d1b4ea274786869e Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 13:27:50 +0100 Subject: [PATCH 101/175] Fix loading rigs with creation of animation instance --- .../hosts/maya/plugins/load/load_reference.py | 29 ++++++++----------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index d93702a16d6..262294c1971 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -1,12 +1,9 @@ import os from maya import cmds +from openpype.pipeline import registered_host +from 
openpype.pipeline.create import CreateContext from openpype.settings import get_project_settings -from openpype.pipeline import legacy_io -from openpype.pipeline.create import ( - legacy_create, - get_legacy_creator_by_name, -) import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api.lib import maintained_selection @@ -150,21 +147,19 @@ def _post_process_rig(self, name, namespace, context, options): roots = cmds.ls(self[:], assemblies=True, long=True) assert roots, "No root nodes in rig, this is a bug." - asset = legacy_io.Session["AVALON_ASSET"] - dependency = str(context["representation"]["_id"]) - self.log.info("Creating subset: {}".format(namespace)) + # Fill creator identifier + creator_identifier = "io.openpype.creators.maya.animation" + + host = registered_host() + context = CreateContext(host) + # Create the animation instance - creator_plugin = get_legacy_creator_by_name( - self.animation_creator_name - ) with maintained_selection(): cmds.select([output, controls] + roots, noExpand=True) - legacy_create( - creator_plugin, - name=namespace, - asset=asset, - options={"useSelection": True}, - data={"dependencies": dependency} + context.create( + creator_identifier=creator_identifier, + variant=namespace, + pre_create_data={"use_selection": True} ) From 2f8b45acdcaaea569c951b058bdf0badaf610003 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 13:57:19 +0100 Subject: [PATCH 102/175] Replace legacy instance collector --- .../maya/plugins/publish/collect_instances.py | 140 ++++-------------- .../plugins/publish/collect_new_instances.py | 133 ----------------- 2 files changed, 31 insertions(+), 242 deletions(-) delete mode 100644 openpype/hosts/maya/plugins/publish/collect_new_instances.py diff --git a/openpype/hosts/maya/plugins/publish/collect_instances.py b/openpype/hosts/maya/plugins/publish/collect_instances.py index a5cbabaeef0..ee02d3efc05 100644 --- a/openpype/hosts/maya/plugins/publish/collect_instances.py +++ 
b/openpype/hosts/maya/plugins/publish/collect_instances.py @@ -2,7 +2,6 @@ import maya.api.OpenMaya as om import pyblish.api -import json def get_all_children(nodes): @@ -45,8 +44,8 @@ def get_all_children(nodes): return list(traversed) -class CollectInstances(pyblish.api.ContextPlugin): - """Gather instances by objectSet and pre-defined attribute +class CollectNewInstances(pyblish.api.InstancePlugin): + """Gather members for instances and pre-defined attribute This collector takes into account assets that are associated with an objectSet and marked with a unique identifier; @@ -65,132 +64,55 @@ class CollectInstances(pyblish.api.ContextPlugin): """ - label = "Collect Instances" + label = "Collect New Instance Data" order = pyblish.api.CollectorOrder hosts = ["maya"] - def process(self, context): + def process(self, instance): - objectset = cmds.ls("*.id", long=True, type="objectSet", - recursive=True, objectsOnly=True) + objset = instance.data.get("instance_node") + if not objset: + self.log.debug("Instance has no `instance_node` data") - context.data['objectsets'] = objectset - for objset in objectset: - - if cmds.attributeQuery("creator_identifier", - node=objset, - exists=True): - # Ignore new style instances - continue - - if not cmds.attributeQuery("id", node=objset, exists=True): - continue - - id_attr = "{}.id".format(objset) - if cmds.getAttr(id_attr) != "pyblish.avalon.instance": - continue - - # The developer is responsible for specifying - # the family of each instance. 
- has_family = cmds.attributeQuery("family", - node=objset, - exists=True) - assert has_family, "\"%s\" was missing a family" % objset - - members = cmds.sets(objset, query=True) - if members is None: - self.log.warning("Skipped empty instance: \"%s\" " % objset) - continue - - self.log.info("Creating instance for {}".format(objset)) - - data = dict() - - # Apply each user defined attribute as data - for attr in cmds.listAttr(objset, userDefined=True) or list(): - try: - value = cmds.getAttr("%s.%s" % (objset, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. - value = None - data[attr] = value - - # temporarily translation of `active` to `publish` till issue has - # been resolved, https://github.com/pyblish/pyblish-base/issues/307 - if "active" in data: - data["publish"] = data["active"] + # TODO: We might not want to do this in the future + # Merge creator attributes into instance.data just backwards compatible + # code still runs as expected + creator_attributes = instance.data.get("creator_attributes", {}) + if creator_attributes: + instance.data.update(creator_attributes) + members = cmds.sets(objset, query=True) or [] + if not members: + self.log.warning("Empty instance: \"%s\" " % objset) + else: # Collect members members = cmds.ls(members, long=True) or [] dag_members = cmds.ls(members, type="dagNode", long=True) children = get_all_children(dag_members) children = cmds.ls(children, noIntermediate=True, long=True) - parents = [] - if data.get("includeParentHierarchy", True): + if creator_attributes.get("includeParentHierarchy", True): # If `includeParentHierarchy` then include the parents # so they will also be picked up in the instance by validators parents = self.get_all_parents(members) members_hierarchy = list(set(members + children + parents)) - if 'families' not in data: - data['families'] = 
[data.get('family')] - - # Create the instance - instance = context.create_instance(objset) instance[:] = members_hierarchy - instance.data["objset"] = objset - - # Store the exact members of the object set - instance.data["setMembers"] = members - - # Define nice label - name = cmds.ls(objset, long=False)[0] # use short name - label = "{0} ({1})".format(name, - data["asset"]) - - # Append start frame and end frame to label if present - if "frameStart" and "frameEnd" in data: - - # Backwards compatibility for 'handles' data - if "handles" in data: - data["handleStart"] = data["handles"] - data["handleEnd"] = data["handles"] - data.pop('handles') - - # Take handles from context if not set locally on the instance - for key in ["handleStart", "handleEnd"]: - if key not in data: - data[key] = context.data[key] - - data["frameStartHandle"] = data["frameStart"] - data["handleStart"] # noqa: E501 - data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] # noqa: E501 - - label += " [{0}-{1}]".format(int(data["frameStartHandle"]), - int(data["frameEndHandle"])) - - instance.data["label"] = label - - instance.data.update(data) - - # Produce diagnostic message for any graphical - # user interface interested in visualising it. 
- self.log.info("Found: \"%s\" " % instance.data["name"]) - self.log.debug( - "DATA: {} ".format(json.dumps(instance.data, indent=4))) - - def sort_by_family(instance): - """Sort by family""" - return instance.data.get("families", instance.data.get("family")) - - # Sort/grouped by family (preserving local index) - context[:] = sorted(context, key=sort_by_family) - return context + # Store the exact members of the object set + instance.data["setMembers"] = members + + # TODO: This might make more sense as a separate collector + # Collect frameStartHandle and frameEndHandle if frames present + if "frameStart" in instance.data: + handle_start = instance.data.get("handleStart", 0) + frame_start_handle = instance.data["frameStart"] - handle_start + instance.data["frameStartHandle"] = frame_start_handle + if "frameEnd" in instance.data: + handle_end = instance.data.get("handleEnd", 0) + frame_end_handle = instance.data["frameEnd"] + handle_end + instance.data["frameEndHandle"] = frame_end_handle def get_all_parents(self, nodes): """Get all parents by using string operations (optimization) diff --git a/openpype/hosts/maya/plugins/publish/collect_new_instances.py b/openpype/hosts/maya/plugins/publish/collect_new_instances.py deleted file mode 100644 index ee02d3efc05..00000000000 --- a/openpype/hosts/maya/plugins/publish/collect_new_instances.py +++ /dev/null @@ -1,133 +0,0 @@ -from maya import cmds -import maya.api.OpenMaya as om - -import pyblish.api - - -def get_all_children(nodes): - """Return all children of `nodes` including each instanced child. - Using maya.cmds.listRelatives(allDescendents=True) includes only the first - instance. As such, this function acts as an optimal replacement with a - focus on a fast query. 
- - """ - - sel = om.MSelectionList() - traversed = set() - iterator = om.MItDag(om.MItDag.kDepthFirst) - for node in nodes: - - if node in traversed: - # Ignore if already processed as a child - # before - continue - - sel.clear() - sel.add(node) - dag = sel.getDagPath(0) - - iterator.reset(dag) - # ignore self - iterator.next() # noqa: B305 - while not iterator.isDone(): - - path = iterator.fullPathName() - - if path in traversed: - iterator.prune() - iterator.next() # noqa: B305 - continue - - traversed.add(path) - iterator.next() # noqa: B305 - - return list(traversed) - - -class CollectNewInstances(pyblish.api.InstancePlugin): - """Gather members for instances and pre-defined attribute - - This collector takes into account assets that are associated with - an objectSet and marked with a unique identifier; - - Identifier: - id (str): "pyblish.avalon.instance" - - Limitations: - - Does not take into account nodes connected to those - within an objectSet. Extractors are assumed to export - with history preserved, but this limits what they will - be able to achieve and the amount of data available - to validators. An additional collector could also - append this input data into the instance, as we do - for `pype.rig` with collect_history. 
- - """ - - label = "Collect New Instance Data" - order = pyblish.api.CollectorOrder - hosts = ["maya"] - - def process(self, instance): - - objset = instance.data.get("instance_node") - if not objset: - self.log.debug("Instance has no `instance_node` data") - - # TODO: We might not want to do this in the future - # Merge creator attributes into instance.data just backwards compatible - # code still runs as expected - creator_attributes = instance.data.get("creator_attributes", {}) - if creator_attributes: - instance.data.update(creator_attributes) - - members = cmds.sets(objset, query=True) or [] - if not members: - self.log.warning("Empty instance: \"%s\" " % objset) - else: - # Collect members - members = cmds.ls(members, long=True) or [] - - dag_members = cmds.ls(members, type="dagNode", long=True) - children = get_all_children(dag_members) - children = cmds.ls(children, noIntermediate=True, long=True) - parents = [] - if creator_attributes.get("includeParentHierarchy", True): - # If `includeParentHierarchy` then include the parents - # so they will also be picked up in the instance by validators - parents = self.get_all_parents(members) - members_hierarchy = list(set(members + children + parents)) - - instance[:] = members_hierarchy - - # Store the exact members of the object set - instance.data["setMembers"] = members - - # TODO: This might make more sense as a separate collector - # Collect frameStartHandle and frameEndHandle if frames present - if "frameStart" in instance.data: - handle_start = instance.data.get("handleStart", 0) - frame_start_handle = instance.data["frameStart"] - handle_start - instance.data["frameStartHandle"] = frame_start_handle - if "frameEnd" in instance.data: - handle_end = instance.data.get("handleEnd", 0) - frame_end_handle = instance.data["frameEnd"] + handle_end - instance.data["frameEndHandle"] = frame_end_handle - - def get_all_parents(self, nodes): - """Get all parents by using string operations (optimization) - - Args: - 
nodes (list): the nodes which are found in the objectSet - - Returns: - list - """ - - parents = [] - for node in nodes: - splitted = node.split("|") - items = ["|".join(splitted[0:i]) for i in range(2, len(splitted))] - parents.extend(items) - - return list(set(parents)) From 27f590a7f075bbc5fb7995a00e1ec20b2b2696f6 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 14:46:38 +0100 Subject: [PATCH 103/175] Refactor to at least get to a working deadline submission. This is quite quick and dirty and likely many settings from the instance or settings aren't working as expected. --- .../maya/plugins/publish/collect_render.py | 550 ++++++++---------- .../plugins/publish/submit_maya_muster.py | 5 + .../validate_render_no_default_cameras.py | 11 +- .../publish/validate_rendersettings.py | 10 +- .../deadline/abstract_submit_deadline.py | 10 +- 5 files changed, 257 insertions(+), 329 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index a501c87eb23..c5d213d7ad9 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -39,27 +39,25 @@ instance -> pixelAspect """ -import re import os import platform import json from maya import cmds -import maya.app.renderSetup.model.renderSetup as renderSetup import pyblish.api from openpype.lib import get_formatted_current_time -from openpype.pipeline import legacy_io from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501 from openpype.hosts.maya.api import lib -class CollectMayaRender(pyblish.api.ContextPlugin): +class CollectMayaRender(pyblish.api.InstancePlugin): """Gather all publishable render layers from renderSetup.""" order = pyblish.api.CollectorOrder + 0.01 hosts = ["maya"] + families = ["renderlayer"] label = "Collect Render Layers" sync_workfile_version = False @@ -69,303 +67,249 @@ class 
CollectMayaRender(pyblish.api.ContextPlugin): "underscore": "_" } - def process(self, context): - """Entry point to collector.""" - render_instance = None + def process(self, instance): - for instance in context: - if "rendering" in instance.data["families"]: - render_instance = instance - render_instance.data["remove"] = True + # TODO: Re-add force enable of workfile instance? + # TODO: Re-add legacy layer support with LAYER_ prefix but in Creator + # TODO: Set and collect active state of RenderLayer in Creator using + # renderlayer.isRenderable() + context = instance.context - # make sure workfile instance publishing is enabled - if "workfile" in instance.data["families"]: - instance.data["publish"] = True - - if not render_instance: - self.log.info( - "No render instance found, skipping render " - "layer collection." - ) - return - - render_globals = render_instance - collected_render_layers = render_instance.data["setMembers"] + layer = instance.data["transientData"]["layer"] + objset = instance.data.get("instance_node") filepath = context.data["currentFile"].replace("\\", "/") - asset = legacy_io.Session["AVALON_ASSET"] workspace = context.data["workspaceDir"] - # Retrieve render setup layers - rs = renderSetup.instance() - maya_render_layers = { - layer.name(): layer for layer in rs.getRenderLayers() - } - - for layer in collected_render_layers: - if layer.startswith("LAYER_"): - # this is support for legacy mode where render layers - # started with `LAYER_` prefix. - layer_name_pattern = r"^LAYER_(.*)" - else: - # new way is to prefix render layer name with instance - # namespace. 
- layer_name_pattern = r"^.+:(.*)" - - # todo: We should have a more explicit way to link the renderlayer - match = re.match(layer_name_pattern, layer) - if not match: - msg = "Invalid layer name in set [ {} ]".format(layer) - self.log.warning(msg) - continue - - expected_layer_name = match.group(1) - self.log.info("Processing '{}' as layer [ {} ]" - "".format(layer, expected_layer_name)) + # check if layer is renderable + if not layer.isRenderable(): + msg = "Render layer [ {} ] is not " "renderable".format( + layer.name() + ) + raise RuntimeError(msg) - # check if layer is part of renderSetup - if expected_layer_name not in maya_render_layers: - msg = "Render layer [ {} ] is not in " "Render Setup".format( - expected_layer_name - ) - self.log.warning(msg) + # detect if there are sets (subsets) to attach render to + sets = cmds.sets(objset, query=True) or [] + attach_to = [] + for s in sets: + if not cmds.attributeQuery("family", node=s, exists=True): continue - # check if layer is renderable - if not maya_render_layers[expected_layer_name].isRenderable(): - msg = "Render layer [ {} ] is not " "renderable".format( - expected_layer_name - ) - self.log.warning(msg) - continue + attach_to.append( + { + "version": None, # we need integrator for that + "subset": s, + "family": cmds.getAttr("{}.family".format(s)), + } + ) + self.log.info(" -> attach render to: {}".format(s)) + + layer_name = layer.name() + + # collect all frames we are expecting to be rendered + # return all expected files for all cameras and aovs in given + # frame range + layer_render_products = get_layer_render_products(layer.name()) + render_products = layer_render_products.layer_data.products + assert render_products, "no render products generated" + exp_files = [] + multipart = False + for product in render_products: + if product.multipart: + multipart = True + product_name = product.productName + if product.camera and layer_render_products.has_camera_token(): + product_name = "{}{}".format( + 
product.camera, + "_" + product_name if product_name else "") + exp_files.append( + { + product_name: layer_render_products.get_files( + product) + }) + + has_cameras = any(product.camera for product in render_products) + assert has_cameras, "No render cameras found." + + self.log.info("multipart: {}".format( + multipart)) + assert exp_files, "no file names were generated, this is bug" + self.log.info( + "expected files: {}".format( + json.dumps(exp_files, indent=4, sort_keys=True) + ) + ) - # detect if there are sets (subsets) to attach render to - sets = cmds.sets(layer, query=True) or [] - attach_to = [] - for s in sets: - if not cmds.attributeQuery("family", node=s, exists=True): - continue - - attach_to.append( - { - "version": None, # we need integrator for that - "subset": s, - "family": cmds.getAttr("{}.family".format(s)), - } - ) - self.log.info(" -> attach render to: {}".format(s)) - - layer_name = "rs_{}".format(expected_layer_name) - - # collect all frames we are expecting to be rendered - # return all expected files for all cameras and aovs in given - # frame range - layer_render_products = get_layer_render_products(layer_name) - render_products = layer_render_products.layer_data.products - assert render_products, "no render products generated" - exp_files = [] - multipart = False - for product in render_products: - if product.multipart: - multipart = True - product_name = product.productName - if product.camera and layer_render_products.has_camera_token(): - product_name = "{}{}".format( - product.camera, - "_" + product_name if product_name else "") - exp_files.append( - { - product_name: layer_render_products.get_files( - product) - }) - - has_cameras = any(product.camera for product in render_products) - assert has_cameras, "No render cameras found." 
- - self.log.info("multipart: {}".format( - multipart)) - assert exp_files, "no file names were generated, this is bug" - self.log.info( - "expected files: {}".format( - json.dumps(exp_files, indent=4, sort_keys=True) - ) + # if we want to attach render to subset, check if we have AOV's + # in expectedFiles. If so, raise error as we cannot attach AOV + # (considered to be subset on its own) to another subset + if attach_to: + assert isinstance(exp_files, list), ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported" ) - # if we want to attach render to subset, check if we have AOV's - # in expectedFiles. If so, raise error as we cannot attach AOV - # (considered to be subset on its own) to another subset - if attach_to: - assert isinstance(exp_files, list), ( - "attaching multiple AOVs or renderable cameras to " - "subset is not supported" - ) - - # append full path - aov_dict = {} - default_render_file = context.data.get('project_settings')\ - .get('maya')\ - .get('RenderSettings')\ - .get('default_render_image_folder') or "" - # replace relative paths with absolute. Render products are - # returned as list of dictionaries. 
- publish_meta_path = None - for aov in exp_files: - full_paths = [] - aov_first_key = list(aov.keys())[0] - for file in aov[aov_first_key]: - full_path = os.path.join(workspace, default_render_file, - file) - full_path = full_path.replace("\\", "/") - full_paths.append(full_path) - publish_meta_path = os.path.dirname(full_path) - aov_dict[aov_first_key] = full_paths - full_exp_files = [aov_dict] - - frame_start_render = int(self.get_render_attribute( - "startFrame", layer=layer_name)) - frame_end_render = int(self.get_render_attribute( - "endFrame", layer=layer_name)) - - if (int(context.data['frameStartHandle']) == frame_start_render - and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501 - - handle_start = context.data['handleStart'] - handle_end = context.data['handleEnd'] - frame_start = context.data['frameStart'] - frame_end = context.data['frameEnd'] - frame_start_handle = context.data['frameStartHandle'] - frame_end_handle = context.data['frameEndHandle'] - else: - handle_start = 0 - handle_end = 0 - frame_start = frame_start_render - frame_end = frame_end_render - frame_start_handle = frame_start_render - frame_end_handle = frame_end_render - - # find common path to store metadata - # so if image prefix is branching to many directories - # metadata file will be located in top-most common - # directory. 
- # TODO: use `os.path.commonpath()` after switch to Python 3 - publish_meta_path = os.path.normpath(publish_meta_path) - common_publish_meta_path = os.path.splitdrive( - publish_meta_path)[0] - if common_publish_meta_path: - common_publish_meta_path += os.path.sep - for part in publish_meta_path.replace( - common_publish_meta_path, "").split(os.path.sep): - common_publish_meta_path = os.path.join( - common_publish_meta_path, part) - if part == expected_layer_name: - break - - # TODO: replace this terrible linux hotfix with real solution :) - if platform.system().lower() in ["linux", "darwin"]: - common_publish_meta_path = "/" + common_publish_meta_path - - self.log.info( - "Publish meta path: {}".format(common_publish_meta_path)) - - self.log.info(full_exp_files) - self.log.info("collecting layer: {}".format(layer_name)) - # Get layer specific settings, might be overrides - colorspace_data = lib.get_color_management_preferences() - data = { - "subset": expected_layer_name, - "attachTo": attach_to, - "setMembers": layer_name, - "multipartExr": multipart, - "review": render_instance.data.get("review") or False, - "publish": True, - - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartHandle": frame_start_handle, - "frameEndHandle": frame_end_handle, - "byFrameStep": int( - self.get_render_attribute("byFrameStep", - layer=layer_name)), - "renderer": self.get_render_attribute( - "currentRenderer", layer=layer_name).lower(), - # instance subset - "family": "renderlayer", - "families": ["renderlayer"], - "asset": asset, - "time": get_formatted_current_time(), - "author": context.data["user"], - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath, - "expectedFiles": full_exp_files, - "publishRenderMetadataFolder": common_publish_meta_path, - "renderProducts": layer_render_products, - "resolutionWidth": lib.get_attr_in_layer( - 
"defaultResolution.width", layer=layer_name - ), - "resolutionHeight": lib.get_attr_in_layer( - "defaultResolution.height", layer=layer_name - ), - "pixelAspect": lib.get_attr_in_layer( - "defaultResolution.pixelAspect", layer=layer_name - ), - "tileRendering": render_instance.data.get("tileRendering") or False, # noqa: E501 - "tilesX": render_instance.data.get("tilesX") or 2, - "tilesY": render_instance.data.get("tilesY") or 2, - "priority": render_instance.data.get("priority"), - "convertToScanline": render_instance.data.get( - "convertToScanline") or False, - "useReferencedAovs": render_instance.data.get( - "useReferencedAovs") or render_instance.data.get( - "vrayUseReferencedAovs") or False, - "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 - "renderSetupIncludeLights": render_instance.data.get( - "renderSetupIncludeLights" - ), - "colorspaceConfig": colorspace_data["config"], - "colorspaceDisplay": colorspace_data["display"], - "colorspaceView": colorspace_data["view"], - "strict_error_checking": render_instance.data.get( - "strict_error_checking", True - ) - } - - if self.sync_workfile_version: - data["version"] = context.data["version"] - - for instance in context: - if instance.data['family'] == "workfile": - instance.data["version"] = context.data["version"] - - # handle standalone renderers - if render_instance.data.get("vrayScene") is True: - data["families"].append("vrayscene_render") - - if render_instance.data.get("assScene") is True: - data["families"].append("assscene_render") - - # Include (optional) global settings - # Get global overrides and translate to Deadline values - overrides = self.parse_options(str(render_globals)) - data.update(**overrides) - - # get string values for pools - primary_pool = overrides["renderGlobals"]["Pool"] - secondary_pool = overrides["renderGlobals"].get("SecondaryPool") - data["primaryPool"] = primary_pool - data["secondaryPool"] = secondary_pool - - # Define nice label - label = "{0} 
({1})".format(expected_layer_name, data["asset"]) - label += " [{0}-{1}]".format( - int(data["frameStartHandle"]), int(data["frameEndHandle"]) + # append full path + aov_dict = {} + default_render_file = context.data.get('project_settings')\ + .get('maya')\ + .get('RenderSettings')\ + .get('default_render_image_folder') or "" + # replace relative paths with absolute. Render products are + # returned as list of dictionaries. + publish_meta_path = None + for aov in exp_files: + full_paths = [] + aov_first_key = list(aov.keys())[0] + for file in aov[aov_first_key]: + full_path = os.path.join(workspace, default_render_file, + file) + full_path = full_path.replace("\\", "/") + full_paths.append(full_path) + publish_meta_path = os.path.dirname(full_path) + aov_dict[aov_first_key] = full_paths + full_exp_files = [aov_dict] + + frame_start_render = int(self.get_render_attribute( + "startFrame", layer=layer_name)) + frame_end_render = int(self.get_render_attribute( + "endFrame", layer=layer_name)) + + if (int(context.data['frameStartHandle']) == frame_start_render + and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501 + + handle_start = context.data['handleStart'] + handle_end = context.data['handleEnd'] + frame_start = context.data['frameStart'] + frame_end = context.data['frameEnd'] + frame_start_handle = context.data['frameStartHandle'] + frame_end_handle = context.data['frameEndHandle'] + else: + handle_start = 0 + handle_end = 0 + frame_start = frame_start_render + frame_end = frame_end_render + frame_start_handle = frame_start_render + frame_end_handle = frame_end_render + + # find common path to store metadata + # so if image prefix is branching to many directories + # metadata file will be located in top-most common + # directory. 
+ # TODO: use `os.path.commonpath()` after switch to Python 3 + publish_meta_path = os.path.normpath(publish_meta_path) + common_publish_meta_path = os.path.splitdrive( + publish_meta_path)[0] + if common_publish_meta_path: + common_publish_meta_path += os.path.sep + for part in publish_meta_path.replace( + common_publish_meta_path, "").split(os.path.sep): + common_publish_meta_path = os.path.join( + common_publish_meta_path, part) + if part == layer_name: + break + + # TODO: replace this terrible linux hotfix with real solution :) + if platform.system().lower() in ["linux", "darwin"]: + common_publish_meta_path = "/" + common_publish_meta_path + + self.log.info( + "Publish meta path: {}".format(common_publish_meta_path)) + + self.log.info(full_exp_files) + self.log.info("collecting layer: {}".format(layer_name)) + # Get layer specific settings, might be overrides + colorspace_data = lib.get_color_management_preferences() + data = { + # TODO: Why do we need to explicitly add this - does this not exist + # on the instance by itself? + "publish": True, + + "attachTo": attach_to, + + # The legacy renderlayer node + "setMembers": layer._getLegacyNodeName(), + + "multipartExr": multipart, + "review": instance.data.get("review") or False, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartHandle": frame_start_handle, + "frameEndHandle": frame_end_handle, + "byFrameStep": int( + self.get_render_attribute("byFrameStep", + layer=layer_name)), + "renderer": self.get_render_attribute( + "currentRenderer", layer=layer_name).lower(), + + "renderlayer": layer_name, + + # todo: is `time` and `author` still needed? 
+ "time": get_formatted_current_time(), + "author": context.data["user"], + + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": filepath, + "expectedFiles": full_exp_files, + "publishRenderMetadataFolder": common_publish_meta_path, + "renderProducts": layer_render_products, + "resolutionWidth": lib.get_attr_in_layer( + "defaultResolution.width", layer=layer_name + ), + "resolutionHeight": lib.get_attr_in_layer( + "defaultResolution.height", layer=layer_name + ), + "pixelAspect": lib.get_attr_in_layer( + "defaultResolution.pixelAspect", layer=layer_name + ), + + # todo: Following are likely not needed due to collecting from the + # instance itself if they are attribute definitions + "tileRendering": instance.data.get("tileRendering") or False, # noqa: E501 + "tilesX": instance.data.get("tilesX") or 2, + "tilesY": instance.data.get("tilesY") or 2, + "priority": instance.data.get("priority"), + "convertToScanline": instance.data.get( + "convertToScanline") or False, + "useReferencedAovs": instance.data.get( + "useReferencedAovs") or instance.data.get( + "vrayUseReferencedAovs") or False, + "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 + "renderSetupIncludeLights": instance.data.get( + "renderSetupIncludeLights" + ), + "colorspaceConfig": colorspace_data["config"], + "colorspaceDisplay": colorspace_data["display"], + "colorspaceView": colorspace_data["view"], + "strict_error_checking": instance.data.get( + "strict_error_checking", True ) + } - instance = context.create_instance(expected_layer_name) - instance.data["label"] = label - instance.data["farm"] = True - instance.data.update(data) + if self.sync_workfile_version: + data["version"] = context.data["version"] + for instance in context: + if instance.data['family'] == "workfile": + instance.data["version"] = context.data["version"] + + # Include (optional) global settings + # Get global overrides and translate to Deadline values + # 
TODO: Re-implement render globals instance data logic + # TODO: Re-implement extend frames + # overrides = self.parse_options(str(render_globals)) + # data.update(**overrides) + + # Define nice label + label = "{0} ({1})".format(layer_name, instance.data["asset"]) + label += " [{0}-{1}]".format( + int(data["frameStartHandle"]), int(data["frameEndHandle"]) + ) + + instance.data["label"] = label + instance.data["farm"] = True + instance.data.update(data) def parse_options(self, render_globals): """Get all overrides with a value, skip those without. @@ -415,36 +359,6 @@ def parse_options(self, render_globals): options["extendFrames"] = extend_frames options["overrideExistingFrame"] = override_frames - maya_render_plugin = "MayaBatch" - - options["mayaRenderPlugin"] = maya_render_plugin - - return options - - def _discover_pools(self, attributes): - - pool_a = None - pool_b = None - - # Check for specific pools - pool_b = [] - if "primaryPool" in attributes: - pool_a = attributes["primaryPool"] - if "secondaryPool" in attributes: - pool_b = attributes["secondaryPool"] - - else: - # Backwards compatibility - pool_str = attributes.get("pools", None) - if pool_str: - pool_a, pool_b = pool_str.split(";") - - # Ensure empty entry token is caught - if pool_b == "-": - pool_b = None - - return pool_a, pool_b - @staticmethod def get_render_attribute(attr, layer): """Get attribute from render options. 
diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index 3b4a3987dd4..ac7beac1074 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -546,3 +546,8 @@ def preflight_check(self, instance): "%f=%d was rounded off to nearest integer" % (value, int(value)) ) + +# TODO: Remove hack to avoid this plug-in in new publisher +# This plug-in should actually be in dedicated module +if not os.environ.get("MUSTER_REST_URL"): + del MayaSubmitMuster \ No newline at end of file diff --git a/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py b/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py index 67ece75af88..9d4410186b1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py +++ b/openpype/hosts/maya/plugins/publish/validate_render_no_default_cameras.py @@ -3,7 +3,10 @@ import pyblish.api import openpype.hosts.maya.api.action -from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError, +) class ValidateRenderNoDefaultCameras(pyblish.api.InstancePlugin): @@ -31,5 +34,7 @@ def process(self, instance): """Process all the cameras in the instance""" invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Renderable default cameras " - "found: {0}".format(invalid)) + raise PublishValidationError( + title="Rendering default cameras", + message="Renderable default cameras " + "found: {0}".format(invalid)) diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index 517b12bbfd7..f1976cb8b16 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -9,6 +9,7 @@ from 
openpype.pipeline.publish import ( RepairAction, ValidateContentsOrder, + PublishValidationError, ) from openpype.hosts.maya.api import lib @@ -112,14 +113,17 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): def process(self, instance): invalid = self.get_invalid(instance) - assert invalid is False, ("Invalid render settings " - "found for '{}'!".format(instance.name)) + if invalid: + raise PublishValidationError( + title="Invalid Render Settings", + message=("Invalid render settings found " + "for '{}'!".format(instance.name)) + ) @classmethod def get_invalid(cls, instance): invalid = False - multipart = False renderer = instance.data['renderer'] layer = instance.data['renderlayer'] diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 7a71035d491..6b284eaac3f 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -661,21 +661,21 @@ def submit(self, payload): @staticmethod def _get_workfile_instance(context): """Find workfile instance in context""" - for i in context: + for instance in context: is_workfile = ( - "workfile" in i.data.get("families", []) or - i.data["family"] == "workfile" + "workfile" in instance.data.get("families", []) or + instance.data["family"] == "workfile" ) if not is_workfile: continue # test if there is instance of workfile waiting # to be published. 
- assert i.data["publish"] is True, ( + assert instance.data.get("publish", True) is True, ( "Workfile (scene) must be published along") - return i + return instance @classmethod def get_attribute_defs(cls): From 663359498a3395583c5e0102c8adafa725e90148 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 14:48:35 +0100 Subject: [PATCH 104/175] Shush hound for the hacky workaround --- openpype/hosts/maya/plugins/publish/submit_maya_muster.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index ac7beac1074..298c3bd3450 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -547,7 +547,8 @@ def preflight_check(self, instance): % (value, int(value)) ) + # TODO: Remove hack to avoid this plug-in in new publisher # This plug-in should actually be in dedicated module if not os.environ.get("MUSTER_REST_URL"): - del MayaSubmitMuster \ No newline at end of file + del MayaSubmitMuster From 73c4df85cf430b63e4b93b29b427b84df4c010e7 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sat, 18 Mar 2023 14:52:14 +0100 Subject: [PATCH 105/175] Fix missed removal of code --- openpype/hosts/maya/plugins/create/create_review.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index a26587ab6ae..13187e63752 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -44,9 +44,6 @@ def get_instance_attr_defs(self): attr_def = defs_by_key[key] attr_def.default = value - for key, value in frame_range.items(): - data[key] = value - defs.extend([ NumberDef("review_width", label="Review width", From cfdc1d3001893fd9d640d9d4853b7a457a9554da Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: 
Sat, 18 Mar 2023 20:23:00 +0100 Subject: [PATCH 106/175] Cleanup --- .../maya/plugins/publish/collect_render.py | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index c5d213d7ad9..6b946512dde 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -47,6 +47,7 @@ import pyblish.api +from openpype.pipeline import KnownPublishError from openpype.lib import get_formatted_current_time from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products # noqa: E501 from openpype.hosts.maya.api import lib @@ -150,7 +151,7 @@ def process(self, instance): # append full path aov_dict = {} - default_render_file = context.data.get('project_settings')\ + default_render_folder = context.data.get('project_settings')\ .get('maya')\ .get('RenderSettings')\ .get('default_render_image_folder') or "" @@ -161,13 +162,21 @@ def process(self, instance): full_paths = [] aov_first_key = list(aov.keys())[0] for file in aov[aov_first_key]: - full_path = os.path.join(workspace, default_render_file, + full_path = os.path.join(workspace, default_render_folder, file) full_path = full_path.replace("\\", "/") full_paths.append(full_path) publish_meta_path = os.path.dirname(full_path) aov_dict[aov_first_key] = full_paths full_exp_files = [aov_dict] + self.log.info(full_exp_files) + + if publish_meta_path is None: + raise KnownPublishError("Unable to detect any expected output " + "images for: {}. Make sure you have a " + "renderable camera and a valid frame " + "range set for your renderlayer." 
+ "".format(instance.name)) frame_start_render = int(self.get_render_attribute( "startFrame", layer=layer_name)) @@ -215,22 +224,15 @@ def process(self, instance): self.log.info( "Publish meta path: {}".format(common_publish_meta_path)) - self.log.info(full_exp_files) - self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides colorspace_data = lib.get_color_management_preferences() data = { - # TODO: Why do we need to explicitly add this - does this not exist - # on the instance by itself? - "publish": True, - "attachTo": attach_to, - # The legacy renderlayer node - "setMembers": layer._getLegacyNodeName(), - "multipartExr": multipart, "review": instance.data.get("review") or False, + + # Frame range "handleStart": handle_start, "handleEnd": handle_end, "frameStart": frame_start, @@ -240,9 +242,11 @@ def process(self, instance): "byFrameStep": int( self.get_render_attribute("byFrameStep", layer=layer_name)), + + # Renderlayer "renderer": self.get_render_attribute( "currentRenderer", layer=layer_name).lower(), - + "setMembers": layer._getLegacyNodeName(), # legacy renderlayer "renderlayer": layer_name, # todo: is `time` and `author` still needed? 
From 822e2f6503b8c6444c47de75b3192bd60cd3f2ee Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 19 Mar 2023 14:00:31 +0100 Subject: [PATCH 107/175] Move attribute definitions to SubmitMayaDeadline explicitly to (for now) not pollute other host implementations + Cleanup CollectRender --- .../maya/plugins/publish/collect_render.py | 68 +++---------------- .../deadline/abstract_submit_deadline.py | 33 +-------- .../plugins/publish/submit_maya_deadline.py | 55 ++++++++++++--- 3 files changed, 56 insertions(+), 100 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 6b946512dde..fcfaa00db26 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -227,6 +227,7 @@ def process(self, instance): # Get layer specific settings, might be overrides colorspace_data = lib.get_color_management_preferences() data = { + "farm": True, "attachTo": attach_to, "multipartExr": multipart, @@ -274,7 +275,6 @@ def process(self, instance): "tileRendering": instance.data.get("tileRendering") or False, # noqa: E501 "tilesX": instance.data.get("tilesX") or 2, "tilesY": instance.data.get("tilesY") or 2, - "priority": instance.data.get("priority"), "convertToScanline": instance.data.get( "convertToScanline") or False, "useReferencedAovs": instance.data.get( @@ -287,9 +287,6 @@ def process(self, instance): "colorspaceConfig": colorspace_data["config"], "colorspaceDisplay": colorspace_data["display"], "colorspaceView": colorspace_data["view"], - "strict_error_checking": instance.data.get( - "strict_error_checking", True - ) } if self.sync_workfile_version: @@ -298,70 +295,21 @@ def process(self, instance): if instance.data['family'] == "workfile": instance.data["version"] = context.data["version"] - # Include (optional) global settings - # Get global overrides and translate to Deadline values - # TODO: Re-implement render globals 
instance data logic - # TODO: Re-implement extend frames - # overrides = self.parse_options(str(render_globals)) - # data.update(**overrides) - # Define nice label label = "{0} ({1})".format(layer_name, instance.data["asset"]) label += " [{0}-{1}]".format( int(data["frameStartHandle"]), int(data["frameEndHandle"]) ) - - instance.data["label"] = label - instance.data["farm"] = True - instance.data.update(data) - - def parse_options(self, render_globals): - """Get all overrides with a value, skip those without. - - Here's the kicker. These globals override defaults in the submission - integrator, but an empty value means no overriding is made. - Otherwise, Frames would override the default frames set under globals. - - Args: - render_globals (str): collection of render globals - - Returns: - dict: only overrides with values - - """ - attributes = lib.read(render_globals) - - options = {"renderGlobals": {}} - options["renderGlobals"]["Priority"] = attributes["priority"] - - # Check for specific pools - pool_a, pool_b = self._discover_pools(attributes) - options["renderGlobals"].update({"Pool": pool_a}) - if pool_b: - options["renderGlobals"].update({"SecondaryPool": pool_b}) - - # Machine list - machine_list = attributes["machineList"] - if machine_list: - key = "Whitelist" if attributes["whitelist"] else "Blacklist" - options["renderGlobals"][key] = machine_list - - # Suspend publish job - state = "Suspended" if attributes["suspendPublishJob"] else "Active" - options["publishJobState"] = state - - chunksize = attributes.get("framesPerTask", 1) - options["renderGlobals"]["ChunkSize"] = chunksize + data["label"] = label # Override frames should be False if extendFrames is False. 
This is # to ensure it doesn't go off doing crazy unpredictable things - override_frames = False - extend_frames = attributes.get("extendFrames", False) - if extend_frames: - override_frames = attributes.get("overrideExistingFrame", False) + extend_frames = instance.data.get("extendFrames", False) + if not extend_frames: + instance.data["overrideExistingFrame"] = False - options["extendFrames"] = extend_frames - options["overrideExistingFrame"] = override_frames + # Update the instace + instance.data.update(data) @staticmethod def get_render_attribute(attr, layer): @@ -377,4 +325,4 @@ def get_render_attribute(attr, layer): """ return lib.get_attr_in_layer( "defaultRenderGlobals.{}".format(attr), layer=layer - ) + ) \ No newline at end of file diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 6b284eaac3f..bdb647fa217 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -22,11 +22,6 @@ KnownPublishError, OpenPypePyblishPluginMixin ) -from openpype.lib import ( - NumberDef, - TextDef, - EnumDef -) JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) @@ -675,30 +670,4 @@ def _get_workfile_instance(context): assert instance.data.get("publish", True) is True, ( "Workfile (scene) must be published along") - return instance - - @classmethod - def get_attribute_defs(cls): - return [ - NumberDef("priority", - label="Priority", - default=cls.default_priority, - decimals=0), - NumberDef("framesPerTask", - label="Frames Per Task", - default=1, - decimals=0, - minimum=1, - maximum=1000), - TextDef("machineList", - label="Machine List", - default="", - placeholder="machine1,machine2"), - EnumDef("whitelist", - label="Machine List (Allow/Deny)", - items={ - True: "Allow List", - False: "Deny List", - }, - default=False) - ] + return instance \ No newline at end of file diff --git 
a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 19aff9be1d2..e68bc564384 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -30,10 +30,15 @@ from maya import cmds -from openpype.pipeline import legacy_io +from openpype.pipeline import ( + legacy_io, + OpenPypePyblishPluginMixin +) from openpype.lib import ( BoolDef, - NumberDef + NumberDef, + TextDef, + EnumDef ) from openpype.hosts.maya.api.lib_rendersettings import RenderSettings from openpype.hosts.maya.api.lib import get_attr_in_layer @@ -95,7 +100,8 @@ class ArnoldPluginInfo(object): ArnoldFile = attr.ib(default=None) -class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): +class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, + OpenPypePyblishPluginMixin): label = "Submit Render to Deadline" hosts = ["maya"] @@ -163,10 +169,7 @@ def get_job_info(self): job_info.Pool = instance.data.get("primaryPool") job_info.SecondaryPool = instance.data.get("secondaryPool") - job_info.ChunkSize = instance.data.get("chunkSize", 10) job_info.Comment = context.data.get("comment") - job_info.Priority = instance.data.get("priority", self.priority) - job_info.FramesPerTask = instance.data.get("framesPerTask", 1) if self.group != "none" and self.group: job_info.Group = self.group @@ -174,6 +177,19 @@ def get_job_info(self): if self.limit: job_info.LimitGroups = ",".join(self.limit) + attr_values = self.get_attr_values_from_data(instance.data) + render_globals = instance.data.setdefault("renderGlobals", dict()) + machine_list = attr_values.get("machineList", "") + if machine_list: + if attr_values.get("whitelist", True): + machine_list_key = "Whitelist" + else: + machine_list_key = "Blacklist" + render_globals[machine_list_key] = machine_list + + job_info.Priority = attr_values.get("priority") + 
job_info.ChunkSize = attr_values.get("chunkSize") + # Add options from RenderGlobals render_globals = instance.data.get("renderGlobals", {}) job_info.update(render_globals) @@ -246,8 +262,10 @@ def get_plugin_info(self): "renderSetupIncludeLights", default_rs_include_lights) if rs_include_lights not in {"1", "0", True, False}: rs_include_lights = default_rs_include_lights - strict_error_checking = instance.data.get("strict_error_checking", - self.strict_error_checking) + + attr_values = self.get_attr_values_from_data(instance.data) + strict_error_checking = attr_values.get("strict_error_checking", + self.strict_error_checking) plugin_info = MayaPluginInfo( SceneFile=self.scene_path, Version=cmds.about(version=True), @@ -797,6 +815,27 @@ def get_attribute_defs(cls): defs = super(MayaSubmitDeadline, cls).get_attribute_defs() defs.extend([ + NumberDef("priority", + label="Priority", + default=cls.default_priority, + decimals=0), + NumberDef("chunkSize", + label="Frames Per Task", + default=1, + decimals=0, + minimum=1, + maximum=1000), + TextDef("machineList", + label="Machine List", + default="", + placeholder="machine1,machine2"), + EnumDef("whitelist", + label="Machine List (Allow/Deny)", + items={ + True: "Allow List", + False: "Deny List", + }, + default=False), NumberDef("tile_priority", label="Tile Assembler Priority", decimals=0, From c5bf02cd898724423bcf0198337390cc7afa30a0 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Sun, 19 Mar 2023 14:01:54 +0100 Subject: [PATCH 108/175] Hound fixes --- openpype/hosts/maya/plugins/publish/collect_render.py | 2 +- openpype/modules/deadline/abstract_submit_deadline.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index fcfaa00db26..6d5e61199ff 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -325,4 +325,4 @@ 
def get_render_attribute(attr, layer): """ return lib.get_attr_in_layer( "defaultRenderGlobals.{}".format(attr), layer=layer - ) \ No newline at end of file + ) diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index bdb647fa217..944b8110d62 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -670,4 +670,4 @@ def _get_workfile_instance(context): assert instance.data.get("publish", True) is True, ( "Workfile (scene) must be published along") - return instance \ No newline at end of file + return instance From 791c7deeb5edfb8957bcb033f858638f21132210 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Tue, 21 Mar 2023 17:20:19 +0100 Subject: [PATCH 109/175] Update openpype/hosts/maya/api/plugin.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Fabià Serra Arrizabalaga --- openpype/hosts/maya/api/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index dc3f10d9606..6e15b2bce3f 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -147,7 +147,7 @@ def cache_subsets(shared_data): cache = dict() cache_legacy = dict() - for node in cmds.ls(type='objectSet'): + for node in cmds.ls(type="objectSet"): if _get_attr(node, attr="id") != "pyblish.avalon.instance": continue From 49b2b657a4aa7a0297f94dedc8133990741f28fa Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Tue, 21 Mar 2023 17:27:08 +0100 Subject: [PATCH 110/175] Cleanup code from review suggestions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Fabià Serra Arrizabalaga --- openpype/hosts/maya/api/plugin.py | 2 +- .../hosts/maya/plugins/create/create_unreal_skeletalmesh.py | 2 +- 
openpype/hosts/maya/plugins/create/create_workfile.py | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 6e15b2bce3f..8ac6777cf84 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -187,7 +187,7 @@ def imprint_instance_node(self, node, data): data.pop("publish_attributes", None) # pop to move to end of dict data["publish_attributes"] = publish_attributes - # Kill any existing attributes just we can imprint cleanly again + # Kill any existing attributes just so we can imprint cleanly again for attr in data.keys(): if cmds.attributeQuery(attr, node=node, exists=True): cmds.deleteAttr("{}.{}".format(node, attr)) diff --git a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py index b53c03b078c..4e2a99ecedb 100644 --- a/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py +++ b/openpype/hosts/maya/plugins/create/create_unreal_skeletalmesh.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -"""Creator for Unreal Static Meshes.""" +"""Creator for Unreal Skeletal Meshes.""" from openpype.hosts.maya.api import plugin, lib from openpype.lib import ( BoolDef, diff --git a/openpype/hosts/maya/plugins/create/create_workfile.py b/openpype/hosts/maya/plugins/create/create_workfile.py index cf1de7876e2..474526d30a8 100644 --- a/openpype/hosts/maya/plugins/create/create_workfile.py +++ b/openpype/hosts/maya/plugins/create/create_workfile.py @@ -18,7 +18,6 @@ class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator): def create(self): - print("Create...") variant = self.default_variant current_instance = next( ( From faeb53362a5c1ac6da81a612fda5d75fcc354984 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Tue, 21 Mar 2023 17:37:15 +0100 Subject: [PATCH 111/175] More suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Co-authored-by: Fabià Serra Arrizabalaga --- .../maya/plugins/create/create_render.py | 11 ++++---- .../maya/plugins/publish/collect_render.py | 28 +++++++++---------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 9fd98de80dc..9a27f2052e0 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -117,7 +117,7 @@ def collect_instances(self): # this instance will not have the `instance_node` data yet # until it's been saved/persisted at least once. # TODO: Correctly define the subset name using templates - subset_name = "render" + layer.name() + subset_name = "render{}".format(layer.name()) instance_data = { "asset": legacy_io.Session["AVALON_ASSET"], "task": legacy_io.Session["AVALON_TASK"], @@ -149,7 +149,7 @@ def find_layer_instance_node(self, layer): creator_identifier = cmds.getAttr(node + ".creator_identifier") if creator_identifier == self.identifier: - print(f"Found node: {node}") + self.log.info(f"Found node: {node}") return node def _create_layer_instance_node(self, layer): @@ -191,7 +191,8 @@ def update_instances(self, update_list): instance.data["instance_node"] = instance_node else: # TODO: Keep name in sync with the actual renderlayer? - pass + self.log.warning("No instance node found for to be updated instance: {}".format(instance)) + continue self.imprint_instance_node(instance_node, data=instance.data_to_store()) @@ -209,7 +210,7 @@ def imprint_instance_node(self, node, data): data=data) def remove_instances(self, instances): - """Remove specified instance from the scene. + """Remove specified instances from the scene. This is only removing `id` parameter so instance is no longer instance, because it might contain valuable data for artist. 
@@ -252,7 +253,7 @@ def get_instance_attr_defs(self): default=False), BoolDef("overrideExistingFrame", label="Override Existing Frame", - tooltip="Mark as reviewable", + tooltip="Override existing rendered frames (if they exist).", default=True), # TODO: Should these move to submit_maya_deadline plugin? diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 6d5e61199ff..afcca42c26f 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -121,7 +121,7 @@ def process(self, instance): if product.camera and layer_render_products.has_camera_token(): product_name = "{}{}".format( product.camera, - "_" + product_name if product_name else "") + "_{}".format(product_name) if product_name else "") exp_files.append( { product_name: layer_render_products.get_files( @@ -133,7 +133,7 @@ def process(self, instance): self.log.info("multipart: {}".format( multipart)) - assert exp_files, "no file names were generated, this is bug" + assert exp_files, "no file names were generated, this is a bug" self.log.info( "expected files: {}".format( json.dumps(exp_files, indent=4, sort_keys=True) @@ -151,10 +151,10 @@ def process(self, instance): # append full path aov_dict = {} - default_render_folder = context.data.get('project_settings')\ - .get('maya')\ - .get('RenderSettings')\ - .get('default_render_image_folder') or "" + default_render_folder = context.data.get("project_settings")\ + .get("maya")\ + .get("RenderSettings")\ + .get("default_render_image_folder") or "" # replace relative paths with absolute. Render products are # returned as list of dictionaries. 
publish_meta_path = None @@ -183,15 +183,15 @@ def process(self, instance): frame_end_render = int(self.get_render_attribute( "endFrame", layer=layer_name)) - if (int(context.data['frameStartHandle']) == frame_start_render - and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501 + if (int(context.data["frameStartHandle"]) == frame_start_render + and int(context.data["frameEndHandle"]) == frame_end_render): # noqa: W503, E501 - handle_start = context.data['handleStart'] - handle_end = context.data['handleEnd'] - frame_start = context.data['frameStart'] - frame_end = context.data['frameEnd'] - frame_start_handle = context.data['frameStartHandle'] - frame_end_handle = context.data['frameEndHandle'] + handle_start = context.data["handleStart"] + handle_end = context.data["handleEnd"] + frame_start = context.data["frameStart"] + frame_end = context.data["frameEnd"] + frame_start_handle = context.data["frameStartHandle"] + frame_end_handle = context.data["frameEndHandle"] else: handle_start = 0 handle_end = 0 From 9c5ce5e9f41dcfce32a80be83cfd8ced84093db4 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Tue, 21 Mar 2023 17:49:28 +0100 Subject: [PATCH 112/175] Cleanup code based on code review from @fabiaserra --- .../hosts/maya/plugins/create/create_render.py | 10 ++++++---- .../hosts/maya/plugins/publish/collect_render.py | 14 +++++++------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 9a27f2052e0..02b1a3ca8d3 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -170,8 +170,8 @@ def _create_layer_instance_node(self, layer): # later by a physical maya connection instead of relying on the layer # name cmds.addAttr(render_set, longName="renderlayer", at="message") - cmds.connectAttr(layer.name() + ".message", - render_set + ".renderlayer", 
force=True) + cmds.connectAttr("{}.message".format(layer.name()), + "{}.renderlayer".format(render_set), force=True) # Add the set to the 'CreateRender' set. cmds.sets(render_set, forceElement=create_render_set) @@ -191,7 +191,8 @@ def update_instances(self, update_list): instance.data["instance_node"] = instance_node else: # TODO: Keep name in sync with the actual renderlayer? - self.log.warning("No instance node found for to be updated instance: {}".format(instance)) + self.log.warning("No instance node found for to be updated " + "instance: {}".format(instance)) continue self.imprint_instance_node(instance_node, @@ -253,7 +254,8 @@ def get_instance_attr_defs(self): default=False), BoolDef("overrideExistingFrame", label="Override Existing Frame", - tooltip="Override existing rendered frames (if they exist).", + tooltip="Override existing rendered frames " + "(if they exist).", default=True), # TODO: Should these move to submit_maya_deadline plugin? diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index afcca42c26f..5291d05a3a5 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -86,7 +86,7 @@ def process(self, instance): msg = "Render layer [ {} ] is not " "renderable".format( layer.name() ) - raise RuntimeError(msg) + self.log.warning(msg) # detect if there are sets (subsets) to attach render to sets = cmds.sets(objset, query=True) or [] @@ -112,7 +112,7 @@ def process(self, instance): layer_render_products = get_layer_render_products(layer.name()) render_products = layer_render_products.layer_data.products assert render_products, "no render products generated" - exp_files = [] + expected_files = [] multipart = False for product in render_products: if product.multipart: @@ -122,7 +122,7 @@ def process(self, instance): product_name = "{}{}".format( product.camera, "_{}".format(product_name) if product_name else 
"") - exp_files.append( + expected_files.append( { product_name: layer_render_products.get_files( product) @@ -133,10 +133,10 @@ def process(self, instance): self.log.info("multipart: {}".format( multipart)) - assert exp_files, "no file names were generated, this is a bug" + assert expected_files, "no file names were generated, this is a bug" self.log.info( "expected files: {}".format( - json.dumps(exp_files, indent=4, sort_keys=True) + json.dumps(expected_files, indent=4, sort_keys=True) ) ) @@ -144,7 +144,7 @@ def process(self, instance): # in expectedFiles. If so, raise error as we cannot attach AOV # (considered to be subset on its own) to another subset if attach_to: - assert isinstance(exp_files, list), ( + assert isinstance(expected_files, list), ( "attaching multiple AOVs or renderable cameras to " "subset is not supported" ) @@ -158,7 +158,7 @@ def process(self, instance): # replace relative paths with absolute. Render products are # returned as list of dictionaries. publish_meta_path = None - for aov in exp_files: + for aov in expected_files: full_paths = [] aov_first_key = list(aov.keys())[0] for file in aov[aov_first_key]: From 962b5c1458ec027cc4736cd3f88d79bf41ce4ea2 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Thu, 23 Mar 2023 01:19:09 +0100 Subject: [PATCH 113/175] Set up validation messages for new publisher --- .../plugins/publish/validate_mesh_non_zero_edge.py | 11 ++++++++--- .../plugins/publish/validate_node_ids_unique.py | 14 +++++++++++--- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py index 0adeffd6e8e..5ec6e5779bd 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py @@ -5,7 +5,8 @@ from openpype.hosts.maya.api import lib from openpype.pipeline.publish import ( ValidateMeshOrder, - 
OptionalPyblishPluginMixin + OptionalPyblishPluginMixin, + PublishValidationError ) @@ -74,5 +75,9 @@ def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Meshes found with zero " - "edge length: {0}".format(invalid)) + label = "Meshes found with zero edge length" + raise PublishValidationError( + message="{}: {}".format(label, invalid), + title=label, + description="{}:\n- ".format(label) + "\n- ".join(invalid) + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py b/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py index f7a5e6e2927..61386fc9399 100644 --- a/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py +++ b/openpype/hosts/maya/plugins/publish/validate_node_ids_unique.py @@ -1,7 +1,10 @@ from collections import defaultdict import pyblish.api -from openpype.pipeline.publish import ValidatePipelineOrder +from openpype.pipeline.publish import ( + ValidatePipelineOrder, + PublishValidationError +) import openpype.hosts.maya.api.action from openpype.hosts.maya.api import lib @@ -29,8 +32,13 @@ def process(self, instance): # Ensure all nodes have a cbId invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Nodes found with non-unique " - "asset IDs: {0}".format(invalid)) + label = "Nodes found with non-unique asset IDs" + raise PublishValidationError( + message="{}: {}".format(label, invalid), + title="Non-unique asset ids on nodes", + description="{}\n- {}".format(label, + "\n- ".join(sorted(invalid))) + ) @classmethod def get_invalid(cls, instance): From 14d767a97ab11fd837bf88af45d636c7bc6db435 Mon Sep 17 00:00:00 2001 From: Roy Nieterau Date: Tue, 4 Apr 2023 17:05:59 +0200 Subject: [PATCH 114/175] Merge remote-tracking branch 'upstream/develop' into maya_new_publisher # Conflicts: # openpype/hosts/maya/plugins/create/create_look.py # openpype/hosts/maya/plugins/create/create_review.py # openpype/hosts/maya/plugins/publish/collect_instances.py # 
openpype/hosts/maya/plugins/publish/validate_attributes.py # openpype/hosts/maya/plugins/publish/validate_frame_range.py # openpype/hosts/maya/plugins/publish/validate_maya_units.py # openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py # openpype/modules/deadline/plugins/publish/collect_pools.py --- .github/pr-branch-labeler.yml | 15 + .github/pr-glob-labeler.yml | 102 + .github/workflows/project_actions.yml | 118 + ARCHITECTURE.md | 77 + CHANGELOG.md | 926 ++ openpype/client/entities.py | 8 +- openpype/client/notes.md | 4 +- openpype/client/operations.py | 10 +- .../hooks/pre_create_extra_workdir_folders.py | 9 +- openpype/hooks/pre_foundry_apps.py | 4 +- openpype/host/dirmap.py | 4 +- openpype/host/host.py | 6 +- openpype/host/interfaces.py | 10 +- .../api/extension/jsx/hostscript.jsx | 2 +- openpype/hosts/aftereffects/api/ws_stub.py | 2 +- openpype/hosts/blender/api/ops.py | 6 +- .../plugins/publish/extract_playblast.py | 3 +- .../celaction/hooks/pre_celaction_setup.py | 36 +- .../publish/collect_celaction_cli_kwargs.py | 2 +- openpype/hosts/flame/api/lib.py | 4 +- openpype/hosts/flame/api/pipeline.py | 2 +- openpype/hosts/flame/api/plugin.py | 54 +- .../hosts/flame/api/scripts/wiretap_com.py | 2 +- openpype/hosts/flame/api/utils.py | 2 +- openpype/hosts/flame/hooks/pre_flame_setup.py | 2 +- .../flame/plugins/create/create_shot_clip.py | 2 +- .../hosts/flame/plugins/load/load_clip.py | 6 +- .../flame/plugins/load/load_clip_batch.py | 8 +- .../publish/collect_timeline_instances.py | 4 +- .../publish/extract_subset_resources.py | 4 +- .../plugins/publish/integrate_batch_group.py | 2 +- openpype/hosts/fusion/api/action.py | 13 +- openpype/hosts/fusion/api/menu.py | 13 - .../fusion/plugins/create/create_saver.py | 81 +- openpype/hosts/fusion/plugins/load/actions.py | 5 +- .../publish/collect_expected_frames.py | 50 + .../plugins/publish/collect_render_target.py | 44 - .../fusion/plugins/publish/collect_renders.py | 25 + 
.../plugins/publish/extract_render_local.py | 109 + .../fusion/plugins/publish/render_local.py | 100 - .../publish/validate_create_folder_checked.py | 14 +- .../validate_expected_frames_existence.py | 78 + .../hosts/fusion/scripts/set_rendermode.py | 112 - openpype/hosts/harmony/api/README.md | 8 +- openpype/hosts/harmony/api/TB_sceneOpened.js | 29 +- openpype/hosts/harmony/api/lib.py | 6 +- openpype/hosts/harmony/api/pipeline.py | 2 +- openpype/hosts/harmony/api/server.py | 2 +- .../harmony/plugins/publish/extract_render.py | 13 +- .../publish/validate_scene_settings.py | 25 +- .../harmony/vendor/OpenHarmony/README.md | 6 +- .../harmony/vendor/OpenHarmony/openHarmony.js | 8 +- .../openHarmony/openHarmony_actions.js | 6 +- .../openHarmony/openHarmony_application.js | 6 +- .../openHarmony/openHarmony_attribute.js | 8 +- .../openHarmony/openHarmony_backdrop.js | 4 +- .../openHarmony/openHarmony_color.js | 6 +- .../openHarmony/openHarmony_column.js | 4 +- .../openHarmony/openHarmony_database.js | 4 +- .../openHarmony/openHarmony_dialog.js | 18 +- .../openHarmony/openHarmony_drawing.js | 18 +- .../openHarmony/openHarmony_element.js | 4 +- .../openHarmony/openHarmony_file.js | 6 +- .../openHarmony/openHarmony_frame.js | 10 +- .../openHarmony/openHarmony_list.js | 6 +- .../openHarmony/openHarmony_math.js | 16 +- .../openHarmony/openHarmony_metadata.js | 4 +- .../openHarmony/openHarmony_misc.js | 6 +- .../openHarmony/openHarmony_network.js | 8 +- .../openHarmony/openHarmony_node.js | 22 +- .../openHarmony/openHarmony_nodeLink.js | 24 +- .../vendor/OpenHarmony/openHarmony_tools.js | 8 +- .../harmony/vendor/OpenHarmony/package.json | 2 +- openpype/hosts/hiero/api/__init__.py | 2 +- openpype/hosts/hiero/api/lib.py | 6 +- openpype/hosts/hiero/api/pipeline.py | 4 +- openpype/hosts/hiero/api/plugin.py | 26 +- openpype/hosts/houdini/api/lib.py | 14 +- openpype/hosts/houdini/api/pipeline.py | 11 +- openpype/hosts/houdini/api/plugin.py | 2 +- openpype/hosts/houdini/api/shelves.py 
| 2 +- .../houdini/plugins/create/convert_legacy.py | 4 +- .../plugins/publish/collect_current_file.py | 5 +- openpype/hosts/max/api/lib.py | 104 + openpype/hosts/max/api/menu.py | 20 + openpype/hosts/max/api/pipeline.py | 5 + .../max/plugins/publish/collect_render.py | 2 +- .../publish/increment_workfile_version.py | 19 + openpype/hosts/maya/api/commands.py | 2 +- openpype/hosts/maya/api/lib.py | 215 +- openpype/hosts/maya/api/lib_renderproducts.py | 31 +- openpype/hosts/maya/api/lib_rendersetup.py | 15 +- .../maya/api/workfile_template_builder.py | 4 +- openpype/hosts/maya/plugins/load/actions.py | 2 +- .../maya/plugins/load/load_arnold_standin.py | 6 +- .../hosts/maya/plugins/load/load_audio.py | 22 +- .../hosts/maya/plugins/load/load_gpucache.py | 23 +- .../maya/plugins/load/load_image_plane.py | 151 +- .../hosts/maya/plugins/load/load_reference.py | 157 +- .../publish/collect_arnold_scene_source.py | 14 +- .../maya/plugins/publish/collect_look.py | 2 +- .../publish/collect_multiverse_look.py | 2 +- .../maya/plugins/publish/collect_review.py | 176 +- .../publish/extract_arnold_scene_source.py | 84 +- .../maya/plugins/publish/extract_gpu_cache.py | 65 + .../maya/plugins/publish/extract_look.py | 862 +- .../publish/extract_multiverse_usd_over.py | 2 +- .../maya/plugins/publish/extract_playblast.py | 128 +- .../maya/plugins/publish/extract_thumbnail.py | 61 +- .../maya/plugins/publish/extract_vrayproxy.py | 4 +- .../plugins/publish/reset_xgen_attributes.py | 2 +- .../publish/validate_arnold_scene_source.py | 38 +- .../validate_arnold_scene_source_cbid.py | 74 + .../publish/validate_camera_attributes.py | 2 +- .../publish/validate_look_color_space.py | 26 - .../plugins/publish/validate_look_contents.py | 22 + .../validate_mesh_arnold_attributes.py | 82 +- .../publish/validate_mvlook_contents.py | 3 +- .../plugins/publish/validate_no_namespace.py | 19 +- .../publish/validate_renderlayer_aovs.py | 4 +- .../publish/validate_rendersettings.py | 20 +- 
.../publish/validate_rig_output_ids.py | 79 +- .../validate_transform_naming_suffix.py | 2 +- .../hosts/maya/tools/mayalookassigner/app.py | 34 +- .../tools/mayalookassigner/arnold_standin.py | 247 + .../maya/tools/mayalookassigner/commands.py | 51 +- .../hosts/maya/tools/mayalookassigner/lib.py | 87 + .../tools/mayalookassigner/vray_proxies.py | 101 +- openpype/hosts/nuke/api/lib.py | 18 +- openpype/hosts/nuke/api/plugin.py | 4 +- openpype/hosts/nuke/api/utils.py | 2 +- .../nuke/api/workfile_template_builder.py | 10 +- .../nuke/plugins/create/convert_legacy.py | 2 +- .../nuke/plugins/create/create_source.py | 2 +- openpype/hosts/nuke/plugins/load/actions.py | 5 +- .../hosts/nuke/plugins/load/load_backdrop.py | 22 +- .../nuke/plugins/load/load_script_precomp.py | 1 - .../nuke/plugins/publish/collect_backdrop.py | 32 +- .../plugins/publish/collect_context_data.py | 2 - .../nuke/plugins/publish/collect_gizmo.py | 1 - .../nuke/plugins/publish/collect_model.py | 1 - .../nuke/plugins/publish/collect_reads.py | 4 +- .../nuke/plugins/publish/collect_writes.py | 2 +- .../nuke/plugins/publish/validate_backdrop.py | 2 +- .../photoshop/api/extension/host/index.jsx | 6 +- openpype/hosts/photoshop/api/launch_logic.py | 4 +- .../plugins/publish/extract_review.py | 1 - openpype/hosts/resolve/api/lib.py | 6 +- openpype/hosts/resolve/api/menu_style.qss | 2 +- openpype/hosts/resolve/api/plugin.py | 17 +- .../publish/collect_bulk_mov_instances.py | 4 +- .../plugins/publish/collect_context.py | 8 +- .../plugins/publish/collect_editorial.py | 2 +- .../plugins/publish/validate_frame_ranges.py | 2 +- openpype/hosts/traypublisher/api/editorial.py | 38 +- .../plugins/create/create_editorial.py | 14 +- .../publish/collect_simple_instances.py | 2 +- .../hosts/tvpaint/api/communication_server.py | 4 +- openpype/hosts/tvpaint/api/pipeline.py | 5 - .../tvpaint/plugins/create/convert_legacy.py | 4 +- .../tvpaint/plugins/create/create_render.py | 15 +- .../publish/collect_instance_frames.py | 2 + 
.../help/validate_layers_visibility.xml | 2 +- .../help/validate_workfile_metadata.xml | 2 +- .../help/validate_workfile_project_name.xml | 2 +- .../plugins/publish/validate_asset_name.py | 14 +- .../publish/validate_layers_visibility.py | 2 +- .../tvpaint/plugins/publish/validate_marks.py | 13 +- .../publish/validate_scene_settings.py | 13 +- .../plugins/publish/validate_start_frame.py | 13 +- openpype/hosts/unreal/api/pipeline.py | 2 +- .../Source/OpenPype/Private/OpenPypeLib.cpp | 2 +- .../Public/Commandlets/OPActionResult.h | 12 +- .../Source/OpenPype/Private/OpenPypeLib.cpp | 2 +- .../Public/Commandlets/OPActionResult.h | 12 +- .../hosts/unreal/plugins/load/load_camera.py | 2 +- openpype/hosts/webpublisher/lib.py | 2 +- openpype/lib/applications.py | 20 +- openpype/lib/attribute_definitions.py | 6 +- openpype/lib/events.py | 2 +- openpype/lib/execute.py | 21 +- openpype/lib/file_transaction.py | 2 +- openpype/lib/transcoding.py | 6 +- openpype/lib/vendor_bin_utils.py | 4 +- openpype/modules/base.py | 14 +- openpype/modules/clockify/clockify_api.py | 2 +- openpype/modules/clockify/clockify_module.py | 2 +- openpype/modules/clockify/widgets.py | 2 +- .../publish/submit_celaction_deadline.py | 2 +- .../plugins/publish/submit_max_deadline.py | 72 +- .../plugins/publish/submit_nuke_deadline.py | 11 +- .../plugins/publish/submit_publish_job.py | 14 +- .../publish/validate_deadline_pools.py | 6 +- .../example_addons/example_addon/addon.py | 2 +- .../action_clone_review_session.py | 2 +- .../action_create_review_session.py | 2 +- .../action_prepare_project.py | 2 +- .../action_push_frame_values_to_task.py | 316 +- .../action_tranfer_hierarchical_values.py | 4 +- .../event_next_task_update.py | 2 +- .../event_push_frame_values_to_task.py | 1102 +- .../event_radio_buttons.py | 2 +- .../event_sync_to_avalon.py | 14 +- .../event_task_to_parent_status.py | 4 +- .../event_user_assigment.py | 4 +- .../event_version_to_task_statuses.py | 2 +- .../action_batch_task_creation.py 
| 2 +- .../action_create_cust_attrs.py | 6 +- .../action_create_folders.py | 2 +- .../action_delete_asset.py | 2 +- .../action_delete_old_versions.py | 4 +- .../event_handlers_user/action_delivery.py | 2 +- .../action_fill_workfile_attr.py | 2 +- .../event_handlers_user/action_job_killer.py | 6 +- .../action_prepare_project.py | 4 +- .../ftrack/event_handlers_user/action_rv.py | 2 +- .../ftrack/event_handlers_user/action_seed.py | 10 +- .../action_store_thumbnails_to_avalon.py | 2 +- .../ftrack/ftrack_server/event_server_cli.py | 6 +- openpype/modules/ftrack/lib/avalon_sync.py | 14 +- .../modules/ftrack/lib/custom_attributes.py | 2 +- .../ftrack/lib/ftrack_action_handler.py | 2 +- .../modules/ftrack/lib/ftrack_base_handler.py | 4 +- .../publish/collect_custom_attributes_data.py | 2 +- .../plugins/publish/integrate_ftrack_api.py | 2 +- .../publish/integrate_hierarchy_ftrack.py | 2 +- openpype/modules/ftrack/tray/ftrack_tray.py | 2 +- openpype/modules/interfaces.py | 2 +- .../plugins/publish/collect_kitsu_entities.py | 2 +- .../plugins/publish/integrate_kitsu_note.py | 40 +- .../plugins/publish/integrate_kitsu_review.py | 8 +- .../modules/kitsu/utils/update_op_with_zou.py | 22 +- openpype/modules/settings_action.py | 6 +- openpype/pipeline/colorspace.py | 5 +- openpype/pipeline/create/context.py | 91 +- openpype/pipeline/publish/contants.py | 1 + openpype/pipeline/publish/lib.py | 80 +- openpype/plugins/publish/cleanup.py | 4 + openpype/plugins/publish/cleanup_farm.py | 2 +- .../publish/collect_context_entities.py | 21 +- .../publish/collect_custom_staging_dir.py | 70 + openpype/plugins/publish/extract_burnin.py | 45 +- openpype/plugins/publish/extract_review.py | 405 +- openpype/plugins/publish/integrate.py | 4 +- .../plugins/publish/integrate_hero_version.py | 33 +- openpype/plugins/publish/integrate_legacy.py | 2 +- openpype/scripts/otio_burnin.py | 119 +- .../defaults/project_anatomy/templates.json | 6 +- .../defaults/project_settings/celaction.json | 7 + 
.../defaults/project_settings/deadline.json | 5 +- .../defaults/project_settings/global.json | 26 +- .../defaults/project_settings/kitsu.json | 5 +- .../defaults/project_settings/maya.json | 33 +- .../defaults/project_settings/tvpaint.json | 1 + .../system_settings/applications.json | 2 +- .../schema_project_celaction.json | 25 + .../schema_project_deadline.json | 17 +- .../projects_schema/schema_project_kitsu.json | 76 +- .../schema_project_tvpaint.json | 6 + .../schemas/schema_global_publish.json | 2 +- .../schemas/schema_global_tools.json | 65 + .../schemas/schema_maya_capture.json | 32 +- .../schemas/schema_maya_create.json | 5 + .../schemas/schema_maya_publish.json | 88 + openpype/tools/loader/model.py | 2 - .../project_manager/project_manager/view.py | 4 +- openpype/tools/publisher/constants.py | 5 +- openpype/tools/publisher/control.py | 185 +- openpype/tools/publisher/widgets/__init__.py | 6 +- .../publisher/widgets/card_view_widgets.py | 19 + .../tools/publisher/widgets/images/save.png | Bin 0 -> 3961 bytes .../publisher/widgets/list_view_widgets.py | 27 + .../publisher/widgets/overview_widget.py | 38 +- .../publisher/widgets/validations_widget.py | 4 +- openpype/tools/publisher/widgets/widgets.py | 34 +- openpype/tools/publisher/window.py | 129 +- openpype/tools/traypublisher/window.py | 2 +- openpype/tools/utils/lib.py | 4 +- openpype/tools/workfiles/files_widget.py | 22 +- openpype/tools/workfiles/save_as_dialog.py | 6 +- openpype/vendor/python/common/capture.py | 25 +- openpype/version.py | 2 +- pyproject.toml | 2 +- website/docs/admin_hosts_maya.md | 68 +- website/docs/artist_hosts_aftereffects.md | 36 +- .../assets/integrate_kitsu_note_settings.png | Bin 30524 -> 48874 bytes .../maya-admin_extract_playblast_settings.png | Bin 0 -> 26814 bytes ...ract_playblast_settings_camera_options.png | Bin 0 -> 16732 bytes ...ct_playblast_settings_viewport_options.png | Bin 0 -> 1064191 bytes website/docs/assets/maya-admin_gpu_cache.png | Bin 0 -> 20248 bytes 
website/docs/module_kitsu.md | 9 +- .../global_tools_custom_staging_dir.png | Bin 0 -> 9940 bytes .../settings_project_global.md | 107 +- website/docs/pype2/admin_presets_plugins.md | 4 +- website/yarn.lock | 11812 +++++++--------- 295 files changed, 11732 insertions(+), 9764 deletions(-) create mode 100644 .github/pr-branch-labeler.yml create mode 100644 .github/pr-glob-labeler.yml create mode 100644 .github/workflows/project_actions.yml create mode 100644 ARCHITECTURE.md create mode 100644 openpype/hosts/fusion/plugins/publish/collect_expected_frames.py delete mode 100644 openpype/hosts/fusion/plugins/publish/collect_render_target.py create mode 100644 openpype/hosts/fusion/plugins/publish/collect_renders.py create mode 100644 openpype/hosts/fusion/plugins/publish/extract_render_local.py delete mode 100644 openpype/hosts/fusion/plugins/publish/render_local.py create mode 100644 openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py delete mode 100644 openpype/hosts/fusion/scripts/set_rendermode.py create mode 100644 openpype/hosts/max/plugins/publish/increment_workfile_version.py create mode 100644 openpype/hosts/maya/plugins/publish/extract_gpu_cache.py create mode 100644 openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py delete mode 100644 openpype/hosts/maya/plugins/publish/validate_look_color_space.py create mode 100644 openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py create mode 100644 openpype/hosts/maya/tools/mayalookassigner/lib.py create mode 100644 openpype/plugins/publish/collect_custom_staging_dir.py create mode 100644 openpype/tools/publisher/widgets/images/save.png create mode 100644 website/docs/assets/maya-admin_extract_playblast_settings.png create mode 100644 website/docs/assets/maya-admin_extract_playblast_settings_camera_options.png create mode 100644 website/docs/assets/maya-admin_extract_playblast_settings_viewport_options.png create mode 100644 
website/docs/assets/maya-admin_gpu_cache.png create mode 100644 website/docs/project_settings/assets/global_tools_custom_staging_dir.png diff --git a/.github/pr-branch-labeler.yml b/.github/pr-branch-labeler.yml new file mode 100644 index 00000000000..b4343262366 --- /dev/null +++ b/.github/pr-branch-labeler.yml @@ -0,0 +1,15 @@ +# Apply label "feature" if head matches "feature/*" +'type: feature': + head: "feature/*" + +# Apply label "feature" if head matches "feature/*" +'type: enhancement': + head: "enhancement/*" + +# Apply label "bugfix" if head matches one of "bugfix/*" or "hotfix/*" +'type: bug': + head: ["bugfix/*", "hotfix/*"] + +# Apply label "release" if base matches "release/*" +'Bump Minor': + base: "release/next-minor" diff --git a/.github/pr-glob-labeler.yml b/.github/pr-glob-labeler.yml new file mode 100644 index 00000000000..286e7768b5e --- /dev/null +++ b/.github/pr-glob-labeler.yml @@ -0,0 +1,102 @@ +# Add type: unittest label if any changes in tests folders +'type: unittest': +- '*/*tests*/**/*' + +# any changes in documentation structure +'type: documentation': +- '*/**/*website*/**/*' +- '*/**/*docs*/**/*' + +# hosts triage +'host: Nuke': +- '*/**/*nuke*' +- '*/**/*nuke*/**/*' + +'host: Photoshop': +- '*/**/*photoshop*' +- '*/**/*photoshop*/**/*' + +'host: Harmony': +- '*/**/*harmony*' +- '*/**/*harmony*/**/*' + +'host: UE': +- '*/**/*unreal*' +- '*/**/*unreal*/**/*' + +'host: Houdini': +- '*/**/*houdini*' +- '*/**/*houdini*/**/*' + +'host: Maya': +- '*/**/*maya*' +- '*/**/*maya*/**/*' + +'host: Resolve': +- '*/**/*resolve*' +- '*/**/*resolve*/**/*' + +'host: Blender': +- '*/**/*blender*' +- '*/**/*blender*/**/*' + +'host: Hiero': +- '*/**/*hiero*' +- '*/**/*hiero*/**/*' + +'host: Fusion': +- '*/**/*fusion*' +- '*/**/*fusion*/**/*' + +'host: Flame': +- '*/**/*flame*' +- '*/**/*flame*/**/*' + +'host: TrayPublisher': +- '*/**/*traypublisher*' +- '*/**/*traypublisher*/**/*' + +'host: 3dsmax': +- '*/**/*max*' +- '*/**/*max*/**/*' + +'host: TV 
Paint': +- '*/**/*tvpaint*' +- '*/**/*tvpaint*/**/*' + +'host: CelAction': +- '*/**/*celaction*' +- '*/**/*celaction*/**/*' + +'host: After Effects': +- '*/**/*aftereffects*' +- '*/**/*aftereffects*/**/*' + +'host: Substance Painter': +- '*/**/*substancepainter*' +- '*/**/*substancepainter*/**/*' + +# modules triage +'module: Deadline': +- '*/**/*deadline*' +- '*/**/*deadline*/**/*' + +'module: RoyalRender': +- '*/**/*royalrender*' +- '*/**/*royalrender*/**/*' + +'module: Sitesync': +- '*/**/*sync_server*' +- '*/**/*sync_server*/**/*' + +'module: Ftrack': +- '*/**/*ftrack*' +- '*/**/*ftrack*/**/*' + +'module: Shotgrid': +- '*/**/*shotgrid*' +- '*/**/*shotgrid*/**/*' + +'module: Kitsu': +- '*/**/*kitsu*' +- '*/**/*kitsu*/**/*' diff --git a/.github/workflows/project_actions.yml b/.github/workflows/project_actions.yml new file mode 100644 index 00000000000..b21946f0eea --- /dev/null +++ b/.github/workflows/project_actions.yml @@ -0,0 +1,118 @@ +name: project-actions + +on: + pull_request_target: + types: [opened, assigned] + pull_request_review: + types: [submitted] + issue_comment: + types: [created] + pull_request_review_comment: + types: [created] + +jobs: + + pr_review_started: + name: pr_review_started + runs-on: ubuntu-latest + # ----------------------------- + # conditions are: + # - PR issue comment which is not form Ynbot + # - PR review comment which is not Hound (or any other bot) + # - PR review submitted which is not from Hound (or any other bot) and is not 'Changes requested' + # - make sure it only runs if not forked repo + # ----------------------------- + if: | + (github.event_name == 'issue_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.id != 82967070) || + (github.event_name == 'pull_request_review_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.type != 'Bot') || + (github.event_name == 'pull_request_review' && 
github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.review.state != 'changes_requested' && github.event.review.user.type != 'Bot') + steps: + - name: Move PR to 'Review In Progress' + uses: leonsteinhaeuser/project-beta-automations@v2.1.0 + with: + gh_token: ${{ secrets.YNPUT_BOT_TOKEN }} + organization: ynput + project_id: 11 + resource_node_id: ${{ github.event.pull_request.node_id || github.event.issue.node_id }} + status_value: Review In Progress + + pr_review_requested: + # ----------------------------- + # Resets Clickup Task status to 'In Progress' after 'Changes Requested' were submitted to PR + # It only runs if custom clickup task id was found in ref branch of PR + # ----------------------------- + name: pr_review_requested + runs-on: ubuntu-latest + if: github.event_name == 'pull_request_review' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.review.state == 'changes_requested' + steps: + - name: Set branch env + run: echo "BRANCH_NAME=${{ github.event.pull_request.head.ref}}" >> $GITHUB_ENV + - name: Get ClickUp ID from ref head name + id: get_cuID + run: | + echo ${{ env.BRANCH_NAME }} + echo "cuID=$(echo $BRANCH_NAME | sed 's/.*\/\(OP\-[0-9]\{4\}\).*/\1/')" >> $GITHUB_OUTPUT + + - name: Print ClickUp ID + run: echo ${{ steps.get_cuID.outputs.cuID }} + + - name: Move found Clickup task to 'Review in Progress' + if: steps.get_cuID.outputs.cuID + run: | + curl -i -X PUT \ + 'https://api.clickup.com/api/v2/task/${{ steps.get_cuID.outputs.cuID }}?custom_task_ids=true&team_id=${{secrets.CLICKUP_TEAM_ID}}' \ + -H 'Authorization: ${{secrets.CLICKUP_API_KEY}}' \ + -H 'Content-Type: application/json' \ + -d '{ + "status": "in progress" + }' + + size-label: + name: pr_size_label + runs-on: ubuntu-latest + if: | + (github.event_name == 'pull_request' && github.event.action == 'assigned') || + (github.event_name == 'pull_request' && github.event.action == 'opened') + + steps: + - name: Add size label + uses: 
"pascalgn/size-label-action@v0.4.3" + env: + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" + IGNORED: ".gitignore\n*.md\n*.json" + with: + sizes: > + { + "0": "XS", + "100": "S", + "500": "M", + "1000": "L", + "1500": "XL", + "2500": "XXL" + } + + label_prs_branch: + name: pr_branch_label + runs-on: ubuntu-latest + if: | + (github.event_name == 'pull_request' && github.event.action == 'assigned') || + (github.event_name == 'pull_request' && github.event.action == 'opened') + steps: + - name: Label PRs - Branch name detection + uses: ffittschen/pr-branch-labeler@v1 + with: + repo-token: ${{ secrets.YNPUT_BOT_TOKEN }} + + label_prs_globe: + name: pr_globe_label + runs-on: ubuntu-latest + if: | + (github.event_name == 'pull_request' && github.event.action == 'assigned') || + (github.event_name == 'pull_request' && github.event.action == 'opened') + steps: + - name: Label PRs - Globe detection + uses: actions/labeler@v4.0.3 + with: + repo-token: ${{ secrets.YNPUT_BOT_TOKEN }} + configuration-path: ".github/pr-glob-labeler.yml" + sync-labels: false diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 00000000000..912780d803b --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,77 @@ +# Architecture + +OpenPype is a monolithic Python project that bundles several parts, this document will try to give a birds eye overview of the project and, to a certain degree, each of the sub-projects. +The current file structure looks like this: + +``` +. +├── common - Code in this folder is backend portion of Addon distribution logic for v4 server. +├── docs - Documentation of the source code. +├── igniter - The OpenPype bootstrapper, deals with running version resolution and setting up the connection to the mongodb. +├── openpype - The actual OpenPype core package. +├── schema - Collection of JSON files describing schematics of objects. This follows Avalon's convention. +├── tests - Integration and unit tests. 
+├── tools - Convenience scripts to perform common actions (in both bash and ps1). +├── vendor - When using the igniter, it deploys third party tools in here, such as ffmpeg. +└── website - Source files for https://openpype.io/ which is Docusaurus (https://docusaurus.io/). +``` + +The core functionality of the pipeline can be found in `igniter` and `openpype`, which in turn rely on the `schema` files, whenever you build (or download a pre-built) version of OpenPype, these two are bundled in there, and `Igniter` is the entry point. + + +## Igniter + +It's the setup and update tool for OpenPype, unless you want to package `openpype` separately and deal with all the config manually, this will most likely be your entry point. + +``` +igniter/ +├── bootstrap_repos.py - Module that will find or install OpenPype versions in the system. +├── __init__.py - Igniter entry point. +├── install_dialog.py - Show dialog for choosing central pype repository. +├── install_thread.py - Threading helpers for the install process. +├── __main__.py - Like `__init__.py` ? +├── message_dialog.py - Qt Dialog with a message and "Ok" button. +├── nice_progress_bar.py - Fancy Qt progress bar. +├── splash.txt - ASCII art for the terminal installer. +├── stylesheet.css - Installer Qt styles. +├── terminal_splash.py - Terminal installer animation, relies on `splash.txt`. +├── tools.py - Collection of methods that don't fit in other modules. +├── update_thread.py - Threading helper to update existing OpenPype installs. +├── update_window.py - Qt UI to update OpenPype installs. +├── user_settings.py - Interface for the OpenPype user settings. +└── version.py - Igniter's version number. 
+``` + +## OpenPype + +This is the main package of the OpenPype logic, it could be loosely described as a combination of [Avalon](https://getavalon.github.io), [Pyblish](https://pyblish.com/) and glue around those with custom OpenPype only elements, things are in progress of being moved around to better prepare for V4, which will be released under a new name AYON. + +``` +openpype/ +├── client - Interface for the MongoDB. +├── hooks - Hooks to be executed on certain OpenPype Applications defined in `openpype.lib.applications`. +├── host - Base class for the different hosts. +├── hosts - Integration with the different DCCs (hosts) using the `host` base class. +├── lib - Libraries that stitch together the package, some have been moved into other parts. +├── modules - OpenPype modules should contain separated logic of specific kind of implementation, such as Ftrack connection and its python API. +├── pipeline - Core of the OpenPype pipeline, handles creation of data, publishing, etc. +├── plugins - Global/core plugins for loader and publisher tool. +├── resources - Icons, fonts, etc. +├── scripts - Loose scripts that get run by tools/publishers. +├── settings - OpenPype settings interface. +├── style - Qt styling. +├── tests - Unit tests. +├── tools - Core tools, check out https://openpype.io/docs/artist_tools. +├── vendor - Vendoring of required Python packages. +├── widgets - Common re-usable Qt Widgets. +├── action.py - LEGACY: Lives now in `openpype.pipeline.publish.action` Pyblish actions. +├── cli.py - Command line interface, leverages `click`. +├── __init__.py - Sets two constants. +├── __main__.py - Entry point, calls the `cli.py` +├── plugin.py - Pyblish plugins. +├── pype_commands.py - Implementation of OpenPype commands. +└── version.py - Current version number. 
+``` + + + diff --git a/CHANGELOG.md b/CHANGELOG.md index 145c2e2c1ac..4e22b783c4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,931 @@ # Changelog +## [3.15.3](https://github.com/ynput/OpenPype/tree/3.15.3) + + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.2...3.15.3) + +### **🆕 New features** + + +
+Blender: Extract Review #3616 + +Added Review to Blender. + +This implementation is based on #3508 but made compatible for the current implementation of OpenPype for Blender. + + +___ + +
+ + +
+Data Exchanges: Point Cloud for 3dsMax #4532 + +Publish PRT format with tyFlow in 3dsmax + +Publish PRT format with tyFlow in 3dsmax and possibly set up loader to load the format too. +- [x] creator +- [x] extractor +- [x] validator +- [x] loader + + +___ + +
+ + +
+Global: persistent staging directory for renders #4583 + +Allows configure if staging directory (`stagingDir`) should be persistent with use of profiles. + +With this feature, users can specify a transient data folder path based on presets, which can be used during the creation and publishing stages. In some cases, these DCCs automatically add a rendering path during the creation stage, which is then used in publishing.One of the key advantages of this feature is that it allows users to take advantage of faster storages for rendering, which can help improve workflow efficiency. Additionally, this feature allows users to keep their rendered data persistent, and use their own infrastructure for regular cleaning.However, it should be noted that some productions may want to use this feature without persistency. Furthermore, there may be a need for retargeting the rendering folder to faster storages, which is also not supported at the moment.It is studio responsibility to clean up obsolete folders with data.Location of the folder is configured in `project_anatomy/templates/others`. ('transient' key is expected, with 'folder' key, could be more templates)Which family/task type/subset is applicable is configured in:`project_settings/global/tools/publish/transient_dir_profiles` + + +___ + +
+ + +
+Kitsu custom comment template #4599 + +Kitsu allows to write markdown in its comment field. This can be something very powerful to deliver dynamic comments with the help the data from the instance.This feature is defaults to off so the admin have to manually set up the comment field the way they want.I have added a basic example on how the comment can look like as the comment-fields default value.To this I want to add some documentation also but that's on its way when the code itself looks good for the reviewers. + + +___ + +
+ + +
+MaxScene Family #4615 + +Introduction of the Max Scene Family + + +___ + +
+ +### **🚀 Enhancements** + + +
+Maya: Multiple values on single render attribute - OP-4131 #4631 + +When validating render attributes, this adds support for multiple values. When repairing first value in list is used. + + +___ + +
+ + +
+Maya: enable 2D Pan/Zoom for playblasts - OP-5213 #4687 + +Setting for enabling 2D Pan/Zoom on reviews. + + +___ + +
+ + +
+Copy existing or generate new Fusion profile on prelaunch #4572 + +Fusion preferences will be copied to the predefined `~/.openpype/hosts/fusion/prefs` folder (or any other folder set in system settings) on launch. + +The idea is to create a copy of existing Fusion profile, adding an OpenPype menu to the Fusion instance.By default the copy setting is turned off, so no file copying is performed. Instead the clean Fusion profile is created by Fusion in the predefined folder. The default locaion is set to `~/.openpype/hosts/fusion/prefs`, to better comply with the other os platforms. After creating the default profile, some modifications are applied: +- forced Python3 +- forced English interface +- setup Openpype specific path maps.If the `copy_prefs` checkbox is toggled, a copy of existing Fusion profile folder will be placed in the mentioned location. Then they are altered the same way as described above. The operation is run only once, on the first launch, unless the `force_sync [Resync profile on each launch]` is toggled.English interface is forced because the `FUSION16_PROFILE_DIR` environment variable is not read otherwise (seems to be a Fusion bug). + + +___ + +
+ + +
+Houdini: Create button open new publisher's "create" tab #4601 + +During a talk with @maxpareschi he mentioned that the new publisher in Houdini felt super confusing due to "Create" going to the older creator but now being completely empty and the publish button directly went to the publish tab.This resolves that by fixing the Create button to now open the new publisher but on the Create tab.Also made publish button enforce going to the "publish" tab for consistency in usage.@antirotor I think changing the Create button's callback was just missed in this commit or was there a specific reason to not change that around yet? + + +___ + +
+ + +
+Clockify: refresh and fix the integration #4607 + +Due to recent API changes, Clockify requires `user_id` to operate with the timers. I updated this part and currently it is a WIP for making it fully functional. Most functions, such as start and stop timer, and projects sync are currently working. For the rate limiting task new dependency is added: https://pypi.org/project/ratelimiter/ + + +___ + +
+ + +
+Fusion publish existing frames #4611 + +This PR adds the function to publish existing frames instead of having to re-render all of them for each new publish.I have split the render_locally plugin so the review-part is its own plugin now.I also change the saver-creator-plugin's label from Saver to Render (saver) as I intend to add a Prerender creator like in Nuke. + + +___ + +
+ + +
+Resolution settings referenced from DB record for 3dsMax #4652 + +- Add Callback for setting the resolution according to DB after the new scene is created. +- Add a new Action into openpype menu which allows the user to reset the resolution in 3dsMax + + +___ + +
+ + +
+3dsmax: render instance settings in Publish tab #4658 + +Allows user preset the pools, group and use_published settings in Render Creator in the Max Hosts.User can set the settings before or after creating instance in the new publisher + + +___ + +
+ + +
+scene length setting referenced from DB record for 3dsMax #4665 + +Setting the timeline length based on DB record in 3dsMax Hosts + + +___ + +
+ + +
+Publisher: Windows reduce command window pop-ups during Publishing #4672 + +Reduce the command line pop-ups that show on Windows during publishing. + + +___ + +
+ + +
+Publisher: Explicit save #4676 + +Publisher have explicit button to save changes, so reset can happen without saving any changes. Save still happens automatically when publishing is started or on publisher window close. But a popup is shown if context of host has changed. Important context was enhanced by workfile path (if host integration supports it) so workfile changes are captured too. In that case a dialog with confirmation is shown to user. All callbacks that may require save of context were moved to main window to be able handle dialog show at one place. Save changes now returns success so the rest of logic is skipped -> publishing won't start, when save of instances fails.Save and reset buttons have shortcuts (Ctrl + s and Ctrls + r). + + +___ + +
+ + +
+CelAction: conditional workfile parameters from settings #4677 + +Since some productions were requesting excluding some workfile parameters from publishing submission, we needed to move them to settings so those could be altered per project. + + +___ + +
+ + +
+Improve logging of used app + tool envs on application launch #4682 + +Improve logging of what apps + tool environments got loaded for an application launch. + + +___ + +
+ + +
+Fix name and docstring for Create Workdir Extra Folders prelaunch hook #4683 + +Fix class name and docstring for Create Workdir Extra Folders prelaunch hookThe class name and docstring were originally copied from another plug-in and didn't match the plug-in logic.This also fixes potentially seeing this twice in your logs. Before:After:Where it was actually running both this prelaunch hook and the actual `AddLastWorkfileToLaunchArgs` plugin. + + +___ + +
+ + +
+Application launch context: Include app group name in logger #4684 + +Clarify in logs better what app group the ApplicationLaunchContext belongs to and what application is being launched.Before:After: + + +___ + +
+ + +
+increment workfile version 3dsmax #4685 + +increment workfile version in 3dsmax as if in blender and maya hosts. + + +___ + +
+ +### **🐛 Bug fixes** + + +
+Maya: Fix getting non-active model panel. #2968 + +When capturing multiple cameras with image planes that have file sequences playing, only the active (first) camera will play through the file sequence. + + +___ + +
+ + +
+Maya: Fix broken review publishing. #4549 + +Resolves #4547 + + +___ + +
+ + +
+Maya: Avoid error on right click in Loader if `mtoa` is not loaded #4616 + +Fix an error on right clicking in the Loader when `mtoa` is not a loaded plug-in.Additionally if `mtoa` isn't loaded the loader will now load the plug-in before trying to create the arnold standin. + + +___ + +
+ + +
+Maya: Fix extract look colorspace detection #4618 + +Fix the logic which guesses the colorspace using `arnold` python library. +- Previously it'd error if `mtoa` was not available on path so it still required `mtoa` to be available. +- The guessing colorspace logic doesn't actually require `mtoa` to be loaded, but just the `arnold` python library to be available. This changes the logic so it doesn't require the `mtoa` plugin to get loaded to guess the colorspace. +- The if/else branch was likely not doing what was intended `cmds.loadPlugin("mtoa", quiet=True)` returns None if the plug-in was already loaded. So this would only ever be true if it ends up loading the `mtoa` plugin the first time. +```python +# Tested in Maya 2022.1 +print(cmds.loadPlugin("mtoa", quiet=True)) +# ['mtoa'] +print(cmds.loadPlugin("mtoa", quiet=True)) +# None +``` + + +___ + +
+ + +
+Maya: Maya Playblast Options overrides - OP-3847 #4634 + +When publishing a review in Maya, the extractor would fail due to wrong (long) panel name. + + +___ + +
+ + +
+Bugfix/op 2834 fix extract playblast #4701 + +Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation. + + +___ + +
+ + +
+Bugfix/op 2834 fix extract playblast #4704 + +Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation. + + +___ + +
+ + +
+Maya: bug fix for passing zoom settings if review is attached to subset #4716 + +Fix for attaching review to subset with pan/zoom option. + + +___ + +
+ + +
+Maya: tile assembly fail in draft - OP-4820 #4416 + +Tile assembly in Deadline was broken. + +Initial bug report revealed other areas of the tile assembly that needed fixing. + + +___ + +
+ + +
+Maya: Yeti Validate Rig Input - OP-3454 #4554 + +Fix Yeti Validate Rig Input + +Existing workflow was broken due to this #3297. + + +___ + +
+ + +
+Scene inventory: Fix code errors when "not found" entries are found #4594 + +Whenever a "NOT FOUND" entry is present a lot of errors happened in the Scene Inventory: +- It started spamming a lot of errors for the VersionDelegate since it had no numeric version (no version at all).Error reported on Discord: +```python +Traceback (most recent call last): + File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 65, in paint + text = self.displayText( + File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 33, in displayText + assert isinstance(value, numbers.Integral), ( +AssertionError: Version is not integer. "None" +``` +- Right click menu would error on NOT FOUND entries, and thus not show. With this PR it will now _disregard_ not found items for "Set version" and "Remove" but still allow actions.This PR resolves those. + + +___ + +
+ + +
+Kitsu: Sync OP with zou, make sure value-data is int or float #4596 + +Currently the data zou pulls is a string and not a value causing some bugs in the pipe where a value is expected (like `Set frame range` in Fusion). + + + +This PR makes sure each value is set with int() or float() so these bugs can't happen later on. + + + +_(A request to cgwire has also bin sent to allow force values only for some metadata columns, but currently the user can enter what ever they want in there)_ + + +___ + +
+ + +
+Max: fix the bug of removing an instance #4617 + +fix the bug of removing an instance in 3dsMax + + +___ + +
+ + +
+Global | Nuke: fixing farm publishing workflow #4623 + +After Nuke had adopted new publisher with new creators new issues were introduced. Those issues were addressed with this PR. Those are for example broken reviewable video files publishing if published via farm. Also fixed local publishing. + + +___ + +
+ + +
+Ftrack: Ftrack additional families filtering #4633 + +Ftrack family collector makes sure the subset family is also in instance families for additional families filtering. + + +___ + +
+ + +
+Ftrack: Hierarchical <> Non-Hierarchical attributes sync fix #4635 + +Sync between hierarchical and non-hierarchical attributes should be fixed and work as expected. Action should sync the values as expected and event handler should do it too and only on newly created entities. + + +___ + +
+ + +
+bugfix for 3dsmax publishing error #4637 + +fix the bug of failing publishing job in 3dsMax + + +___ + +
+ + +
+General: Use right validation for ffmpeg executable #4640 + +Use ffmpeg exec validation for ffmpeg executables instead of oiio exec validation. The validation is used as last possible source of ffmpeg from `PATH` environment variables, which is an edge case but can cause issues. + + +___ + +
+ + +
+3dsmax: opening last workfile #4644 + +Supports opening last saved workfile in 3dsmax host. + + +___ + +
+ + +
+Fixed a bug where a QThread in the splash screen could be destroyed before finishing execution #4647 + +This should fix the occasional behavior of the QThread being destroyed before even its worker returns from the `run()` function.After quiting, it should wait for the QThread object to properly close itself. + + +___ + +
+ + +
+General: Use right plugin class for Collect Comment #4653 + +Collect Comment plugin is instance plugin so should inherit from `InstancePlugin` instead of `ContextPlugin`. + + +___ + +
+ + +
+Global: add tags field to thumbnail representation #4660 + +Thumbnail representation might be missing tags field. + + +___ + +
+ + +
+Integrator: Enforce unique destination transfers, disallow overwrites in queued transfers #4662 + +Fix #4656 by enforcing unique destination transfers in the Integrator. It's now disallowed to a destination in the file transaction queue with a new source path during the publish. + + +___ + +
+ + +
+Hiero: Creator with correct workfile numeric padding input #4666 + +Creator was showing 99 in workfile input for long time, even if users set default value to 1001 in studio settings. This has been fixed now. + + +___ + +
+ + +
+Nuke: Nukenodes family instance without frame range #4669 + +No need to add frame range data into `nukenodes` (backdrop) family publishes - since those are timeless. + + +___ + +
+ + +
+TVPaint: Optional Validation plugins can be de/activated by user #4674 + +Added `OptionalPyblishPluginMixin` to TVpaint plugins that can be optional. + + +___ + +
+ + +
+Kitsu: Slightly less strict with instance data #4678 + +- Allow to take task name from context if asset doesn't have any. Fixes an issue with Photoshop's review instance not having `task` in data. +- Allow to match "review" against both `instance.data["family"]` and `instance.data["families"]` because some instances don't have the primary family in families, e.g. in Photoshop and TVPaint. +- Do not error on Integrate Kitsu Review whenever for whatever reason Integrate Kitsu Note did not created a comment but just log the message that it was unable to connect a review. + + +___ + +
+ + +
+Publisher: Fix reset shortcut sequence #4694 + +Fix bug created in https://github.com/ynput/OpenPype/pull/4676 where key sequence is checked using unsupported method. The check was changed to convert event into `QKeySequence` object which can be compared to prepared sequence. + + +___ + +
+ + +
+Refactor _capture #4702 + +Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation. + + +___ + +
+ + +
+Hiero: correct container colors if UpToDate #4708 + +Colors on loaded containers are now correctly identifying real state of version. `Red` for out of date and `green` for up to date. + + +___ + +
+ +### **🔀 Refactored code** + + +
+Look Assigner: Move Look Assigner tool since it's Maya only #4604 + +Fix #4357: Move Look Assigner tool to maya since it's Maya only + + +___ + +
+ + +
+Maya: Remove unused functions from Extract Look #4671 + +Remove unused functions from Maya Extract Look plug-in + + +___ + +
+ + +
+Extract Review code refactor #3930 + +Trying to reduce complexity of Extract Review plug-in +- Re-use profile filtering from lib +- Remove "combination families" additional filtering which supposedly was from OP v2 +- Simplify 'formatting' for filling gaps +- Use `legacy_io.Session` over `os.environ` + + +___ + +
+ + +
+Maya: Replace last usages of Qt module #4610 + +Replace last usage of `Qt` module with `qtpy`. This change is needed for `PySide6` support. All changes happened in Maya loader plugins. + + +___ + +
+ + +
+Update tests and documentation for `ColormanagedPyblishPluginMixin` #4612 + +Refactor `ExtractorColormanaged` to `ColormanagedPyblishPluginMixin` in tests and documentation. + + +___ + +
+ + +
+Improve logging of used app + tool envs on application launch (minor tweak) #4686 + +Use `app.full_name` for change done in #4682 + + +___ + +
+ +### **📃 Documentation** + + +
+Docs/add architecture document #4344 + +Add `ARCHITECTURE.md` document. + +This document attempts to give a quick overview of the project to help onboarding; it's not extensive documentation but more of an elevator pitch with one-line descriptions of files/directories and what they attempt to do. + + +___ + +
+ + +
+Documentation: Tweak grammar and fix some typos #4613 + +This resolves some grammar and typos in the documentation. Also fixes the extension of some images in the After Effects docs which used an uppercase extension even though the files had a lowercase extension. + + +___ + +
+ + +
+Docs: Fix some minor grammar/typos #4680 + +Typo/grammar fixes in documentation. + + +___ + +
+ +### **Merged pull requests** + + +
+Maya: Implement image file node loader #4313 + +Implements a loader for loading a texture image into a `file` node in Maya. + +Similar to Maya's hypershade creation of textures on load you have the option to choose from three modes of creating: +- Texture +- Projection +- Stencil + +These should match what Maya generates if you create those in Maya. +- [x] Load and manage file nodes +- [x] Apply color spaces after #4195 +- [x] Support for _either_ UDIM or image sequence - currently it seems to always load sequences as UDIM automatically. +- [ ] Add support for animation sequences of UDIM textures using the `..exr` path format? + + +___ + +
+ + +
+Maya Look Assigner: Don't rely on containers for get all assets #4600 + +This resolves #4044 by not actually relying on containers in the scene but instead just relying on finding nodes with `cbId` attributes. As such, imported nodes would also be found and a shader can be assigned (similar to when using get from selection). **Please take into consideration the potential downsides below.** Potential downsides would be: +- IF an already loaded look has any dagNodes, say a 3D Projection node - then that will also show up as a loaded asset where previously nodes from loaded looks were ignored. +- If any dag nodes were created locally - they would have gotten `cbId` attributes on scene save and thus the current asset would almost always show? + + +___ + +
+ + +
+Maya: Unify menu labels for "Set Frame Range" and "Set Resolution" #4605 + +Fix #4109: Unify menu labels for "Set Frame Range" and "Set Resolution". This also tweaks it in Houdini from Reset Frame Range to Set Frame Range. + + +___ + +
+ + +
+Resolve missing OPENPYPE_MONGO in deadline global job preload #4484 + +In the GlobalJobPreLoad plugin, we propose to replace the SpawnProcess with a subprocess and to pass the environment variables in the parameters, since the SpawnProcess under CentOS Linux does not pass the environment variables. + +In the GlobalJobPreLoad plugin, the Deadline SpawnProcess is used to start the OpenPype process. The problem is that the SpawnProcess does not pass environment variables, including OPENPYPE_MONGO, to the process when it is under CentOS 7 Linux, and the process gets stuck. We propose to replace it with a subprocess and to pass the variable in the parameters. + + +___ + +
+ + +
+Tests: Added setup_only to tests #4591 + +Allows downloading the test zip, unzipping it and restoring the DB in preparation for a new test. + + +___ + +
+ + +
+Maya: Arnold don't reset maya timeline frame range on render creation (or setting render settings) #4603 + +Fix #4429: Do not reset fps or playback timeline on applying or creating render settings + + +___ + +
+ + +
+Bump @sideway/formula from 3.0.0 to 3.0.1 in /website #4609 + +Bumps [@sideway/formula](https://github.com/sideway/formula) from 3.0.0 to 3.0.1. +
+Commits + +
+
+Maintainer changes +

This version was pushed to npm by marsup, a new releaser for @​sideway/formula since your current version.

+
+
+ + +[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@sideway/formula&package-manager=npm_and_yarn&previous-version=3.0.0&new-version=3.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) + +Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. + +[//]: # (dependabot-automerge-start) +[//]: # (dependabot-automerge-end) + +--- + +
+Dependabot commands and options +
+ +You can trigger Dependabot actions by commenting on this PR: +- `@dependabot rebase` will rebase this PR +- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it +- `@dependabot merge` will merge this PR after your CI passes on it +- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it +- `@dependabot cancel merge` will cancel a previously requested merge and block automerging +- `@dependabot reopen` will reopen this PR if it is closed +- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually +- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) +- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) +- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) +- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language +- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language +- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language +- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language + +You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts). + +
+___ + +
+ + +
+Update artist_hosts_maya_arnold.md #4626 + +Correct Arnold docs. +___ + +
+ + +
+Maya: Add "Include Parent Hierarchy" option in animation creator plugin #4645 + +Add an option in Project Settings > Maya > Creator Plugins > Create Animation to include (or not) parent hierarchy. This is to avoid artists to check manually the option for all create animation. + + +___ + +
+ + +
+General: Filter available applications #4667 + +Added option to filter applications that don't have valid executable available in settings in launcher and ftrack actions. This option can be disabled in new settings category `Applications`. The filtering is by default disabled. + + +___ + +
+ + +
+3dsmax: make sure that startup script executes #4695 + +Fixing reliability of OpenPype startup in 3dsmax. + + +___ + +
+ + +
+Project Manager: Change minimum frame start/end to '0' #4719 + +Project manager can have frame start/end set to `0`. + + +___ + +
+ + + +## [3.15.2](https://github.com/ynput/OpenPype/tree/3.15.2) [Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.1...3.15.2) diff --git a/openpype/client/entities.py b/openpype/client/entities.py index c415be8816f..7054658c645 100644 --- a/openpype/client/entities.py +++ b/openpype/client/entities.py @@ -3,7 +3,7 @@ Goal is that most of functions here are called on (or with) an object that has project name as a context (e.g. on 'ProjectEntity'?). -+ We will need more specific functions doing wery specific queires really fast. ++ We will need more specific functions doing very specific queries really fast. """ import re @@ -193,7 +193,7 @@ def _get_assets( be found. asset_names (Iterable[str]): Name assets that should be found. parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. - standard (bool): Query standart assets (type 'asset'). + standard (bool): Query standard assets (type 'asset'). archived (bool): Query archived assets (type 'archived_asset'). fields (Iterable[str]): Fields that should be returned. All fields are returned if 'None' is passed. @@ -1185,7 +1185,7 @@ def get_representations( standard=True, fields=None ): - """Representaion entities data from one project filtered by filters. + """Representation entities data from one project filtered by filters. Filters are additive (all conditions must pass to return subset). @@ -1231,7 +1231,7 @@ def get_archived_representations( names_by_version_ids=None, fields=None ): - """Archived representaion entities data from project with applied filters. + """Archived representation entities data from project with applied filters. Filters are additive (all conditions must pass to return subset). diff --git a/openpype/client/notes.md b/openpype/client/notes.md index a261b86eca9..59743892eb6 100644 --- a/openpype/client/notes.md +++ b/openpype/client/notes.md @@ -2,7 +2,7 @@ ## Reason Preparation for OpenPype v4 server. 
Goal is to remove direct mongo calls in code to prepare a little bit for different source of data for code before. To start think about database calls less as mongo calls but more universally. To do so was implemented simple wrapper around database calls to not use pymongo specific code. -Current goal is not to make universal database model which can be easily replaced with any different source of data but to make it close as possible. Current implementation of OpenPype is too tighly connected to pymongo and it's abilities so we're trying to get closer with long term changes that can be used even in current state. +Current goal is not to make universal database model which can be easily replaced with any different source of data but to make it close as possible. Current implementation of OpenPype is too tightly connected to pymongo and it's abilities so we're trying to get closer with long term changes that can be used even in current state. ## Queries Query functions don't use full potential of mongo queries like very specific queries based on subdictionaries or unknown structures. We try to avoid these calls as much as possible because they'll probably won't be available in future. If it's really necessary a new function can be added but only if it's reasonable for overall logic. All query functions were moved to `~/client/entities.py`. Each function has arguments with available filters and possible reduce of returned keys for each entity. @@ -14,7 +14,7 @@ Changes are a little bit complicated. Mongo has many options how update can happ Create operations expect already prepared document data, for that are prepared functions creating skeletal structures of documents (do not fill all required data), except `_id` all data should be right. Existence of entity is not validated so if the same creation operation is send n times it will create the entity n times which can cause issues. 
### Update -Update operation require entity id and keys that should be changed, update dictionary must have {"key": value}. If value should be set in nested dictionary the key must have also all subkeys joined with dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify update dictionaries were prepared functions which does that for you, their name has template `prepare__update_data` - they work on comparison of previous document and new document. If there is missing function for requested entity type it is because we didn't need it yet and require implementaion. +Update operation require entity id and keys that should be changed, update dictionary must have {"key": value}. If value should be set in nested dictionary the key must have also all subkeys joined with dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify update dictionaries were prepared functions which does that for you, their name has template `prepare__update_data` - they work on comparison of previous document and new document. If there is missing function for requested entity type it is because we didn't need it yet and require implementation. ### Delete Delete operation need entity id. Entity will be deleted from mongo. diff --git a/openpype/client/operations.py b/openpype/client/operations.py index fd639c34a7c..ef48f2a1c49 100644 --- a/openpype/client/operations.py +++ b/openpype/client/operations.py @@ -368,7 +368,7 @@ def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): class AbstractOperation(object): """Base operation class. - Opration represent a call into database. The call can create, change or + Operation represent a call into database. The call can create, change or remove data. Args: @@ -409,7 +409,7 @@ def to_mongo_operation(self): pass def to_data(self): - """Convert opration to data that can be converted to json or others. + """Convert operation to data that can be converted to json or others. 
Warning: Current state returns ObjectId objects which cannot be parsed by @@ -428,7 +428,7 @@ def to_data(self): class CreateOperation(AbstractOperation): - """Opeartion to create an entity. + """Operation to create an entity. Args: project_name (str): On which project operation will happen. @@ -485,7 +485,7 @@ def to_data(self): class UpdateOperation(AbstractOperation): - """Opeartion to update an entity. + """Operation to update an entity. Args: project_name (str): On which project operation will happen. @@ -552,7 +552,7 @@ def to_data(self): class DeleteOperation(AbstractOperation): - """Opeartion to delete an entity. + """Operation to delete an entity. Args: project_name (str): On which project operation will happen. diff --git a/openpype/hooks/pre_create_extra_workdir_folders.py b/openpype/hooks/pre_create_extra_workdir_folders.py index c5af620c877..8856281120f 100644 --- a/openpype/hooks/pre_create_extra_workdir_folders.py +++ b/openpype/hooks/pre_create_extra_workdir_folders.py @@ -3,10 +3,13 @@ from openpype.pipeline.workfile import create_workdir_extra_folders -class AddLastWorkfileToLaunchArgs(PreLaunchHook): - """Add last workfile path to launch arguments. +class CreateWorkdirExtraFolders(PreLaunchHook): + """Create extra folders for the work directory. + + Based on setting `project_settings/global/tools/Workfiles/extra_folders` + profile filtering will decide whether extra folders need to be created in + the work directory. - This is not possible to do for all applications the same way. """ # Execute after workfile template copy diff --git a/openpype/hooks/pre_foundry_apps.py b/openpype/hooks/pre_foundry_apps.py index 2092d5025dc..21ec8e78814 100644 --- a/openpype/hooks/pre_foundry_apps.py +++ b/openpype/hooks/pre_foundry_apps.py @@ -7,7 +7,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook): Nuke is executed "like" python process so it is required to pass `CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console. 
- At the same time the newly created console won't create it's own stdout + At the same time the newly created console won't create its own stdout and stderr handlers so they should not be redirected to DEVNULL. """ @@ -18,7 +18,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook): def execute(self): # Change `creationflags` to CREATE_NEW_CONSOLE - # - on Windows will nuke create new window using it's console + # - on Windows nuke will create new window using its console # Set `stdout` and `stderr` to None so new created console does not # have redirected output to DEVNULL in build self.launch_context.kwargs.update({ diff --git a/openpype/host/dirmap.py b/openpype/host/dirmap.py index 1d084cccad5..42bf80ececc 100644 --- a/openpype/host/dirmap.py +++ b/openpype/host/dirmap.py @@ -2,7 +2,7 @@ Idea for current dirmap implementation was used from Maya where is possible to enter source and destination roots and maya will try each found source -in referenced file replace with each destionation paths. First path which +in referenced file replace with each destination paths. First path which exists is used. """ @@ -183,7 +183,7 @@ def _get_local_sync_dirmap(self): project_name, remote_site ) # dirmap has sense only with regular disk provider, in the workfile - # wont be root on cloud or sftp provider + # won't be root on cloud or sftp provider if remote_provider != "local_drive": remote_site = "studio" for root_name, active_site_dir in active_overrides.items(): diff --git a/openpype/host/host.py b/openpype/host/host.py index d2335c00621..630fb873a81 100644 --- a/openpype/host/host.py +++ b/openpype/host/host.py @@ -18,7 +18,7 @@ class HostBase(object): Compared to 'avalon' concept: What was before considered as functions in host implementation folder. 
The host implementation should primarily care about adding ability of creation - (mark subsets to be published) and optionaly about referencing published + (mark subsets to be published) and optionally about referencing published representations as containers. Host may need extend some functionality like working with workfiles @@ -129,9 +129,9 @@ def get_current_context(self): """Get current context information. This method should be used to get current context of host. Usage of - this method can be crutial for host implementations in DCCs where + this method can be crucial for host implementations in DCCs where can be opened multiple workfiles at one moment and change of context - can't be catched properly. + can't be caught properly. Default implementation returns values from 'legacy_io.Session'. diff --git a/openpype/host/interfaces.py b/openpype/host/interfaces.py index 999aefd2547..7c6057acf06 100644 --- a/openpype/host/interfaces.py +++ b/openpype/host/interfaces.py @@ -81,7 +81,7 @@ def validate_load_methods(host): @abstractmethod def get_containers(self): - """Retreive referenced containers from scene. + """Retrieve referenced containers from scene. This can be implemented in hosts where referencing can be used. @@ -191,7 +191,7 @@ def open_workfile(self, filepath): @abstractmethod def get_current_workfile(self): - """Retreive path to current opened file. + """Retrieve path to current opened file. Returns: str: Path to file which is currently opened. @@ -220,8 +220,8 @@ def work_root(self, session): Default implementation keeps workdir untouched. Warnings: - We must handle this modification with more sofisticated way because - this can't be called out of DCC so opening of last workfile + We must handle this modification with more sophisticated way + because this can't be called out of DCC so opening of last workfile (calculated before DCC is launched) is complicated. Also breaking defined work template is not a good idea. 
Only place where it's really used and can make sense is Maya. There @@ -302,7 +302,7 @@ def get_missing_publish_methods(host): required methods. Returns: - list[str]: Missing method implementations for new publsher + list[str]: Missing method implementations for new publisher workflow. """ diff --git a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx index 9b211207de9..5c1d1634398 100644 --- a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx +++ b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx @@ -504,7 +504,7 @@ function addItemAsLayerToComp(comp_id, item_id, found_comp){ * Args: * comp_id (int): id of target composition * item_id (int): FootageItem.id - * found_comp (CompItem, optional): to limit quering if + * found_comp (CompItem, optional): to limit querying if * comp already found previously */ var comp = found_comp || app.project.itemByID(comp_id); diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py index e5d6d9ed893..f094c7fa2a6 100644 --- a/openpype/hosts/aftereffects/api/ws_stub.py +++ b/openpype/hosts/aftereffects/api/ws_stub.py @@ -80,7 +80,7 @@ def get_metadata(self): Get complete stored JSON with metadata from AE.Metadata.Label field. - It contains containers loaded by any Loader OR instances creted + It contains containers loaded by any Loader OR instances created by Creator. Returns: diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index b1fa13acb9d..91cbfe524f9 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -24,7 +24,7 @@ PREVIEW_COLLECTIONS: Dict = dict() # This seems like a good value to keep the Qt app responsive and doesn't slow -# down Blender. At least on macOS I the interace of Blender gets very laggy if +# down Blender. At least on macOS I the interface of Blender gets very laggy if # you make it smaller. 
TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1 @@ -84,11 +84,11 @@ def __init__(self, callback, *args, **kwargs): self.kwargs = kwargs def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. """ print("Executing process in main thread") if self.done: diff --git a/openpype/hosts/blender/plugins/publish/extract_playblast.py b/openpype/hosts/blender/plugins/publish/extract_playblast.py index 8dc2f66c22a..196e75b8ccb 100644 --- a/openpype/hosts/blender/plugins/publish/extract_playblast.py +++ b/openpype/hosts/blender/plugins/publish/extract_playblast.py @@ -50,7 +50,7 @@ def process(self, instance): # get isolate objects list isolate = instance.data("isolate", None) - # get ouput path + # get output path stagingdir = self.staging_dir(instance) filename = instance.name path = os.path.join(stagingdir, filename) @@ -116,7 +116,6 @@ def process(self, instance): "frameStart": start, "frameEnd": end, "fps": fps, - "preview": True, "tags": tags, "camera_name": camera } diff --git a/openpype/hosts/celaction/hooks/pre_celaction_setup.py b/openpype/hosts/celaction/hooks/pre_celaction_setup.py index 62cebf99ed2..96e784875c2 100644 --- a/openpype/hosts/celaction/hooks/pre_celaction_setup.py +++ b/openpype/hosts/celaction/hooks/pre_celaction_setup.py @@ -38,8 +38,9 @@ def execute(self): ) path_to_cli = os.path.join(CELACTION_SCRIPTS_DIR, "publish_cli.py") - subproces_args = get_openpype_execute_args("run", path_to_cli) - openpype_executable = subproces_args.pop(0) + subprocess_args = get_openpype_execute_args("run", path_to_cli) + openpype_executable = subprocess_args.pop(0) + workfile_settings = self.get_workfile_settings() winreg.SetValueEx( hKey, @@ -49,20 +50,34 @@ def execute(self): 
openpype_executable ) - parameters = subproces_args + [ - "--currentFile", "*SCENE*", - "--chunk", "*CHUNK*", - "--frameStart", "*START*", - "--frameEnd", "*END*", - "--resolutionWidth", "*X*", - "--resolutionHeight", "*Y*" + # add required arguments for workfile path + parameters = subprocess_args + [ + "--currentFile", "*SCENE*" ] + # Add custom parameters from workfile settings + if "render_chunk" in workfile_settings["submission_overrides"]: + parameters += [ + "--chunk", "*CHUNK*" + ] + if "resolution" in workfile_settings["submission_overrides"]: + parameters += [ + "--resolutionWidth", "*X*", + "--resolutionHeight", "*Y*" + ] + if "frame_range" in workfile_settings["submission_overrides"]: + parameters += [ + "--frameStart", "*START*", + "--frameEnd", "*END*" + ] + winreg.SetValueEx( hKey, "SubmitParametersTitle", 0, winreg.REG_SZ, subprocess.list2cmdline(parameters) ) + self.log.debug(f"__ parameters: \"{parameters}\"") + # setting resolution parameters path_submit = "\\".join([ path_user_settings, "Dialogs", "SubmitOutput" @@ -135,3 +150,6 @@ def workfile_path(self): self.log.info(f"Workfile to open: \"{workfile_path}\"") return workfile_path + + def get_workfile_settings(self): + return self.data["project_settings"]["celaction"]["workfile"] diff --git a/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py b/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py index 43b81b83e7e..54dea15dffc 100644 --- a/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py +++ b/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py @@ -39,7 +39,7 @@ def process(self, context): passing_kwargs[key] = value if missing_kwargs: - raise RuntimeError("Missing arguments {}".format( + self.log.debug("Missing arguments {}".format( ", ".join( [f'"{key}"' for key in missing_kwargs] ) diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py index 6aca5c5ce61..ab713aed841 100644 --- 
a/openpype/hosts/flame/api/lib.py +++ b/openpype/hosts/flame/api/lib.py @@ -773,7 +773,7 @@ def __init__(self, path, logger=None): if logger: self.log = logger - # test if `dl_get_media_info` paht exists + # test if `dl_get_media_info` path exists self._validate_media_script_path() # derivate other feed variables @@ -993,7 +993,7 @@ def file_pattern(self, fpattern): def _validate_media_script_path(self): if not os.path.isfile(self.MEDIA_SCRIPT_PATH): - raise IOError("Media Scirpt does not exist: `{}`".format( + raise IOError("Media Script does not exist: `{}`".format( self.MEDIA_SCRIPT_PATH)) def _generate_media_info_file(self, fpath, feed_ext, feed_dir): diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py index 3a233899619..d6fbf750ba0 100644 --- a/openpype/hosts/flame/api/pipeline.py +++ b/openpype/hosts/flame/api/pipeline.py @@ -38,7 +38,7 @@ def install(): pyblish.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) - log.info("OpenPype Flame plug-ins registred ...") + log.info("OpenPype Flame plug-ins registered ...") # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index 983d7486b39..df8c1ac8872 100644 --- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -157,7 +157,7 @@ def create_row(self, layout, type_name, text, **kwargs): # convert label text to normal capitalized text with spaces label_text = self.camel_case_split(text) - # assign the new text to lable widget + # assign the new text to label widget label = QtWidgets.QLabel(label_text) label.setObjectName("LineLabel") @@ -345,8 +345,8 @@ class PublishableClip: "track": "sequence", } - # parents search patern - parents_search_patern = r"\{([a-z]*?)\}" + # parents search pattern + parents_search_pattern = r"\{([a-z]*?)\}" # 
default templates for non-ui use rename_default = False @@ -445,7 +445,7 @@ def convert(self): return self.current_segment def _populate_segment_default_data(self): - """ Populate default formating data from segment. """ + """ Populate default formatting data from segment. """ self.current_segment_default_data = { "_folder_": "shots", @@ -538,7 +538,7 @@ def _convert_to_marker_data(self): if not self.index_from_segment: self.count_steps *= self.rename_index - hierarchy_formating_data = {} + hierarchy_formatting_data = {} hierarchy_data = deepcopy(self.hierarchy_data) _data = self.current_segment_default_data.copy() if self.ui_inputs: @@ -552,7 +552,7 @@ def _convert_to_marker_data(self): # mark review layer if self.review_track and ( self.review_track not in self.review_track_default): - # if review layer is defined and not the same as defalut + # if review layer is defined and not the same as default self.review_layer = self.review_track # shot num calculate @@ -578,13 +578,13 @@ def _convert_to_marker_data(self): # fill up pythonic expresisons in hierarchy data for k, _v in hierarchy_data.items(): - hierarchy_formating_data[k] = _v["value"].format(**_data) + hierarchy_formatting_data[k] = _v["value"].format(**_data) else: # if no gui mode then just pass default data - hierarchy_formating_data = hierarchy_data + hierarchy_formatting_data = hierarchy_data tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formating_data + hierarchy_formatting_data ) tag_hierarchy_data.update({"heroTrack": True}) @@ -615,27 +615,27 @@ def _convert_to_marker_data(self): # in case track name and subset name is the same then add if self.subset_name == self.track_name: _hero_data["subset"] = self.subset - # assing data to return hierarchy data to tag + # assign data to return hierarchy data to tag tag_hierarchy_data = _hero_data break # add data to return data dict self.marker_data.update(tag_hierarchy_data) - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): 
+ def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): """ Solve marker data from hierarchy data and templates. """ # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) - clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) # remove shot from hierarchy data: is not needed anymore - hierarchy_formating_data.pop("shot") + hierarchy_formatting_data.pop("shot") return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, "parents": self.parents, - "hierarchyData": hierarchy_formating_data, + "hierarchyData": hierarchy_formatting_data, "subset": self.subset, "family": self.subset_family, "families": [self.family] @@ -650,17 +650,17 @@ def _convert_to_entity(self, type, template): type ) - # first collect formating data to use for formating template - formating_data = {} + # first collect formatting data to use for formatting template + formatting_data = {} for _k, _v in self.hierarchy_data.items(): value = _v["value"].format( **self.current_segment_default_data) - formating_data[_k] = value + formatting_data[_k] = value return { "entity_type": entity_type, "entity_name": template.format( - **formating_data + **formatting_data ) } @@ -668,9 +668,9 @@ def _create_parents(self): """ Create parents and return it in list. 
""" self.parents = [] - patern = re.compile(self.parents_search_patern) + pattern = re.compile(self.parents_search_pattern) - par_split = [(patern.findall(t).pop(), t) + par_split = [(pattern.findall(t).pop(), t) for t in self.hierarchy.split("/")] for type, template in par_split: @@ -902,22 +902,22 @@ def _rename_track_name(self, xml_track_data): ): return - formating_data = self._update_formating_data( + formatting_data = self._update_formatting_data( layerName=layer_name, layerUID=layer_uid ) name_obj.text = StringTemplate( self.layer_rename_template - ).format(formating_data) + ).format(formatting_data) - def _update_formating_data(self, **kwargs): - """ Updating formating data for layer rename + def _update_formatting_data(self, **kwargs): + """ Updating formatting data for layer rename Attributes: - key=value (optional): will be included to formating data + key=value (optional): will be included to formatting data as {key: value} Returns: - dict: anatomy context data for formating + dict: anatomy context data for formatting """ self.log.debug(">> self.clip_data: {}".format(self.clip_data)) clip_name_obj = self.clip_data.find("name") diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py index 4825ff43865..a74172c4056 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/openpype/hosts/flame/api/scripts/wiretap_com.py @@ -203,7 +203,7 @@ def _get_all_volumes(self): list: all available volumes in server Rises: - AttributeError: unable to get any volumes childs from server + AttributeError: unable to get any volumes children from server """ root = WireTapNodeHandle(self._server, "/volumes") children_num = WireTapInt(0) diff --git a/openpype/hosts/flame/api/utils.py b/openpype/hosts/flame/api/utils.py index fb8bdee42df..80a5c47e892 100644 --- a/openpype/hosts/flame/api/utils.py +++ b/openpype/hosts/flame/api/utils.py @@ -108,7 +108,7 @@ def _sync_utility_scripts(env=None): shutil.copy2(src, dst) 
except (PermissionError, FileExistsError) as msg: log.warning( - "Not able to coppy to: `{}`, Problem with: `{}`".format( + "Not able to copy to: `{}`, Problem with: `{}`".format( dst, msg ) diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/openpype/hosts/flame/hooks/pre_flame_setup.py index 713daf10317..8034885c478 100644 --- a/openpype/hosts/flame/hooks/pre_flame_setup.py +++ b/openpype/hosts/flame/hooks/pre_flame_setup.py @@ -153,7 +153,7 @@ def _get_flame_fps(self, fps_num): def _add_pythonpath(self): pythonpath = self.launch_context.env.get("PYTHONPATH") - # separate it explicity by `;` that is what we use in settings + # separate it explicitly by `;` that is what we use in settings new_pythonpath = self.flame_pythonpath.split(os.pathsep) new_pythonpath += pythonpath.split(os.pathsep) diff --git a/openpype/hosts/flame/plugins/create/create_shot_clip.py b/openpype/hosts/flame/plugins/create/create_shot_clip.py index 4fb041a4b26..b01354c3137 100644 --- a/openpype/hosts/flame/plugins/create/create_shot_clip.py +++ b/openpype/hosts/flame/plugins/create/create_shot_clip.py @@ -209,7 +209,7 @@ def get_gui_inputs(self): "type": "QComboBox", "label": "Subset Name", "target": "ui", - "toolTip": "chose subset name patern, if [ track name ] is selected, name of track layer will be used", # noqa + "toolTip": "chose subset name pattern, if [ track name ] is selected, name of track layer will be used", # noqa "order": 0}, "subsetFamily": { "value": ["plate", "take"], diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index 25b31c94a32..dfb2d2b6f00 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -61,9 +61,9 @@ def load(self, context, name, namespace, options): self.layer_rename_template = self.layer_rename_template.replace( "output", "representation") - formating_data = deepcopy(context["representation"]["context"]) + formatting_data = 
deepcopy(context["representation"]["context"]) clip_name = StringTemplate(self.clip_name_template).format( - formating_data) + formatting_data) # convert colorspace with ocio to flame mapping # in imageio flame section @@ -88,7 +88,7 @@ def load(self, context, name, namespace, options): "version": "v{:0>3}".format(version_name), "layer_rename_template": self.layer_rename_template, "layer_rename_patterns": self.layer_rename_patterns, - "context_data": formating_data + "context_data": formatting_data } self.log.debug(pformat( loading_context diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py index 86bc0f8f1e8..5c5a77f0d09 100644 --- a/openpype/hosts/flame/plugins/load/load_clip_batch.py +++ b/openpype/hosts/flame/plugins/load/load_clip_batch.py @@ -58,11 +58,11 @@ def load(self, context, name, namespace, options): self.layer_rename_template = self.layer_rename_template.replace( "output", "representation") - formating_data = deepcopy(context["representation"]["context"]) - formating_data["batch"] = self.batch.name.get_value() + formatting_data = deepcopy(context["representation"]["context"]) + formatting_data["batch"] = self.batch.name.get_value() clip_name = StringTemplate(self.clip_name_template).format( - formating_data) + formatting_data) # convert colorspace with ocio to flame mapping # in imageio flame section @@ -88,7 +88,7 @@ def load(self, context, name, namespace, options): "version": "v{:0>3}".format(version_name), "layer_rename_template": self.layer_rename_template, "layer_rename_patterns": self.layer_rename_patterns, - "context_data": formating_data + "context_data": formatting_data } self.log.debug(pformat( loading_context diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 76d48dded24..23fdf5e7851 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ 
b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -203,7 +203,7 @@ def _get_comment_attributes(self, segment): self._get_xml_preset_attrs( attributes, split) - # add xml overides resolution to instance data + # add xml overrides resolution to instance data xml_overrides = attributes["xml_overrides"] if xml_overrides.get("width"): attributes.update({ @@ -284,7 +284,7 @@ def _get_head_tail(self, clip_data, otio_clip, handle_start, handle_end): self.log.debug("__ head: `{}`".format(head)) self.log.debug("__ tail: `{}`".format(tail)) - # HACK: it is here to serve for versions bellow 2021.1 + # HACK: it is here to serve for versions below 2021.1 if not any([head, tail]): retimed_attributes = get_media_range_with_retimes( otio_clip, handle_start, handle_end) diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index 5082217db06..a7979ab4d53 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -227,7 +227,7 @@ def process(self, instance): self.hide_others( exporting_clip, segment_name, s_track_name) - # change name patern + # change name pattern name_patern_xml = ( "__{}.").format( unique_name) @@ -358,7 +358,7 @@ def process(self, instance): representation_data["stagingDir"] = n_stage_dir files = n_files - # add files to represetation but add + # add files to representation but add # imagesequence as list if ( # first check if path in files is not mov extension diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py index 4d45f67ded2..4f3945bb0fd 100644 --- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py +++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py @@ -50,7 +50,7 @@ def process(self, instance): self._load_clip_to_context(instance, bgroup) def 
_add_nodes_to_batch_with_links(self, instance, task_data, batch_group): - # get write file node properties > OrederDict because order does mater + # get write file node properties > OrederDict because order does matter write_pref_data = self._get_write_prefs(instance, task_data) batch_nodes = [ diff --git a/openpype/hosts/fusion/api/action.py b/openpype/hosts/fusion/api/action.py index 17509209504..ff5dd14caa3 100644 --- a/openpype/hosts/fusion/api/action.py +++ b/openpype/hosts/fusion/api/action.py @@ -6,12 +6,13 @@ class SelectInvalidAction(pyblish.api.Action): - """Select invalid nodes in Maya when plug-in failed. + """Select invalid nodes in Fusion when plug-in failed. To retrieve the invalid nodes this assumes a static `get_invalid()` method is available on the plugin. """ + label = "Select invalid" on = "failed" # This action is only available on a failed plug-in icon = "search" # Icon from Awesome Icon @@ -31,8 +32,10 @@ def process(self, context, plugin): if isinstance(invalid_nodes, (list, tuple)): invalid.extend(invalid_nodes) else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") + self.log.warning( + "Plug-in returned to be invalid, " + "but has no selectable nodes." 
+ ) if not invalid: # Assume relevant comp is current comp and clear selection @@ -51,4 +54,6 @@ def process(self, context, plugin): for tool in invalid: flow.Select(tool, True) names.add(tool.Name) - self.log.info("Selecting invalid tools: %s" % ", ".join(sorted(names))) + self.log.info( + "Selecting invalid tools: %s" % ", ".join(sorted(names)) + ) diff --git a/openpype/hosts/fusion/api/menu.py b/openpype/hosts/fusion/api/menu.py index 343f5f803ad..92f38a64c2a 100644 --- a/openpype/hosts/fusion/api/menu.py +++ b/openpype/hosts/fusion/api/menu.py @@ -6,7 +6,6 @@ from openpype.style import load_stylesheet from openpype.lib import register_event_callback from openpype.hosts.fusion.scripts import ( - set_rendermode, duplicate_with_inputs, ) from openpype.hosts.fusion.api.lib import ( @@ -60,7 +59,6 @@ def __init__(self, *args, **kwargs): publish_btn = QtWidgets.QPushButton("Publish...", self) manager_btn = QtWidgets.QPushButton("Manage...", self) libload_btn = QtWidgets.QPushButton("Library...", self) - rendermode_btn = QtWidgets.QPushButton("Set render mode...", self) set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self) set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self) duplicate_with_inputs_btn = QtWidgets.QPushButton( @@ -91,7 +89,6 @@ def __init__(self, *args, **kwargs): layout.addWidget(set_framerange_btn) layout.addWidget(set_resolution_btn) - layout.addWidget(rendermode_btn) layout.addSpacing(20) @@ -108,7 +105,6 @@ def __init__(self, *args, **kwargs): load_btn.clicked.connect(self.on_load_clicked) manager_btn.clicked.connect(self.on_manager_clicked) libload_btn.clicked.connect(self.on_libload_clicked) - rendermode_btn.clicked.connect(self.on_rendermode_clicked) duplicate_with_inputs_btn.clicked.connect( self.on_duplicate_with_inputs_clicked ) @@ -162,15 +158,6 @@ def on_manager_clicked(self): def on_libload_clicked(self): host_tools.show_library_loader() - def on_rendermode_clicked(self): - if self.render_mode_widget is None: - 
window = set_rendermode.SetRenderMode() - window.setStyleSheet(load_stylesheet()) - window.show() - self.render_mode_widget = window - else: - self.render_mode_widget.show() - def on_duplicate_with_inputs_clicked(self): duplicate_with_inputs.duplicate_with_input_connections() diff --git a/openpype/hosts/fusion/plugins/create/create_saver.py b/openpype/hosts/fusion/plugins/create/create_saver.py index e581bac20ff..56085b0a06b 100644 --- a/openpype/hosts/fusion/plugins/create/create_saver.py +++ b/openpype/hosts/fusion/plugins/create/create_saver.py @@ -4,29 +4,34 @@ from openpype.hosts.fusion.api import ( get_current_comp, - comp_lock_and_undo_chunk + comp_lock_and_undo_chunk, ) -from openpype.lib import BoolDef +from openpype.lib import ( + BoolDef, + EnumDef, +) from openpype.pipeline import ( legacy_io, Creator, - CreatedInstance + CreatedInstance, +) +from openpype.client import ( + get_asset_by_name, ) -from openpype.client import get_asset_by_name class CreateSaver(Creator): identifier = "io.openpype.creators.fusion.saver" - name = "saver" - label = "Saver" + label = "Render (saver)" + name = "render" family = "render" - default_variants = ["Main"] - + default_variants = ["Main", "Mask"] description = "Fusion Saver to generate image sequence" - def create(self, subset_name, instance_data, pre_create_data): + instance_attributes = ["reviewable"] + def create(self, subset_name, instance_data, pre_create_data): # TODO: Add pre_create attributes to choose file format? 
file_format = "OpenEXRFormat" @@ -58,7 +63,8 @@ def create(self, subset_name, instance_data, pre_create_data): family=self.family, subset_name=subset_name, data=instance_data, - creator=self) + creator=self, + ) # Insert the transient data instance.transient_data["tool"] = saver @@ -68,11 +74,9 @@ def create(self, subset_name, instance_data, pre_create_data): return instance def collect_instances(self): - comp = get_current_comp() tools = comp.GetToolList(False, "Saver").values() for tool in tools: - data = self.get_managed_tool_data(tool) if not data: data = self._collect_unmanaged_saver(tool) @@ -90,7 +94,6 @@ def get_icon(self): def update_instances(self, update_list): for created_inst, _changes in update_list: - new_data = created_inst.data_to_store() tool = created_inst.transient_data["tool"] self._update_tool_with_data(tool, new_data) @@ -139,7 +142,6 @@ def _update_tool_with_data(self, tool, data): tool.SetAttrs({"TOOLS_Name": subset}) def _collect_unmanaged_saver(self, tool): - # TODO: this should not be done this way - this should actually # get the data as stored on the tool explicitly (however) # that would disallow any 'regular saver' to be collected @@ -153,8 +155,7 @@ def _collect_unmanaged_saver(self, tool): asset = legacy_io.Session["AVALON_ASSET"] task = legacy_io.Session["AVALON_TASK"] - asset_doc = get_asset_by_name(project_name=project, - asset_name=asset) + asset_doc = get_asset_by_name(project_name=project, asset_name=asset) path = tool["Clip"][comp.TIME_UNDEFINED] fname = os.path.basename(path) @@ -178,21 +179,20 @@ def _collect_unmanaged_saver(self, tool): "variant": variant, "active": not passthrough, "family": self.family, - # Unique identifier for instance and this creator "id": "pyblish.avalon.instance", - "creator_identifier": self.identifier + "creator_identifier": self.identifier, } def get_managed_tool_data(self, tool): """Return data of the tool if it matches creator identifier""" - data = tool.GetData('openpype') + data = 
tool.GetData("openpype") if not isinstance(data, dict): return required = { "id": "pyblish.avalon.instance", - "creator_identifier": self.identifier + "creator_identifier": self.identifier, } for key, value in required.items(): if key not in data or data[key] != value: @@ -205,11 +205,40 @@ def get_managed_tool_data(self, tool): return data + def get_pre_create_attr_defs(self): + """Settings for create page""" + attr_defs = [ + self._get_render_target_enum(), + self._get_reviewable_bool(), + ] + return attr_defs + def get_instance_attr_defs(self): - return [ - BoolDef( - "review", - default=True, - label="Review" - ) + """Settings for publish page""" + attr_defs = [ + self._get_render_target_enum(), + self._get_reviewable_bool(), ] + return attr_defs + + # These functions below should be moved to another file + # so it can be used by other plugins. plugin.py ? + + def _get_render_target_enum(self): + rendering_targets = { + "local": "Local machine rendering", + "frames": "Use existing frames", + } + if "farm_rendering" in self.instance_attributes: + rendering_targets["farm"] = "Farm rendering" + + return EnumDef( + "render_target", items=rendering_targets, label="Render target" + ) + + def _get_reviewable_bool(self): + return BoolDef( + "review", + default=("reviewable" in self.instance_attributes), + label="Review", + ) diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py index 3b14f022e51..f83ab433ee5 100644 --- a/openpype/hosts/fusion/plugins/load/actions.py +++ b/openpype/hosts/fusion/plugins/load/actions.py @@ -72,8 +72,7 @@ def load(self, context, name, namespace, data): return # Include handles - handles = version_data.get("handles", 0) - start -= handles - end += handles + start -= version_data.get("handleStart", 0) + end += version_data.get("handleEnd", 0) lib.update_frame_range(start, end) diff --git a/openpype/hosts/fusion/plugins/publish/collect_expected_frames.py 
b/openpype/hosts/fusion/plugins/publish/collect_expected_frames.py new file mode 100644 index 00000000000..0ba777629fa --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_expected_frames.py @@ -0,0 +1,50 @@ +import pyblish.api +from openpype.pipeline import publish +import os + + +class CollectFusionExpectedFrames( + pyblish.api.InstancePlugin, publish.ColormanagedPyblishPluginMixin +): + """Collect all frames needed to publish expected frames""" + + order = pyblish.api.CollectorOrder + 0.5 + label = "Collect Expected Frames" + hosts = ["fusion"] + families = ["render"] + + def process(self, instance): + context = instance.context + + frame_start = context.data["frameStartHandle"] + frame_end = context.data["frameEndHandle"] + path = instance.data["path"] + output_dir = instance.data["outputDir"] + + basename = os.path.basename(path) + head, ext = os.path.splitext(basename) + files = [ + f"{head}{str(frame).zfill(4)}{ext}" + for frame in range(frame_start, frame_end + 1) + ] + repre = { + "name": ext[1:], + "ext": ext[1:], + "frameStart": f"%0{len(str(frame_end))}d" % frame_start, + "files": files, + "stagingDir": output_dir, + } + + self.set_representation_colorspace( + representation=repre, + context=context, + ) + + # review representation + if instance.data.get("review", False): + repre["tags"] = ["review"] + + # add the repre to the instance + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(repre) diff --git a/openpype/hosts/fusion/plugins/publish/collect_render_target.py b/openpype/hosts/fusion/plugins/publish/collect_render_target.py deleted file mode 100644 index 39017f32e02..00000000000 --- a/openpype/hosts/fusion/plugins/publish/collect_render_target.py +++ /dev/null @@ -1,44 +0,0 @@ -import pyblish.api - - -class CollectFusionRenderMode(pyblish.api.InstancePlugin): - """Collect current comp's render Mode - - Options: - local - farm - - Note that this value is set 
for each comp separately. When you save the - comp this information will be stored in that file. If for some reason the - available tool does not visualize which render mode is set for the - current comp, please run the following line in the console (Py2) - - comp.GetData("openpype.rendermode") - - This will return the name of the current render mode as seen above under - Options. - - """ - - order = pyblish.api.CollectorOrder + 0.4 - label = "Collect Render Mode" - hosts = ["fusion"] - families = ["render"] - - def process(self, instance): - """Collect all image sequence tools""" - options = ["local", "farm"] - - comp = instance.context.data.get("currentComp") - if not comp: - raise RuntimeError("No comp previously collected, unable to " - "retrieve Fusion version.") - - rendermode = comp.GetData("openpype.rendermode") or "local" - assert rendermode in options, "Must be supported render mode" - - self.log.info("Render mode: {0}".format(rendermode)) - - # Append family - family = "render.{0}".format(rendermode) - instance.data["families"].append(family) diff --git a/openpype/hosts/fusion/plugins/publish/collect_renders.py b/openpype/hosts/fusion/plugins/publish/collect_renders.py new file mode 100644 index 00000000000..7f38e68447d --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_renders.py @@ -0,0 +1,25 @@ +import pyblish.api + + +class CollectFusionRenders(pyblish.api.InstancePlugin): + """Collect current saver node's render Mode + + Options: + local (Render locally) + frames (Use existing frames) + + """ + + order = pyblish.api.CollectorOrder + 0.4 + label = "Collect Renders" + hosts = ["fusion"] + families = ["render"] + + def process(self, instance): + render_target = instance.data["render_target"] + family = instance.data["family"] + + # add targeted family to families + instance.data["families"].append( + "{}.{}".format(family, render_target) + ) diff --git a/openpype/hosts/fusion/plugins/publish/extract_render_local.py 
b/openpype/hosts/fusion/plugins/publish/extract_render_local.py new file mode 100644 index 00000000000..5a0140c5258 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/extract_render_local.py @@ -0,0 +1,109 @@ +import logging +import contextlib +import pyblish.api +from openpype.hosts.fusion.api import comp_lock_and_undo_chunk + + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def enabled_savers(comp, savers): + """Enable only the `savers` in Comp during the context. + + Any Saver tool in the passed composition that is not in the savers list + will be set to passthrough during the context. + + Args: + comp (object): Fusion composition object. + savers (list): List of Saver tool objects. + + """ + passthrough_key = "TOOLB_PassThrough" + original_states = {} + enabled_save_names = {saver.Name for saver in savers} + try: + all_savers = comp.GetToolList(False, "Saver").values() + for saver in all_savers: + original_state = saver.GetAttrs()[passthrough_key] + original_states[saver] = original_state + + # The passthrough state we want to set (passthrough != enabled) + state = saver.Name not in enabled_save_names + if state != original_state: + saver.SetAttrs({passthrough_key: state}) + yield + finally: + for saver, original_state in original_states.items(): + saver.SetAttrs({"TOOLB_PassThrough": original_state}) + + +class FusionRenderLocal(pyblish.api.InstancePlugin): + """Render the current Fusion composition locally.""" + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Render Local" + hosts = ["fusion"] + families = ["render.local"] + + def process(self, instance): + context = instance.context + + # Start render + self.render_once(context) + + # Log render status + self.log.info( + "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format( + nm=instance.data["name"], + ast=instance.data["asset"], + tsk=instance.data["task"], + ) + ) + + def render_once(self, context): + """Render context comp only once, even with more render 
instances""" + + # This plug-in assumes all render nodes get rendered at the same time + # to speed up the rendering. The check below makes sure that we only + # execute the rendering once and not for each instance. + key = f"__hasRun{self.__class__.__name__}" + + savers_to_render = [ + # Get the saver tool from the instance + instance[0] for instance in context if + # Only active instances + instance.data.get("publish", True) and + # Only render.local instances + "render.local" in instance.data["families"] + ] + + if key not in context.data: + # We initialize as false to indicate it wasn't successful yet + # so we can keep track of whether Fusion succeeded + context.data[key] = False + + current_comp = context.data["currentComp"] + frame_start = context.data["frameStartHandle"] + frame_end = context.data["frameEndHandle"] + + self.log.info("Starting Fusion render") + self.log.info(f"Start frame: {frame_start}") + self.log.info(f"End frame: {frame_end}") + saver_names = ", ".join(saver.Name for saver in savers_to_render) + self.log.info(f"Rendering tools: {saver_names}") + + with comp_lock_and_undo_chunk(current_comp): + with enabled_savers(current_comp, savers_to_render): + result = current_comp.Render( + { + "Start": frame_start, + "End": frame_end, + "Wait": True, + } + ) + + context.data[key] = bool(result) + + if context.data[key] is False: + raise RuntimeError("Comp render failed") diff --git a/openpype/hosts/fusion/plugins/publish/render_local.py b/openpype/hosts/fusion/plugins/publish/render_local.py deleted file mode 100644 index 7d5f1a40c72..00000000000 --- a/openpype/hosts/fusion/plugins/publish/render_local.py +++ /dev/null @@ -1,100 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.fusion.api import comp_lock_and_undo_chunk - - -class Fusionlocal(pyblish.api.InstancePlugin, - publish.ColormanagedPyblishPluginMixin): - """Render the current Fusion composition locally. 
- - Extract the result of savers by starting a comp render - This will run the local render of Fusion. - - """ - - order = pyblish.api.ExtractorOrder - 0.1 - label = "Render Local" - hosts = ["fusion"] - families = ["render.local"] - - def process(self, instance): - context = instance.context - - # Start render - self.render_once(context) - - # Log render status - self.log.info( - "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format( - nm=instance.data["name"], - ast=instance.data["asset"], - tsk=instance.data["task"], - ) - ) - - frame_start = context.data["frameStartHandle"] - frame_end = context.data["frameEndHandle"] - path = instance.data["path"] - output_dir = instance.data["outputDir"] - - basename = os.path.basename(path) - head, ext = os.path.splitext(basename) - files = [ - f"{head}{str(frame).zfill(4)}{ext}" - for frame in range(frame_start, frame_end + 1) - ] - repre = { - "name": ext[1:], - "ext": ext[1:], - "frameStart": f"%0{len(str(frame_end))}d" % frame_start, - "files": files, - "stagingDir": output_dir, - } - - self.set_representation_colorspace( - representation=repre, - context=context, - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - instance.data["representations"].append(repre) - - # review representation - if instance.data.get("review", False): - repre["tags"] = ["review", "ftrackreview"] - - def render_once(self, context): - """Render context comp only once, even with more render instances""" - - # This plug-in assumes all render nodes get rendered at the same time - # to speed up the rendering. The check below makes sure that we only - # execute the rendering once and not for each instance. 
- key = f"__hasRun{self.__class__.__name__}" - if key not in context.data: - # We initialize as false to indicate it wasn't successful yet - # so we can keep track of whether Fusion succeeded - context.data[key] = False - - current_comp = context.data["currentComp"] - frame_start = context.data["frameStartHandle"] - frame_end = context.data["frameEndHandle"] - - self.log.info("Starting Fusion render") - self.log.info(f"Start frame: {frame_start}") - self.log.info(f"End frame: {frame_end}") - - with comp_lock_and_undo_chunk(current_comp): - result = current_comp.Render( - { - "Start": frame_start, - "End": frame_end, - "Wait": True, - } - ) - - context.data[key] = bool(result) - - if context.data[key] is False: - raise RuntimeError("Comp render failed") diff --git a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py index ba943abacb0..8a91f23578c 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py +++ b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py @@ -14,22 +14,19 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin): """ order = pyblish.api.ValidatorOrder - actions = [RepairAction] label = "Validate Create Folder Checked" families = ["render"] hosts = ["fusion"] - actions = [SelectInvalidAction] + actions = [RepairAction, SelectInvalidAction] @classmethod def get_invalid(cls, instance): - active = instance.data.get("active", instance.data.get("publish")) - if not active: - return [] - tool = instance[0] create_dir = tool.GetInput("CreateDir") if create_dir == 0.0: - cls.log.error("%s has Create Folder turned off" % instance[0].Name) + cls.log.error( + "%s has Create Folder turned off" % instance[0].Name + ) return [tool] def process(self, instance): @@ -37,7 +34,8 @@ def process(self, instance): if invalid: raise PublishValidationError( "Found Saver with Create Folder During Render checked off", - 
title=self.label) + title=self.label, + ) @classmethod def repair(cls, instance): diff --git a/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py b/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py new file mode 100644 index 00000000000..c208b8ef159 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py @@ -0,0 +1,78 @@ +import os +import pyblish.api + +from openpype.pipeline.publish import RepairAction +from openpype.pipeline import PublishValidationError + +from openpype.hosts.fusion.api.action import SelectInvalidAction + + +class ValidateLocalFramesExistence(pyblish.api.InstancePlugin): + """Checks if files for savers that's set + to publish expected frames exists + """ + + order = pyblish.api.ValidatorOrder + label = "Validate Expected Frames Exists" + families = ["render"] + hosts = ["fusion"] + actions = [RepairAction, SelectInvalidAction] + + @classmethod + def get_invalid(cls, instance, non_existing_frames=None): + if non_existing_frames is None: + non_existing_frames = [] + + if instance.data.get("render_target") == "frames": + tool = instance[0] + + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + path = instance.data["path"] + output_dir = instance.data["outputDir"] + + basename = os.path.basename(path) + head, ext = os.path.splitext(basename) + files = [ + f"{head}{str(frame).zfill(4)}{ext}" + for frame in range(frame_start, frame_end + 1) + ] + + for file in files: + if not os.path.exists(os.path.join(output_dir, file)): + cls.log.error( + f"Missing file: {os.path.join(output_dir, file)}" + ) + non_existing_frames.append(file) + + if len(non_existing_frames) > 0: + cls.log.error(f"Some of {tool.Name}'s files does not exist") + return [tool] + + def process(self, instance): + non_existing_frames = [] + invalid = self.get_invalid(instance, non_existing_frames) + if invalid: + raise PublishValidationError( + "{} is set to publish 
existing frames but " + "some frames are missing. " + "The missing file(s) are:\n\n{}".format( + invalid[0].Name, + "\n\n".join(non_existing_frames), + ), + title=self.label, + ) + + @classmethod + def repair(cls, instance): + invalid = cls.get_invalid(instance) + if invalid: + tool = invalid[0] + + # Change render target to local to render locally + tool.SetData("openpype.creator_attributes.render_target", "local") + + cls.log.info( + f"Reload the publisher and {tool.Name} " + "will be set to render locally" + ) diff --git a/openpype/hosts/fusion/scripts/set_rendermode.py b/openpype/hosts/fusion/scripts/set_rendermode.py deleted file mode 100644 index 9d2bfef3107..00000000000 --- a/openpype/hosts/fusion/scripts/set_rendermode.py +++ /dev/null @@ -1,112 +0,0 @@ -from qtpy import QtWidgets -import qtawesome -from openpype.hosts.fusion.api import get_current_comp - - -_help = {"local": "Render the comp on your own machine and publish " - "it from that the destination folder", - "farm": "Submit a Fusion render job to a Render farm to use all other" - " computers and add a publish job"} - - -class SetRenderMode(QtWidgets.QWidget): - - def __init__(self, parent=None): - QtWidgets.QWidget.__init__(self, parent) - - self._comp = get_current_comp() - self._comp_name = self._get_comp_name() - - self.setWindowTitle("Set Render Mode") - self.setFixedSize(300, 175) - - layout = QtWidgets.QVBoxLayout() - - # region comp info - comp_info_layout = QtWidgets.QHBoxLayout() - - update_btn = QtWidgets.QPushButton(qtawesome.icon("fa.refresh", - color="white"), "") - update_btn.setFixedWidth(25) - update_btn.setFixedHeight(25) - - comp_information = QtWidgets.QLineEdit() - comp_information.setEnabled(False) - - comp_info_layout.addWidget(comp_information) - comp_info_layout.addWidget(update_btn) - # endregion comp info - - # region modes - mode_options = QtWidgets.QComboBox() - mode_options.addItems(_help.keys()) - - mode_information = QtWidgets.QTextEdit() - 
mode_information.setReadOnly(True) - # endregion modes - - accept_btn = QtWidgets.QPushButton("Accept") - - layout.addLayout(comp_info_layout) - layout.addWidget(mode_options) - layout.addWidget(mode_information) - layout.addWidget(accept_btn) - - self.setLayout(layout) - - self.comp_information = comp_information - self.update_btn = update_btn - - self.mode_options = mode_options - self.mode_information = mode_information - - self.accept_btn = accept_btn - - self.connections() - self.update() - - # Force updated render mode help text - self._update_rendermode_info() - - def connections(self): - """Build connections between code and buttons""" - - self.update_btn.clicked.connect(self.update) - self.accept_btn.clicked.connect(self._set_comp_rendermode) - self.mode_options.currentIndexChanged.connect( - self._update_rendermode_info) - - def update(self): - """Update all information in the UI""" - - self._comp = get_current_comp() - self._comp_name = self._get_comp_name() - self.comp_information.setText(self._comp_name) - - # Update current comp settings - mode = self._get_comp_rendermode() - index = self.mode_options.findText(mode) - self.mode_options.setCurrentIndex(index) - - def _update_rendermode_info(self): - rendermode = self.mode_options.currentText() - self.mode_information.setText(_help[rendermode]) - - def _get_comp_name(self): - return self._comp.GetAttrs("COMPS_Name") - - def _get_comp_rendermode(self): - return self._comp.GetData("openpype.rendermode") or "local" - - def _set_comp_rendermode(self): - rendermode = self.mode_options.currentText() - self._comp.SetData("openpype.rendermode", rendermode) - - self._comp.Print("Updated render mode to '%s'\n" % rendermode) - self.hide() - - def _validation(self): - ui_mode = self.mode_options.currentText() - comp_mode = self._get_comp_rendermode() - - return comp_mode == ui_mode diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md index b39f900886a..12f21f551a2 100644 --- 
a/openpype/hosts/harmony/api/README.md +++ b/openpype/hosts/harmony/api/README.md @@ -432,11 +432,11 @@ copy_files = """function copyFile(srcFilename, dstFilename) import_files = """function %s_import_files() { - var PNGTransparencyMode = 0; // Premultiplied wih Black - var TGATransparencyMode = 0; // Premultiplied wih Black - var SGITransparencyMode = 0; // Premultiplied wih Black + var PNGTransparencyMode = 0; // Premultiplied with Black + var TGATransparencyMode = 0; // Premultiplied with Black + var SGITransparencyMode = 0; // Premultiplied with Black var LayeredPSDTransparencyMode = 1; // Straight - var FlatPSDTransparencyMode = 2; // Premultiplied wih White + var FlatPSDTransparencyMode = 2; // Premultiplied with White function getUniqueColumnName( column_prefix ) { diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/openpype/hosts/harmony/api/TB_sceneOpened.js index e7cd555332e..a284a6ec5cf 100644 --- a/openpype/hosts/harmony/api/TB_sceneOpened.js +++ b/openpype/hosts/harmony/api/TB_sceneOpened.js @@ -142,10 +142,10 @@ function Client() { }; /** - * Process recieved request. This will eval recieved function and produce + * Process received request. This will eval received function and produce * results. * @function - * @param {object} request - recieved request JSON + * @param {object} request - received request JSON * @return {object} result of evaled function. */ self.processRequest = function(request) { @@ -245,7 +245,7 @@ function Client() { var request = JSON.parse(to_parse); var mid = request.message_id; // self.logDebug('[' + mid + '] - Request: ' + '\n' + JSON.stringify(request)); - self.logDebug('[' + mid + '] Recieved.'); + self.logDebug('[' + mid + '] Received.'); request.result = self.processRequest(request); self.logDebug('[' + mid + '] Processing done.'); @@ -286,8 +286,8 @@ function Client() { /** Harmony 21.1 doesn't have QDataStream anymore. 
This means we aren't able to write bytes into QByteArray so we had - modify how content lenght is sent do the server. - Content lenght is sent as string of 8 char convertible into integer + modify how content length is sent do the server. + Content length is sent as string of 8 char convertible into integer (instead of 0x00000001[4 bytes] > "000000001"[8 bytes]) */ var codec_name = new QByteArray().append("UTF-8"); @@ -476,6 +476,25 @@ function start() { action.triggered.connect(self.onSubsetManage); } + /** + * Set scene settings from DB to the scene + */ + self.onSetSceneSettings = function() { + app.avalonClient.send( + { + "module": "openpype.hosts.harmony.api", + "method": "ensure_scene_settings", + "args": [] + }, + false + ); + }; + // add Set Scene Settings + if (app.avalonMenu == null) { + action = menu.addAction('Set Scene Settings...'); + action.triggered.connect(self.onSetSceneSettings); + } + /** * Show Experimental dialog */ diff --git a/openpype/hosts/harmony/api/lib.py b/openpype/hosts/harmony/api/lib.py index e1e77bfbee0..8048705dc8d 100644 --- a/openpype/hosts/harmony/api/lib.py +++ b/openpype/hosts/harmony/api/lib.py @@ -394,7 +394,7 @@ def get_scene_data(): "function": "AvalonHarmony.getSceneData" })["result"] except json.decoder.JSONDecodeError: - # Means no sceen metadata has been made before. + # Means no scene metadata has been made before. return {} except KeyError: # Means no existing scene metadata has been made. @@ -465,7 +465,7 @@ def imprint(node_id, data, remove=False): Example: >>> from openpype.hosts.harmony.api import lib >>> node = "Top/Display" - >>> data = {"str": "someting", "int": 1, "float": 0.32, "bool": True} + >>> data = {"str": "something", "int": 1, "float": 0.32, "bool": True} >>> lib.imprint(layer, data) """ scene_data = get_scene_data() @@ -550,7 +550,7 @@ def save_scene(): method prevents this double request and safely saves the scene. 
""" - # Need to turn off the backgound watcher else the communication with + # Need to turn off the background watcher else the communication with # the server gets spammed with two requests at the same time. scene_path = send( {"function": "AvalonHarmony.saveScene"})["result"] diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 686770b64ef..285ee806a1f 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -142,7 +142,7 @@ def application_launch(event): harmony.send({"script": script}) inject_avalon_js() - ensure_scene_settings() + # ensure_scene_settings() check_inventory() diff --git a/openpype/hosts/harmony/api/server.py b/openpype/hosts/harmony/api/server.py index ecf339d91bd..04048e5c84d 100644 --- a/openpype/hosts/harmony/api/server.py +++ b/openpype/hosts/harmony/api/server.py @@ -61,7 +61,7 @@ def process_request(self, request): "module": (str), # Module of method. "method" (str), # Name of method in module. "args" (list), # Arguments to pass to method. - "kwargs" (dict), # Keywork arguments to pass to method. + "kwargs" (dict), # Keyword arguments to pass to method. "reply" (bool), # Optional wait for method completion. 
} """ diff --git a/openpype/hosts/harmony/plugins/publish/extract_render.py b/openpype/hosts/harmony/plugins/publish/extract_render.py index c29864bb286..38b09902c15 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_render.py +++ b/openpype/hosts/harmony/plugins/publish/extract_render.py @@ -25,8 +25,9 @@ def process(self, instance): application_path = instance.context.data.get("applicationPath") scene_path = instance.context.data.get("scenePath") frame_rate = instance.context.data.get("frameRate") - frame_start = instance.context.data.get("frameStart") - frame_end = instance.context.data.get("frameEnd") + # real value from timeline + frame_start = instance.context.data.get("frameStartHandle") + frame_end = instance.context.data.get("frameEndHandle") audio_path = instance.context.data.get("audioPath") if audio_path and os.path.exists(audio_path): @@ -55,9 +56,13 @@ def process(self, instance): # Execute rendering. Ignoring error cause Harmony returns error code # always. - self.log.info(f"running [ {application_path} -batch {scene_path}") + + args = [application_path, "-batch", + "-frames", str(frame_start), str(frame_end), + "-scene", scene_path] + self.log.info(f"running [ {application_path} {' '.join(args)}") proc = subprocess.Popen( - [application_path, "-batch", scene_path], + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE diff --git a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py index 936533abd6b..6e4c6955e49 100644 --- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py @@ -60,7 +60,8 @@ def process(self, instance): # which is available on 'context.data["assetEntity"]' # - the same approach can be used in 'ValidateSceneSettingsRepair' expected_settings = harmony.get_asset_settings() - self.log.info("scene settings from 
DB:".format(expected_settings)) + self.log.info("scene settings from DB:{}".format(expected_settings)) + expected_settings.pop("entityType") # not useful for the validation expected_settings = _update_frames(dict.copy(expected_settings)) expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\ @@ -68,21 +69,32 @@ def process(self, instance): if (any(re.search(pattern, os.getenv('AVALON_TASK')) for pattern in self.skip_resolution_check)): + self.log.info("Skipping resolution check because of " + "task name and pattern {}".format( + self.skip_resolution_check)) expected_settings.pop("resolutionWidth") expected_settings.pop("resolutionHeight") - entity_type = expected_settings.get("entityType") - if (any(re.search(pattern, entity_type) + if (any(re.search(pattern, os.getenv('AVALON_TASK')) for pattern in self.skip_timelines_check)): + self.log.info("Skipping frames check because of " + "task name and pattern {}".format( + self.skip_timelines_check)) expected_settings.pop('frameStart', None) expected_settings.pop('frameEnd', None) - - expected_settings.pop("entityType") # not useful after the check + expected_settings.pop('frameStartHandle', None) + expected_settings.pop('frameEndHandle', None) asset_name = instance.context.data['anatomyData']['asset'] if any(re.search(pattern, asset_name) for pattern in self.frame_check_filter): - expected_settings.pop("frameEnd") + self.log.info("Skipping frames check because of " + "task name and pattern {}".format( + self.frame_check_filter)) + expected_settings.pop('frameStart', None) + expected_settings.pop('frameEnd', None) + expected_settings.pop('frameStartHandle', None) + expected_settings.pop('frameEndHandle', None) # handle case where ftrack uses only two decimal places # 23.976023976023978 vs. 
23.98 @@ -99,6 +111,7 @@ def process(self, instance): "frameEnd": instance.context.data["frameEnd"], "handleStart": instance.context.data.get("handleStart"), "handleEnd": instance.context.data.get("handleEnd"), + "frameStartHandle": instance.context.data.get("frameStartHandle"), "frameEndHandle": instance.context.data.get("frameEndHandle"), "resolutionWidth": instance.context.data.get("resolutionWidth"), "resolutionHeight": instance.context.data.get("resolutionHeight"), diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/README.md b/openpype/hosts/harmony/vendor/OpenHarmony/README.md index 7c77fbfcfa8..064afca86ca 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/README.md +++ b/openpype/hosts/harmony/vendor/OpenHarmony/README.md @@ -6,7 +6,7 @@ Ever tried to make a simple script for toonboom Harmony, then got stumped by the Toonboom Harmony is a very powerful software, with hundreds of functions and tools, and it unlocks a great amount of possibilities for animation studios around the globe. And... being the produce of the hard work of a small team forced to prioritise, it can also be a bit rustic at times! -We are users at heart, animators and riggers, who just want to interact with the software as simply as possible. Simplicity is at the heart of the design of openHarmony. But we also are developpers, and we made the library for people like us who can't resist tweaking the software and bend it in all possible ways, and are looking for powerful functions to help them do it. +We are users at heart, animators and riggers, who just want to interact with the software as simply as possible. Simplicity is at the heart of the design of openHarmony. But we also are developers, and we made the library for people like us who can't resist tweaking the software and bend it in all possible ways, and are looking for powerful functions to help them do it. 
This library's aim is to create a more direct way to interact with Toonboom through scripts, by providing a more intuitive way to access its elements, and help with the cumbersome and repetitive tasks as well as help unlock untapped potential in its many available systems. So we can go from having to do things like this: @@ -78,7 +78,7 @@ All you have to do is call : ```javascript include("openHarmony.js"); ``` -at the beggining of your script. +at the beginning of your script. You can ask your users to download their copy of the library and store it alongside, or bundle it as you wish as long as you include the license file provided on this repository. @@ -129,7 +129,7 @@ Check that the environment variable `LIB_OPENHARMONY_PATH` is set correctly to t ## How to add openHarmony to vscode intellisense for autocompletion Although not fully supported, you can get most of the autocompletion features to work by adding the following lines to a `jsconfig.json` file placed at the root of your working folder. -The paths need to be relative which means the openHarmony source code must be placed directly in your developping environnement. +The paths need to be relative which means the openHarmony source code must be placed directly in your developing environment. 
For example, if your working folder contains the openHarmony source in a folder called `OpenHarmony` and your working scripts in a folder called `myScripts`, place the `jsconfig.json` file at the root of the folder and add these lines to the file: diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js index 530c0902c52..ae65d32a2b4 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -78,7 +78,7 @@ * $.log("hello"); // prints out a message to the MessageLog. * var myPoint = new $.oPoint(0,0,0); // create a new class instance from an openHarmony class. 
* - * // function members of the $ objects get published to the global scope, which means $ can be ommited + * // function members of the $ objects get published to the global scope, which means $ can be omitted * * log("hello"); * var myPoint = new oPoint(0,0,0); // This is all valid @@ -118,7 +118,7 @@ Object.defineProperty( $, "directory", { /** - * Wether Harmony is run with the interface or simply from command line + * Whether Harmony is run with the interface or simply from command line */ Object.defineProperty( $, "batchMode", { get: function(){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js index ad1efc91beb..a54f74e1477 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
// @@ -67,7 +67,7 @@ * @hideconstructor * @namespace * @example - * // To check wether an action is available, call the synthax: + * // To check whether an action is available, call the synthax: * Action.validate (, ); * * // To launch an action, call the synthax: diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js index 9e9acb766c5..5809cee6942 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
// @@ -409,7 +409,7 @@ $.oApp.prototype.getToolByName = function(toolName){ /** - * returns the list of stencils useable by the specified tool + * returns the list of stencils usable by the specified tool * @param {$.oTool} tool the tool object we want valid stencils for * @return {$.oStencil[]} the list of stencils compatible with the specified tool */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js index d4d2d791ae2..fa044d5b742 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -338,7 +338,7 @@ Object.defineProperty($.oAttribute.prototype, "useSeparate", { * Returns the default value of the attribute for most keywords * @name $.oAttribute#defaultValue * @type {bool} - * @todo switch the implentation to types? + * @todo switch the implementation to types? 
* @example * // to reset an attribute to its default value: * // (mostly used for position/angle/skew parameters of pegs and drawing nodes) @@ -449,7 +449,7 @@ $.oAttribute.prototype.getLinkedColumns = function(){ /** * Recursively sets an attribute to the same value as another. Both must have the same keyword. - * @param {bool} [duplicateColumns=false] In the case that the attribute has a column, wether to duplicate the column before linking + * @param {bool} [duplicateColumns=false] In the case that the attribute has a column, whether to duplicate the column before linking * @private */ $.oAttribute.prototype.setToAttributeValue = function(attributeToCopy, duplicateColumns){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js index c98e1945394..1d359f93c4f 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
// diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js index 7726be6cd6d..ff06688e66d 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
// @@ -158,7 +158,7 @@ $.oColorValue.prototype.fromColorString = function (hexString){ /** - * Uses a color integer (used in backdrops) and parses the INT; applies the RGBA components of the INT to thos oColorValue + * Uses a color integer (used in backdrops) and parses the INT; applies the RGBA components of the INT to the oColorValue * @param { int } colorInt 24 bit-shifted integer containing RGBA values */ $.oColorValue.prototype.parseColorFromInt = function(colorInt){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js index 1b73c7943e9..f73309049ea 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
// diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js index 73964c5c38a..5440b92875b 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js index a6e16ecb782..3ab78b87d6c 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js @@ -5,7 +5,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -17,7 +17,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. 
Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -250,7 +250,7 @@ $.oDialog.prototype.prompt = function( labelText, title, prefilledText){ /** * Prompts with a file selector window * @param {string} [text="Select a file:"] The title of the confirmation dialog. - * @param {string} [filter="*"] The filter for the file type and/or file name that can be selected. Accepts wildcard charater "*". + * @param {string} [filter="*"] The filter for the file type and/or file name that can be selected. Accepts wildcard character "*". * @param {string} [getExisting=true] Whether to select an existing file or a save location * @param {string} [acceptMultiple=false] Whether or not selecting more than one file is ok. Is ignored if getExisting is falses. * @param {string} [startDirectory] The directory showed at the opening of the dialog. @@ -327,14 +327,14 @@ $.oDialog.prototype.browseForFolder = function(text, startDirectory){ * @constructor * @classdesc An simple progress dialog to display the progress of a task. * To react to the user clicking the cancel button, connect a function to $.oProgressDialog.canceled() signal. - * When $.batchmode is true, the progress will be outputed as a "Progress : value/range" string to the Harmony stdout. + * When $.batchmode is true, the progress will be outputted as a "Progress : value/range" string to the Harmony stdout. * @param {string} [labelText] The text displayed above the progress bar. * @param {string} [range=100] The maximum value that represents a full progress bar. * @param {string} [title] The title of the dialog * @param {bool} [show=false] Whether to immediately show the dialog. * * @property {bool} wasCanceled Whether the progress bar was cancelled. 
- * @property {$.oSignal} canceled A Signal emited when the dialog is canceled. Can be connected to a callback. + * @property {$.oSignal} canceled A Signal emitted when the dialog is canceled. Can be connected to a callback. */ $.oProgressDialog = function( labelText, range, title, show ){ if (typeof title === 'undefined') var title = "Progress"; @@ -608,7 +608,7 @@ $.oPieMenu = function( name, widgets, show, minAngle, maxAngle, radius, position this.maxAngle = maxAngle; this.globalCenter = position; - // how wide outisde the icons is the slice drawn + // how wide outside the icons is the slice drawn this._circleMargin = 30; // set these values before calling show() to customize the menu appearance @@ -974,7 +974,7 @@ $.oPieMenu.prototype.getMenuRadius = function(){ var _minRadius = UiLoader.dpiScale(30); var _speed = 10; // the higher the value, the slower the progression - // hyperbolic tangent function to determin the radius + // hyperbolic tangent function to determine the radius var exp = Math.exp(2*itemsNumber/_speed); var _radius = ((exp-1)/(exp+1))*_maxRadius+_minRadius; @@ -1383,7 +1383,7 @@ $.oActionButton.prototype.activate = function(){ * This class is a subclass of QPushButton and all the methods from that class are available to modify this button. * @param {string} paletteName The name of the palette that contains the color * @param {string} colorName The name of the color (if more than one is present, will pick the first match) - * @param {bool} showName Wether to display the name of the color on the button + * @param {bool} showName Whether to display the name of the color on the button * @param {QWidget} parent The parent QWidget for the button. Automatically set during initialisation of the menu. 
* */ @@ -1437,7 +1437,7 @@ $.oColorButton.prototype.activate = function(){ * @name $.oScriptButton * @constructor * @classdescription This subclass of QPushButton provides an easy way to create a button for a widget that will launch a function from another script file.
- * The buttons created this way automatically load the icon named after the script if it finds one named like the funtion in a script-icons folder next to the script file.
+ * The buttons created this way automatically load the icon named after the script if it finds one named like the function in a script-icons folder next to the script file.
* It will also automatically set the callback to lanch the function from the script.
* This class is a subclass of QPushButton and all the methods from that class are available to modify this button. * @param {string} scriptFile The path to the script file that will be launched diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js index bad735f2370..6f2bc19c0c2 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -426,7 +426,7 @@ Object.defineProperty($.oDrawing.prototype, 'drawingData', { /** * Import a given file into an existing drawing. * @param {$.oFile} file The path to the file - * @param {bool} [convertToTvg=false] Wether to convert the bitmap to the tvg format (this doesn't vectorise the drawing) + * @param {bool} [convertToTvg=false] Whether to convert the bitmap to the tvg format (this doesn't vectorise the drawing) * * @return { $.oFile } the oFile object pointing to the drawing file after being it has been imported into the element folder. 
*/ @@ -878,8 +878,8 @@ $.oArtLayer.prototype.drawCircle = function(center, radius, lineStyle, fillStyle * @param {$.oVertex[]} path an array of $.oVertex objects that describe a path. * @param {$.oLineStyle} [lineStyle] the line style to draw with. (By default, will use the current stencil selection) * @param {$.oFillStyle} [fillStyle] the fill information for the path. (By default, will use the current palette selection) - * @param {bool} [polygon] Wether bezier handles should be created for the points in the path (ignores "onCurve" properties of oVertex from path) - * @param {bool} [createUnderneath] Wether the new shape will appear on top or underneath the contents of the layer. (not working yet) + * @param {bool} [polygon] Whether bezier handles should be created for the points in the path (ignores "onCurve" properties of oVertex from path) + * @param {bool} [createUnderneath] Whether the new shape will appear on top or underneath the contents of the layer. (not working yet) */ $.oArtLayer.prototype.drawShape = function(path, lineStyle, fillStyle, polygon, createUnderneath){ if (typeof fillStyle === 'undefined') var fillStyle = new this.$.oFillStyle(); @@ -959,7 +959,7 @@ $.oArtLayer.prototype.drawContour = function(path, fillStyle){ * @param {float} width the width of the rectangle. * @param {float} height the height of the rectangle. * @param {$.oLineStyle} lineStyle a line style to use for the rectangle stroke. - * @param {$.oFillStyle} fillStyle a fill style to use for the rectange fill. + * @param {$.oFillStyle} fillStyle a fill style to use for the rectangle fill. * @returns {$.oShape} the shape containing the added stroke. */ $.oArtLayer.prototype.drawRectangle = function(x, y, width, height, lineStyle, fillStyle){ @@ -1514,7 +1514,7 @@ Object.defineProperty($.oStroke.prototype, "path", { /** - * The oVertex that are on the stroke (Bezier handles exluded.) + * The oVertex that are on the stroke (Bezier handles excluded.) 
* The first is repeated at the last position when the stroke is closed. * @name $.oStroke#points * @type {$.oVertex[]} @@ -1583,7 +1583,7 @@ Object.defineProperty($.oStroke.prototype, "style", { /** - * wether the stroke is a closed shape. + * whether the stroke is a closed shape. * @name $.oStroke#closed * @type {bool} */ @@ -1919,7 +1919,7 @@ $.oContour.prototype.toString = function(){ * @constructor * @classdesc * The $.oVertex class represents a single control point on a stroke. This class is used to get the index of the point in the stroke path sequence, as well as its position as a float along the stroke's length. - * The onCurve property describes wether this control point is a bezier handle or a point on the curve. + * The onCurve property describes whether this control point is a bezier handle or a point on the curve. * * @param {$.oStroke} stroke the stroke that this vertex belongs to * @param {float} x the x coordinate of the vertex, in drawing space diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js index ed50d6e50b4..b64c8169ecc 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. 
If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js index 14dafa3b638..50e4b0d4758 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -509,7 +509,7 @@ Object.defineProperty($.oFile.prototype, 'fullName', { /** - * The name of the file without extenstion. + * The name of the file without extension. 
* @name $.oFile#name * @type {string} */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js index 37bdede02a9..e1d1dd7fad3 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -263,7 +263,7 @@ Object.defineProperty($.oFrame.prototype, 'duration', { return _sceneLength; } - // walk up the frames of the scene to the next keyFrame to determin duration + // walk up the frames of the scene to the next keyFrame to determine duration var _frames = this.column.frames for (var i=this.frameNumber+1; i<_sceneLength; i++){ if (_frames[i].isKeyframe) return _frames[i].frameNumber - _startFrame; @@ -426,7 +426,7 @@ Object.defineProperty($.oFrame.prototype, 'velocity', { * easeIn : a $.oPoint object representing the left handle for bezier columns, or a {point, ease} object for ease columns. * easeOut : a $.oPoint object representing the left handle for bezier columns, or a {point, ease} object for ease columns. * continuity : the type of bezier used by the point. - * constant : wether the frame is interpolated or a held value. 
+ * constant : whether the frame is interpolated or a held value. * @name $.oFrame#ease * @type {oPoint/object} */ @@ -520,7 +520,7 @@ Object.defineProperty($.oFrame.prototype, 'easeOut', { /** - * Determines the frame's continuity setting. Can take the values "CORNER", (two independant bezier handles on each side), "SMOOTH"(handles are aligned) or "STRAIGHT" (no handles and in straight lines). + * Determines the frame's continuity setting. Can take the values "CORNER", (two independent bezier handles on each side), "SMOOTH"(handles are aligned) or "STRAIGHT" (no handles and in straight lines). * @name $.oFrame#continuity * @type {string} */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js index 9d02b1c2aa6..63a5c0eeb82 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -516,5 +516,5 @@ Object.defineProperty($.oList.prototype, 'toString', { -//Needs all filtering, limiting. mapping, pop, concat, join, ect +//Needs all filtering, limiting. 
mapping, pop, concat, join, etc //Speed up by finessing the way it extends and tracks the enumerable properties. \ No newline at end of file diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js index c0d4ca99a74..06bfb51f306 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -193,7 +193,7 @@ $.oPoint.prototype.pointSubtract = function( sub_pt ){ /** * Subtracts the point to the coordinates of the current oPoint and returns a new oPoint with the result. * @param {$.oPoint} point The point to subtract to this point. - * @returns {$.oPoint} a new independant oPoint. + * @returns {$.oPoint} a new independent oPoint. 
*/ $.oPoint.prototype.subtractPoint = function( point ){ var x = this.x - point.x; @@ -298,9 +298,9 @@ $.oPoint.prototype.convertToWorldspace = function(){ /** - * Linearily Interpolate between this (0.0) and the provided point (1.0) + * Linearly Interpolate between this (0.0) and the provided point (1.0) * @param {$.oPoint} point The target point at 100% - * @param {double} perc 0-1.0 value to linearily interp + * @param {double} perc 0-1.0 value to linearly interp * * @return: { $.oPoint } The interpolated value. */ @@ -410,9 +410,9 @@ $.oBox.prototype.include = function(box){ /** - * Checks wether the box contains another $.oBox. + * Checks whether the box contains another $.oBox. * @param {$.oBox} box The $.oBox to check for. - * @param {bool} [partial=false] wether to accept partially contained boxes. + * @param {bool} [partial=false] whether to accept partially contained boxes. */ $.oBox.prototype.contains = function(box, partial){ if (typeof partial === 'undefined') var partial = false; @@ -537,7 +537,7 @@ $.oMatrix.prototype.toString = function(){ * @classdesc The $.oVector is a replacement for the Vector3d objects of Harmony. * @param {float} x a x coordinate for this vector. * @param {float} y a y coordinate for this vector. - * @param {float} [z=0] a z coordinate for this vector. If ommited, will be set to 0 and vector will be 2D. + * @param {float} [z=0] a z coordinate for this vector. If omitted, will be set to 0 and vector will be 2D. 
*/ $.oVector = function(x, y, z){ if (typeof z === "undefined" || isNaN(z)) var z = 0; diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js index c19e6d12f41..29afeb522cf 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js index fec5d328164..6ef75f55604 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. 
As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -54,7 +54,7 @@ /** - * The $.oUtils helper class -- providing generic utilities. Doesn't need instanciation. + * The $.oUtils helper class -- providing generic utilities. Doesn't need instantiation. * @classdesc $.oUtils utility Class */ $.oUtils = function(){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js index a4476d75914..2a6aa3519ac 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -87,7 +87,7 @@ $.oNetwork = function( ){ * @param {function} callback_func Providing a callback function prevents blocking, and will respond on this function. 
The callback function is in form func( results ){} * @param {bool} use_json In the event of a JSON api, this will return an object converted from the returned JSON. * - * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occured.. + * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occurred.. */ $.oNetwork.prototype.webQuery = function ( address, callback_func, use_json ){ if (typeof callback_func === 'undefined') var callback_func = false; @@ -272,7 +272,7 @@ $.oNetwork.prototype.webQuery = function ( address, callback_func, use_json ){ * @param {function} path The local file path to save the download. * @param {bool} replace Replace the file if it exists. * - * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occured.. + * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occurred.. */ $.oNetwork.prototype.downloadSingle = function ( address, path, replace ){ if (typeof replace === 'undefined') var replace = false; diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js index 5590d7b7e94..deb18543578 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. 
Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -562,7 +562,7 @@ Object.defineProperty($.oNode.prototype, 'height', { /** - * The list of oNodeLinks objects descibing the connections to the inport of this node, in order of inport. + * The list of oNodeLinks objects describing the connections to the inport of this node, in order of inport. * @name $.oNode#inLinks * @readonly * @deprecated returns $.oNodeLink instances but $.oLink is preferred. Use oNode.getInLinks() instead. @@ -658,7 +658,7 @@ Object.defineProperty($.oNode.prototype, 'outPorts', { /** - * The list of oNodeLinks objects descibing the connections to the outports of this node, in order of outport. + * The list of oNodeLinks objects describing the connections to the outports of this node, in order of outport. * @name $.oNode#outLinks * @readonly * @type {$.oNodeLink[]} @@ -1666,7 +1666,7 @@ $.oNode.prototype.refreshAttributes = function( ){ * It represents peg nodes in the scene. * @constructor * @augments $.oNode - * @classdesc Peg Moudle Class + * @classdesc Peg Module Class * @param {string} path Path to the node in the network. * @param {oScene} oSceneObject Access to the oScene object of the DOM. */ @@ -1886,7 +1886,7 @@ $.oDrawingNode.prototype.getDrawingAtFrame = function(frameNumber){ /** - * Gets the list of palettes containing colors used by a drawing node. This only gets palettes with the first occurence of the colors. + * Gets the list of palettes containing colors used by a drawing node. This only gets palettes with the first occurrence of the colors. * @return {$.oPalette[]} The palettes that contain the color IDs used by the drawings of the node. 
*/ $.oDrawingNode.prototype.getUsedPalettes = function(){ @@ -1968,7 +1968,7 @@ $.oDrawingNode.prototype.unlinkPalette = function(oPaletteObject){ * Duplicates a node by creating an independent copy. * @param {string} [newName] The new name for the duplicated node. * @param {oPoint} [newPosition] The new position for the duplicated node. - * @param {bool} [duplicateElement] Wether to also duplicate the element. + * @param {bool} [duplicateElement] Whether to also duplicate the element. */ $.oDrawingNode.prototype.duplicate = function(newName, newPosition, duplicateElement){ if (typeof newPosition === 'undefined') var newPosition = this.nodePosition; @@ -2464,7 +2464,7 @@ $.oGroupNode.prototype.getNodeByName = function(name){ * Returns all the nodes of a certain type in the group. * Pass a value to recurse to look into the groups as well. * @param {string} typeName The type of the nodes. - * @param {bool} recurse Wether to look inside the groups. + * @param {bool} recurse Whether to look inside the groups. * * @return {$.oNode[]} The nodes found. */ @@ -2626,7 +2626,7 @@ $.oGroupNode.prototype.orderNodeView = function(recurse){ * * peg.linkOutNode(drawingNode); * - * //through all this we didn't specify nodePosition parameters so we'll sort evertything at once + * //through all this we didn't specify nodePosition parameters so we'll sort everything at once * * sceneRoot.orderNodeView(); * @@ -3333,7 +3333,7 @@ $.oGroupNode.prototype.importImageAsTVG = function(path, alignment, nodePosition * imports an image sequence as a node into the current group. * @param {$.oFile[]} imagePaths a list of paths to the images to import (can pass a list of strings or $.oFile) * @param {number} [exposureLength=1] the number of frames each drawing should be exposed at. If set to 0/false, each drawing will use the numbering suffix of the file to set its frame. 
- * @param {boolean} [convertToTvg=false] wether to convert the files to tvg during import + * @param {boolean} [convertToTvg=false] whether to convert the files to tvg during import * @param {string} [alignment="ASIS"] the alignment to apply to the node * @param {$.oPoint} [nodePosition] the position of the node in the nodeview * @@ -3346,7 +3346,7 @@ $.oGroupNode.prototype.importImageSequence = function(imagePaths, exposureLength if (typeof extendScene === 'undefined') var extendScene = false; - // match anything but capture trailing numbers and separates punctuation preceeding it + // match anything but capture trailing numbers and separates punctuation preceding it var numberingRe = /(.*?)([\W_]+)?(\d*)$/i; // sanitize imagePaths diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js index 279a8716911..07a4d147da3 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
// @@ -174,7 +174,7 @@ Object.defineProperty($.oNodeLink.prototype, 'outNode', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -198,7 +198,7 @@ Object.defineProperty($.oNodeLink.prototype, 'inNode', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -222,7 +222,7 @@ Object.defineProperty($.oNodeLink.prototype, 'outPort', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -256,7 +256,7 @@ Object.defineProperty($.oNodeLink.prototype, 'inPort', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -983,7 +983,7 @@ $.oNodeLink.prototype.validate = function ( ) { * @return {bool} Whether the connection is a valid connection that exists currently in the node system. */ $.oNodeLink.prototype.validateUpwards = function( inport, outportProvided ) { - //IN THE EVENT OUTNODE WASNT PROVIDED. + //IN THE EVENT OUTNODE WASN'T PROVIDED. this.path = this.findInputPath( this._inNode, inport, [] ); if( !this.path || this.path.length == 0 ){ return false; @@ -1173,7 +1173,7 @@ Object.defineProperty($.oLink.prototype, 'outPort', { /** - * The index of the link comming out of the out-port. + * The index of the link coming out of the out-port. *
In the event this value wasn't known by the link object but the link is actually connected, the correct value will be found. * @name $.oLink#outLink * @readonly @@ -1323,7 +1323,7 @@ $.oLink.prototype.getValidLink = function(createOutPorts, createInPorts){ /** - * Attemps to connect a link. Will guess the ports if not provided. + * Attempts to connect a link. Will guess the ports if not provided. * @return {bool} */ $.oLink.prototype.connect = function(){ @@ -1623,11 +1623,11 @@ $.oLinkPath.prototype.findExistingPath = function(){ /** - * Gets a link object from two nodes that can be succesfully connected. Provide port numbers if there are specific requirements to match. If a link already exists, it will be returned. + * Gets a link object from two nodes that can be successfully connected. Provide port numbers if there are specific requirements to match. If a link already exists, it will be returned. * @param {$.oNode} start The node from which the link originates. * @param {$.oNode} end The node at which the link ends. - * @param {int} [outPort] A prefered out-port for the link to use. - * @param {int} [inPort] A prefered in-port for the link to use. + * @param {int} [outPort] A preferred out-port for the link to use. + * @param {int} [inPort] A preferred in-port for the link to use. * * @return {$.oLink} the valid $.oLink object. Returns null if no such link could be created (for example if the node's in-port is already linked) */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js index 57d4a63e961..9014929fc48 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, ... +// Developed by Mathieu Chaptel, ... 
// // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -212,7 +212,7 @@ function openHarmony_toolInstaller(){ //---------------------------------------------- - //-- GET THE FILE CONTENTS IN A DIRCTORY ON GIT + //-- GET THE FILE CONTENTS IN A DIRECTORY ON GIT this.recurse_files = function( contents, arr_files ){ with( context.$.global ){ try{ @@ -501,7 +501,7 @@ function openHarmony_toolInstaller(){ var download_item = item["download_url"]; var query = $.network.webQuery( download_item, false, false ); if( query ){ - //INSTALL TYPES ARE script, package, ect. + //INSTALL TYPES ARE script, package, etc. 
if( install_types[ m.install_cache[ item["url"] ] ] ){ m.installLabel.text = install_types[ m.install_cache[ item["url"] ] ]; diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/package.json b/openpype/hosts/harmony/vendor/OpenHarmony/package.json index c62ecbc9d80..7a535cdcf6f 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/package.json +++ b/openpype/hosts/harmony/vendor/OpenHarmony/package.json @@ -1,7 +1,7 @@ { "name": "openharmony", "version": "0.0.1", - "description": "An Open Source Imlementation of a Document Object Model for the Toonboom Harmony scripting interface", + "description": "An Open Source Implementation of a Document Object Model for the Toonboom Harmony scripting interface", "main": "openHarmony.js", "scripts": { "test": "$", diff --git a/openpype/hosts/hiero/api/__init__.py b/openpype/hosts/hiero/api/__init__.py index 1fa40c9f745..b95c0fe1d73 100644 --- a/openpype/hosts/hiero/api/__init__.py +++ b/openpype/hosts/hiero/api/__init__.py @@ -108,7 +108,7 @@ "apply_colorspace_project", "apply_colorspace_clips", "get_sequence_pattern_and_padding", - # depricated + # deprecated "get_track_item_pype_tag", "set_track_item_pype_tag", "get_track_item_pype_data", diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index bbd1edc14a3..0d4368529f5 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -1221,7 +1221,7 @@ def set_track_color(track_item, color): def check_inventory_versions(track_items=None): """ - Actual version color idetifier of Loaded containers + Actual version color identifier of Loaded containers Check all track items and filter only Loader nodes for its version. 
It will get all versions from database @@ -1249,10 +1249,10 @@ def check_inventory_versions(track_items=None): project_name = legacy_io.active_project() filter_result = filter_containers(containers, project_name) for container in filter_result.latest: - set_track_color(container["_item"], clip_color) + set_track_color(container["_item"], clip_color_last) for container in filter_result.outdated: - set_track_color(container["_item"], clip_color_last) + set_track_color(container["_item"], clip_color) def selection_changed_timeline(event): diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index 4ab73e7d194..d88aeac8100 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -193,8 +193,8 @@ def data_to_container(item, data): return # convert the data to list and validate them for _, obj_data in _data.items(): - cotnainer = data_to_container(item, obj_data) - return_list.append(cotnainer) + container = data_to_container(item, obj_data) + return_list.append(container) return return_list else: _data = lib.get_trackitem_openpype_data(item) diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 5ca901caaa2..a3f8a6c5243 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -411,7 +411,7 @@ def __init__(self, cls, context, **options): self.with_handles = options.get("handles") or bool( options.get("handles") is True) # try to get value from options or evaluate key value for `load_how` - self.sequencial_load = options.get("sequencially") or bool( + self.sequencial_load = options.get("sequentially") or bool( "Sequentially in order" in options.get("load_how", "")) # try to get value from options or evaluate key value for `load_to` self.new_sequence = options.get("newSequence") or bool( @@ -836,7 +836,7 @@ def _convert_to_tag_data(self): # increasing steps by index of rename iteration self.count_steps *= self.rename_index - 
hierarchy_formating_data = {} + hierarchy_formatting_data = {} hierarchy_data = deepcopy(self.hierarchy_data) _data = self.track_item_default_data.copy() if self.ui_inputs: @@ -871,13 +871,13 @@ def _convert_to_tag_data(self): # fill up pythonic expresisons in hierarchy data for k, _v in hierarchy_data.items(): - hierarchy_formating_data[k] = _v["value"].format(**_data) + hierarchy_formatting_data[k] = _v["value"].format(**_data) else: # if no gui mode then just pass default data - hierarchy_formating_data = hierarchy_data + hierarchy_formatting_data = hierarchy_data tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formating_data + hierarchy_formatting_data ) tag_hierarchy_data.update({"heroTrack": True}) @@ -905,20 +905,20 @@ def _convert_to_tag_data(self): # add data to return data dict self.tag_data.update(tag_hierarchy_data) - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): + def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): """ Solve tag data from hierarchy data and templates. 
""" # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) - clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) # remove shot from hierarchy data: is not needed anymore - hierarchy_formating_data.pop("shot") + hierarchy_formatting_data.pop("shot") return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, "parents": self.parents, - "hierarchyData": hierarchy_formating_data, + "hierarchyData": hierarchy_formatting_data, "subset": self.subset, "family": self.subset_family, "families": [self.data["family"]] @@ -934,16 +934,16 @@ def _convert_to_entity(self, type, template): ) # first collect formatting data to use for formatting template - formating_data = {} + formatting_data = {} for _k, _v in self.hierarchy_data.items(): value = _v["value"].format( **self.track_item_default_data) - formating_data[_k] = value + formatting_data[_k] = value return { "entity_type": entity_type, "entity_name": template.format( - **formating_data + **formatting_data ) } diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index 13f5a62ec33..f19dc649925 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -479,23 +479,13 @@ def reset_framerange(): frame_start = asset_data.get("frameStart") frame_end = asset_data.get("frameEnd") - # Backwards compatibility - if frame_start is None or frame_end is None: - frame_start = asset_data.get("edit_in") - frame_end = asset_data.get("edit_out") if frame_start is None or frame_end is None: log.warning("No edit information found for %s" % asset_name) return - handles = asset_data.get("handles") or 0 - handle_start = asset_data.get("handleStart") - if handle_start is None: - handle_start = handles - - handle_end = asset_data.get("handleEnd") - if handle_end is 
None: - handle_end = handles + handle_start = asset_data.get("handleStart", 0) + handle_end = asset_data.get("handleEnd", 0) frame_start -= int(handle_start) frame_end += int(handle_end) diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py index 9793679b45d..45e2f8f87fb 100644 --- a/openpype/hosts/houdini/api/pipeline.py +++ b/openpype/hosts/houdini/api/pipeline.py @@ -144,13 +144,10 @@ def create_context_node(): """ obj_network = hou.node("/obj") - op_ctx = obj_network.createNode("null", node_name="OpenPypeContext") - - # A null in houdini by default comes with content inside to visualize - # the null. However since we explicitly want to hide the node lets - # remove the content and disable the display flag of the node - for node in op_ctx.children(): - node.destroy() + op_ctx = obj_network.createNode("subnet", + node_name="OpenPypeContext", + run_init_scripts=False, + load_contents=False) op_ctx.moveToGoodPosition() op_ctx.setBuiltExplicitly(False) diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py index f0985973a61..340a7f07704 100644 --- a/openpype/hosts/houdini/api/plugin.py +++ b/openpype/hosts/houdini/api/plugin.py @@ -60,7 +60,7 @@ def process(self): def process(self): instance = super(CreateEpicNode, self, process() - # Set paramaters for Alembic node + # Set parameters for Alembic node instance.setParms( {"sop_path": "$HIP/%s.abc" % self.nodes[0]} ) diff --git a/openpype/hosts/houdini/api/shelves.py b/openpype/hosts/houdini/api/shelves.py index ebd668e9e48..6e0f367f624 100644 --- a/openpype/hosts/houdini/api/shelves.py +++ b/openpype/hosts/houdini/api/shelves.py @@ -69,7 +69,7 @@ def generate_shelves(): mandatory_attributes = {'label', 'script'} for tool_definition in shelf_definition.get('tools_list'): - # We verify that the name and script attibutes of the tool + # We verify that the name and script attributes of the tool # are set if not all( tool_definition[key] for key in 
mandatory_attributes diff --git a/openpype/hosts/houdini/plugins/create/convert_legacy.py b/openpype/hosts/houdini/plugins/create/convert_legacy.py index 4b8041b4f55..e549c9dc26b 100644 --- a/openpype/hosts/houdini/plugins/create/convert_legacy.py +++ b/openpype/hosts/houdini/plugins/create/convert_legacy.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -"""Convertor for legacy Houdini subsets.""" +"""Converter for legacy Houdini subsets.""" from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin from openpype.hosts.houdini.api.lib import imprint @@ -7,7 +7,7 @@ class HoudiniLegacyConvertor(SubsetConvertorPlugin): """Find and convert any legacy subsets in the scene. - This Convertor will find all legacy subsets in the scene and will + This Converter will find all legacy subsets in the scene and will transform them to the current system. Since the old subsets doesn't retain any information about their original creators, the only mapping we can do is based on their families. diff --git a/openpype/hosts/houdini/plugins/publish/collect_current_file.py b/openpype/hosts/houdini/plugins/publish/collect_current_file.py index 9cca07fdc7c..caf679f98be 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/collect_current_file.py @@ -1,7 +1,6 @@ import os import hou -from openpype.pipeline import legacy_io import pyblish.api @@ -11,7 +10,7 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder - 0.01 label = "Houdini Current File" hosts = ["houdini"] - family = ["workfile"] + families = ["workfile"] def process(self, instance): """Inject the current working file""" @@ -21,7 +20,7 @@ def process(self, instance): # By default, Houdini will even point a new scene to a path. # However if the file is not saved at all and does not exist, # we assume the user never set it. 
def set_scene_resolution(width: int, height: int):
    """Set the render resolution.

    Args:
        width (int): Render width in pixels.
        height (int): Render height in pixels.

    Returns:
        None

    """
    rt.renderWidth = width
    rt.renderHeight = height


def reset_scene_resolution():
    """Apply the scene resolution from the project definition.

    The scene resolution can be overwritten by an asset if the asset.data
    contains any information regarding scene resolution.

    Returns:
        None
    """
    data = ["data.resolutionWidth", "data.resolutionHeight"]
    project_resolution = get_current_project(fields=data)
    project_resolution_data = project_resolution["data"]
    asset_resolution = get_current_project_asset(fields=data)
    asset_resolution_data = asset_resolution["data"]

    # Fall back to the project resolution (and ultimately full HD) when the
    # asset does not define its own resolution.
    project_width = int(project_resolution_data.get("resolutionWidth", 1920))
    project_height = int(project_resolution_data.get("resolutionHeight", 1080))
    width = int(asset_resolution_data.get("resolutionWidth", project_width))
    height = int(asset_resolution_data.get("resolutionHeight", project_height))

    set_scene_resolution(width, height)


def get_frame_range() -> Union[dict, None]:
    """Get the current asset's frame range and handles.

    Returns:
        Union[dict, None]: Keys `frameStart`, `frameEnd`, `handleStart`
            and `handleEnd`, or None when the current asset carries no
            frame range information.
    """
    # Set frame start/end
    asset = get_current_project_asset()
    frame_start = asset["data"].get("frameStart")
    frame_end = asset["data"].get("frameEnd")

    if frame_start is None or frame_end is None:
        return None

    handle_start = asset["data"].get("handleStart", 0)
    handle_end = asset["data"].get("handleEnd", 0)
    return {
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "handleStart": handle_start,
        "handleEnd": handle_end
    }


def reset_frame_range(fps: bool = True):
    """Set frame range to current asset.

    This is part of 3dsmax documentation:

    animationRange: A System Global variable which lets you get and
        set an Interval value that defines the start and end frames
        of the Active Time Segment.
    frameRate: A System Global variable which lets you get
        and set an Integer value that defines the current
        scene frame rate in frames-per-second.

    Args:
        fps (bool): Also apply the project fps setting. Defaults to True.
    """
    if fps:
        data_fps = get_current_project(fields=["data.fps"])
        fps_number = float(data_fps["data"]["fps"])
        rt.frameRate = fps_number

    frame_range = get_frame_range()
    if frame_range is None:
        # Asset has no frame range information; subscripting the result
        # below would raise TypeError, so bail out instead.
        return

    frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
    frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])
    frange_cmd = f"animationRange = interval {frame_start} {frame_end}"
    rt.execute(frange_cmd)


def set_context_setting():
    """Apply the project settings from the project definition.

    Settings can be overwritten by an asset if the asset.data contains
    any information regarding those settings.

    Examples of settings:
        frame range
        resolution

    Returns:
        None
    """
    reset_scene_resolution()
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
    """Save the current 3dsmax workfile under an incremented version.

    Runs after integration so the artist continues working on a fresh
    version of the scene file.
    """

    order = pyblish.api.IntegratorOrder + 0.9
    label = "Increment Workfile Version"
    hosts = ["max"]
    families = ["workfile"]

    def process(self, context):
        # Derive the next version path from the scene currently open.
        current_path = context.data["currentFile"]
        incremented_path = version_up(current_path)
        rt.saveMaxFile(incremented_path)
        self.log.info("Incrementing file version")
def get_attribute(plug,
                  asString=False,
                  expandEnvironmentVariables=False,
                  **kwargs):
    """Maya getAttr with some fixes based on `pymel.core.general.getAttr()`.

    Like Pymel getAttr this applies some changes to `maya.cmds.getAttr`
      - maya pointlessly returned vector results as a tuple wrapped in a list
        (ex. '[(1,2,3)]'). This command unpacks the vector for you.
      - when getting a multi-attr, maya would raise an error, but this will
        return a list of values for the multi-attr
      - added support for getting message attributes by returning the
        connections instead

    Note that the asString + expandEnvironmentVariables argument naming
    convention matches the `maya.cmds.getAttr` arguments so that it can
    act as a direct replacement for it.

    Args:
        plug (str): Node's attribute plug as `node.attribute`
        asString (bool): Return string value for enum attributes instead
            of the index. Note that the return value can be dependent on the
            UI language Maya is running in.
        expandEnvironmentVariables (bool): Expand any environment variable and
            (tilde characters on UNIX) found in string attributes which are
            returned.

    Kwargs:
        Supports the keyword arguments of `maya.cmds.getAttr`

    Returns:
        object: The value of the maya attribute.

    """
    attr_type = cmds.getAttr(plug, type=True)
    if asString:
        kwargs["asString"] = True
    if expandEnvironmentVariables:
        kwargs["expandEnvironmentVariables"] = True
    try:
        res = cmds.getAttr(plug, **kwargs)
    except RuntimeError:
        # Message attributes have no value of their own; return the
        # connections instead.
        if attr_type == "message":
            return cmds.listConnections(plug)

        # Compound/multi attributes raise a RuntimeError on getAttr;
        # return the list of child plug values instead.
        node, attr = plug.split(".", 1)
        children = cmds.attributeQuery(attr, node=node, listChildren=True)
        if children:
            return [
                get_attribute("{}.{}".format(node, child))
                for child in children
            ]

        raise

    # Unpack a vector result wrapped in a tuple, e.g. `[(1.0, 2.0, 3.0)]`,
    # except for point/vector arrays where the list wrapping *is* the value.
    # (Simplified: the previous duplicate `len(res)` test was redundant.)
    if isinstance(res, list) and res and isinstance(res[0], tuple):
        if attr_type in {'pointArray', 'vectorArray'}:
            return res
        return res[0]

    return res


def get_node_parent(node):
    """Return full path name for parent of node.

    Args:
        node (str): Node name or path.

    Returns:
        str or None: Full path of the parent transform, or None when the
            node is parented to the world.

    """
    parents = cmds.listRelatives(node, parent=True, fullPath=True)
    return parents[0] if parents else None
def parent_nodes(nodes, parent=None):
    # type: (list, str) -> list
    """Context manager to un-parent provided nodes and return them back.

    On enter, every dag node in `nodes` is parented under `parent` (a
    transform created on the fly when it does not exist yet, or the world
    when no parent is given). On exit, each node is re-parented to its
    original parent and a parent transform created by this context is
    deleted again.

    Args:
        nodes (list): Node names/paths to temporarily re-parent. Non-dag
            nodes are silently ignored.
        parent (str, optional): Name of the temporary parent transform.

    """

    def _as_mdagpath(node):
        """Return MDagPath for node path."""
        # MDagPath keeps tracking the node even when its full path changes
        # due to (re)parenting, so we can resolve the current path later.
        if not node:
            return
        sel = OpenMaya.MSelectionList()
        sel.add(node)
        return sel.getDagPath(0)

    # We can only parent dag nodes so we ensure input contains only dag nodes
    nodes = cmds.ls(nodes, type="dagNode", long=True)
    if not nodes:
        # opt-out early
        yield
        return

    parent_node_path = None
    delete_parent = False
    if parent:
        if not cmds.objExists(parent):
            parent_node = cmds.createNode("transform",
                                          name=parent,
                                          skipSelect=False)
            # Remember to clean up the transform we created ourselves
            delete_parent = True
        else:
            parent_node = parent
        parent_node_path = cmds.ls(parent_node, long=True)[0]

    # Store original parents
    node_parents = []
    for node in nodes:
        node_parent = get_node_parent(node)
        node_parents.append((_as_mdagpath(node), _as_mdagpath(node_parent)))

    try:
        for node, node_parent in node_parents:
            node_parent_path = node_parent.fullPathName() if node_parent else None  # noqa
            if node_parent_path == parent_node_path:
                # Already a child
                continue

            if parent_node_path:
                cmds.parent(node.fullPathName(), parent_node_path)
            else:
                cmds.parent(node.fullPathName(), world=True)

        yield
    finally:
        # Reparent to original parents
        for node, original_parent in node_parents:
            node_path = node.fullPathName()
            if not node_path:
                # Node must have been deleted
                continue

            node_parent_path = get_node_parent(node_path)

            original_parent_path = None
            if original_parent:
                original_parent_path = original_parent.fullPathName()
                if not original_parent_path:
                    # Original parent node must have been deleted
                    continue

            # Only move the node when its parent actually changed
            if node_parent_path != original_parent_path:
                if not original_parent_path:
                    cmds.parent(node_path, world=True)
                else:
                    cmds.parent(node_path, original_parent_path)

        if delete_parent:
            cmds.delete(parent_node_path)


def get_all_children(nodes):
    """Return all children of `nodes` including each instanced child.
    Using maya.cmds.listRelatives(allDescendents=True) includes only the first
    instance. As such, this function acts as an optimal replacement with a
    focus on a fast query.

    Args:
        nodes (list): Node names/paths whose descendants to collect.

    Returns:
        list: Full path names of all descendants (the input nodes
            themselves are excluded).

    """

    sel = OpenMaya.MSelectionList()
    traversed = set()
    iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst)
    for node in nodes:

        if node in traversed:
            # Ignore if already processed as a child
            # before
            continue

        sel.clear()
        sel.add(node)
        dag = sel.getDagPath(0)

        iterator.reset(dag)
        # ignore self
        iterator.next()  # noqa: B305
        while not iterator.isDone():

            path = iterator.fullPathName()

            if path in traversed:
                # Subtree already visited via another instance path: skip it
                iterator.prune()
                iterator.next()  # noqa: B305
                continue

            traversed.add(path)
            iterator.next()  # noqa: B305

    return list(traversed)
default_ext = "exr" + colorspace = lib.get_color_management_output_transform() products = [] # add beauty as default when not disabled @@ -868,7 +869,7 @@ def get_render_products(self): productName="", ext=default_ext, camera=camera, - colorspace=lib.get_color_management_output_transform(), + colorspace=colorspace, multipart=self.multipart ) ) @@ -882,6 +883,7 @@ def get_render_products(self): productName="Alpha", ext=default_ext, camera=camera, + colorspace=colorspace, multipart=self.multipart ) ) @@ -917,7 +919,8 @@ def get_render_products(self): product = RenderProduct(productName=name, ext=default_ext, aov=aov, - camera=camera) + camera=camera, + colorspace=colorspace) products.append(product) # Continue as we've processed this special case AOV continue @@ -929,7 +932,7 @@ def get_render_products(self): ext=default_ext, aov=aov, camera=camera, - colorspace=lib.get_color_management_output_transform() + colorspace=colorspace ) products.append(product) @@ -1051,7 +1054,7 @@ class RenderProductsRedshift(ARenderProducts): def get_files(self, product): # When outputting AOVs we need to replace Redshift specific AOV tokens # with Maya render tokens for generating file sequences. We validate to - # a specific AOV fileprefix so we only need to accout for one + # a specific AOV fileprefix so we only need to account for one # replacement. 
if not product.multipart and product.driver: file_prefix = self._get_attr(product.driver + ".filePrefix") @@ -1130,6 +1133,7 @@ def get_render_products(self): products = [] light_groups_enabled = False has_beauty_aov = False + colorspace = lib.get_color_management_output_transform() for aov in aovs: enabled = self._get_attr(aov, "enabled") if not enabled: @@ -1173,7 +1177,8 @@ def get_render_products(self): ext=ext, multipart=False, camera=camera, - driver=aov) + driver=aov, + colorspace=colorspace) products.append(product) if light_groups: @@ -1188,7 +1193,8 @@ def get_render_products(self): ext=ext, multipart=False, camera=camera, - driver=aov) + driver=aov, + colorspace=colorspace) products.append(product) # When a Beauty AOV is added manually, it will be rendered as @@ -1204,7 +1210,8 @@ def get_render_products(self): RenderProduct(productName=beauty_name, ext=ext, multipart=self.multipart, - camera=camera)) + camera=camera, + colorspace=colorspace)) return products @@ -1236,6 +1243,8 @@ def get_render_products(self): """ from rfm2.api.displays import get_displays # noqa + colorspace = lib.get_color_management_output_transform() + cameras = [ self.sanitize_camera_name(c) for c in self.get_renderable_cameras() @@ -1302,7 +1311,8 @@ def get_render_products(self): productName=aov_name, ext=extensions, camera=camera, - multipart=True + multipart=True, + colorspace=colorspace ) if has_cryptomatte and matte_enabled: @@ -1311,7 +1321,8 @@ def get_render_products(self): aov=cryptomatte_aov, ext=extensions, camera=camera, - multipart=True + multipart=True, + colorspace=colorspace ) else: # this code should handle the case where no multipart diff --git a/openpype/hosts/maya/api/lib_rendersetup.py b/openpype/hosts/maya/api/lib_rendersetup.py index e616f26e1b4..440ee21a524 100644 --- a/openpype/hosts/maya/api/lib_rendersetup.py +++ b/openpype/hosts/maya/api/lib_rendersetup.py @@ -19,6 +19,8 @@ UniqueOverride ) +from openpype.hosts.maya.api.lib import get_attribute + 
EXACT_MATCH = 0 PARENT_MATCH = 1 CLIENT_MATCH = 2 @@ -96,9 +98,6 @@ def get_attr_in_layer(node_attr, layer): """ - # Delay pymel import to here because it's slow to load - import pymel.core as pm - def _layer_needs_update(layer): """Return whether layer needs updating.""" # Use `getattr` as e.g. DEFAULT_RENDER_LAYER does not have @@ -125,7 +124,7 @@ def get_default_layer_value(node_attr_): node = history_overrides[-1] if history_overrides else override node_attr_ = node + ".original" - return pm.getAttr(node_attr_, asString=True) + return get_attribute(node_attr_, asString=True) layer = get_rendersetup_layer(layer) rs = renderSetup.instance() @@ -145,7 +144,7 @@ def get_default_layer_value(node_attr_): # we will let it error out. rs.switchToLayer(current_layer) - return pm.getAttr(node_attr, asString=True) + return get_attribute(node_attr, asString=True) overrides = get_attr_overrides(node_attr, layer) default_layer_value = get_default_layer_value(node_attr) @@ -156,7 +155,7 @@ def get_default_layer_value(node_attr_): for match, layer_override, index in overrides: if isinstance(layer_override, AbsOverride): # Absolute override - value = pm.getAttr(layer_override.name() + ".attrValue") + value = get_attribute(layer_override.name() + ".attrValue") if match == EXACT_MATCH: # value = value pass @@ -168,8 +167,8 @@ def get_default_layer_value(node_attr_): elif isinstance(layer_override, RelOverride): # Relative override # Value = Original * Multiply + Offset - multiply = pm.getAttr(layer_override.name() + ".multiply") - offset = pm.getAttr(layer_override.name() + ".offset") + multiply = get_attribute(layer_override.name() + ".multiply") + offset = get_attribute(layer_override.name() + ".offset") if match == EXACT_MATCH: value = value * multiply + offset diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py index 90ab6e21e04..4bee0664ef7 100644 --- a/openpype/hosts/maya/api/workfile_template_builder.py 
+++ b/openpype/hosts/maya/api/workfile_template_builder.py @@ -33,7 +33,7 @@ def import_template(self, path): get_template_preset implementation) Returns: - bool: Wether the template was succesfully imported or not + bool: Whether the template was successfully imported or not """ if cmds.objExists(PLACEHOLDER_SET): @@ -116,7 +116,7 @@ def _create_placeholder_name(self, placeholder_data): placeholder_name_parts = placeholder_data["builder_type"].split("_") pos = 1 - # add famlily in any + # add family in any placeholder_family = placeholder_data["family"] if placeholder_family: placeholder_name_parts.insert(pos, placeholder_family) diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py index 2574624dbb8..ba69debc405 100644 --- a/openpype/hosts/maya/plugins/load/actions.py +++ b/openpype/hosts/maya/plugins/load/actions.py @@ -118,7 +118,7 @@ class ImportMayaLoader(load.LoaderPlugin): "clean_import", label="Clean import", default=False, - help="Should all occurences of cbId be purged?" + help="Should all occurrences of cbId be purged?" ) ] diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/openpype/hosts/maya/plugins/load/load_arnold_standin.py index 11a2bd19669..7c3a7323891 100644 --- a/openpype/hosts/maya/plugins/load/load_arnold_standin.py +++ b/openpype/hosts/maya/plugins/load/load_arnold_standin.py @@ -84,7 +84,7 @@ def load(self, context, name, namespace, options): sequence = is_sequence(os.listdir(os.path.dirname(self.fname))) cmds.setAttr(standin_shape + ".useFrameExtension", sequence) - nodes = [root, standin] + nodes = [root, standin, standin_shape] if operator is not None: nodes.append(operator) self[:] = nodes @@ -180,10 +180,10 @@ def update(self, container, representation): proxy_basename, proxy_path = self._get_proxy_path(path) # Whether there is proxy or so, we still update the string operator. - # If no proxy exists, the string operator wont replace anything. 
+ # If no proxy exists, the string operator won't replace anything. cmds.setAttr( string_replace_operator + ".match", - "resources/" + proxy_basename, + proxy_basename, type="string" ) cmds.setAttr( diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index 6f60cb57265..9e7fd96bdb7 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -11,7 +11,7 @@ get_representation_path, ) from openpype.hosts.maya.api.pipeline import containerise -from openpype.hosts.maya.api.lib import unique_namespace +from openpype.hosts.maya.api.lib import unique_namespace, get_container_members class AudioLoader(load.LoaderPlugin): @@ -52,17 +52,15 @@ def load(self, context, name, namespace, data): ) def update(self, container, representation): - import pymel.core as pm - audio_node = None - for node in pm.PyNode(container["objectName"]).members(): - if node.nodeType() == "audio": - audio_node = node + members = get_container_members(container) + audio_nodes = cmds.ls(members, type="audio") - assert audio_node is not None, "Audio node not found." + assert audio_nodes is not None, "Audio node not found." 
+ audio_node = audio_nodes[0] path = get_representation_path(representation) - audio_node.filename.set(path) + cmds.setAttr("{}.filename".format(audio_node), path, type="string") cmds.setAttr( container["objectName"] + ".representation", str(representation["_id"]), @@ -80,8 +78,12 @@ def update(self, container, representation): asset = get_asset_by_id( project_name, subset["parent"], fields=["parent"] ) - audio_node.sourceStart.set(1 - asset["data"]["frameStart"]) - audio_node.sourceEnd.set(asset["data"]["frameEnd"]) + + source_start = 1 - asset["data"]["frameStart"] + source_end = asset["data"]["frameEnd"] + + cmds.setAttr("{}.sourceStart".format(audio_node), source_start) + cmds.setAttr("{}.sourceEnd".format(audio_node), source_end) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py index 07e5734f43d..794b21eb5d5 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/openpype/hosts/maya/plugins/load/load_gpucache.py @@ -1,5 +1,9 @@ import os +import maya.cmds as cmds + +from openpype.hosts.maya.api.pipeline import containerise +from openpype.hosts.maya.api.lib import unique_namespace from openpype.pipeline import ( load, get_representation_path @@ -11,19 +15,15 @@ class GpuCacheLoader(load.LoaderPlugin): """Load Alembic as gpuCache""" families = ["model", "animation", "proxyAbc", "pointcache"] - representations = ["abc"] + representations = ["abc", "gpu_cache"] - label = "Import Gpu Cache" + label = "Load Gpu Cache" order = -5 icon = "code-fork" color = "orange" def load(self, context, name, namespace, data): - import maya.cmds as cmds - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace - asset = context['asset']['name'] namespace = namespace or unique_namespace( asset + "_", @@ -42,10 +42,9 @@ def load(self, context, name, namespace, 
def disconnect_inputs(plug):
    """Break any incoming connections on the given plug."""
    # With `connections=True` Maya returns a flat list alternating
    # [destination, source, destination, source, ...]
    connections = cmds.listConnections(plug,
                                       source=True,
                                       destination=False,
                                       plugs=True,
                                       connections=True)
    for dest, src in pairwise(connections or []):
        cmds.disconnectAttr(src, dest)
color = "orange" def load(self, context, name, namespace, data, options=None): - import pymel.core as pm - new_nodes = [] image_plane_depth = 1000 asset = context['asset']['name'] namespace = namespace or unique_namespace( @@ -96,16 +110,20 @@ def load(self, context, name, namespace, data, options=None): ) # Get camera from user selection. - camera = None # is_static_image_plane = None # is_in_all_views = None - if data: - camera = pm.PyNode(data.get("camera")) + camera = data.get("camera") if data else None if not camera: - cameras = pm.ls(type="camera") - camera_names = {x.getParent().name(): x for x in cameras} - camera_names["Create new camera."] = "create_camera" + cameras = cmds.ls(type="camera") + + # Cameras by names + camera_names = {} + for camera in cameras: + parent = cmds.listRelatives(camera, parent=True, path=True)[0] + camera_names[parent] = camera + + camera_names["Create new camera."] = "create-camera" window = CameraWindow(camera_names.keys()) window.exec_() # Skip if no camera was selected (Dialog was closed) @@ -113,43 +131,48 @@ def load(self, context, name, namespace, data, options=None): return camera = camera_names[window.camera] - if camera == "create_camera": - camera = pm.createNode("camera") + if camera == "create-camera": + camera = cmds.createNode("camera") if camera is None: return try: - camera.displayResolution.set(1) - camera.farClipPlane.set(image_plane_depth * 10) + cmds.setAttr("{}.displayResolution".format(camera), True) + cmds.setAttr("{}.farClipPlane".format(camera), + image_plane_depth * 10) except RuntimeError: pass # Create image plane - image_plane_transform, image_plane_shape = pm.imagePlane( - fileName=context["representation"]["data"]["path"], - camera=camera) - image_plane_shape.depth.set(image_plane_depth) - - - start_frame = pm.playbackOptions(q=True, min=True) - end_frame = pm.playbackOptions(q=True, max=True) - - image_plane_shape.frameOffset.set(0) - image_plane_shape.frameIn.set(start_frame) - 
image_plane_shape.frameOut.set(end_frame) - image_plane_shape.frameCache.set(end_frame) - image_plane_shape.useFrameExtension.set(1) + with namespaced(namespace): + # Create inside the namespace + image_plane_transform, image_plane_shape = cmds.imagePlane( + fileName=context["representation"]["data"]["path"], + camera=camera + ) + start_frame = cmds.playbackOptions(query=True, min=True) + end_frame = cmds.playbackOptions(query=True, max=True) + + for attr, value in { + "depth": image_plane_depth, + "frameOffset": 0, + "frameIn": start_frame, + "frameOut": end_frame, + "frameCache": end_frame, + "useFrameExtension": True + }.items(): + plug = "{}.{}".format(image_plane_shape, attr) + cmds.setAttr(plug, value) movie_representations = ["mov", "preview"] if context["representation"]["name"] in movie_representations: - # Need to get "type" by string, because its a method as well. - pm.Attribute(image_plane_shape + ".type").set(2) + cmds.setAttr(image_plane_shape + ".type", 2) # Ask user whether to use sequence or still image. if context["representation"]["name"] == "exr": # Ensure OpenEXRLoader plugin is loaded. - pm.loadPlugin("OpenEXRLoader.mll", quiet=True) + cmds.loadPlugin("OpenEXRLoader", quiet=True) message = ( "Hold image sequence on first frame?" 
@@ -161,32 +184,18 @@ def load(self, context, name, namespace, data, options=None): None, "Frame Hold.", message, - QtWidgets.QMessageBox.Ok, - QtWidgets.QMessageBox.Cancel + QtWidgets.QMessageBox.Yes, + QtWidgets.QMessageBox.No ) - if reply == QtWidgets.QMessageBox.Ok: - # find the input and output of frame extension - expressions = image_plane_shape.frameExtension.inputs() - frame_ext_output = image_plane_shape.frameExtension.outputs() - if expressions: - # the "time1" node is non-deletable attr - # in Maya, use disconnectAttr instead - pm.disconnectAttr(expressions, frame_ext_output) - - if not image_plane_shape.frameExtension.isFreeToChange(): - raise RuntimeError("Can't set frame extension for {}".format(image_plane_shape)) # noqa - # get the node of time instead and set the time for it. - image_plane_shape.frameExtension.set(start_frame) - - new_nodes.extend( - [ - image_plane_transform.longName().split("|")[-1], - image_plane_shape.longName().split("|")[-1] - ] - ) + if reply == QtWidgets.QMessageBox.Yes: + frame_extension_plug = "{}.frameExtension".format(image_plane_shape) # noqa - for node in new_nodes: - pm.rename(node, "{}:{}".format(namespace, node)) + # Remove current frame expression + disconnect_inputs(frame_extension_plug) + + cmds.setAttr(frame_extension_plug, start_frame) + + new_nodes = [image_plane_transform, image_plane_shape] return containerise( name=name, @@ -197,21 +206,19 @@ def load(self, context, name, namespace, data, options=None): ) def update(self, container, representation): - import pymel.core as pm - image_plane_shape = None - for node in pm.PyNode(container["objectName"]).members(): - if node.nodeType() == "imagePlane": - image_plane_shape = node - assert image_plane_shape is not None, "Image plane not found." + members = get_container_members(container) + image_planes = cmds.ls(members, type="imagePlane") + assert image_planes, "Image plane not found." 
+ image_plane_shape = image_planes[0] path = get_representation_path(representation) - image_plane_shape.imageName.set(path) - cmds.setAttr( - container["objectName"] + ".representation", - str(representation["_id"]), - type="string" - ) + cmds.setAttr("{}.imageName".format(image_plane_shape), + path, + type="string") + cmds.setAttr("{}.representation".format(container["objectName"]), + str(representation["_id"]), + type="string") # Set frame range. project_name = legacy_io.active_project() @@ -227,10 +234,14 @@ def update(self, container, representation): start_frame = asset["data"]["frameStart"] end_frame = asset["data"]["frameEnd"] - image_plane_shape.frameOffset.set(0) - image_plane_shape.frameIn.set(start_frame) - image_plane_shape.frameOut.set(end_frame) - image_plane_shape.frameCache.set(end_frame) + for attr, value in { + "frameOffset": 0, + "frameIn": start_frame, + "frameOut": end_frame, + "frameCache": end_frame + }.items(): + plug = "{}.{}".format(image_plane_shape, attr) + cmds.setAttr(plug, value) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index 262294c1971..a16cc846e32 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -1,11 +1,89 @@ import os +import difflib +import contextlib from maya import cmds from openpype.pipeline import registered_host from openpype.pipeline.create import CreateContext from openpype.settings import get_project_settings import openpype.hosts.maya.api.plugin -from openpype.hosts.maya.api.lib import maintained_selection +from openpype.hosts.maya.api.lib import ( + maintained_selection, + get_container_members, + parent_nodes +) + + +@contextlib.contextmanager +def preserve_modelpanel_cameras(container, log=None): + """Preserve camera members of container in the modelPanels. 
+ + This is used to ensure a camera remains in the modelPanels after updating + to a new version. + + """ + + # Get the modelPanels that used the old camera + members = get_container_members(container) + old_cameras = set(cmds.ls(members, type="camera", long=True)) + if not old_cameras: + # No need to manage anything + yield + return + + panel_cameras = {} + for panel in cmds.getPanel(type="modelPanel"): + cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True), + long=True) + + # Often but not always maya returns the transform from the + # modelPanel as opposed to the camera shape, so we convert it + # to explicitly be the camera shape + if cmds.nodeType(cam) != "camera": + cam = cmds.listRelatives(cam, + children=True, + fullPath=True, + type="camera")[0] + if cam in old_cameras: + panel_cameras[panel] = cam + + if not panel_cameras: + # No need to manage anything + yield + return + + try: + yield + finally: + new_members = get_container_members(container) + new_cameras = set(cmds.ls(new_members, type="camera", long=True)) + if not new_cameras: + return + + for panel, cam_name in panel_cameras.items(): + new_camera = None + if cam_name in new_cameras: + new_camera = cam_name + elif len(new_cameras) == 1: + new_camera = next(iter(new_cameras)) + else: + # Multiple cameras in the updated container but not an exact + # match detected by name. 
Find the closest match + matches = difflib.get_close_matches(word=cam_name, + possibilities=new_cameras, + n=1) + if matches: + new_camera = matches[0] # best match + if log: + log.info("Camera in '{}' restored with " + "closest match camera: {} (before: {})" + .format(panel, new_camera, cam_name)) + + if not new_camera: + # Unable to find the camera to re-apply in the modelpanel + continue + + cmds.modelPanel(panel, edit=True, camera=new_camera) class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): @@ -38,7 +116,6 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): def process_reference(self, context, name, namespace, options): import maya.cmds as cmds - import pymel.core as pm try: family = context["representation"]["context"]["family"] @@ -65,7 +142,10 @@ def process_reference(self, context, name, namespace, options): new_nodes = (list(set(nodes) - set(shapes))) - current_namespace = pm.namespaceInfo(currentNamespace=True) + # if there are cameras, try to lock their transforms + self._lock_camera_transforms(new_nodes) + + current_namespace = cmds.namespaceInfo(currentNamespace=True) if current_namespace != ":": group_name = current_namespace + ":" + group_name @@ -75,37 +155,29 @@ def process_reference(self, context, name, namespace, options): self[:] = new_nodes if attach_to_root: - group_node = pm.PyNode(group_name) - roots = set() - - for node in new_nodes: - try: - roots.add(pm.PyNode(node).getAllParents()[-2]) - except: # noqa: E722 - pass + roots = cmds.listRelatives(group_name, + children=True, + fullPath=True) or [] - if family not in ["layout", "setdress", - "mayaAscii", "mayaScene"]: - for root in roots: - root.setParent(world=True) + if family not in {"layout", "setdress", + "mayaAscii", "mayaScene"}: + # QUESTION Why do we need to exclude these families? 
+ with parent_nodes(roots, parent=None): + cmds.xform(group_name, zeroTransformPivots=True) - group_node.zeroTransformPivots() - for root in roots: - root.setParent(group_node) - - cmds.setAttr(group_name + ".displayHandle", 1) + cmds.setAttr("{}.displayHandle".format(group_name), 1) settings = get_project_settings(os.environ['AVALON_PROJECT']) colors = settings['maya']['load']['colors'] c = colors.get(family) if c is not None: - group_node.useOutlinerColor.set(1) - group_node.outlinerColor.set( - (float(c[0]) / 255), - (float(c[1]) / 255), - (float(c[2]) / 255)) + cmds.setAttr("{}.useOutlinerColor".format(group_name), 1) + cmds.setAttr("{}.outlinerColor".format(group_name), + (float(c[0]) / 255), + (float(c[1]) / 255), + (float(c[2]) / 255)) - cmds.setAttr(group_name + ".displayHandle", 1) + cmds.setAttr("{}.displayHandle".format(group_name), 1) # get bounding box bbox = cmds.exactWorldBoundingBox(group_name) # get pivot position on world space @@ -119,20 +191,30 @@ def process_reference(self, context, name, namespace, options): cy = cy + pivot[1] cz = cz + pivot[2] # set selection handle offset to center of bounding box - cmds.setAttr(group_name + ".selectHandleX", cx) - cmds.setAttr(group_name + ".selectHandleY", cy) - cmds.setAttr(group_name + ".selectHandleZ", cz) + cmds.setAttr("{}.selectHandleX".format(group_name), cx) + cmds.setAttr("{}.selectHandleY".format(group_name), cy) + cmds.setAttr("{}.selectHandleZ".format(group_name), cz) if family == "rig": self._post_process_rig(name, namespace, context, options) else: if "translate" in options: - cmds.setAttr(group_name + ".t", *options["translate"]) + cmds.setAttr("{}.translate".format(group_name), + *options["translate"]) return new_nodes def switch(self, container, representation): self.update(container, representation) + def update(self, container, representation): + with preserve_modelpanel_cameras(container, log=self.log): + super(ReferenceLoader, self).update(container, representation) + + # We also want 
to lock camera transforms on any new cameras in the + # reference or for a camera which might have changed names. + members = get_container_members(container) + self._lock_camera_transforms(members) + def _post_process_rig(self, name, namespace, context, options): output = next((node for node in self if @@ -163,3 +245,18 @@ def _post_process_rig(self, name, namespace, context, options): variant=namespace, pre_create_data={"use_selection": True} ) + + def _lock_camera_transforms(self, nodes): + cameras = cmds.ls(nodes, type="camera") + if not cameras: + return + + # Check the Maya version, lockTransform has been introduced since + # Maya 2016.5 Ext 2 + version = int(cmds.about(version=True)) + if version >= 2016: + for camera in cameras: + cmds.camera(camera, edit=True, lockTransform=True) + else: + self.log.warning("This version of Maya does not support locking of" + " transforms of cameras.") diff --git a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py index 0415808b7ae..0845f653b13 100644 --- a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py +++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -1,6 +1,7 @@ from maya import cmds import pyblish.api +from openpype.hosts.maya.api.lib import get_all_children class CollectArnoldSceneSource(pyblish.api.InstancePlugin): @@ -21,18 +22,21 @@ def process(self, instance): self.log.warning("Skipped empty instance: \"%s\" " % objset) continue if objset.endswith("content_SET"): - instance.data["setMembers"] = cmds.ls(members, long=True) - self.log.debug("content members: {}".format(members)) + members = cmds.ls(members, long=True) + children = get_all_children(members) + instance.data["contentMembers"] = children + self.log.debug("content members: {}".format(children)) elif objset.endswith("proxy_SET"): - instance.data["proxy"] = cmds.ls(members, long=True) - self.log.debug("proxy members: 
{}".format(members)) + set_members = get_all_children(cmds.ls(members, long=True)) + instance.data["proxy"] = set_members + self.log.debug("proxy members: {}".format(set_members)) # Use camera in object set if present else default to render globals # camera. cameras = cmds.ls(type="camera", long=True) renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] camera = renderable[0] - for node in instance.data["setMembers"]: + for node in instance.data["contentMembers"]: camera_shapes = cmds.listRelatives( node, shapes=True, type="camera" ) diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index b01160a1c05..287ddc228bb 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -556,7 +556,7 @@ def collect_attributes_changed(self, instance): continue if cmds.getAttr(attribute, type=True) == "message": continue - node_attributes[attr] = cmds.getAttr(attribute) + node_attributes[attr] = cmds.getAttr(attribute, asString=True) # Only include if there are any properties we care about if not node_attributes: continue diff --git a/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py b/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py index a7cb14855b2..33fc7a025f8 100644 --- a/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py @@ -255,7 +255,7 @@ class CollectMultiverseLookData(pyblish.api.InstancePlugin): Searches through the overrides finding all material overrides. From there it extracts the shading group and then finds all texture files in the shading group network. It also checks for mipmap versions of texture files - and adds them to the resouces to get published. + and adds them to the resources to get published. 
""" diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py index 548b1c996aa..0b039880023 100644 --- a/openpype/hosts/maya/plugins/publish/collect_review.py +++ b/openpype/hosts/maya/plugins/publish/collect_review.py @@ -1,10 +1,10 @@ from maya import cmds, mel -import pymel.core as pm import pyblish.api from openpype.client import get_subset_by_name -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, KnownPublishError +from openpype.hosts.maya.api.lib import get_attribute_input class CollectReview(pyblish.api.InstancePlugin): @@ -15,7 +15,6 @@ class CollectReview(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.3 label = 'Collect Review Data' families = ["review"] - legacy = True def process(self, instance): @@ -35,55 +34,67 @@ def process(self, instance): self.log.debug('members: {}'.format(members)) # validate required settings - assert len(cameras) == 1, "Not a single camera found in extraction" + if len(cameras) == 0: + raise KnownPublishError("No camera found in review " + "instance: {}".format(instance)) + elif len(cameras) > 2: + raise KnownPublishError( + "Only a single camera is allowed for a review instance but " + "more than one camera found in review instance: {}. 
" + "Cameras found: {}".format(instance, ", ".join(cameras))) + camera = cameras[0] self.log.debug('camera: {}'.format(camera)) - objectset = instance.context.data['objectsets'] - - reviewable_subset = None - reviewable_subset = list(set(members) & set(objectset)) - if reviewable_subset: - assert len(reviewable_subset) <= 1, "Multiple subsets for review" - self.log.debug('subset for review: {}'.format(reviewable_subset)) - - i = 0 - for inst in instance.context: - - self.log.debug('filtering {}'.format(inst)) - data = instance.context[i].data - - if inst.name != reviewable_subset[0]: - self.log.debug('subset name does not match {}'.format( - reviewable_subset[0])) - i += 1 - continue - - if data.get('families'): - data['families'].append('review') - else: - data['families'] = ['review'] - self.log.debug('adding review family to {}'.format( - reviewable_subset)) - data['review_camera'] = camera - # data["publish"] = False - data['frameStartFtrack'] = instance.data["frameStartHandle"] - data['frameEndFtrack'] = instance.data["frameEndHandle"] - data['frameStartHandle'] = instance.data["frameStartHandle"] - data['frameEndHandle'] = instance.data["frameEndHandle"] - data["frameStart"] = instance.data["frameStart"] - data["frameEnd"] = instance.data["frameEnd"] - data['handles'] = instance.data.get('handles', None) - data['step'] = instance.data['step'] - data['fps'] = instance.data['fps'] - data['review_width'] = instance.data['review_width'] - data['review_height'] = instance.data['review_height'] - data["isolate"] = instance.data["isolate"] - cmds.setAttr(str(instance) + '.active', 1) - self.log.debug('data {}'.format(instance.context[i].data)) - instance.context[i].data.update(data) - instance.data['remove'] = True - self.log.debug('isntance data {}'.format(instance.data)) + context = instance.context + objectset = context.data['objectsets'] + + reviewable_subsets = list(set(members) & set(objectset)) + if reviewable_subsets: + if len(reviewable_subsets) > 1: + 
raise KnownPublishError( + "Multiple attached subsets for review are not supported. " + "Attached: {}".format(", ".join(reviewable_subsets)) + ) + + reviewable_subset = reviewable_subsets[0] + self.log.debug( + "Subset attached to review: {}".format(reviewable_subset) + ) + + # Find the relevant publishing instance in the current context + reviewable_inst = next(inst for inst in context + if inst.name == reviewable_subset) + data = reviewable_inst.data + + self.log.debug( + 'Adding review family to {}'.format(reviewable_subset) + ) + if data.get('families'): + data['families'].append('review') + else: + data['families'] = ['review'] + + data['review_camera'] = camera + data['frameStartFtrack'] = instance.data["frameStartHandle"] + data['frameEndFtrack'] = instance.data["frameEndHandle"] + data['frameStartHandle'] = instance.data["frameStartHandle"] + data['frameEndHandle'] = instance.data["frameEndHandle"] + data["frameStart"] = instance.data["frameStart"] + data["frameEnd"] = instance.data["frameEnd"] + data['step'] = instance.data['step'] + data['fps'] = instance.data['fps'] + data['review_width'] = instance.data['review_width'] + data['review_height'] = instance.data['review_height'] + data["isolate"] = instance.data["isolate"] + data["panZoom"] = instance.data.get("panZoom", False) + data["panel"] = instance.data["panel"] + + # The review instance must be active + cmds.setAttr(str(instance) + '.active', 1) + + instance.data['remove'] = True + else: legacy_subset_name = task + 'Review' asset_doc = instance.context.data['assetEntity'] @@ -105,42 +116,59 @@ def process(self, instance): instance.data["frameEndHandle"] # make ftrack publishable - instance.data["families"] = ['ftrack'] + instance.data.setdefault("families", []).append('ftrack') cmds.setAttr(str(instance) + '.active', 1) # Collect audio playback_slider = mel.eval('$tmpVar=$gPlayBackSlider') - audio_name = cmds.timeControl(playback_slider, q=True, s=True) + audio_name = 
cmds.timeControl(playback_slider, + query=True, + sound=True) display_sounds = cmds.timeControl( - playback_slider, q=True, displaySound=True + playback_slider, query=True, displaySound=True ) - audio_nodes = [] + def get_audio_node_data(node): + return { + "offset": cmds.getAttr("{}.offset".format(node)), + "filename": cmds.getAttr("{}.filename".format(node)) + } + + audio_data = [] if audio_name: - audio_nodes.append(pm.PyNode(audio_name)) + audio_data.append(get_audio_node_data(audio_name)) - if not audio_name and display_sounds: - start_frame = int(pm.playbackOptions(q=True, min=True)) - end_frame = float(pm.playbackOptions(q=True, max=True)) - frame_range = range(int(start_frame), int(end_frame)) + elif display_sounds: + start_frame = int(cmds.playbackOptions(query=True, min=True)) + end_frame = int(cmds.playbackOptions(query=True, max=True)) - for node in pm.ls(type="audio"): + for node in cmds.ls(type="audio"): # Check if frame range and audio range intersections, # for whether to include this audio node or not. - start_audio = node.offset.get() - end_audio = node.offset.get() + node.duration.get() - audio_range = range(int(start_audio), int(end_audio)) - - if bool(set(frame_range).intersection(audio_range)): - audio_nodes.append(node) - - instance.data["audio"] = [] - for node in audio_nodes: - instance.data["audio"].append( - { - "offset": node.offset.get(), - "filename": node.filename.get() - } - ) + duration = cmds.getAttr("{}.duration".format(node)) + start_audio = cmds.getAttr("{}.offset".format(node)) + end_audio = start_audio + duration + + if start_audio <= end_frame and end_audio > start_frame: + audio_data.append(get_audio_node_data(node)) + + instance.data["audio"] = audio_data + + # Collect focal length. 
+ attr = camera + ".focalLength" + if get_attribute_input(attr): + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + 1 + focal_length = [ + cmds.getAttr(attr, time=t) for t in range(int(start), int(end)) + ] + else: + focal_length = cmds.getAttr(attr) + + key = "focalLength" + try: + instance.data["burninDataMembers"][key] = focal_length + except KeyError: + instance.data["burninDataMembers"] = {key: focal_length} diff --git a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py index 924ac58c403..14bcc71da6c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py +++ b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -1,12 +1,12 @@ import os +from collections import defaultdict +import json from maya import cmds import arnold from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import ( - maintained_selection, attribute_values, delete_after -) +from openpype.hosts.maya.api import lib class ExtractArnoldSceneSource(publish.Extractor): @@ -19,8 +19,7 @@ class ExtractArnoldSceneSource(publish.Extractor): def process(self, instance): staging_dir = self.staging_dir(instance) - filename = "{}.ass".format(instance.name) - file_path = os.path.join(staging_dir, filename) + file_path = os.path.join(staging_dir, "{}.ass".format(instance.name)) # Mask mask = arnold.AI_NODE_ALL @@ -71,8 +70,8 @@ def process(self, instance): "mask": mask } - filenames = self._extract( - instance.data["setMembers"], attribute_data, kwargs + filenames, nodes_by_id = self._extract( + instance.data["contentMembers"], attribute_data, kwargs ) if "representations" not in instance.data: @@ -88,6 +87,19 @@ def process(self, instance): instance.data["representations"].append(representation) + json_path = os.path.join(staging_dir, "{}.json".format(instance.name)) + with open(json_path, "w") as f: + json.dump(nodes_by_id, f) + + 
representation = { + "name": "json", + "ext": "json", + "files": os.path.basename(json_path), + "stagingDir": staging_dir + } + + instance.data["representations"].append(representation) + self.log.info( "Extracted instance {} to: {}".format(instance.name, staging_dir) ) @@ -97,7 +109,7 @@ def process(self, instance): return kwargs["filename"] = file_path.replace(".ass", "_proxy.ass") - filenames = self._extract( + filenames, _ = self._extract( instance.data["proxy"], attribute_data, kwargs ) @@ -113,34 +125,60 @@ def process(self, instance): instance.data["representations"].append(representation) def _extract(self, nodes, attribute_data, kwargs): - self.log.info("Writing: " + kwargs["filename"]) + self.log.info( + "Writing {} with:\n{}".format(kwargs["filename"], kwargs) + ) filenames = [] + nodes_by_id = defaultdict(list) # Duplicating nodes so they are direct children of the world. This # makes the hierarchy of any exported ass file the same. - with delete_after() as delete_bin: + with lib.delete_after() as delete_bin: duplicate_nodes = [] for node in nodes: + # Only interested in transforms: + if cmds.nodeType(node) != "transform": + continue + + # Only interested in transforms with shapes. + shapes = cmds.listRelatives( + node, shapes=True, noIntermediate=True + ) + if not shapes: + continue + duplicate_transform = cmds.duplicate(node)[0] - # Discard the children. 
- shapes = cmds.listRelatives(duplicate_transform, shapes=True) + if cmds.listRelatives(duplicate_transform, parent=True): + duplicate_transform = cmds.parent( + duplicate_transform, world=True + )[0] + + basename = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + duplicate_transform = cmds.rename( + duplicate_transform, basename + ) + + # Discard children nodes that are not shapes + shapes = cmds.listRelatives( + duplicate_transform, shapes=True, fullPath=True + ) children = cmds.listRelatives( - duplicate_transform, children=True + duplicate_transform, children=True, fullPath=True ) cmds.delete(set(children) - set(shapes)) - duplicate_transform = cmds.parent( - duplicate_transform, world=True - )[0] - - cmds.rename(duplicate_transform, node.split("|")[-1]) - duplicate_transform = "|" + node.split("|")[-1] - duplicate_nodes.append(duplicate_transform) + duplicate_nodes.extend(shapes) delete_bin.append(duplicate_transform) - with attribute_values(attribute_data): - with maintained_selection(): + # Copy cbId to mtoa_constant. + for node in duplicate_nodes: + # Converting Maya hierarchy separator "|" to Arnold + # separator "/". 
+ nodes_by_id[lib.get_id(node)].append(node.replace("|", "/")) + + with lib.attribute_values(attribute_data): + with lib.maintained_selection(): self.log.info( "Writing: {}".format(duplicate_nodes) ) @@ -157,4 +195,4 @@ def _extract(self, nodes, attribute_data, kwargs): self.log.info("Exported: {}".format(filenames)) - return filenames + return filenames, nodes_by_id diff --git a/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py b/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py new file mode 100644 index 00000000000..422f5ad019b --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py @@ -0,0 +1,65 @@ +import json + +from maya import cmds + +from openpype.pipeline import publish + + +class ExtractGPUCache(publish.Extractor): + """Extract the content of the instance to a GPU cache file.""" + + label = "GPU Cache" + hosts = ["maya"] + families = ["model", "animation", "pointcache"] + step = 1.0 + stepSave = 1 + optimize = True + optimizationThreshold = 40000 + optimizeAnimationsForMotionBlur = True + writeMaterials = True + useBaseTessellation = True + + def process(self, instance): + cmds.loadPlugin("gpuCache", quiet=True) + + staging_dir = self.staging_dir(instance) + filename = "{}_gpu_cache".format(instance.name) + + # Write out GPU cache file. 
+ kwargs = { + "directory": staging_dir, + "fileName": filename, + "saveMultipleFiles": False, + "simulationRate": self.step, + "sampleMultiplier": self.stepSave, + "optimize": self.optimize, + "optimizationThreshold": self.optimizationThreshold, + "optimizeAnimationsForMotionBlur": ( + self.optimizeAnimationsForMotionBlur + ), + "writeMaterials": self.writeMaterials, + "useBaseTessellation": self.useBaseTessellation + } + self.log.debug( + "Extract {} with:\n{}".format( + instance[:], json.dumps(kwargs, indent=4, sort_keys=True) + ) + ) + cmds.gpuCache(instance[:], **kwargs) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "gpu_cache", + "ext": "abc", + "files": filename + ".abc", + "stagingDir": staging_dir, + "outputName": "gpu_cache" + } + + instance.data["representations"].append(representation) + + self.log.info( + "Extracted instance {} to: {}".format(instance.name, staging_dir) + ) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index 447c9a615cc..93054e5fbb7 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -1,33 +1,42 @@ # -*- coding: utf-8 -*- """Maya look extractor.""" -import os +from abc import ABCMeta, abstractmethod +from collections import OrderedDict +import contextlib import json -import tempfile +import logging +import os import platform -import contextlib -from collections import OrderedDict - -from maya import cmds # noqa +import tempfile +import six +import attr import pyblish.api -from openpype.lib import source_hash, run_subprocess -from openpype.pipeline import legacy_io, publish +from maya import cmds # noqa + +from openpype.lib.vendor_bin_utils import find_executable +from openpype.lib import source_hash, run_subprocess, get_oiio_tools_path +from openpype.pipeline import legacy_io, publish, KnownPublishError from 
openpype.hosts.maya.api import lib -from openpype.hosts.maya.api.lib import image_info, guess_colorspace # Modes for transfer COPY = 1 HARDLINK = 2 -def _has_arnold(): - """Return whether the arnold package is available and can be imported.""" - try: - import arnold # noqa: F401 - return True - except (ImportError, ModuleNotFoundError): - return False +@attr.s +class TextureResult: + """The resulting texture of a processed file for a resource""" + # Path to the file + path = attr.ib() + # Colorspace of the resulting texture. This might not be the input + # colorspace of the texture if a TextureProcessor has processed the file. + colorspace = attr.ib() + # Hash generated for the texture using openpype.lib.source_hash + file_hash = attr.ib() + # The transfer mode, e.g. COPY or HARDLINK + transfer_mode = attr.ib() def find_paths_by_hash(texture_hash): @@ -46,61 +55,6 @@ def find_paths_by_hash(texture_hash): return legacy_io.distinct(key, {"type": "version"}) -def maketx(source, destination, args, logger): - """Make `.tx` using `maketx` with some default settings. - - The settings are based on default as used in Arnold's - txManager in the scene. - This function requires the `maketx` executable to be - on the `PATH`. - - Args: - source (str): Path to source file. - destination (str): Writing destination path. - args (list): Additional arguments for `maketx`. - logger (logging.Logger): Logger to log messages to. - - Returns: - str: Output of `maketx` command. 
- - """ - from openpype.lib import get_oiio_tools_path - - maketx_path = get_oiio_tools_path("maketx") - - if not maketx_path: - print( - "OIIO tool not found in {}".format(maketx_path)) - raise AssertionError("OIIO tool not found") - - subprocess_args = [ - maketx_path, - "-v", # verbose - "-u", # update mode - # unpremultiply before conversion (recommended when alpha present) - "--unpremult", - "--checknan", - # use oiio-optimized settings for tile-size, planarconfig, metadata - "--oiio", - "--filter", "lanczos3", - source - ] - - subprocess_args.extend(args) - subprocess_args.extend(["-o", destination]) - - cmd = " ".join(subprocess_args) - logger.debug(cmd) - - try: - out = run_subprocess(subprocess_args) - except Exception: - logger.error("Maketx converion failed", exc_info=True) - raise - - return out - - @contextlib.contextmanager def no_workspace_dir(): """Force maya to a fake temporary workspace directory. @@ -133,6 +87,303 @@ def no_workspace_dir(): os.rmdir(fake_workspace_dir) +@six.add_metaclass(ABCMeta) +class TextureProcessor: + + extension = None + + def __init__(self, log=None): + if log is None: + log = logging.getLogger(self.__class__.__name__) + self.log = log + + def apply_settings(self, system_settings, project_settings): + """Apply OpenPype system/project settings to the TextureProcessor + + Args: + system_settings (dict): OpenPype system settings + project_settings (dict): OpenPype project settings + + Returns: + None + + """ + pass + + @abstractmethod + def process(self, + source, + colorspace, + color_management, + staging_dir): + """Process the `source` texture. + + Must be implemented on inherited class. + + This must always return a TextureResult even when it does not generate + a texture. If it doesn't generate a texture then it should return a + TextureResult using the input path and colorspace. + + Args: + source (str): Path to source file. + colorspace (str): Colorspace of the source file. 
+ color_management (dict): Maya Color management data from + `lib.get_color_management_preferences` + staging_dir (str): Output directory to write to. + + Returns: + TextureResult: The resulting texture information. + + """ + pass + + def __repr__(self): + # Log instance as class name + return self.__class__.__name__ + + +class MakeRSTexBin(TextureProcessor): + """Make `.rstexbin` using `redshiftTextureProcessor`""" + + extension = ".rstexbin" + + def process(self, + source, + colorspace, + color_management, + staging_dir): + + texture_processor_path = self.get_redshift_tool( + "redshiftTextureProcessor" + ) + if not texture_processor_path: + raise KnownPublishError("Must have Redshift available.") + + subprocess_args = [ + texture_processor_path, + source + ] + + hash_args = ["rstex"] + texture_hash = source_hash(source, *hash_args) + + # Redshift stores the output texture next to the input but with + # the extension replaced to `.rstexbin` + basename, ext = os.path.splitext(source) + destination = "{}{}".format(basename, self.extension) + + self.log.debug(" ".join(subprocess_args)) + try: + run_subprocess(subprocess_args) + except Exception: + self.log.error("Texture .rstexbin conversion failed", + exc_info=True) + raise + + return TextureResult( + path=destination, + file_hash=texture_hash, + colorspace=colorspace, + transfer_mode=COPY + ) + + @staticmethod + def get_redshift_tool(tool_name): + """Path to redshift texture processor. + + On Windows it adds .exe extension if missing from tool argument. + + Args: + tool_name (string): Tool name. + + Returns: + str: Full path to redshift texture processor executable. + """ + if "REDSHIFT_COREDATAPATH" not in os.environ: + raise RuntimeError("Must have Redshift available.") + + redshift_tool_path = os.path.join( + os.environ["REDSHIFT_COREDATAPATH"], + "bin", + tool_name + ) + + return find_executable(redshift_tool_path) + + +class MakeTX(TextureProcessor): + """Make `.tx` using `maketx` with some default settings. 
+ + Some hardcoded arguments passed to `maketx` are based on the defaults used + in Arnold's txManager tool. + + """ + + extension = ".tx" + + def __init__(self, log=None): + super(MakeTX, self).__init__(log=log) + self.extra_args = [] + + def apply_settings(self, system_settings, project_settings): + # Allow extra maketx arguments from project settings + args_settings = ( + project_settings["maya"]["publish"] + .get("ExtractLook", {}).get("maketx_arguments", []) + ) + extra_args = [] + for arg_data in args_settings: + argument = arg_data["argument"] + parameters = arg_data["parameters"] + if not argument: + self.log.debug("Ignoring empty parameter from " + "`maketx_arguments` setting..") + continue + + extra_args.append(argument) + extra_args.extend(parameters) + + self.extra_args = extra_args + + def process(self, + source, + colorspace, + color_management, + staging_dir): + """Process the texture. + + This function requires the `maketx` executable to be available in an + OpenImageIO toolset detectable by OpenPype. + + Args: + source (str): Path to source file. + colorspace (str): Colorspace of the source file. + color_management (dict): Maya Color management data from + `lib.get_color_management_preferences` + staging_dir (str): Output directory to write to. + + Returns: + TextureResult: The resulting texture information. + + """ + + maketx_path = get_oiio_tools_path("maketx") + + if not maketx_path: + raise AssertionError( + "OIIO 'maketx' tool not found. Result: {}".format(maketx_path) + ) + + # Define .tx filepath in staging if source file is not .tx + fname, ext = os.path.splitext(os.path.basename(source)) + if ext == ".tx": + # Do nothing if the source file is already a .tx file. + return TextureResult( + path=source, + file_hash=None, # todo: unknown texture hash? 
+ colorspace=colorspace, + transfer_mode=COPY + ) + + # Hardcoded default arguments for maketx conversion based on Arnold's + # txManager in Maya + args = [ + # unpremultiply before conversion (recommended when alpha present) + "--unpremult", + # use oiio-optimized settings for tile-size, planarconfig, metadata + "--oiio", + "--filter", "lanczos3", + ] + if color_management["enabled"]: + config_path = color_management["config"] + if not os.path.exists(config_path): + raise RuntimeError("OCIO config not found at: " + "{}".format(config_path)) + + render_colorspace = color_management["rendering_space"] + + self.log.info("tx: converting colorspace {0} " + "-> {1}".format(colorspace, + render_colorspace)) + args.extend(["--colorconvert", colorspace, render_colorspace]) + args.extend(["--colorconfig", config_path]) + + else: + # Maya Color management is disabled. We cannot rely on an OCIO + self.log.debug("tx: Maya color management is disabled. No color " + "conversion will be applied to .tx conversion for: " + "{}".format(source)) + # Assume linear + render_colorspace = "linear" + + # Note: The texture hash is only reliable if we include any potential + # conversion arguments provide to e.g. `maketx` + hash_args = ["maketx"] + args + self.extra_args + texture_hash = source_hash(source, *hash_args) + + # Ensure folder exists + resources_dir = os.path.join(staging_dir, "resources") + if not os.path.exists(resources_dir): + os.makedirs(resources_dir) + + self.log.info("Generating .tx file for %s .." % source) + + subprocess_args = [ + maketx_path, + "-v", # verbose + "-u", # update mode + # --checknan doesn't influence the output file but aborts the + # conversion if it finds any. 
So we can avoid it for the file hash + "--checknan", + source + ] + + subprocess_args.extend(args) + if self.extra_args: + subprocess_args.extend(self.extra_args) + + # Add source hash attribute after other arguments for log readability + # Note: argument is excluded from the hash since it is the hash itself + subprocess_args.extend([ + "--sattrib", + "sourceHash", + texture_hash + ]) + + destination = os.path.join(resources_dir, fname + ".tx") + subprocess_args.extend(["-o", destination]) + + # We want to make sure we are explicit about what OCIO config gets + # used. So when we supply no --colorconfig flag that no fallback to + # an OCIO env var occurs. + env = os.environ.copy() + env.pop("OCIO", None) + + self.log.debug(" ".join(subprocess_args)) + try: + run_subprocess(subprocess_args, env=env) + except Exception: + self.log.error("Texture maketx conversion failed", + exc_info=True) + raise + + return TextureResult( + path=destination, + file_hash=texture_hash, + colorspace=render_colorspace, + transfer_mode=COPY + ) + + @staticmethod + def _has_arnold(): + """Return whether the arnold package is available and importable.""" + try: + import arnold # noqa: F401 + return True + except (ImportError, ModuleNotFoundError): + return False + + class ExtractLook(publish.Extractor): """Extract Look (Maya Scene + JSON) @@ -149,22 +400,6 @@ class ExtractLook(publish.Extractor): scene_type = "ma" look_data_type = "json" - @staticmethod - def get_renderer_name(): - """Get renderer name from Maya. - - Returns: - str: Renderer name. - - """ - renderer = cmds.getAttr( - "defaultRenderGlobals.currentRenderer" - ).lower() - # handle various renderman names - if renderer.startswith("renderman"): - renderer = "renderman" - return renderer - def get_maya_scene_type(self, instance): """Get Maya scene type from settings. 
@@ -204,16 +439,12 @@ def process(self, instance): dir_path = self.staging_dir(instance) maya_fname = "{0}.{1}".format(instance.name, self.scene_type) json_fname = "{0}.{1}".format(instance.name, self.look_data_type) - - # Make texture dump folder maya_path = os.path.join(dir_path, maya_fname) json_path = os.path.join(dir_path, json_fname) - self.log.info("Performing extraction..") - # Remove all members of the sets so they are not included in the # exported file by accident - self.log.info("Extract sets (%s) ..." % _scene_type) + self.log.info("Processing sets..") lookdata = instance.data["lookData"] relationships = lookdata["relationships"] sets = list(relationships.keys()) @@ -221,13 +452,36 @@ def process(self, instance): self.log.info("No sets found") return - results = self.process_resources(instance, staging_dir=dir_path) + # Specify texture processing executables to activate + # TODO: Load these more dynamically once we support more processors + processors = [] + context = instance.context + for key, Processor in { + # Instance data key to texture processor mapping + "maketx": MakeTX, + "rstex": MakeRSTexBin + }.items(): + if instance.data.get(key, False): + processor = Processor() + processor.apply_settings(context.data["system_settings"], + context.data["project_settings"]) + processors.append(processor) + + if processors: + self.log.debug("Collected texture processors: " + "{}".format(processors)) + + self.log.debug("Processing resources..") + results = self.process_resources(instance, + staging_dir=dir_path, + processors=processors) transfers = results["fileTransfers"] hardlinks = results["fileHardlinks"] hashes = results["fileHashes"] remap = results["attrRemap"] # Extract in correct render layer + self.log.info("Extracting look maya scene file: {}".format(maya_path)) layer = instance.data.get("renderlayer", "defaultRenderLayer") with lib.renderlayer(layer): # TODO: Ensure membership edits don't become renderlayer overrides @@ -235,7 +489,7 @@ def 
process(self, instance): # To avoid Maya trying to automatically remap the file # textures relative to the `workspace -directory` we force # it to a fake temporary workspace. This fixes textures - # getting incorrectly remapped. (LKD-17, PLN-101) + # getting incorrectly remapped. with no_workspace_dir(): with lib.attribute_values(remap): with lib.maintained_selection(): @@ -299,40 +553,38 @@ def process(self, instance): # Source hash for the textures instance.data["sourceHashes"] = hashes - """ - self.log.info("Returning colorspaces to their original values ...") - for attr, value in remap.items(): - self.log.info(" - {}: {}".format(attr, value)) - cmds.setAttr(attr, value, type="string") - """ self.log.info("Extracted instance '%s' to: %s" % (instance.name, maya_path)) - def process_resources(self, instance, staging_dir): + def _set_resource_result_colorspace(self, resource, colorspace): + """Update resource resulting colorspace after texture processing""" + if "result_color_space" in resource: + if resource["result_color_space"] == colorspace: + return - # Extract the textures to transfer, possibly convert with maketx and - # remap the node paths to the destination path. Note that a source - # might be included more than once amongst the resources as they could - # be the input file to multiple nodes. - resources = instance.data["resources"] - do_maketx = instance.data.get("maketx", False) + self.log.warning( + "Resource already has a resulting colorspace but is now " + "being overridden to a new one: {} -> {}".format( + resource["result_color_space"], colorspace + ) + ) + resource["result_color_space"] = colorspace - # Collect all unique files used in the resources - files_metadata = {} - for resource in resources: - # Preserve color space values (force value after filepath change) - # This will also trigger in the same order at end of context to - # ensure after context it's still the original value. 
- color_space = resource.get("color_space") + def process_resources(self, instance, staging_dir, processors): + """Process all resources in the instance. - for f in resource["files"]: - files_metadata[os.path.normpath(f)] = { - "color_space": color_space} + It is assumed that all resources are nodes using file textures. + + Extract the textures to transfer, possibly convert with maketx and + remap the node paths to the destination path. Note that a source + might be included more than once amongst the resources as they could + be the input file to multiple nodes. + + """ + + resources = instance.data["resources"] + color_management = lib.get_color_management_preferences() - # Process the resource files - transfers = [] - hardlinks = [] - hashes = {} # Temporary fix to NOT create hardlinks on windows machines if platform.system().lower() == "windows": self.log.info( @@ -342,95 +594,114 @@ def process_resources(self, instance, staging_dir): else: force_copy = instance.data.get("forceCopy", False) - for filepath in files_metadata: - - linearize = False - # if OCIO color management enabled - # it won't take the condition of the files_metadata + destinations_cache = {} - ocio_maya = cmds.colorManagementPrefs(q=True, - cmConfigFileEnabled=True, - cmEnabled=True) + def get_resource_destination_cached(path): + """Get resource destination with cached result per filepath""" + if path not in destinations_cache: + destination = self.get_resource_destination( + path, instance.data["resourcesDir"], processors) + destinations_cache[path] = destination + return destinations_cache[path] - if do_maketx and not ocio_maya: - if files_metadata[filepath]["color_space"].lower() == "srgb": # noqa: E501 - linearize = True - # set its file node to 'raw' as tx will be linearized - files_metadata[filepath]["color_space"] = "Raw" + # Process all resource's individual files + processed_files = {} + transfers = [] + hardlinks = [] + hashes = {} + remap = OrderedDict() + for resource in 
resources: + colorspace = resource["color_space"] + + for filepath in resource["files"]: + filepath = os.path.normpath(filepath) + + if filepath in processed_files: + # The file was already processed, likely due to usage by + # another resource in the scene. We confirm here it + # didn't do color spaces different than the current + # resource. + processed_file = processed_files[filepath] + self.log.debug( + "File was already processed. Likely used by another " + "resource too: {}".format(filepath) + ) + + if colorspace != processed_file["color_space"]: + self.log.warning( + "File '{}' was already processed using colorspace " + "'{}' instead of the current resource's " + "colorspace '{}'. The already processed texture " + "result's colorspace '{}' will be used." + "".format(filepath, + colorspace, + processed_file["color_space"], + processed_file["result_color_space"])) + + self._set_resource_result_colorspace( + resource, + colorspace=processed_file["result_color_space"] + ) + continue + + texture_result = self._process_texture( + filepath, + processors=processors, + staging_dir=staging_dir, + force_copy=force_copy, + color_management=color_management, + colorspace=colorspace + ) - # if do_maketx: - # color_space = "Raw" + # Set the resulting color space on the resource + self._set_resource_result_colorspace( + resource, colorspace=texture_result.colorspace + ) - source, mode, texture_hash = self._process_texture( - filepath, - resource, - do_maketx, - staging=staging_dir, - linearize=linearize, - force=force_copy + processed_files[filepath] = { + "color_space": colorspace, + "result_color_space": texture_result.colorspace, + } + + source = texture_result.path + destination = get_resource_destination_cached(source) + if force_copy or texture_result.transfer_mode == COPY: + transfers.append((source, destination)) + self.log.info('file will be copied {} -> {}'.format( + source, destination)) + elif texture_result.transfer_mode == HARDLINK: + hardlinks.append((source, 
destination)) + self.log.info('file will be hardlinked {} -> {}'.format( + source, destination)) + + # Store the hashes from hash to destination to include in the + # database + hashes[texture_result.file_hash] = destination + + # Set up remapping attributes for the node during the publish + # The order of these can be important if one attribute directly + # affects another, e.g. we set colorspace after filepath because + # maya sometimes tries to guess the colorspace when changing + # filepaths (which is avoidable, but we don't want to have those + # attributes changed in the resulting publish) + # Remap filepath to publish destination + # TODO It would be much better if we could use the destination path + # from the actual processed texture results, but since the + # attribute will need to preserve tokens like , etc for + # now we will define the output path from the attribute value + # including the tokens to persist them. + filepath_attr = resource["attribute"] + remap[filepath_attr] = get_resource_destination_cached( + resource["source"] ) - destination = self.resource_destination(instance, - source, - do_maketx) - - # Force copy is specified. 
- if force_copy: - mode = COPY - - if mode == COPY: - transfers.append((source, destination)) - self.log.info('file will be copied {} -> {}'.format( - source, destination)) - elif mode == HARDLINK: - hardlinks.append((source, destination)) - self.log.info('file will be hardlinked {} -> {}'.format( - source, destination)) - - # Store the hashes from hash to destination to include in the - # database - hashes[texture_hash] = destination - - # Remap the resources to the destination path (change node attributes) - destinations = {} - remap = OrderedDict() # needs to be ordered, see color space values - for resource in resources: - source = os.path.normpath(resource["source"]) - if source not in destinations: - # Cache destination as source resource might be included - # multiple times - destinations[source] = self.resource_destination( - instance, source, do_maketx - ) # Preserve color space values (force value after filepath change) # This will also trigger in the same order at end of context to # ensure after context it's still the original value. 
- color_space_attr = resource["node"] + ".colorSpace" - try: - color_space = cmds.getAttr(color_space_attr) - except ValueError: - # node doesn't have color space attribute - color_space = "Raw" - else: - # get the resolved files - metadata = files_metadata.get(source) - # if the files are unresolved from `source` - # assume color space from the first file of - # the resource - if not metadata: - first_file = next(iter(resource.get( - "files", [])), None) - if not first_file: - continue - first_filepath = os.path.normpath(first_file) - metadata = files_metadata[first_filepath] - if metadata["color_space"] == "Raw": - # set color space to raw if we linearized it - color_space = "Raw" - # Remap file node filename to destination - remap[color_space_attr] = color_space - attr = resource["attribute"] - remap[attr] = destinations[source] + node = resource["node"] + if cmds.attributeQuery("colorSpace", node=node, exists=True): + color_space_attr = "{}.colorSpace".format(node) + remap[color_space_attr] = resource["result_color_space"] self.log.info("Finished remapping destinations ...") @@ -441,134 +712,131 @@ def process_resources(self, instance, staging_dir): "attrRemap": remap, } - def resource_destination(self, instance, filepath, do_maketx): + def get_resource_destination(self, filepath, resources_dir, processors): """Get resource destination path. This is utility function to change path if resource file name is changed by some external tool like `maketx`. Args: - instance: Current Instance. - filepath (str): Resource path - do_maketx (bool): Flag if resource is processed by `maketx`. + filepath (str): Resource source path + resources_dir (str): Destination dir for resources in publish. + processors (list): Texture processors converting resource. 
Returns: str: Path to resource file """ - resources_dir = instance.data["resourcesDir"] - # Compute destination location basename, ext = os.path.splitext(os.path.basename(filepath)) - # If `maketx` then the texture will always end with .tx - if do_maketx: - ext = ".tx" + # Get extension from the last processor + for processor in reversed(processors): + processor_ext = processor.extension + if processor_ext and ext != processor_ext: + self.log.debug("Processor {} overrides extension to '{}' " + "for path: {}".format(processor, + processor_ext, + filepath)) + ext = processor_ext + break return os.path.join( resources_dir, basename + ext ) - def _process_texture(self, filepath, resource, - do_maketx, staging, linearize, force): - """Process a single texture file on disk for publishing. - This will: - 1. Check whether it's already published, if so it will do hardlink - 2. If not published and maketx is enabled, generate a new .tx file. - 3. Compute the destination path for the source file. - Args: - filepath (str): The source file path to process. 
- do_maketx (bool): Whether to produce a .tx file - Returns: - """ - - fname, ext = os.path.splitext(os.path.basename(filepath)) - - args = [] - if do_maketx: - args.append("maketx") - texture_hash = source_hash(filepath, *args) + def _get_existing_hashed_texture(self, texture_hash): + """Return the first found filepath from a texture hash""" # If source has been published before with the same settings, # then don't reprocess but hardlink from the original existing = find_paths_by_hash(texture_hash) - if existing and not force: - self.log.info("Found hash in database, preparing hardlink..") + if existing: source = next((p for p in existing if os.path.exists(p)), None) if source: - return source, HARDLINK, texture_hash + return source else: self.log.warning( - ("Paths not found on disk, " - "skipping hardlink: %s") % (existing,) + "Paths not found on disk, " + "skipping hardlink: {}".format(existing) ) - if do_maketx and ext != ".tx": - # Produce .tx file in staging if source file is not .tx - converted = os.path.join(staging, "resources", fname + ".tx") - additional_args = [ - "--sattrib", - "sourceHash", - texture_hash - ] - if linearize: - if cmds.colorManagementPrefs(query=True, cmEnabled=True): - render_colorspace = cmds.colorManagementPrefs(query=True, - renderingSpaceName=True) # noqa - config_path = cmds.colorManagementPrefs(query=True, - configFilePath=True) # noqa - if not os.path.exists(config_path): - raise RuntimeError("No OCIO config path found!") - - color_space_attr = resource["node"] + ".colorSpace" - try: - color_space = cmds.getAttr(color_space_attr) - except ValueError: - # node doesn't have color space attribute - if _has_arnold(): - img_info = image_info(filepath) - color_space = guess_colorspace(img_info) - else: - color_space = "Raw" - self.log.info("tx: converting {0} -> {1}".format(color_space, render_colorspace)) # noqa - - additional_args.extend(["--colorconvert", - color_space, - render_colorspace]) - else: - - if _has_arnold(): - 
img_info = image_info(filepath) - color_space = guess_colorspace(img_info) - if color_space == "sRGB": - self.log.info("tx: converting sRGB -> linear") - additional_args.extend(["--colorconvert", - "sRGB", - "Raw"]) - else: - self.log.info("tx: texture's colorspace " - "is already linear") - else: - self.log.warning("cannot guess the colorspace" - "color conversion won't be available!") # noqa - - - additional_args.extend(["--colorconfig", config_path]) - # Ensure folder exists - if not os.path.exists(os.path.dirname(converted)): - os.makedirs(os.path.dirname(converted)) - - self.log.info("Generating .tx file for %s .." % filepath) - maketx( - filepath, - converted, - additional_args, - self.log + def _process_texture(self, + filepath, + processors, + staging_dir, + force_copy, + color_management, + colorspace): + """Process a single texture file on disk for publishing. + + This will: + 1. Check whether it's already published, if so it will do hardlink + (if the texture hash is found and force copy is not enabled) + 2. It will process the texture using the supplied texture + processors like MakeTX and MakeRSTexBin if enabled. + 3. Compute the destination path for the source file. + + Args: + filepath (str): The source file path to process. + processors (list): List of TextureProcessor processing the texture + staging_dir (str): The staging directory to write to. + force_copy (bool): Whether to force a copy even if a file hash + might have existed already in the project, otherwise + hardlinking the existing file is allowed. + color_management (dict): Maya's Color Management settings from + `lib.get_color_management_preferences` + colorspace (str): The source colorspace of the resources this + texture belongs to. + + Returns: + TextureResult: The texture result information. + """ + + if len(processors) > 1: + raise KnownPublishError( + "More than one texture processor not supported. 
" "Current processors enabled: {}".format(processors) ) + for processor in processors: + self.log.debug("Processing texture {} with processor {}".format( + filepath, processor + )) + + processed_result = processor.process(filepath, + colorspace, + color_management, + staging_dir) + if not processed_result: + raise RuntimeError("Texture Processor {} returned " + "no result.".format(processor)) + self.log.info("Generated processed " + "texture: {}".format(processed_result.path)) + + # TODO: Currently all processors force copy instead of allowing + # hardlinks using source hashes. This should be refactored + return processed_result + + # No texture processing for this file + texture_hash = source_hash(filepath) + if not force_copy: + existing = self._get_existing_hashed_texture(texture_hash) + if existing: + self.log.info("Found hash in database, preparing hardlink..") + return TextureResult( + path=filepath, + file_hash=texture_hash, + colorspace=colorspace, + transfer_mode=HARDLINK + ) - return filepath, COPY, texture_hash + return TextureResult( + path=filepath, + file_hash=texture_hash, + colorspace=colorspace, + transfer_mode=COPY + ) class ExtractModelRenderSets(ExtractLook): diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py index 0628623e887..cf610ac6b42 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py @@ -102,7 +102,7 @@ def process(self, instance): long=True) self.log.info("Collected object {}".format(members)) - # TODO: Deal with asset, composition, overide with options. + # TODO: Deal with asset, composition, override with options.
import multiverse time_opts = None diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 94571ff7312..0f3425a1deb 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -1,5 +1,6 @@ import os import json +import contextlib import clique import capture @@ -8,7 +9,16 @@ from openpype.hosts.maya.api import lib from maya import cmds -import pymel.core as pm + + +@contextlib.contextmanager +def panel_camera(panel, camera): + original_camera = cmds.modelPanel(panel, query=True, camera=True) + try: + cmds.modelPanel(panel, edit=True, camera=camera) + yield + finally: + cmds.modelPanel(panel, edit=True, camera=original_camera) class ExtractPlayblast(publish.Extractor): @@ -25,6 +35,16 @@ class ExtractPlayblast(publish.Extractor): optional = True capture_preset = {} + def _capture(self, preset): + self.log.info( + "Using preset:\n{}".format( + json.dumps(preset, sort_keys=True, indent=4) + ) + ) + + path = capture.capture(log=self.log, **preset) + self.log.debug("playblast path {}".format(path)) + def process(self, instance): self.log.info("Extracting capture..") @@ -43,7 +63,7 @@ def process(self, instance): self.log.info("start: {}, end: {}".format(start, end)) # get cameras - camera = instance.data['review_camera'] + camera = instance.data["review_camera"] preset = lib.load_capture_preset(data=self.capture_preset) # Grab capture presets from the project settings @@ -57,23 +77,23 @@ def process(self, instance): asset_height = asset_data.get("resolutionHeight") review_instance_width = instance.data.get("review_width") review_instance_height = instance.data.get("review_height") - preset['camera'] = camera + preset["camera"] = camera # Tests if project resolution is set, # if it is a value other than zero, that value is # used, if not then the asset resolution is # used if review_instance_width and 
review_instance_height: - preset['width'] = review_instance_width - preset['height'] = review_instance_height + preset["width"] = review_instance_width + preset["height"] = review_instance_height elif width_preset and height_preset: - preset['width'] = width_preset - preset['height'] = height_preset + preset["width"] = width_preset + preset["height"] = height_preset elif asset_width and asset_height: - preset['width'] = asset_width - preset['height'] = asset_height - preset['start_frame'] = start - preset['end_frame'] = end + preset["width"] = asset_width + preset["height"] = asset_height + preset["start_frame"] = start + preset["end_frame"] = end # Enforce persisting camera depth of field camera_options = preset.setdefault("camera_options", {}) @@ -86,14 +106,14 @@ def process(self, instance): self.log.info("Outputting images to %s" % path) - preset['filename'] = path - preset['overwrite'] = True + preset["filename"] = path + preset["overwrite"] = True - pm.refresh(f=True) + cmds.refresh(force=True) - refreshFrameInt = int(pm.playbackOptions(q=True, minTime=True)) - pm.currentTime(refreshFrameInt - 1, edit=True) - pm.currentTime(refreshFrameInt, edit=True) + refreshFrameInt = int(cmds.playbackOptions(q=True, minTime=True)) + cmds.currentTime(refreshFrameInt - 1, edit=True) + cmds.currentTime(refreshFrameInt, edit=True) # Override transparency if requested. transparency = instance.data.get("transparency", 0) @@ -114,7 +134,8 @@ def process(self, instance): # Disable Pan/Zoom. pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"])) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False) + preset.pop("pan_zoom", None) + preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"] # Need to explicitly enable some viewport changes so the viewport is # refreshed ahead of playblasting. 
@@ -136,30 +157,39 @@ def process(self, instance): ) override_viewport_options = ( - capture_presets['Viewport Options']['override_viewport_options'] + capture_presets["Viewport Options"]["override_viewport_options"] ) - with lib.maintained_time(): - filename = preset.get("filename", "%TEMP%") - - # Force viewer to False in call to capture because we have our own - # viewer opening call to allow a signal to trigger between - # playblast and viewer - preset['viewer'] = False - - # Update preset with current panel setting - # if override_viewport_options is turned off - if not override_viewport_options: - panel_preset = capture.parse_view(instance.data["panel"]) - panel_preset.pop("camera") - preset.update(panel_preset) - - self.log.info( - "Using preset:\n{}".format( - json.dumps(preset, sort_keys=True, indent=4) + + # Force viewer to False in call to capture because we have our own + # viewer opening call to allow a signal to trigger between + # playblast and viewer + preset["viewer"] = False + + # Update preset with current panel setting + # if override_viewport_options is turned off + if not override_viewport_options: + panel_preset = capture.parse_view(instance.data["panel"]) + panel_preset.pop("camera") + preset.update(panel_preset) + + # Need to ensure Python 2 compatibility. + # TODO: Remove once dropping Python 2. + if getattr(contextlib, "nested", None): + # Python 2 compatibility (contextlib.nested only exists there). + with contextlib.nested( + lib.maintained_time(), + panel_camera(instance.data["panel"], preset["camera"]) + ): + self._capture(preset) + else: + # Python 3 compatibility. + with contextlib.ExitStack() as stack: + stack.enter_context(lib.maintained_time()) + stack.enter_context( + panel_camera(instance.data["panel"], preset["camera"]) ) - ) - path = capture.capture(log=self.log, **preset) # Restoring viewport options.
if viewport_defaults: @@ -169,18 +199,17 @@ def process(self, instance): cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom) - self.log.debug("playblast path {}".format(path)) - collected_files = os.listdir(stagingdir) patterns = [clique.PATTERNS["frames"]] collections, remainder = clique.assemble(collected_files, minimum_items=1, patterns=patterns) + filename = preset.get("filename", "%TEMP%") self.log.debug("filename {}".format(filename)) frame_collection = None for collection in collections: - filebase = collection.format('{head}').rstrip(".") + filebase = collection.format("{head}").rstrip(".") self.log.debug("collection head {}".format(filebase)) if filebase in filename: frame_collection = collection @@ -196,7 +225,7 @@ def process(self, instance): tags.append("delete") # Add camera node name to representation data - camera_node_name = pm.ls(camera)[0].getTransform().name() + camera_node_name = cmds.listRelatives(camera, parent=True)[0] collected_files = list(frame_collection) # single frame file shouldn't be in list, only as a string @@ -204,15 +233,14 @@ def process(self, instance): collected_files = collected_files[0] representation = { - 'name': 'png', - 'ext': 'png', - 'files': collected_files, + "name": self.capture_preset["Codec"]["compression"], + "ext": self.capture_preset["Codec"]["compression"], + "files": collected_files, "stagingDir": stagingdir, "frameStart": start, "frameEnd": end, - 'fps': fps, - 'preview': True, - 'tags': tags, - 'camera_name': camera_node_name + "fps": fps, + "tags": tags, + "camera_name": camera_node_name } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 1d94bd58c56..b4ed8dce4c1 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -8,7 +8,6 @@ from openpype.hosts.maya.api import lib 
from maya import cmds -import pymel.core as pm class ExtractThumbnail(publish.Extractor): @@ -26,28 +25,28 @@ class ExtractThumbnail(publish.Extractor): def process(self, instance): self.log.info("Extracting capture..") - camera = instance.data['review_camera'] + camera = instance.data["review_camera"] - capture_preset = ( - instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast']['capture_preset'] - ) + maya_setting = instance.context.data["project_settings"]["maya"] + plugin_setting = maya_setting["publish"]["ExtractPlayblast"] + capture_preset = plugin_setting["capture_preset"] override_viewport_options = ( - capture_preset['Viewport Options']['override_viewport_options'] + capture_preset["Viewport Options"]["override_viewport_options"] ) try: preset = lib.load_capture_preset(data=capture_preset) except KeyError as ke: - self.log.error('Error loading capture presets: {}'.format(str(ke))) + self.log.error("Error loading capture presets: {}".format(str(ke))) preset = {} - self.log.info('Using viewport preset: {}'.format(preset)) + self.log.info("Using viewport preset: {}".format(preset)) # preset["off_screen"] = False - preset['camera'] = camera - preset['start_frame'] = instance.data["frameStart"] - preset['end_frame'] = instance.data["frameStart"] - preset['camera_options'] = { + preset["camera"] = camera + preset["start_frame"] = instance.data["frameStart"] + preset["end_frame"] = instance.data["frameStart"] + preset["camera_options"] = { "displayGateMask": False, "displayResolution": False, "displayFilmGate": False, @@ -74,14 +73,14 @@ def process(self, instance): # used, if not then the asset resolution is # used if review_instance_width and review_instance_height: - preset['width'] = review_instance_width - preset['height'] = review_instance_height + preset["width"] = review_instance_width + preset["height"] = review_instance_height elif width_preset and height_preset: - preset['width'] = width_preset - preset['height'] = 
height_preset + preset["width"] = width_preset + preset["height"] = height_preset elif asset_width and asset_height: - preset['width'] = asset_width - preset['height'] = asset_height + preset["width"] = asset_width + preset["height"] = asset_height # Create temp directory for thumbnail # - this is to avoid "override" of source file @@ -96,14 +95,14 @@ def process(self, instance): self.log.info("Outputting images to %s" % path) - preset['filename'] = path - preset['overwrite'] = True + preset["filename"] = path + preset["overwrite"] = True - pm.refresh(f=True) + cmds.refresh(force=True) - refreshFrameInt = int(pm.playbackOptions(q=True, minTime=True)) - pm.currentTime(refreshFrameInt - 1, edit=True) - pm.currentTime(refreshFrameInt, edit=True) + refreshFrameInt = int(cmds.playbackOptions(q=True, minTime=True)) + cmds.currentTime(refreshFrameInt - 1, edit=True) + cmds.currentTime(refreshFrameInt, edit=True) # Override transparency if requested. transparency = instance.data.get("transparency", 0) @@ -123,14 +122,14 @@ def process(self, instance): preset["viewport_options"] = {"imagePlane": image_plane} # Disable Pan/Zoom. 
- pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"])) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False) + preset.pop("pan_zoom", None) + preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"] with lib.maintained_time(): # Force viewer to False in call to capture because we have our own # viewer opening call to allow a signal to trigger between # playblast and viewer - preset['viewer'] = False + preset["viewer"] = False # Update preset with current panel setting # if override_viewport_options is turned off @@ -145,17 +144,15 @@ def process(self, instance): _, thumbnail = os.path.split(playblast) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom) - self.log.info("file list {}".format(thumbnail)) if "representations" not in instance.data: instance.data["representations"] = [] representation = { - 'name': 'thumbnail', - 'ext': 'jpg', - 'files': thumbnail, + "name": "thumbnail", + "ext": "jpg", + "files": thumbnail, "stagingDir": dst_staging, "thumbnail": True } diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py index 9b10d2737de..df16c6c3572 100644 --- a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py @@ -30,9 +30,7 @@ def process(self, instance): # non-animated subsets keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "frameStartHandle", "frameEndHandle", - # Backwards compatibility - "handles"] + "frameStartHandle", "frameEndHandle"] for key in keys: instance.data.pop(key, None) diff --git a/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py b/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py index b90885663c3..d8e8554b686 100644 --- a/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py +++ b/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py @@ -30,7 +30,7 @@ def process(self, instance): 
cmds.setAttr(palette + ".xgExportAsDelta", True) # Need to save the scene, cause the attribute changes above does not - # mark the scene as modified so user can exit without commiting the + # mark the scene as modified so user can exit without committing the # changes. self.log.info("Saving changes.") cmds.file(save=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py index 3b0ffd52d79..7055dc145ea 100644 --- a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py +++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py @@ -1,5 +1,3 @@ -import maya.cmds as cmds - import pyblish.api from openpype.pipeline.publish import ( ValidateContentsOrder, PublishValidationError @@ -22,10 +20,11 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): families = ["ass"] label = "Validate Arnold Scene Source" - def _get_nodes_data(self, nodes): + def _get_nodes_by_name(self, nodes): ungrouped_nodes = [] nodes_by_name = {} parents = [] + same_named_nodes = {} for node in nodes: node_split = node.split("|") if len(node_split) == 2: @@ -35,21 +34,38 @@ def _get_nodes_data(self, nodes): if parent: parents.append(parent) - nodes_by_name[node_split[-1]] = node - for shape in cmds.listRelatives(node, shapes=True): - nodes_by_name[shape.split("|")[-1]] = shape + node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + + # Check for same same nodes, which can happen in different + # hierarchies. 
+ if node_name in nodes_by_name: + try: + same_named_nodes[node_name].append(node) + except KeyError: + same_named_nodes[node_name] = [ + nodes_by_name[node_name], node + ] + + nodes_by_name[node_name] = node + + if same_named_nodes: + message = "Found nodes with the same name:" + for name, nodes in same_named_nodes.items(): + message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes)) + + raise PublishValidationError(message) return ungrouped_nodes, nodes_by_name, parents def process(self, instance): ungrouped_nodes = [] - nodes, content_nodes_by_name, content_parents = self._get_nodes_data( - instance.data["setMembers"] + nodes, content_nodes_by_name, content_parents = ( + self._get_nodes_by_name(instance.data["contentMembers"]) ) ungrouped_nodes.extend(nodes) - nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data( + nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_by_name( instance.data.get("proxy", []) ) ungrouped_nodes.extend(nodes) @@ -66,11 +82,11 @@ def process(self, instance): return # Validate for content and proxy nodes amount being the same. 
- if len(instance.data["setMembers"]) != len(instance.data["proxy"]): + if len(instance.data["contentMembers"]) != len(instance.data["proxy"]): raise PublishValidationError( "Amount of content nodes ({}) and proxy nodes ({}) needs to " "be the same.".format( - len(instance.data["setMembers"]), + len(instance.data["contentMembers"]), len(instance.data["proxy"]) ) ) diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py new file mode 100644 index 00000000000..e27723e1045 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py @@ -0,0 +1,74 @@ +import pyblish.api +from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + ValidateContentsOrder, PublishValidationError, RepairAction +) + + +class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin): + """Validate Arnold Scene Source Cbid. + + It is required for the proxy and content nodes to share the same cbid. 
+ """ + + order = ValidateContentsOrder + hosts = ["maya"] + families = ["ass"] + label = "Validate Arnold Scene Source CBID" + actions = [RepairAction] + + @staticmethod + def _get_nodes_by_name(nodes): + nodes_by_name = {} + for node in nodes: + node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + nodes_by_name[node_name] = node + + return nodes_by_name + + @classmethod + def get_invalid_couples(cls, instance): + content_nodes_by_name = cls._get_nodes_by_name( + instance.data["contentMembers"] + ) + proxy_nodes_by_name = cls._get_nodes_by_name( + instance.data.get("proxy", []) + ) + + invalid_couples = [] + for content_name, content_node in content_nodes_by_name.items(): + proxy_node = proxy_nodes_by_name.get(content_name, None) + + if not proxy_node: + cls.log.debug( + "Content node '{}' has no matching proxy node.".format( + content_node + ) + ) + continue + + content_id = lib.get_id(content_node) + proxy_id = lib.get_id(proxy_node) + if content_id != proxy_id: + invalid_couples.append((content_node, proxy_node)) + + return invalid_couples + + def process(self, instance): + # Proxy validation. + if not instance.data.get("proxy", []): + return + + # Validate for proxy nodes sharing the same cbId as content nodes. 
invalid_couples = self.get_invalid_couples(instance)
+        if invalid_couples:
+            raise PublishValidationError(
+                "Found proxy nodes with mismatching cbid:\n{}".format(
+                    invalid_couples
+                )
+            )
+
+    @classmethod
+    def repair(cls, instance):
+        for content_node, proxy_node in cls.get_invalid_couples(instance):
+            lib.set_id(proxy_node, lib.get_id(content_node), overwrite=False)
diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py b/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py
index bd1529e2527..13ea53a357e 100644
--- a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py
+++ b/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py
@@ -8,7 +8,7 @@ class ValidateCameraAttributes(pyblish.api.InstancePlugin):
     """Validates Camera has no invalid attribute keys or values.
 
-    The Alembic file format does not a specifc subset of attributes as such
+    The Alembic file format does not support a specific subset of attributes as such
     we validate that no values are set there as the output will not match
     the current scene.  For example the preScale, film offsets and film roll.
diff --git a/openpype/hosts/maya/plugins/publish/validate_look_color_space.py b/openpype/hosts/maya/plugins/publish/validate_look_color_space.py deleted file mode 100644 index b1bdeb7541b..00000000000 --- a/openpype/hosts/maya/plugins/publish/validate_look_color_space.py +++ /dev/null @@ -1,26 +0,0 @@ -from maya import cmds - -import pyblish.api -from openpype.pipeline.publish import ValidateContentsOrder -from openpype.pipeline import PublishValidationError - - -class ValidateMayaColorSpace(pyblish.api.InstancePlugin): - """ - Check if the OCIO Color Management and maketx options - enabled at the same time - """ - - order = ValidateContentsOrder - families = ['look'] - hosts = ['maya'] - label = 'Color Management with maketx' - - def process(self, instance): - ocio_maya = cmds.colorManagementPrefs(q=True, - cmConfigFileEnabled=True, - cmEnabled=True) - maketx = instance.data["maketx"] - - if ocio_maya and maketx: - raise PublishValidationError("Maya is color managed and maketx option is on. 
OpenPype doesn't support this combination yet.") # noqa diff --git a/openpype/hosts/maya/plugins/publish/validate_look_contents.py b/openpype/hosts/maya/plugins/publish/validate_look_contents.py index 53501d11e52..2d38099f0f0 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_contents.py @@ -1,6 +1,7 @@ import pyblish.api import openpype.hosts.maya.api.action from openpype.pipeline.publish import ValidateContentsOrder +from maya import cmds # noqa class ValidateLookContents(pyblish.api.InstancePlugin): @@ -85,6 +86,7 @@ def validate_lookdata_attributes(cls, instance): invalid.add(instance.name) return list(invalid) + @classmethod def validate_looks(cls, instance): @@ -112,3 +114,23 @@ def validate_files(cls, instance): invalid.append(node) return invalid + + @classmethod + def validate_renderer(cls, instance): + # TODO: Rewrite this to be more specific and configurable + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + do_maketx = instance.data.get("maketx", False) + do_rstex = instance.data.get("rstex", False) + processors = [] + + if do_maketx: + processors.append('arnold') + if do_rstex: + processors.append('redshift') + + for processor in processors: + if processor == renderer: + continue + else: + cls.log.error("Converted texture does not match current renderer.") # noqa diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py index fa4c66952c8..a580a1c7874 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py @@ -1,8 +1,14 @@ -import pymel.core as pc from maya import cmds import pyblish.api + import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib import maintained_selection +from openpype.hosts.maya.api.lib import ( + 
maintained_selection, + delete_after, + undo_chunk, + get_attribute, + set_attribute +) from openpype.pipeline.publish import ( RepairAction, ValidateMeshOrder, @@ -31,60 +37,68 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin): else: active = False + @classmethod + def get_default_attributes(cls): + # Get default arnold attribute values for mesh type. + defaults = {} + with delete_after() as tmp: + transform = cmds.createNode("transform") + tmp.append(transform) + + mesh = cmds.createNode("mesh", parent=transform) + for attr in cmds.listAttr(mesh, string="ai*"): + plug = "{}.{}".format(mesh, attr) + try: + defaults[attr] = get_attribute(plug) + except RuntimeError: + cls.log.debug("Ignoring arnold attribute: {}".format(attr)) + + return defaults + @classmethod def get_invalid_attributes(cls, instance, compute=False): invalid = [] if compute: - # Get default arnold attributes. - temp_transform = pc.polyCube()[0] - - for shape in pc.ls(instance, type="mesh"): - for attr in temp_transform.getShape().listAttr(): - if not attr.attrName().startswith("ai"): - continue - target_attr = pc.PyNode( - "{}.{}".format(shape.name(), attr.attrName()) - ) - if attr.get() != target_attr.get(): - invalid.append(target_attr) + meshes = cmds.ls(instance, type="mesh", long=True) + if not meshes: + return [] - pc.delete(temp_transform) + # Compare the values against the defaults + defaults = cls.get_default_attributes() + for mesh in meshes: + for attr_name, default_value in defaults.items(): + plug = "{}.{}".format(mesh, attr_name) + if get_attribute(plug) != default_value: + invalid.append(plug) instance.data["nondefault_arnold_attributes"] = invalid - else: - invalid.extend(instance.data["nondefault_arnold_attributes"]) - return invalid + return instance.data.get("nondefault_arnold_attributes", []) @classmethod def get_invalid(cls, instance): - invalid = [] - - for attr in cls.get_invalid_attributes(instance, compute=False): - invalid.append(attr.node().name()) - - 
return invalid + invalid_attrs = cls.get_invalid_attributes(instance, compute=False) + invalid_nodes = set(attr.split(".", 1)[0] for attr in invalid_attrs) + return sorted(invalid_nodes) @classmethod def repair(cls, instance): with maintained_selection(): - with pc.UndoChunk(): - temp_transform = pc.polyCube()[0] - + with undo_chunk(): + defaults = cls.get_default_attributes() attributes = cls.get_invalid_attributes( instance, compute=False ) for attr in attributes: - source = pc.PyNode( - "{}.{}".format( - temp_transform.getShape(), attr.attrName() - ) + node, attr_name = attr.split(".", 1) + value = defaults[attr_name] + set_attribute( + node=node, + attribute=attr_name, + value=value ) - attr.set(source.get()) - - pc.delete(temp_transform) def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py index 2242550846d..421f82a102f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py @@ -49,7 +49,8 @@ def process(self, instance): resources = instance.data.get("resources", []) for resource in resources: files = resource["files"] - self.log.debug("Resouce '{}', files: [{}]".format(resource, files)) + self.log.debug( + "Resource '{}', files: [{}]".format(resource, files)) node = resource["node"] if len(files) == 0: self.log.error("File node '{}' uses no or non-existing " diff --git a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py index e91b99359dd..0ff03f91650 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py @@ -1,4 +1,3 @@ -import pymel.core as pm import maya.cmds as cmds import pyblish.api @@ -12,7 +11,7 @@ def get_namespace(node_name): # ensure only node's name (not parent path) - node_name = 
node_name.rsplit("|")[-1] + node_name = node_name.rsplit("|", 1)[-1] # ensure only namespace return node_name.rpartition(":")[0] @@ -45,13 +44,11 @@ def repair(cls, instance): invalid = cls.get_invalid(instance) - # Get nodes with pymel since we'll be renaming them - # Since we don't want to keep checking the hierarchy - # or full paths - nodes = pm.ls(invalid) + # Iterate over the nodes by long to short names to iterate the lowest + # in hierarchy nodes first. This way we avoid having renamed parents + # before renaming children nodes + for node in sorted(invalid, key=len, reverse=True): - for node in nodes: - namespace = node.namespace() - if namespace: - name = node.nodeName() - node.rename(name[len(namespace):]) + node_name = node.rsplit("|", 1)[-1] + node_name_without_namespace = node_name.rsplit(":")[-1] + cmds.rename(node, node_name_without_namespace) diff --git a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py index 6b6fb03eec7..7919a6eaa10 100644 --- a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py +++ b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py @@ -37,8 +37,8 @@ def get_invalid(self, instance): project_name = legacy_io.active_project() asset_doc = instance.data["assetEntity"] - render_passses = instance.data.get("renderPasses", []) - for render_pass in render_passses: + render_passes = instance.data.get("renderPasses", []) + for render_pass in render_passes: is_valid = self.validate_subset_registered( project_name, asset_doc, render_pass ) diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index f1976cb8b16..6bd77180f6e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -279,15 +279,6 @@ def get_invalid(cls, instance): # go through definitions and 
test if such node.attribute exists. # if so, compare its value from the one required. for attribute, data in cls.get_nodes(instance, renderer).items(): - # Validate the settings has values. - if not data["values"]: - cls.log.error( - "Settings for {}.{} is missing values.".format( - node, attribute - ) - ) - continue - for node in data["nodes"]: try: render_value = cmds.getAttr( @@ -320,6 +311,15 @@ def get_nodes(cls, instance, renderer): ) result = {} for attr, values in OrderedDict(validation_settings).items(): + values = [convert_to_int_or_float(v) for v in values if v] + + # Validate the settings has values. + if not values: + cls.log.error( + "Settings for {} is missing values.".format(attr) + ) + continue + cls.log.debug("{}: {}".format(attr, values)) if "." not in attr: cls.log.warning( @@ -328,8 +328,6 @@ def get_nodes(cls, instance, renderer): ) continue - values = [convert_to_int_or_float(v) for v in values] - node_type, attribute_name = attr.split(".", 1) # first get node of that type diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py index f3ed1a36efb..499bfd4e375 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py @@ -1,14 +1,22 @@ -import pymel.core as pc +from collections import defaultdict + +from maya import cmds import pyblish.api import openpype.hosts.maya.api.action +from openpype.hosts.maya.api.lib import get_id, set_id from openpype.pipeline.publish import ( RepairAction, ValidateContentsOrder, ) +def get_basename(node): + """Return node short name without namespace""" + return node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + + class ValidateRigOutputIds(pyblish.api.InstancePlugin): """Validate rig output ids. 
@@ -30,43 +38,48 @@ def process(self, instance): @classmethod def get_invalid(cls, instance, compute=False): - invalid = cls.get_invalid_matches(instance, compute=compute) - return [x["node"].longName() for x in invalid] + invalid_matches = cls.get_invalid_matches(instance, compute=compute) + return list(invalid_matches.keys()) @classmethod def get_invalid_matches(cls, instance, compute=False): - invalid = [] + invalid = {} if compute: out_set = next(x for x in instance if x.endswith("out_SET")) - instance_nodes = pc.sets(out_set, query=True) - instance_nodes.extend( - [x.getShape() for x in instance_nodes if x.getShape()]) - scene_nodes = pc.ls(type="transform") + pc.ls(type="mesh") + instance_nodes = cmds.sets(out_set, query=True, nodesOnly=True) + instance_nodes = cmds.ls(instance_nodes, long=True) + for node in instance_nodes: + shapes = cmds.listRelatives(node, shapes=True, fullPath=True) + if shapes: + instance_nodes.extend(shapes) + + scene_nodes = cmds.ls(type="transform") + cmds.ls(type="mesh") scene_nodes = set(scene_nodes) - set(instance_nodes) + scene_nodes_by_basename = defaultdict(list) + for node in scene_nodes: + basename = get_basename(node) + scene_nodes_by_basename[basename].append(node) + for instance_node in instance_nodes: - matches = [] - basename = instance_node.name(stripNamespace=True) - for scene_node in scene_nodes: - if scene_node.name(stripNamespace=True) == basename: - matches.append(scene_node) - - if matches: - ids = [instance_node.cbId.get()] - ids.extend([x.cbId.get() for x in matches]) - ids = set(ids) - - if len(ids) > 1: - cls.log.error( - "\"{}\" id mismatch to: {}".format( - instance_node.longName(), matches - ) - ) - invalid.append( - {"node": instance_node, "matches": matches} + basename = get_basename(instance_node) + if basename not in scene_nodes_by_basename: + continue + + matches = scene_nodes_by_basename[basename] + + ids = set(get_id(node) for node in matches) + ids.add(get_id(instance_node)) + + if len(ids) > 1: + 
cls.log.error( + "\"{}\" id mismatch to: {}".format( + instance_node.longName(), matches ) + ) + invalid[instance_node] = matches instance.data["mismatched_output_ids"] = invalid else: @@ -76,19 +89,21 @@ def get_invalid_matches(cls, instance, compute=False): @classmethod def repair(cls, instance): - invalid = cls.get_invalid_matches(instance) + invalid_matches = cls.get_invalid_matches(instance) multiple_ids_match = [] - for data in invalid: - ids = [x.cbId.get() for x in data["matches"]] + for instance_node, matches in invalid_matches.items(): + ids = set(get_id(node) for node in matches) # If there are multiple scene ids matched, and error needs to be # raised for manual correction. if len(ids) > 1: - multiple_ids_match.append(data) + multiple_ids_match.append({"node": instance_node, + "matches": matches}) continue - data["node"].cbId.set(ids[0]) + id_to_set = next(iter(ids)) + set_id(instance_node, id_to_set, overwrite=True) if multiple_ids_match: raise RuntimeError( diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py index 2832391bd4e..cbc7ee9d5cf 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py @@ -26,7 +26,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin, - nurbsSurface: _NRB - locator: _LOC - null/group: _GRP - Suffices can also be overriden by project settings. + Suffices can also be overridden by project settings. .. 
warning:: This grabs the first child shape as a reference and doesn't use the diff --git a/openpype/hosts/maya/tools/mayalookassigner/app.py b/openpype/hosts/maya/tools/mayalookassigner/app.py index f9508657e50..2a8775fff6f 100644 --- a/openpype/hosts/maya/tools/mayalookassigner/app.py +++ b/openpype/hosts/maya/tools/mayalookassigner/app.py @@ -24,6 +24,7 @@ remove_unused_looks ) from .vray_proxies import vrayproxy_assign_look +from . import arnold_standin module = sys.modules[__name__] module.window = None @@ -43,7 +44,7 @@ def __init__(self, parent=None): filename = get_workfile() self.setObjectName("lookManager") - self.setWindowTitle("Look Manager 1.3.0 - [{}]".format(filename)) + self.setWindowTitle("Look Manager 1.4.0 - [{}]".format(filename)) self.setWindowFlags(QtCore.Qt.Window) self.setParent(parent) @@ -240,18 +241,37 @@ def on_process_selected(self): )) nodes = item["nodes"] + # Assign Vray Proxy look. if cmds.pluginInfo('vrayformaya', query=True, loaded=True): self.echo("Getting vray proxy nodes ...") vray_proxies = set(cmds.ls(type="VRayProxy", long=True)) - if vray_proxies: - for vp in vray_proxies: - if vp in nodes: - vrayproxy_assign_look(vp, subset_name) + for vp in vray_proxies: + if vp in nodes: + vrayproxy_assign_look(vp, subset_name) - nodes = list(set(item["nodes"]).difference(vray_proxies)) + nodes = list(set(item["nodes"]).difference(vray_proxies)) + else: + self.echo( + "Could not assign to VRayProxy because vrayformaya plugin " + "is not loaded." + ) + + # Assign Arnold Standin look. + if cmds.pluginInfo("mtoa", query=True, loaded=True): + arnold_standins = set(cmds.ls(type="aiStandIn", long=True)) + for standin in arnold_standins: + if standin in nodes: + arnold_standin.assign_look(standin, subset_name) + else: + self.echo( + "Could not assign to aiStandIn because mtoa plugin is not " + "loaded." 
+ ) + + nodes = list(set(item["nodes"]).difference(arnold_standins)) - # Assign look + # Assign look if nodes: assign_look_by_version(nodes, version_id=version["_id"]) diff --git a/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py b/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py new file mode 100644 index 00000000000..7eeeb725530 --- /dev/null +++ b/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py @@ -0,0 +1,247 @@ +import os +import json +from collections import defaultdict +import logging + +from maya import cmds + +from openpype.pipeline import legacy_io +from openpype.client import get_last_version_by_subset_name +from openpype.hosts.maya import api +from . import lib + + +log = logging.getLogger(__name__) + + +ATTRIBUTE_MAPPING = { + "primaryVisibility": "visibility", # Camera + "castsShadows": "visibility", # Shadow + "receiveShadows": "receive_shadows", + "aiSelfShadows": "self_shadows", + "aiOpaque": "opaque", + "aiMatte": "matte", + "aiVisibleInDiffuseTransmission": "visibility", + "aiVisibleInSpecularTransmission": "visibility", + "aiVisibleInVolume": "visibility", + "aiVisibleInDiffuseReflection": "visibility", + "aiVisibleInSpecularReflection": "visibility", + "aiSubdivUvSmoothing": "subdiv_uv_smoothing", + "aiDispHeight": "disp_height", + "aiDispPadding": "disp_padding", + "aiDispZeroValue": "disp_zero_value", + "aiStepSize": "step_size", + "aiVolumePadding": "volume_padding", + "aiSubdivType": "subdiv_type", + "aiSubdivIterations": "subdiv_iterations" +} + + +def calculate_visibility_mask(attributes): + # https://arnoldsupport.com/2018/11/21/backdoor-setting-visibility/ + mapping = { + "primaryVisibility": 1, # Camera + "castsShadows": 2, # Shadow + "aiVisibleInDiffuseTransmission": 4, + "aiVisibleInSpecularTransmission": 8, + "aiVisibleInVolume": 16, + "aiVisibleInDiffuseReflection": 32, + "aiVisibleInSpecularReflection": 64 + } + mask = 255 + for attr, value in mapping.items(): + if attributes.get(attr, True): + 
continue + + mask -= value + + return mask + + +def get_nodes_by_id(standin): + """Get node id from aiStandIn via json sidecar. + + Args: + standin (string): aiStandIn node. + + Returns: + (dict): Dictionary with node full name/path and id. + """ + path = cmds.getAttr(standin + ".dso") + json_path = None + for f in os.listdir(os.path.dirname(path)): + if f.endswith(".json"): + json_path = os.path.join(os.path.dirname(path), f) + break + + if not json_path: + log.warning("Could not find json file for {}.".format(standin)) + return {} + + with open(json_path, "r") as f: + return json.load(f) + + +def shading_engine_assignments(shading_engine, attribute, nodes, assignments): + """Full assignments with shader or disp_map. + + Args: + shading_engine (string): Shading engine for material. + attribute (string): "surfaceShader" or "displacementShader" + nodes: (list): Nodes paths relative to aiStandIn. + assignments (dict): Assignments by nodes. + """ + shader_inputs = cmds.listConnections( + shading_engine + "." + attribute, source=True + ) + if not shader_inputs: + log.info( + "Shading engine \"{}\" missing input \"{}\"".format( + shading_engine, attribute + ) + ) + return + + # Strip off component assignments + for i, node in enumerate(nodes): + if "." in node: + log.warning( + "Converting face assignment to full object assignment. 
This " + "conversion can be lossy: {}".format(node) + ) + nodes[i] = node.split(".")[0] + + shader_type = "shader" if attribute == "surfaceShader" else "disp_map" + assignment = "{}='{}'".format(shader_type, shader_inputs[0]) + for node in nodes: + assignments[node].append(assignment) + + +def assign_look(standin, subset): + log.info("Assigning {} to {}.".format(subset, standin)) + + nodes_by_id = get_nodes_by_id(standin) + + # Group by asset id so we run over the look per asset + node_ids_by_asset_id = defaultdict(set) + for node_id in nodes_by_id: + asset_id = node_id.split(":", 1)[0] + node_ids_by_asset_id[asset_id].add(node_id) + + project_name = legacy_io.active_project() + for asset_id, node_ids in node_ids_by_asset_id.items(): + + # Get latest look version + version = get_last_version_by_subset_name( + project_name, + subset_name=subset, + asset_id=asset_id, + fields=["_id"] + ) + if not version: + log.info("Didn't find last version for subset name {}".format( + subset + )) + continue + + relationships = lib.get_look_relationships(version["_id"]) + shader_nodes, container_node = lib.load_look(version["_id"]) + namespace = shader_nodes[0].split(":")[0] + + # Get only the node ids and paths related to this asset + # And get the shader edits the look supplies + asset_nodes_by_id = { + node_id: nodes_by_id[node_id] for node_id in node_ids + } + edits = list( + api.lib.iter_shader_edits( + relationships, shader_nodes, asset_nodes_by_id + ) + ) + + # Create assignments + node_assignments = {} + for edit in edits: + for node in edit["nodes"]: + if node not in node_assignments: + node_assignments[node] = [] + + if edit["action"] == "assign": + if not cmds.ls(edit["shader"], type="shadingEngine"): + log.info("Skipping non-shader: %s" % edit["shader"]) + continue + + shading_engine_assignments( + shading_engine=edit["shader"], + attribute="surfaceShader", + nodes=edit["nodes"], + assignments=node_assignments + ) + shading_engine_assignments( + 
shading_engine=edit["shader"], + attribute="displacementShader", + nodes=edit["nodes"], + assignments=node_assignments + ) + + if edit["action"] == "setattr": + visibility = False + for attr, value in edit["attributes"].items(): + if attr not in ATTRIBUTE_MAPPING: + log.warning( + "Skipping setting attribute {} on {} because it is" + " not recognized.".format(attr, edit["nodes"]) + ) + continue + + if isinstance(value, str): + value = "'{}'".format(value) + + if ATTRIBUTE_MAPPING[attr] == "visibility": + visibility = True + continue + + assignment = "{}={}".format(ATTRIBUTE_MAPPING[attr], value) + + for node in edit["nodes"]: + node_assignments[node].append(assignment) + + if visibility: + mask = calculate_visibility_mask(edit["attributes"]) + assignment = "visibility={}".format(mask) + + for node in edit["nodes"]: + node_assignments[node].append(assignment) + + # Assign shader + # Clear all current shader assignments + plug = standin + ".operators" + num = cmds.getAttr(plug, size=True) + for i in reversed(range(num)): + cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True) + + # Create new assignment overrides + index = 0 + for node, assignments in node_assignments.items(): + if not assignments: + continue + + with api.lib.maintained_selection(): + operator = cmds.createNode("aiSetParameter") + operator = cmds.rename(operator, namespace + ":" + operator) + + cmds.setAttr(operator + ".selection", node, type="string") + for i, assignment in enumerate(assignments): + cmds.setAttr( + "{}.assignment[{}]".format(operator, i), + assignment, + type="string" + ) + + cmds.connectAttr( + operator + ".out", "{}[{}]".format(plug, index) + ) + + index += 1 + + cmds.sets(operator, edit=True, addElement=container_node) diff --git a/openpype/hosts/maya/tools/mayalookassigner/commands.py b/openpype/hosts/maya/tools/mayalookassigner/commands.py index 2e7a51efded..c5e6c973cfd 100644 --- a/openpype/hosts/maya/tools/mayalookassigner/commands.py +++ 
b/openpype/hosts/maya/tools/mayalookassigner/commands.py @@ -13,6 +13,7 @@ from openpype.hosts.maya.api import lib from .vray_proxies import get_alembic_ids_cache +from . import arnold_standin log = logging.getLogger(__name__) @@ -44,33 +45,11 @@ def get_namespace_from_node(node): return parts[0] if len(parts) > 1 else u":" -def list_descendents(nodes): - """Include full descendant hierarchy of given nodes. - - This is a workaround to cmds.listRelatives(allDescendents=True) because - this way correctly keeps children instance paths (see Maya documentation) - - This fixes LKD-26: assignments not working as expected on instanced shapes. - - Return: - list: List of children descendents of nodes - - """ - result = [] - while True: - nodes = cmds.listRelatives(nodes, - fullPath=True) - if nodes: - result.extend(nodes) - else: - return result - - def get_selected_nodes(): """Get information from current selection""" selection = cmds.ls(selection=True, long=True) - hierarchy = list_descendents(selection) + hierarchy = lib.get_all_children(selection) return list(set(selection + hierarchy)) @@ -80,21 +59,7 @@ def get_all_asset_nodes(): Returns: list: list of dictionaries """ - - host = registered_host() - - nodes = [] - for container in host.ls(): - # We are not interested in looks but assets! 
- if container["loader"] == "LookLoader": - continue - - # Gather all information - container_name = container["objectName"] - nodes += lib.get_container_members(container_name) - - nodes = list(set(nodes)) - return nodes + return cmds.ls(dag=True, noIntermediate=True, long=True) def create_asset_id_hash(nodes): @@ -119,10 +84,12 @@ def create_asset_id_hash(nodes): path = cmds.getAttr("{}.fileName".format(node)) ids = get_alembic_ids_cache(path) for k, _ in ids.items(): - pid = k.split(":")[0] - if node not in node_id_hash[pid]: - node_id_hash[pid].append(node) - + id = k.split(":")[0] + node_id_hash[id].append(node) + elif cmds.nodeType(node) == "aiStandIn": + for id, _ in arnold_standin.get_nodes_by_id(node).items(): + id = id.split(":")[0] + node_id_hash[id].append(node) else: value = lib.get_id(node) if value is None: diff --git a/openpype/hosts/maya/tools/mayalookassigner/lib.py b/openpype/hosts/maya/tools/mayalookassigner/lib.py new file mode 100644 index 00000000000..fddaf6112dd --- /dev/null +++ b/openpype/hosts/maya/tools/mayalookassigner/lib.py @@ -0,0 +1,87 @@ +import json +import logging + +from openpype.pipeline import ( + legacy_io, + get_representation_path, + registered_host, + discover_loader_plugins, + loaders_from_representation, + load_container +) +from openpype.client import get_representation_by_name +from openpype.hosts.maya.api import lib + + +log = logging.getLogger(__name__) + + +def get_look_relationships(version_id): + # type: (str) -> dict + """Get relations for the look. + + Args: + version_id (str): Parent version Id. + + Returns: + dict: Dictionary of relations. 
+ """ + + project_name = legacy_io.active_project() + json_representation = get_representation_by_name( + project_name, representation_name="json", version_id=version_id + ) + + # Load relationships + shader_relation = get_representation_path(json_representation) + with open(shader_relation, "r") as f: + relationships = json.load(f) + + return relationships + + +def load_look(version_id): + # type: (str) -> tuple + """Load look from version. + + Get look from version and invoke Loader for it. + + Args: + version_id (str): Version ID + + Returns: + tuple: (list of shader nodes, container node name). + + """ + + project_name = legacy_io.active_project() + # Get representations of shader file and relationships + look_representation = get_representation_by_name( + project_name, representation_name="ma", version_id=version_id + ) + + # See if representation is already loaded, if so reuse it. + host = registered_host() + representation_id = str(look_representation['_id']) + for container in host.ls(): + if (container['loader'] == "LookLoader" and + container['representation'] == representation_id): + log.info("Reusing loaded look ...") + container_node = container['objectName'] + break + else: + log.info("Using look for the first time ...") + + # Load file + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation(all_loaders, representation_id) + loader = next( + (i for i in loaders if i.__name__ == "LookLoader"), None) + if loader is None: + raise RuntimeError("Could not find LookLoader, this is a bug") + + # Reference the look file + with lib.maintained_selection(): + container_node = load_container(loader, look_representation)[0] + + return lib.get_container_members(container_node), container_node diff --git a/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py b/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py index 889396e555d..1d2ec5fd87e 100644 --- a/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py +++
b/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py @@ -3,26 +3,16 @@ import os from collections import defaultdict import logging -import json import six import alembic.Abc from maya import cmds -from openpype.client import ( - get_representation_by_name, - get_last_version_by_subset_name, -) -from openpype.pipeline import ( - legacy_io, - load_container, - loaders_from_representation, - discover_loader_plugins, - get_representation_path, - registered_host, -) -from openpype.hosts.maya.api import lib +from openpype.client import get_last_version_by_subset_name +from openpype.pipeline import legacy_io +import openpype.hosts.maya.lib as maya_lib +from . import lib log = logging.getLogger(__name__) @@ -149,79 +139,6 @@ def assign_vrayproxy_shaders(vrayproxy, assignments): index += 1 -def get_look_relationships(version_id): - # type: (str) -> dict - """Get relations for the look. - - Args: - version_id (str): Parent version Id. - - Returns: - dict: Dictionary of relations. - """ - - project_name = legacy_io.active_project() - json_representation = get_representation_by_name( - project_name, representation_name="json", version_id=version_id - ) - - # Load relationships - shader_relation = get_representation_path(json_representation) - with open(shader_relation, "r") as f: - relationships = json.load(f) - - return relationships - - -def load_look(version_id): - # type: (str) -> list - """Load look from version. - - Get look from version and invoke Loader for it. - - Args: - version_id (str): Version ID - - Returns: - list of shader nodes. - - """ - - project_name = legacy_io.active_project() - # Get representations of shader file and relationships - look_representation = get_representation_by_name( - project_name, representation_name="ma", version_id=version_id - ) - - # See if representation is already loaded, if so reuse it. 
- host = registered_host() - representation_id = str(look_representation['_id']) - for container in host.ls(): - if (container['loader'] == "LookLoader" and - container['representation'] == representation_id): - log.info("Reusing loaded look ...") - container_node = container['objectName'] - break - else: - log.info("Using look for the first time ...") - - # Load file - all_loaders = discover_loader_plugins() - loaders = loaders_from_representation(all_loaders, representation_id) - loader = next( - (i for i in loaders if i.__name__ == "LookLoader"), None) - if loader is None: - raise RuntimeError("Could not find LookLoader, this is a bug") - - # Reference the look file - with lib.maintained_selection(): - container_node = load_container(loader, look_representation) - - # Get container members - shader_nodes = lib.get_container_members(container_node) - return shader_nodes - - def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): # type: (str, str) -> None """Assign look to vray proxy. 
@@ -263,8 +180,8 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): )) continue - relationships = get_look_relationships(version["_id"]) - shadernodes = load_look(version["_id"]) + relationships = lib.get_look_relationships(version["_id"]) + shadernodes, _ = lib.load_look(version["_id"]) # Get only the node ids and paths related to this asset # And get the shader edits the look supplies @@ -272,8 +189,10 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): node_id: nodes_by_id[node_id] for node_id in node_ids } edits = list( - lib.iter_shader_edits( - relationships, shadernodes, asset_nodes_by_id)) + maya_lib.iter_shader_edits( + relationships, shadernodes, asset_nodes_by_id + ) + ) # Create assignments assignments = {} diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 2a14096f0eb..157a02b9aaa 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -148,7 +148,7 @@ def get_main_window(): def set_node_data(node, knobname, data): """Write data to node invisible knob - Will create new in case it doesnt exists + Will create new in case it doesn't exist or update the one already created.
Args: @@ -506,7 +506,7 @@ def get_avalon_knob_data(node, prefix="avalon:", create=True): try: # check if data available on the node test = node[AVALON_DATA_GROUP].value() - log.debug("Only testing if data avalable: `{}`".format(test)) + log.debug("Only testing if data available: `{}`".format(test)) except NameError as e: # if it doesn't then create it log.debug("Creating avalon knob: `{}`".format(e)) @@ -908,11 +908,11 @@ def get_view_process_node(): continue if not ipn_node: - # in case a Viewer node is transfered from + # in case a Viewer node is transferred from # different workfile with old values raise NameError(( "Input process node name '{}' set in " - "Viewer '{}' is does't exists in nodes" + "Viewer '{}' doesn't exist in nodes" ).format(ipn, v_.name())) ipn_node.setSelected(True) @@ -1662,7 +1662,7 @@ def create_write_node_legacy( tile_color = _data.get("tile_color", "0xff0000ff") GN["tile_color"].setValue(tile_color) - # overrie knob values from settings + # override knob values from settings for knob in knob_overrides: knob_type = knob["type"] knob_name = knob["name"] @@ -2117,7 +2117,7 @@ def set_writes_colorspace(self): write_node[knob["name"]].setValue(value) except TypeError: log.warning( - "Legacy workflow didnt work, switching to current") + "Legacy workflow didn't work, switching to current") set_node_knobs_from_settings( write_node, nuke_imageio_writes["knobs"]) @@ -2543,7 +2543,7 @@ def reset_selection(): def select_nodes(nodes): - """Selects all inputed nodes + """Selects all inputted nodes Arguments: nodes (list): nuke nodes to be selected @@ -2560,7 +2560,7 @@ def launch_workfiles_app(): Trigger to show workfiles tool on application launch. Can be executed only once all other calls are ignored. - Workfiles tool show is deffered after application initialization using + Workfiles tool show is deferred after application initialization using QTimer.
""" @@ -2581,7 +2581,7 @@ def launch_workfiles_app(): # Show workfiles tool using timer # - this will be probably triggered during initialization in that case # the application is not be able to show uis so it must be - # deffered using timer + # deferred using timer # - timer should be processed when initialization ends # When applications starts to process events. timer = QtCore.QTimer() diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index aec87be5ab6..cc3af2a38f8 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -594,7 +594,7 @@ def get_representation_data( Defaults to None. range (bool, optional): flag for adding ranges. Defaults to False. - custom_tags (list[str], optional): user inputed custom tags. + custom_tags (list[str], optional): user inputted custom tags. Defaults to None. """ add_tags = tags or [] @@ -1110,7 +1110,7 @@ def process(self): def is_legacy(self): """Check if it needs to run legacy code - In case where `type` key is missing in singe + In case where `type` key is missing in single knob it is legacy project anatomy. Returns: diff --git a/openpype/hosts/nuke/api/utils.py b/openpype/hosts/nuke/api/utils.py index 6bcb752dd1d..2b3c35c23ad 100644 --- a/openpype/hosts/nuke/api/utils.py +++ b/openpype/hosts/nuke/api/utils.py @@ -87,7 +87,7 @@ def bake_gizmos_recursively(in_group=None): def colorspace_exists_on_node(node, colorspace_name): """ Check if colorspace exists on node - Look through all options in the colorpsace knob, and see if we have an + Look through all options in the colorspace knob, and see if we have an exact match to one of the items. 
Args: diff --git a/openpype/hosts/nuke/api/workfile_template_builder.py b/openpype/hosts/nuke/api/workfile_template_builder.py index fb0afb3d553..cf85a5ea056 100644 --- a/openpype/hosts/nuke/api/workfile_template_builder.py +++ b/openpype/hosts/nuke/api/workfile_template_builder.py @@ -42,7 +42,7 @@ def import_template(self, path): get_template_preset implementation) Returns: - bool: Wether the template was successfully imported or not + bool: Whether the template was successfully imported or not """ # TODO check if the template is already imported @@ -222,7 +222,7 @@ def cleanup_placeholder(self, placeholder, failed): self._imprint_siblings(placeholder) if placeholder.data["nb_children"] == 0: - # save initial nodes postions and dimensions, update them + # save initial nodes positions and dimensions, update them # and set inputs and outputs of loaded nodes self._imprint_inits() @@ -231,7 +231,7 @@ def cleanup_placeholder(self, placeholder, failed): elif placeholder.data["siblings"]: # create copies of placeholder siblings for the new loaded nodes, - # set their inputs and outpus and update all nodes positions and + # set their inputs and outputs and update all nodes positions and # dimensions and siblings names siblings = get_nodes_by_names(placeholder.data["siblings"]) @@ -632,7 +632,7 @@ def cleanup_placeholder(self, placeholder, failed): self._imprint_siblings(placeholder) if placeholder.data["nb_children"] == 0: - # save initial nodes postions and dimensions, update them + # save initial nodes positions and dimensions, update them # and set inputs and outputs of created nodes self._imprint_inits() @@ -641,7 +641,7 @@ def cleanup_placeholder(self, placeholder, failed): elif placeholder.data["siblings"]: # create copies of placeholder siblings for the new created nodes, - # set their inputs and outpus and update all nodes positions and + # set their inputs and outputs and update all nodes positions and # dimensions and siblings names siblings = 
get_nodes_by_names(placeholder.data["siblings"]) diff --git a/openpype/hosts/nuke/plugins/create/convert_legacy.py b/openpype/hosts/nuke/plugins/create/convert_legacy.py index d7341c625f4..c143e4cb278 100644 --- a/openpype/hosts/nuke/plugins/create/convert_legacy.py +++ b/openpype/hosts/nuke/plugins/create/convert_legacy.py @@ -39,7 +39,7 @@ def find_instances(self): break if legacy_found: - # if not item do not add legacy instance convertor + # if not item do not add legacy instance converter self.add_convertor_item("Convert legacy instances") def convert(self): diff --git a/openpype/hosts/nuke/plugins/create/create_source.py b/openpype/hosts/nuke/plugins/create/create_source.py index 06cf4e6cbf5..57504b5d53f 100644 --- a/openpype/hosts/nuke/plugins/create/create_source.py +++ b/openpype/hosts/nuke/plugins/create/create_source.py @@ -85,4 +85,4 @@ def set_selected_nodes(self, pre_create_data): raise NukeCreatorError("Creator error: No active selection") else: NukeCreatorError( - "Creator error: only supprted with active selection") + "Creator error: only supported with active selection") diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py index e562c74c58c..3227a7ed987 100644 --- a/openpype/hosts/nuke/plugins/load/actions.py +++ b/openpype/hosts/nuke/plugins/load/actions.py @@ -74,8 +74,7 @@ def load(self, context, name, namespace, data): return # Include handles - handles = version_data.get("handles", 0) - start -= handles - end += handles + start -= version_data.get("handleStart", 0) + end += version_data.get("handleEnd", 0) lib.update_frame_range(start, end) diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index f227aa161ab..67c7877e609 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -54,22 +54,19 @@ def load(self, context, name, namespace, data): version = 
context['version'] version_data = version.get("data", {}) vname = version.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) namespace = namespace or context['asset']['name'] colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) # prepare data for imprinting # add additional metadata from the version to imprint to Avalon knob - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] + add_keys = ["source", "author", "fps"] - data_imprint = {"frameStart": first, - "frameEnd": last, - "version": vname, - "colorspaceInput": colorspace, - "objectName": object_name} + data_imprint = { + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name + } for k in add_keys: data_imprint.update({k: version_data[k]}) @@ -204,18 +201,13 @@ def update(self, container, representation): name = container['name'] version_data = version_doc.get("data", {}) vname = version_doc.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) namespace = container['namespace'] colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] + add_keys = ["source", "author", "fps"] data_imprint = {"representation": str(representation["_id"]), - "frameStart": first, - "frameEnd": last, "version": vname, "colorspaceInput": colorspace, "objectName": object_name} diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index 90581c2f228..53e9a760031 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -138,7 +138,6 @@ def update(self, container, representation): "version": version_doc.get("name"), "colorspace": 
version_data.get("colorspace"), "source": version_data.get("source"), - "handles": version_data.get("handles"), "fps": version_data.get("fps"), "author": version_data.get("author") }) diff --git a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py b/openpype/hosts/nuke/plugins/publish/collect_backdrop.py index 8eaefa68541..7d51af7e9e4 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/collect_backdrop.py @@ -51,38 +51,10 @@ def process(self, instance): instance.data["label"] = "{0} ({1} nodes)".format( bckn.name(), len(instance.data["transientData"]["childNodes"])) - instance.data["families"].append(instance.data["family"]) - - # Get frame range - handle_start = instance.context.data["handleStart"] - handle_end = instance.context.data["handleEnd"] - first_frame = int(nuke.root()["first_frame"].getValue()) - last_frame = int(nuke.root()["last_frame"].getValue()) - # get version version = instance.context.data.get('version') - if not version: - raise RuntimeError("Script name has no version in the name.") - - instance.data['version'] = version - - # Add version data to instance - version_data = { - "handles": handle_start, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": first_frame + handle_start, - "frameEnd": last_frame - handle_end, - "version": int(version), - "families": [instance.data["family"]] + instance.data["families"], - "subset": instance.data["subset"], - "fps": instance.context.data["fps"] - } + if version: + instance.data['version'] = version - instance.data.update({ - "versionData": version_data, - "frameStart": first_frame, - "frameEnd": last_frame - }) self.log.info("Backdrop instance collected: `{}`".format(instance)) diff --git a/openpype/hosts/nuke/plugins/publish/collect_context_data.py b/openpype/hosts/nuke/plugins/publish/collect_context_data.py index b487c946f05..f1b49652057 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_context_data.py +++ 
b/openpype/hosts/nuke/plugins/publish/collect_context_data.py @@ -49,8 +49,6 @@ def process(self, context): # sourcery skip: avoid-builtin-shadow "resolutionHeight": resolution_height, "pixelAspect": pixel_aspect, - # backward compatibility handles - "handles": handle_start, "handleStart": handle_start, "handleEnd": handle_end, "step": 1, diff --git a/openpype/hosts/nuke/plugins/publish/collect_gizmo.py b/openpype/hosts/nuke/plugins/publish/collect_gizmo.py index 3a877fc194a..e3c40a7a907 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_gizmo.py +++ b/openpype/hosts/nuke/plugins/publish/collect_gizmo.py @@ -28,7 +28,6 @@ def process(self, instance): # Add version data to instance version_data = { - "handles": handle_start, "handleStart": handle_start, "handleEnd": handle_end, "frameStart": first_frame + handle_start, diff --git a/openpype/hosts/nuke/plugins/publish/collect_model.py b/openpype/hosts/nuke/plugins/publish/collect_model.py index 9da056052be..3fdf376d0c7 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_model.py +++ b/openpype/hosts/nuke/plugins/publish/collect_model.py @@ -28,7 +28,6 @@ def process(self, instance): # Add version data to instance version_data = { - "handles": handle_start, "handleStart": handle_start, "handleEnd": handle_end, "frameStart": first_frame + handle_start, diff --git a/openpype/hosts/nuke/plugins/publish/collect_reads.py b/openpype/hosts/nuke/plugins/publish/collect_reads.py index a1144fbcc31..831ae29a27b 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_reads.py +++ b/openpype/hosts/nuke/plugins/publish/collect_reads.py @@ -103,7 +103,6 @@ def process(self, instance): # Add version data to instance version_data = { - "handles": handle_start, "handleStart": handle_start, "handleEnd": handle_end, "frameStart": first_frame + handle_start, @@ -123,7 +122,8 @@ def process(self, instance): "frameStart": first_frame, "frameEnd": last_frame, "colorspace": colorspace, - "handles": 
int(asset_doc["data"].get("handles", 0)), + "handleStart": handle_start, + "handleEnd": handle_end, "step": 1, "fps": int(nuke.root()['fps'].value()) }) diff --git a/openpype/hosts/nuke/plugins/publish/collect_writes.py b/openpype/hosts/nuke/plugins/publish/collect_writes.py index 0008a756bc4..536a0698f3b 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_writes.py +++ b/openpype/hosts/nuke/plugins/publish/collect_writes.py @@ -189,7 +189,7 @@ def process(self, instance): }) # make sure rendered sequence on farm will - # be used for exctract review + # be used for extract review if not instance.data["review"]: instance.data["useSequenceForReview"] = False diff --git a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py index 208d4a24985..5f4a5c3ab0c 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py @@ -48,7 +48,7 @@ def process(self, context, plugin): class ValidateBackdrop(pyblish.api.InstancePlugin): """ Validate amount of nodes on backdrop node in case user - forgoten to add nodes above the publishing backdrop node. + forgotten to add nodes above the publishing backdrop node. """ order = pyblish.api.ValidatorOrder diff --git a/openpype/hosts/photoshop/api/extension/host/index.jsx b/openpype/hosts/photoshop/api/extension/host/index.jsx index 2acec1ebc13..e2711fb960e 100644 --- a/openpype/hosts/photoshop/api/extension/host/index.jsx +++ b/openpype/hosts/photoshop/api/extension/host/index.jsx @@ -199,7 +199,7 @@ function getActiveDocumentName(){ function getActiveDocumentFullName(){ /** * Returns file name of active document with file path. - * activeDocument.fullName returns path in URI (eg /c/.. insted of c:/) + * activeDocument.fullName returns path in URI (eg /c/.. 
instead of c:/) * */ if (documents.length == 0){ return null; @@ -225,7 +225,7 @@ function getSelectedLayers(doc) { * Returns json representation of currently selected layers. * Works in three steps - 1) creates new group with selected layers * 2) traverses this group - * 3) deletes newly created group, not neede + * 3) deletes newly created group, not needed * Bit weird, but Adobe.. **/ if (doc == null){ @@ -284,7 +284,7 @@ function selectLayers(selectedLayers){ existing_ids.push(existing_layers[y]["id"]); } for (var i = 0; i < selectedLayers.length; i++) { - // a check to see if the id stil exists + // a check to see if the id still exists var id = selectedLayers[i]; if(existing_ids.toString().indexOf(id)>=0){ layers[i] = charIDToTypeID( "Lyr " ); diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py index 89ba6ad4e6c..25732446b51 100644 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ b/openpype/hosts/photoshop/api/launch_logic.py @@ -66,11 +66,11 @@ def result(self): return self._result def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. 
""" log.debug("Executing process in main thread") if self.done: diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py index 01022ce0b29..9d7eff02113 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_review.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_review.py @@ -129,7 +129,6 @@ def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames, "frameStart": 1, "frameEnd": no_of_frames, "fps": fps, - "preview": True, "tags": self.mov_options['tags'] }) diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py index f41eb36cafe..b3ad20df399 100644 --- a/openpype/hosts/resolve/api/lib.py +++ b/openpype/hosts/resolve/api/lib.py @@ -250,7 +250,7 @@ def create_timeline_item(media_pool_item: object, media_pool_item, timeline) assert output_timeline_item, AssertionError( - "Track Item with name `{}` doesnt exist on the timeline: `{}`".format( + "Track Item with name `{}` doesn't exist on the timeline: `{}`".format( clip_name, timeline.GetName() )) return output_timeline_item @@ -571,7 +571,7 @@ def create_compound_clip(clip_data, name, folder): # Set current folder to input media_pool_folder: mp.SetCurrentFolder(folder) - # check if clip doesnt exist already: + # check if clip doesn't exist already: clips = folder.GetClipList() cct = next((c for c in clips if c.GetName() in name), None) @@ -582,7 +582,7 @@ def create_compound_clip(clip_data, name, folder): # Create empty timeline in current folder and give name: cct = mp.CreateEmptyTimeline(name) - # check if clip doesnt exist already: + # check if clip doesn't exist already: clips = folder.GetClipList() cct = next((c for c in clips if c.GetName() in name), None) diff --git a/openpype/hosts/resolve/api/menu_style.qss b/openpype/hosts/resolve/api/menu_style.qss index d2d3d1ed37a..3d51c7139fa 100644 --- a/openpype/hosts/resolve/api/menu_style.qss +++ b/openpype/hosts/resolve/api/menu_style.qss @@ 
-61,7 +61,7 @@ QVBoxLayout { background-color: #282828; } -#Devider { +#Divider { border: 1px solid #090909; background-color: #585858; } diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 77e30149fd6..609cff60f7a 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -715,7 +715,7 @@ def _convert_to_tag_data(self): # increasing steps by index of rename iteration self.count_steps *= self.rename_index - hierarchy_formating_data = dict() + hierarchy_formatting_data = dict() _data = self.timeline_item_default_data.copy() if self.ui_inputs: # adding tag metadata from ui @@ -749,13 +749,13 @@ def _convert_to_tag_data(self): # fill up pythonic expresisons in hierarchy data for k, _v in self.hierarchy_data.items(): - hierarchy_formating_data[k] = _v["value"].format(**_data) + hierarchy_formatting_data[k] = _v["value"].format(**_data) else: # if no gui mode then just pass default data - hierarchy_formating_data = self.hierarchy_data + hierarchy_formatting_data = self.hierarchy_data tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formating_data + hierarchy_formatting_data ) tag_hierarchy_data.update({"heroTrack": True}) @@ -792,18 +792,17 @@ def _convert_to_tag_data(self): else: self.tag_data.update({"reviewTrack": None}) - - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): + def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): """ Solve tag data from hierarchy data and templates. 
""" # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) - clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, "parents": self.parents, - "hierarchyData": hierarchy_formating_data, + "hierarchyData": hierarchy_formatting_data, "subset": self.subset, "family": self.subset_family, "families": ["clip"] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py index 7925b0ecf3a..6c3b0c3efdb 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py @@ -83,9 +83,9 @@ def process(self, instance): self.log.info(f"Created new instance: {instance_name}") - def convertor(value): + def converter(value): return str(value) self.log.debug("Instance data: {}".format( - json.dumps(new_instance.data, indent=4, default=convertor) + json.dumps(new_instance.data, indent=4, default=converter) )) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py index 2bf3917e2f2..96aaae23dc2 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -104,7 +104,7 @@ def multiple_instances(self, context, in_data): if repr.get(k): repr.pop(k) - # convert files to list if it isnt + # convert files to list if it isn't if not isinstance(files, (tuple, list)): files = [files] @@ -174,7 +174,7 @@ def prepare_mov_batch_instances(self, in_data): continue files = repre["files"] - # 
Convert files to list if it isnt + # Convert files to list if it isn't if not isinstance(files, (tuple, list)): files = [files] @@ -255,7 +255,9 @@ def create_instance(self, context, in_data): if ext.startswith("."): component["ext"] = ext[1:] - if component["preview"]: + # Remove 'preview' key from representation data + preview = component.pop("preview") + if preview: instance.data["families"].append("review") component["tags"] = ["review"] self.log.debug("Adding review family") diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py index 8633d4bf9dd..391cace7617 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py @@ -116,7 +116,7 @@ def process(self, instance): kwargs = {} if extension == ".edl": # EDL has no frame rate embedded so needs explicit - # frame rate else 24 is asssumed. + # frame rate else 24 is assumed. 
kwargs["rate"] = get_current_project_asset()["data"]["fps"] instance.data["otio_timeline"] = otio.adapters.read_from_file( diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py index 074c62ea0e9..e46fbe60988 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py @@ -29,7 +29,7 @@ def process(self, instance): for pattern in self.skip_timelines_check): self.log.info("Skipping for {} task".format(instance.data["task"])) - # TODO repace query with using 'instance.data["assetEntity"]' + # TODO replace query with using 'instance.data["assetEntity"]' asset_data = get_current_project_asset(instance.data["asset"])["data"] frame_start = asset_data["frameStart"] frame_end = asset_data["frameEnd"] diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py index 293db542a98..e8f76bd3141 100644 --- a/openpype/hosts/traypublisher/api/editorial.py +++ b/openpype/hosts/traypublisher/api/editorial.py @@ -8,10 +8,10 @@ class ShotMetadataSolver: """ Solving hierarchical metadata - Used during editorial publishing. Works with imput + Used during editorial publishing. Works with input clip name and settings defining python formatable template. Settings also define searching patterns - and its token keys used for formating in templates. + and its token keys used for formatting in templates. 
""" NO_DECOR_PATERN = re.compile(r"\{([a-z]*?)\}") @@ -40,13 +40,13 @@ def _rename_template(self, data): """Shot renaming function Args: - data (dict): formating data + data (dict): formatting data Raises: CreatorError: If missing keys Returns: - str: formated new name + str: formatted new name """ shot_rename_template = self.shot_rename[ "shot_rename_template"] @@ -58,7 +58,7 @@ def _rename_template(self, data): "Make sure all keys in settings are correct:: \n\n" f"From template string {shot_rename_template} > " f"`{_E}` has no equivalent in \n" - f"{list(data.keys())} input formating keys!" + f"{list(data.keys())} input formatting keys!" )) def _generate_tokens(self, clip_name, source_data): @@ -68,7 +68,7 @@ def _generate_tokens(self, clip_name, source_data): Args: clip_name (str): name of clip in editorial - source_data (dict): data for formating + source_data (dict): data for formatting Raises: CreatorError: if missing key @@ -106,14 +106,14 @@ def _generate_tokens(self, clip_name, source_data): return output_data def _create_parents_from_settings(self, parents, data): - """Formating parent components. + """formatting parent components. 
Args: parents (list): list of dict parent components - data (dict): formating data + data (dict): formatting data Raises: - CreatorError: missing formating key + CreatorError: missing formatting key CreatorError: missing token key KeyError: missing parent token @@ -126,7 +126,7 @@ def _create_parents_from_settings(self, parents, data): # fill parent keys data template from anatomy data try: - _parent_tokens_formating_data = { + _parent_tokens_formatting_data = { parent_token["name"]: parent_token["value"].format(**data) for parent_token in hierarchy_parents } @@ -143,17 +143,17 @@ def _create_parents_from_settings(self, parents, data): for _index, _parent in enumerate( shot_hierarchy["parents_path"].split("/") ): - # format parent token with value which is formated + # format parent token with value which is formatted try: parent_name = _parent.format( - **_parent_tokens_formating_data) + **_parent_tokens_formatting_data) except KeyError as _E: raise CreatorError(( "Make sure all keys in settings are correct : \n\n" f"`{_E}` from template string " f"{shot_hierarchy['parents_path']}, " f" has no equivalent in \n" - f"{list(_parent_tokens_formating_data.keys())} parents" + f"{list(_parent_tokens_formatting_data.keys())} parents" )) parent_token_name = ( @@ -225,7 +225,7 @@ def _get_parents_from_selected_asset( visual_hierarchy = [asset_doc] current_doc = asset_doc - # looping trought all available visual parents + # looping through all available visual parents # if they are not available anymore than it breaks while True: visual_parent_id = current_doc["data"]["visualParent"] @@ -288,7 +288,7 @@ def generate_data(self, clip_name, source_data): Args: clip_name (str): clip name - source_data (dict): formating data + source_data (dict): formatting data Returns: (str, dict): shot name and hierarchy data @@ -301,19 +301,19 @@ def generate_data(self, clip_name, source_data): # match clip to shot name at start shot_name = clip_name - # parse all tokens and generate 
formating data - formating_data = self._generate_tokens(shot_name, source_data) + # parse all tokens and generate formatting data + formatting_data = self._generate_tokens(shot_name, source_data) # generate parents from selected asset parents = self._get_parents_from_selected_asset(asset_doc, project_doc) if self.shot_rename["enabled"]: - shot_name = self._rename_template(formating_data) + shot_name = self._rename_template(formatting_data) self.log.info(f"Renamed shot name: {shot_name}") if self.shot_hierarchy["enabled"]: parents = self._create_parents_from_settings( - parents, formating_data) + parents, formatting_data) if self.shot_add_tasks: tasks = self._generate_tasks_from_settings( diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py index 73be43444e8..0630dfb3da1 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_editorial.py +++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py @@ -260,7 +260,7 @@ def create(self, subset_name, instance_data, pre_create_data): ) if not first_otio_timeline: - # assing otio timeline for multi file to layer + # assign otio timeline for multi file to layer first_otio_timeline = otio_timeline # create otio editorial instance @@ -283,7 +283,7 @@ def _create_otio_instance( Args: subset_name (str): name of subset - data (dict): instnance data + data (dict): instance data sequence_path (str): path to sequence file media_path (str): path to media file otio_timeline (otio.Timeline): otio timeline object @@ -315,7 +315,7 @@ def _create_otio_timeline(self, sequence_path, fps): kwargs = {} if extension == ".edl": # EDL has no frame rate embedded so needs explicit - # frame rate else 24 is asssumed. + # frame rate else 24 is assumed. 
kwargs["rate"] = fps kwargs["ignore_timecode_mismatch"] = True @@ -358,7 +358,7 @@ def _get_clip_instances( sequence_file_name, first_otio_timeline=None ): - """Helping function fro creating clip instance + """Helping function for creating clip instance Args: otio_timeline (otio.Timeline): otio timeline object @@ -527,7 +527,7 @@ def _make_subset_instance( Args: otio_clip (otio.Clip): otio clip object - preset (dict): sigle family preset + preset (dict): single family preset instance_data (dict): instance data parenting_data (dict): shot instance parent data @@ -767,7 +767,7 @@ def _get_allowed_family_presets(self, pre_create_data): ] def _validate_clip_for_processing(self, otio_clip): - """Validate otio clip attribues + """Validate otio clip attributes Args: otio_clip (otio.Clip): otio clip object @@ -843,7 +843,7 @@ def get_pre_create_attr_defs(self): single_item=False, label="Media files", ), - # TODO: perhpas better would be timecode and fps input + # TODO: perhaps better would be timecode and fps input NumberDef( "timeline_offset", default=0, diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py index 183195a5155..c081216481c 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py +++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py @@ -14,7 +14,7 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): There is also possibility to have reviewable representation which can be stored under 'reviewable' attribute stored on instance data. If there was - already created representation with the same files as 'revieable' containes + already created representation with the same files as 'reviewable' contains Representations can be marked for review and in that case is also added 'review' family to instance families. 
For review can be marked only one diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py index e94e64e04a6..6f76c25e0ce 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/openpype/hosts/tvpaint/api/communication_server.py @@ -389,11 +389,11 @@ def __init__(self, callback, *args, **kwargs): self.kwargs = kwargs def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. """ log.debug("Executing process in main thread") if self.done: diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index 575e6aa7553..58fbd095452 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -504,14 +504,9 @@ def set_context_settings(project_name, asset_doc): print("Frame range was not found!") return - handles = asset_doc["data"].get("handles") or 0 handle_start = asset_doc["data"].get("handleStart") handle_end = asset_doc["data"].get("handleEnd") - if handle_start is None or handle_end is None: - handle_start = handles - handle_end = handles - # Always start from 0 Mark In and set only Mark Out mark_in = 0 mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end diff --git a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py index 538c6e4c5e8..5cfa1faa506 100644 --- a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py +++ b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py @@ -55,7 +55,7 @@ def convert(self): self._convert_render_layers( to_convert["renderLayer"], current_instances) self._convert_render_passes( - to_convert["renderpass"], current_instances) + 
to_convert["renderPass"], current_instances) self._convert_render_scenes( to_convert["renderScene"], current_instances) self._convert_workfiles( @@ -116,7 +116,7 @@ def _convert_render_passes(self, render_passes, current_instances): render_layers_by_group_id = {} for instance in current_instances: if instance.get("creator_identifier") == "render.layer": - group_id = instance["creator_identifier"]["group_id"] + group_id = instance["creator_attributes"]["group_id"] render_layers_by_group_id[group_id] = instance for render_pass in render_passes: diff --git a/openpype/hosts/tvpaint/plugins/create/create_render.py b/openpype/hosts/tvpaint/plugins/create/create_render.py index 9711024c79c..2369c7329f6 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render.py @@ -415,11 +415,11 @@ def collect_instances(self): .get("creator_attributes", {}) .get("render_layer_instance_id") ) - render_layer_info = render_layers.get(render_layer_instance_id) + render_layer_info = render_layers.get(render_layer_instance_id, {}) self.update_instance_labels( instance_data, - render_layer_info["variant"], - render_layer_info["template_data"] + render_layer_info.get("variant"), + render_layer_info.get("template_data") ) instance = CreatedInstance.from_existing(instance_data, self) self._add_instance_to_context(instance) @@ -607,11 +607,11 @@ def get_pre_create_attr_defs(self): current_instances = self.host.list_instances() render_layers = [ { - "value": instance["instance_id"], - "label": instance["subset"] + "value": inst["instance_id"], + "label": inst["subset"] } - for instance in current_instances - if instance["creator_identifier"] == CreateRenderlayer.identifier + for inst in current_instances + if inst.get("creator_identifier") == CreateRenderlayer.identifier ] if not render_layers: render_layers.append({"value": None, "label": "N/A"}) @@ -697,6 +697,7 @@ def apply_settings(self, project_settings, system_settings): 
["create"] ["auto_detect_render"] ) + self.enabled = plugin_settings.get("enabled", False) self.allow_group_rename = plugin_settings["allow_group_rename"] self.group_name_template = plugin_settings["group_name_template"] self.group_idx_offset = plugin_settings["group_idx_offset"] diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py index 5eb702a1da4..63f04cf3ce6 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -22,9 +22,11 @@ def process(self, instance): context = instance.context frame_start = asset_doc["data"]["frameStart"] + fps = asset_doc["data"]["fps"] frame_end = frame_start + ( context.data["sceneMarkOut"] - context.data["sceneMarkIn"] ) + instance.data["fps"] = fps instance.data["frameStart"] = frame_start instance.data["frameEnd"] = frame_end self.log.info( diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml index e7be7358881..5832c743500 100644 --- a/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml @@ -1,7 +1,7 @@ -Layers visiblity +Layers visibility ## All layers are not visible Layers visibility was changed during publishing which caused that all layers for subset "{instance_name}" are hidden. diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml index 7397f6ef0b1..0fc03c2948b 100644 --- a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml @@ -11,7 +11,7 @@ Your scene does not contain metadata about {missing_metadata}. 
Resave the scene using Workfiles tool or hit the "Repair" button on the right. -### How this could happend? +### How this could happen? You're using scene file that was not created using Workfiles tool. diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml index c4ffafc8b5f..bb57e93bf25 100644 --- a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml @@ -13,7 +13,7 @@ If the workfile belongs to project "{env_project_name}" then use Workfiles tool Otherwise close TVPaint and launch it again from project you want to publish in. -### How this could happend? +### How this could happen? You've opened workfile from different project. You've opened TVPaint on a task from "{env_project_name}" then you've opened TVPaint again on task from "{workfile_project_name}" without closing the TVPaint. Because TVPaint can run only once the project didn't change. diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py index 7e35726030d..9347960d3f4 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py @@ -1,5 +1,8 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) from openpype.hosts.tvpaint.api.pipeline import ( list_instances, write_instances, @@ -31,8 +34,11 @@ def process(self, context, plugin): write_instances(new_instance_items) -class ValidateAssetNames(pyblish.api.ContextPlugin): - """Validate assset name present on instance. +class ValidateAssetName( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): + """Validate asset name present on instance. 
Asset name on instance should be the same as context's. """ @@ -43,6 +49,8 @@ class ValidateAssetNames(pyblish.api.ContextPlugin): actions = [FixAssetNames] def process(self, context): + if not self.is_active(context.data): + return context_asset_name = context.data["asset"] for instance in context: asset_name = instance.data.get("asset") diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py index 6a496a2e496..8e52a636f4f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py @@ -11,7 +11,7 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin): families = ["review", "render"] def process(self, instance): - layers = instance.data["layers"] + layers = instance.data.get("layers") # Instance have empty layers # - it is not job of this validator to check that if not layers: diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index 0030b0fd1ce..7b2cc62bb5a 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -1,7 +1,10 @@ import json import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) from openpype.hosts.tvpaint.api.lib import execute_george @@ -23,7 +26,10 @@ def process(self, context, plugin): ) -class ValidateMarks(pyblish.api.ContextPlugin): +class ValidateMarks( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate mark in and out are enabled and it's duration. 
Mark In/Out does not have to match frameStart and frameEnd but duration is @@ -59,6 +65,9 @@ def get_expected_data(context): } def process(self, context): + if not self.is_active(context.data): + return + current_data = { "markIn": context.data["sceneMarkIn"], "markInState": context.data["sceneMarkInState"], diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py index 4473e4b1b79..0ab8e811f58 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py @@ -1,11 +1,17 @@ import json import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) # TODO @iLliCiTiT add fix action for fps -class ValidateProjectSettings(pyblish.api.ContextPlugin): +class ValidateProjectSettings( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate scene settings against database.""" label = "Validate Scene Settings" @@ -13,6 +19,9 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin): optional = True def process(self, context): + if not self.is_active(context.data): + return + expected_data = context.data["assetEntity"]["data"] scene_data = { "fps": context.data.get("sceneFps"), diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py index 066e54c6707..229ccfcd18f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py @@ -1,5 +1,8 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) from openpype.hosts.tvpaint.api.lib import execute_george @@ -14,7 +17,10 @@ def process(self, context, 
plugin): execute_george("tv_startframe 0") -class ValidateStartFrame(pyblish.api.ContextPlugin): +class ValidateStartFrame( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate start frame being at frame 0.""" label = "Validate Start Frame" @@ -24,6 +30,9 @@ class ValidateStartFrame(pyblish.api.ContextPlugin): optional = True def process(self, context): + if not self.is_active(context.data): + return + start_frame = execute_george("tv_startframe") if start_frame == 0: return diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index 8a5a4591949..1a7c6269842 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -306,7 +306,7 @@ def imprint(node, data): def show_tools_popup(): """Show popup with tools. - Popup will disappear on click or loosing focus. + Popup will disappear on click or losing focus. """ from openpype.hosts.unreal.api import tools_ui diff --git a/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp index 008025e8166..34faba1f499 100644 --- a/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp +++ b/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp @@ -31,7 +31,7 @@ bool UOpenPypeLib::SetFolderColor(const FString& FolderPath, const FLinearColor& } /** - * Returns all poperties on given object + * Returns all properties on given object * @param cls - class * @return TArray of properties */ diff --git a/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h b/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h index c960bbf190d..322a23a3e81 100644 --- a/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h 
+++ b/openpype/hosts/unreal/integration/UE_4.7/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h @@ -16,7 +16,7 @@ /** * @brief This enum values are humanly readable mapping of error codes. * Here should be all error codes to be possible find what went wrong. -* TODO: In the future should exists an web document where is mapped error code & what problem occured & how to repair it... +* TODO: In the future a web document should exists with the mapped error code & what problem occurred & how to repair it... */ UENUM() namespace EOP_ActionResult @@ -27,11 +27,11 @@ namespace EOP_ActionResult ProjectNotCreated, ProjectNotLoaded, ProjectNotSaved, - //....Here insert another values + //....Here insert another values //Do not remove! //Usable for looping through enum values - __Last UMETA(Hidden) + __Last UMETA(Hidden) }; } @@ -63,10 +63,10 @@ struct FOP_ActionResult private: /** @brief Action status */ - EOP_ActionResult::Type Status; + EOP_ActionResult::Type Status; /** @brief Optional reason of fail */ - FText Reason; + FText Reason; public: /** @@ -77,7 +77,7 @@ struct FOP_ActionResult EOP_ActionResult::Type& GetStatus(); FText& GetReason(); -private: +private: void TryLog() const; }; diff --git a/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp index 008025e8166..34faba1f499 100644 --- a/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp +++ b/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Private/OpenPypeLib.cpp @@ -31,7 +31,7 @@ bool UOpenPypeLib::SetFolderColor(const FString& FolderPath, const FLinearColor& } /** - * Returns all poperties on given object + * Returns all properties on given object * @param cls - class * @return TArray of properties */ diff --git a/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h 
b/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h index c960bbf190d..322a23a3e81 100644 --- a/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h +++ b/openpype/hosts/unreal/integration/UE_5.0/OpenPype/Source/OpenPype/Public/Commandlets/OPActionResult.h @@ -16,7 +16,7 @@ /** * @brief This enum values are humanly readable mapping of error codes. * Here should be all error codes to be possible find what went wrong. -* TODO: In the future should exists an web document where is mapped error code & what problem occured & how to repair it... +* TODO: In the future a web document should exists with the mapped error code & what problem occurred & how to repair it... */ UENUM() namespace EOP_ActionResult @@ -27,11 +27,11 @@ namespace EOP_ActionResult ProjectNotCreated, ProjectNotLoaded, ProjectNotSaved, - //....Here insert another values + //....Here insert another values //Do not remove! //Usable for looping through enum values - __Last UMETA(Hidden) + __Last UMETA(Hidden) }; } @@ -63,10 +63,10 @@ struct FOP_ActionResult private: /** @brief Action status */ - EOP_ActionResult::Type Status; + EOP_ActionResult::Type Status; /** @brief Optional reason of fail */ - FText Reason; + FText Reason; public: /** @@ -77,7 +77,7 @@ struct FOP_ActionResult EOP_ActionResult::Type& GetStatus(); FText& GetReason(); -private: +private: void TryLog() const; }; diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py index ca6b0ce7368..2496440e5f0 100644 --- a/openpype/hosts/unreal/plugins/load/load_camera.py +++ b/openpype/hosts/unreal/plugins/load/load_camera.py @@ -171,7 +171,7 @@ def load(self, context, name, namespace, data): project_name = legacy_io.active_project() # TODO refactor - # - Creationg of hierarchy should be a function in unreal integration + # - Creating of hierarchy should be a function in unreal integration # - 
it's used in multiple loaders but must not be loader's logic # - hard to say what is purpose of the loop # - variables does not match their meaning diff --git a/openpype/hosts/webpublisher/lib.py b/openpype/hosts/webpublisher/lib.py index 4bc3f1db80d..b207f85b46e 100644 --- a/openpype/hosts/webpublisher/lib.py +++ b/openpype/hosts/webpublisher/lib.py @@ -30,7 +30,7 @@ def parse_json(path): Returns: (dict) or None if unparsable Raises: - AsssertionError if 'path' doesn't exist + AssertionError if 'path' doesn't exist """ path = path.strip('\"') assert os.path.isfile(path), ( diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 7cc296f47bc..8adae348276 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -889,7 +889,8 @@ def __init__(self, application, executable, env_group=None, **data): self.modules_manager = ModulesManager() # Logger - logger_name = "{}-{}".format(self.__class__.__name__, self.app_name) + logger_name = "{}-{}".format(self.__class__.__name__, + self.application.full_name) self.log = Logger.get_logger(logger_name) self.executable = executable @@ -968,7 +969,7 @@ def _collect_addons_launch_hook_paths(self): """Helper to collect application launch hooks from addons. Module have to have implemented 'get_launch_hook_paths' method which - can expect appliction as argument or nothing. + can expect application as argument or nothing. Returns: List[str]: Paths to launch hook directories. 
@@ -1246,7 +1247,7 @@ def launch(self): args_len_str = " ({})".format(len(args)) self.log.info( "Launching \"{}\" with args{}: {}".format( - self.app_name, args_len_str, args + self.application.full_name, args_len_str, args ) ) self.launch_args = args @@ -1271,7 +1272,9 @@ def launch(self): exc_info=True ) - self.log.debug("Launch of {} finished.".format(self.app_name)) + self.log.debug("Launch of {} finished.".format( + self.application.full_name + )) return self.process @@ -1508,8 +1511,8 @@ def prepare_app_environments( if key in source_env: source_env[key] = value - # `added_env_keys` has debug purpose - added_env_keys = {app.group.name, app.name} + # `app_and_tool_labels` has debug purpose + app_and_tool_labels = [app.full_name] # Environments for application environments = [ app.group.environment, @@ -1532,15 +1535,14 @@ def prepare_app_environments( for group_name in sorted(groups_by_name.keys()): group = groups_by_name[group_name] environments.append(group.environment) - added_env_keys.add(group_name) for tool_name in sorted(tool_by_group_name[group_name].keys()): tool = tool_by_group_name[group_name][tool_name] environments.append(tool.environment) - added_env_keys.add(tool.name) + app_and_tool_labels.append(tool.full_name) log.debug( "Will add environments for apps and tools: {}".format( - ", ".join(added_env_keys) + ", ".join(app_and_tool_labels) ) ) diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index b5cd15f41af..6054d2a92ae 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -9,7 +9,7 @@ import six import clique -# Global variable which store attribude definitions by type +# Global variable which store attribute definitions by type # - default types are registered on import _attr_defs_by_type = {} @@ -93,7 +93,7 @@ def __call__(self, *args, **kwargs): @six.add_metaclass(AbstractAttrDefMeta) class AbstractAttrDef(object): - """Abstraction of attribute definiton. 
+ """Abstraction of attribute definition. Each attribute definition must have implemented validation and conversion method. @@ -427,7 +427,7 @@ class EnumDef(AbstractAttrDef): """Enumeration of single item from items. Args: - items: Items definition that can be coverted using + items: Items definition that can be converted using 'prepare_enum_items'. default: Default value. Must be one key(value) from passed items. """ diff --git a/openpype/lib/events.py b/openpype/lib/events.py index 096201312fb..bed00fe6591 100644 --- a/openpype/lib/events.py +++ b/openpype/lib/events.py @@ -156,7 +156,7 @@ def set_enabled(self, enabled): self._enabled = enabled def deregister(self): - """Calling this funcion will cause that callback will be removed.""" + """Calling this function will cause that callback will be removed.""" # Fake reference self._ref_valid = False diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index 7a929a0ade1..ef456395e78 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -8,6 +8,8 @@ from .log import Logger from .vendor_bin_utils import find_executable +from .openpype_version import is_running_from_build + # MSDN process creation flag (Windows only) CREATE_NO_WINDOW = 0x08000000 @@ -161,18 +163,20 @@ def run_subprocess(*args, **kwargs): def clean_envs_for_openpype_process(env=None): - """Modify environemnts that may affect OpenPype process. + """Modify environments that may affect OpenPype process. Main reason to implement this function is to pop PYTHONPATH which may be affected by in-host environments. 
""" if env is None: env = os.environ - return { - key: value - for key, value in env.items() - if key not in ("PYTHONPATH",) - } + + # Exclude some environment variables from a copy of the environment + env = env.copy() + for key in ["PYTHONPATH", "PYTHONHOME"]: + env.pop(key, None) + + return env def run_openpype_process(*args, **kwargs): @@ -200,6 +204,11 @@ def run_openpype_process(*args, **kwargs): # Skip envs that can affect OpenPype process # - fill more if you find more env = clean_envs_for_openpype_process(os.environ) + + # Only keep OpenPype version if we are running from build. + if not is_running_from_build(): + env.pop("OPENPYPE_VERSION", None) + return run_subprocess(args, env=env, **kwargs) diff --git a/openpype/lib/file_transaction.py b/openpype/lib/file_transaction.py index 81332a88915..80f4e81f2c3 100644 --- a/openpype/lib/file_transaction.py +++ b/openpype/lib/file_transaction.py @@ -130,7 +130,7 @@ def process(self): path_same = self._same_paths(src, dst) if path_same: self.log.debug( - "Source and destionation are same files {} -> {}".format( + "Source and destination are same files {} -> {}".format( src, dst)) continue diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index 799693554fa..57968b37002 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -540,7 +540,7 @@ def convert_for_ffmpeg( continue # Remove attributes that have string value longer than allowed length - # for ffmpeg or when containt unallowed symbols + # for ffmpeg or when contain unallowed symbols erase_reason = "Missing reason" erase_attribute = False if len(attr_value) > MAX_FFMPEG_STRING_LEN: @@ -680,7 +680,7 @@ def convert_input_paths_for_ffmpeg( continue # Remove attributes that have string value longer than allowed - # length for ffmpeg or when containt unallowed symbols + # length for ffmpeg or when containing unallowed symbols erase_reason = "Missing reason" erase_attribute = False if len(attr_value) > 
MAX_FFMPEG_STRING_LEN: @@ -968,7 +968,7 @@ def _ffmpeg_dnxhd_codec_args(stream_data, source_ffmpeg_cmd): if source_ffmpeg_cmd: # Define bitrate arguments bit_rate_args = ("-b:v", "-vb",) - # Seprate the two variables in case something else should be copied + # Separate the two variables in case something else should be copied # from source command copy_args = [] copy_args.extend(bit_rate_args) diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py index e5deb7a6b23..f27c78d486c 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/openpype/lib/vendor_bin_utils.py @@ -260,7 +260,7 @@ def _oiio_executable_validation(filepath): that it can be executed. For that is used '--help' argument which is fast and does not need any other inputs. - Any possible crash of missing libraries or invalid build should be catched. + Any possible crash of missing libraries or invalid build should be caught. Main reason is to validate if executable can be executed on OS just running which can be issue ob linux machines. @@ -329,7 +329,7 @@ def _ffmpeg_executable_validation(filepath): that it can be executed. For that is used '-version' argument which is fast and does not need any other inputs. - Any possible crash of missing libraries or invalid build should be catched. + Any possible crash of missing libraries or invalid build should be caught. Main reason is to validate if executable can be executed on OS just running which can be issue ob linux machines. diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 0fd21492e82..ed1eeb04cd5 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -472,7 +472,7 @@ def modify_application_launch_arguments(self, application, env): Args: application (Application): Application that is launched. - env (dict): Current environemnt variables. + env (dict): Current environment variables. 
""" pass @@ -622,7 +622,7 @@ def initialize_modules(self): # Check if class is abstract (Developing purpose) if inspect.isabstract(modules_item): - # Find missing implementations by convetion on `abc` module + # Find abstract attributes by convention on `abc` module not_implemented = [] for attr_name in dir(modules_item): attr = getattr(modules_item, attr_name, None) @@ -708,13 +708,13 @@ def get_enabled_modules(self): ] def collect_global_environments(self): - """Helper to collect global enviornment variabled from modules. + """Helper to collect global environment variabled from modules. Returns: dict: Global environment variables from enabled modules. Raises: - AssertionError: Gobal environment variables must be unique for + AssertionError: Global environment variables must be unique for all modules. """ module_envs = {} @@ -1174,7 +1174,7 @@ def on_exit(self): def get_module_settings_defs(): - """Check loaded addons/modules for existence of thei settings definition. + """Check loaded addons/modules for existence of their settings definition. Check if OpenPype addon/module as python module has class that inherit from `ModuleSettingsDef` in python module variables (imported @@ -1204,7 +1204,7 @@ def get_module_settings_defs(): continue if inspect.isabstract(attr): - # Find missing implementations by convetion on `abc` module + # Find missing implementations by convention on `abc` module not_implemented = [] for attr_name in dir(attr): attr = getattr(attr, attr_name, None) @@ -1293,7 +1293,7 @@ def save_defaults(self, top_key, data): class ModuleSettingsDef(BaseModuleSettingsDef): - """Settings definiton with separated system and procect settings parts. + """Settings definition with separated system and procect settings parts. Reduce conditions that must be checked and adds predefined methods for each case. 
diff --git a/openpype/modules/clockify/clockify_api.py b/openpype/modules/clockify/clockify_api.py index 80979c83ab2..47af002f7ab 100644 --- a/openpype/modules/clockify/clockify_api.py +++ b/openpype/modules/clockify/clockify_api.py @@ -247,7 +247,7 @@ def start_time_entry( current_timer = self.get_in_progress() # Check if is currently run another times and has same values - # DO not restart the timer, if it is already running for curent task + # DO not restart the timer, if it is already running for current task if current_timer: current_timer_hierarchy = current_timer.get("description") current_project_id = current_timer.get("projectId") diff --git a/openpype/modules/clockify/clockify_module.py b/openpype/modules/clockify/clockify_module.py index 200a268ad7c..b6efec79074 100644 --- a/openpype/modules/clockify/clockify_module.py +++ b/openpype/modules/clockify/clockify_module.py @@ -76,7 +76,7 @@ def tray_exit(self, *_a, **_kw): return def get_plugin_paths(self): - """Implementaton of IPluginPaths to get plugin paths.""" + """Implementation of IPluginPaths to get plugin paths.""" actions_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "launcher_actions" ) diff --git a/openpype/modules/clockify/widgets.py b/openpype/modules/clockify/widgets.py index 8c28f38b6ef..86e67569f2b 100644 --- a/openpype/modules/clockify/widgets.py +++ b/openpype/modules/clockify/widgets.py @@ -34,7 +34,7 @@ def __init__(self, messages, title): def _ui_layout(self, messages): if not messages: - messages = ["*Misssing messages (This is a bug)*", ] + messages = ["*Missing messages (This is a bug)*", ] elif not isinstance(messages, (tuple, list)): messages = [messages, ] diff --git a/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py index 038ee4fc034..bcf08507687 100644 --- a/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py +++ 
b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py @@ -106,7 +106,7 @@ def payload_submit(self, # define chunk and priority chunk_size = instance.context.data.get("chunk") - if chunk_size == 0: + if not chunk_size: chunk_size = self.deadline_chunk_size # search for %02d pattern in name, and padding number diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py index 417a03de744..c728b6b9c72 100644 --- a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py @@ -3,7 +3,15 @@ import copy import attr -from openpype.pipeline import legacy_io +from openpype.lib import ( + TextDef, + BoolDef, + NumberDef, +) +from openpype.pipeline import ( + legacy_io, + OpenPypePyblishPluginMixin +) from openpype.settings import get_project_settings from openpype.hosts.max.api.lib import ( get_current_renderer, @@ -22,7 +30,8 @@ class MaxPluginInfo(object): IgnoreInputs = attr.ib(default=True) -class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): +class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline, + OpenPypePyblishPluginMixin): label = "Submit Render to Deadline" hosts = ["max"] @@ -31,14 +40,22 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): use_published = True priority = 50 - tile_priority = 50 chunk_size = 1 jobInfo = {} pluginInfo = {} group = None - deadline_pool = None - deadline_pool_secondary = None - framePerTask = 1 + + @classmethod + def apply_settings(cls, project_settings, system_settings): + settings = project_settings["deadline"]["publish"]["MaxSubmitDeadline"] # noqa + + # Take some defaults from settings + cls.use_published = settings.get("use_published", + cls.use_published) + cls.priority = settings.get("priority", + cls.priority) + cls.chunk_size = settings.get("chunk_size", cls.chunk_size) + cls.group = 
settings.get("group", cls.group) def get_job_info(self): job_info = DeadlineJobInfo(Plugin="3dsmax") @@ -49,11 +66,11 @@ def get_job_info(self): instance = self._instance context = instance.context - # Always use the original work file name for the Job name even when # rendering is done from the published Work File. The original work # file name is clearer because it can also have subversion strings, # etc. which are stripped for the published file. + src_filepath = context.data["currentFile"] src_filename = os.path.basename(src_filepath) @@ -71,13 +88,13 @@ def get_job_info(self): job_info.Pool = instance.data.get("primaryPool") job_info.SecondaryPool = instance.data.get("secondaryPool") - job_info.ChunkSize = instance.data.get("chunkSize", 1) - job_info.Comment = context.data.get("comment") - job_info.Priority = instance.data.get("priority", self.priority) - job_info.FramesPerTask = instance.data.get("framesPerTask", 1) - if self.group: - job_info.Group = self.group + attr_values = self.get_attr_values_from_data(instance.data) + + job_info.ChunkSize = attr_values.get("chunkSize", 1) + job_info.Comment = context.data.get("comment") + job_info.Priority = attr_values.get("priority", self.priority) + job_info.Group = attr_values.get("group", self.group) # Add options from RenderGlobals render_globals = instance.data.get("renderGlobals", {}) @@ -216,3 +233,32 @@ def _clean_name(path): plugin_info.update(plugin_data) return job_info, plugin_info + + @classmethod + def get_attribute_defs(cls): + defs = super(MaxSubmitDeadline, cls).get_attribute_defs() + defs.extend([ + BoolDef("use_published", + default=cls.use_published, + label="Use Published Scene"), + + NumberDef("priority", + minimum=1, + maximum=250, + decimals=0, + default=cls.priority, + label="Priority"), + + NumberDef("chunkSize", + minimum=1, + maximum=50, + decimals=0, + default=cls.chunk_size, + label="Frame Per Task"), + + TextDef("group", + default=cls.group, + label="Group Name"), + ]) + + return defs 
diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index cc069cf51a8..5c598df94b0 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -66,7 +66,7 @@ def get_attribute_defs(cls): ), NumberDef( "concurrency", - label="Concurency", + label="Concurrency", default=cls.concurrent_tasks, decimals=0, minimum=1, @@ -76,6 +76,11 @@ def get_attribute_defs(cls): "use_gpu", default=cls.use_gpu, label="Use GPU" + ), + BoolDef( + "suspend_publish", + default=False, + label="Suspend publish" ) ] @@ -87,6 +92,10 @@ def process(self, instance): instance.data["attributeValues"] = self.get_attr_values_from_data( instance.data) + # add suspend_publish attributeValue to instance data + instance.data["suspend_publish"] = instance.data["attributeValues"][ + "suspend_publish"] + instance.data["toBeRenderedOn"] = "deadline" families = instance.data["families"] diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index b5e952a385b..9160e24a81e 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -111,7 +111,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, - publishJobState (str, Optional): "Active" or "Suspended" This defaults to "Suspended" - - expectedFiles (list or dict): explained bellow + - expectedFiles (list or dict): explained below """ @@ -161,8 +161,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin, # regex for finding frame number in string R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') - # mapping of instance properties to be transfered to new instance for every - # specified family + # mapping of instance properties to be transferred to new instance + # for every 
specified family instance_transfer = { "slate": ["slateFrames", "slate"], "review": ["lutPath"], @@ -405,7 +405,7 @@ def _copy_extend_frames(self, instance, representation): continue r_col.indexes.remove(frame) - # now we need to translate published names from represenation + # now we need to translate published names from representation # back. This is tricky, right now we'll just use same naming # and only switch frame numbers resource_files = [] @@ -542,7 +542,7 @@ def _create_instances_for_aov( if preview: new_instance["review"] = True - # create represenation + # create representation if isinstance(col, (list, tuple)): files = [os.path.basename(f) for f in col] else: @@ -755,7 +755,7 @@ def process(self, instance): # type: (pyblish.api.Instance) -> None """Process plugin. - Detect type of renderfarm submission and create and post dependend job + Detect type of renderfarm submission and create and post dependent job in case of Deadline. It creates json file with metadata needed for publishing in directory of render. 
@@ -993,7 +993,7 @@ def process(self, instance): instances = [instance_skeleton_data] # if we are attaching to other subsets, create copy of existing - # instances, change data to match thats subset and replace + # instances, change data to match its subset and replace # existing instances with modified data if instance.data.get("attachTo"): self.log.info("Attaching render to subset:") diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py index 05afa5080d8..7c8ab62d4d8 100644 --- a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py +++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -17,7 +17,11 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin, label = "Validate Deadline Pools" order = pyblish.api.ValidatorOrder - families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + families = ["rendering", + "render.farm", + "renderFarm", + "renderlayer", + "maxrender"] optional = True def process(self, instance): diff --git a/openpype/modules/example_addons/example_addon/addon.py b/openpype/modules/example_addons/example_addon/addon.py index ead647b41d0..be1d3ff9205 100644 --- a/openpype/modules/example_addons/example_addon/addon.py +++ b/openpype/modules/example_addons/example_addon/addon.py @@ -44,7 +44,7 @@ def get_settings_root_path(self): class ExampleAddon(OpenPypeAddOn, IPluginPaths, ITrayAction): - """This Addon has defined it's settings and interface. + """This Addon has defined its settings and interface. This example has system settings with an enabled option. 
And use few other interfaces: diff --git a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py index 1ad7a17785e..333228c6999 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py +++ b/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py @@ -44,7 +44,7 @@ def clone_review_session(session, entity): class CloneReviewSession(ServerAction): '''Generate Client Review action - `label` a descriptive string identifing your action. + `label` a descriptive string identifying your action. `varaint` To group actions together, give them the same label and specify a unique variant per action. `identifier` a unique identifier for your action. diff --git a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py index 21382007a0f..42a279e3333 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py +++ b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py @@ -230,7 +230,7 @@ def _process_review_session( if not today_session_name: continue - # Find matchin review session + # Find matching review session project_review_sessions = review_sessions_by_project_id[project_id] todays_session = None yesterdays_session = None diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 332648cd028..02231cbe3ce 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -124,7 +124,7 @@ def prepare_root_items(self, project_anatom_settings): root_items.append({ "type": "label", "value": ( - "

NOTE: Roots are crutial for path filling" + "

NOTE: Roots are crucial for path filling" " (and creating folder structure).

" ) }) diff --git a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py index 1209375f821..a698195c59d 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py @@ -9,7 +9,7 @@ class PushHierValuesToNonHier(ServerAction): - """Action push hierarchical custom attribute values to non hierarchical. + """Action push hierarchical custom attribute values to non-hierarchical. Hierarchical value is also pushed to their task entities. @@ -119,17 +119,109 @@ def attrs_configurations(self, session, object_ids, interest_attributes): self.join_query_keys(object_ids) )).all() - output = {} + attrs_by_obj_id = collections.defaultdict(list) hiearchical = [] for attr in attrs: if attr["is_hierarchical"]: hiearchical.append(attr) continue obj_id = attr["object_type_id"] - if obj_id not in output: - output[obj_id] = [] - output[obj_id].append(attr) - return output, hiearchical + attrs_by_obj_id[obj_id].append(attr) + return attrs_by_obj_id, hiearchical + + def query_attr_value( + self, + session, + hier_attrs, + attrs_by_obj_id, + dst_object_type_ids, + task_entity_ids, + non_task_entity_ids, + parent_id_by_entity_id + ): + all_non_task_ids_with_parents = set() + for entity_id in non_task_entity_ids: + all_non_task_ids_with_parents.add(entity_id) + _entity_id = entity_id + while True: + parent_id = parent_id_by_entity_id.get(_entity_id) + if ( + parent_id is None + or parent_id in all_non_task_ids_with_parents + ): + break + all_non_task_ids_with_parents.add(parent_id) + _entity_id = parent_id + + all_entity_ids = ( + set(all_non_task_ids_with_parents) + | set(task_entity_ids) + ) + attr_ids = {attr["id"] for attr in hier_attrs} + for obj_id in dst_object_type_ids: + attrs = attrs_by_obj_id.get(obj_id) + if attrs is not None: + for attr in attrs: + 
attr_ids.add(attr["id"]) + + real_values_by_entity_id = { + entity_id: {} + for entity_id in all_entity_ids + } + + attr_values = query_custom_attributes( + session, attr_ids, all_entity_ids, True + ) + for item in attr_values: + entity_id = item["entity_id"] + attr_id = item["configuration_id"] + real_values_by_entity_id[entity_id][attr_id] = item["value"] + + # Fill hierarchical values + hier_attrs_key_by_id = { + hier_attr["id"]: hier_attr + for hier_attr in hier_attrs + } + hier_values_per_entity_id = {} + for entity_id in all_non_task_ids_with_parents: + real_values = real_values_by_entity_id[entity_id] + hier_values_per_entity_id[entity_id] = {} + for attr_id, attr in hier_attrs_key_by_id.items(): + key = attr["key"] + hier_values_per_entity_id[entity_id][key] = ( + real_values.get(attr_id) + ) + + output = {} + for entity_id in non_task_entity_ids: + output[entity_id] = {} + for attr in hier_attrs_key_by_id.values(): + key = attr["key"] + value = hier_values_per_entity_id[entity_id][key] + tried_ids = set() + if value is None: + tried_ids.add(entity_id) + _entity_id = entity_id + while value is None: + parent_id = parent_id_by_entity_id.get(_entity_id) + if not parent_id: + break + value = hier_values_per_entity_id[parent_id][key] + if value is not None: + break + _entity_id = parent_id + tried_ids.add(parent_id) + + if value is None: + value = attr["default"] + + if value is not None: + for ent_id in tried_ids: + hier_values_per_entity_id[ent_id][key] = value + + output[entity_id][key] = value + + return real_values_by_entity_id, output def propagate_values(self, session, event, selected_entities): ftrack_settings = self.get_ftrack_settings( @@ -156,29 +248,24 @@ def propagate_values(self, session, event, selected_entities): } task_object_type = object_types_by_low_name["task"] - destination_object_types = [task_object_type] + dst_object_type_ids = {task_object_type["id"]} for ent_type in interest_entity_types: obj_type = 
object_types_by_low_name.get(ent_type) - if obj_type and obj_type not in destination_object_types: - destination_object_types.append(obj_type) - - destination_object_type_ids = set( - obj_type["id"] - for obj_type in destination_object_types - ) + if obj_type: + dst_object_type_ids.add(obj_type["id"]) interest_attributes = action_settings["interest_attributes"] # Find custom attributes definitions attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, destination_object_type_ids, interest_attributes + session, dst_object_type_ids, interest_attributes ) # Filter destination object types if they have any object specific # custom attribute - for obj_id in tuple(destination_object_type_ids): + for obj_id in tuple(dst_object_type_ids): if obj_id not in attrs_by_obj_id: - destination_object_type_ids.remove(obj_id) + dst_object_type_ids.remove(obj_id) - if not destination_object_type_ids: + if not dst_object_type_ids: # TODO report that there are not matching custom attributes return { "success": True, @@ -192,14 +279,14 @@ def propagate_values(self, session, event, selected_entities): session, selected_ids, project_entity, - destination_object_type_ids + dst_object_type_ids ) self.log.debug("Preparing whole project hierarchy by ids.") entities_by_obj_id = { obj_id: [] - for obj_id in destination_object_type_ids + for obj_id in dst_object_type_ids } self.log.debug("Filtering Task entities.") @@ -223,10 +310,16 @@ def propagate_values(self, session, event, selected_entities): "message": "Nothing to do in your selection." 
} - self.log.debug("Getting Hierarchical custom attribute values parents.") - hier_values_by_entity_id = self.get_hier_values( + self.log.debug("Getting Custom attribute values.") + ( + real_values_by_entity_id, + hier_values_by_entity_id + ) = self.query_attr_value( session, hier_attrs, + attrs_by_obj_id, + dst_object_type_ids, + task_entity_ids, non_task_entity_ids, parent_id_by_entity_id ) @@ -237,7 +330,8 @@ def propagate_values(self, session, event, selected_entities): hier_attrs, task_entity_ids, hier_values_by_entity_id, - parent_id_by_entity_id + parent_id_by_entity_id, + real_values_by_entity_id ) self.log.debug("Setting values to entities themselves.") @@ -245,7 +339,8 @@ def propagate_values(self, session, event, selected_entities): session, entities_by_obj_id, attrs_by_obj_id, - hier_values_by_entity_id + hier_values_by_entity_id, + real_values_by_entity_id ) return True @@ -322,112 +417,64 @@ def all_hierarchy_entities( return parent_id_by_entity_id, filtered_entities - def get_hier_values( - self, - session, - hier_attrs, - focus_entity_ids, - parent_id_by_entity_id - ): - all_ids_with_parents = set() - for entity_id in focus_entity_ids: - all_ids_with_parents.add(entity_id) - _entity_id = entity_id - while True: - parent_id = parent_id_by_entity_id.get(_entity_id) - if ( - not parent_id - or parent_id in all_ids_with_parents - ): - break - all_ids_with_parents.add(parent_id) - _entity_id = parent_id - - hier_attr_ids = tuple(hier_attr["id"] for hier_attr in hier_attrs) - hier_attrs_key_by_id = { - hier_attr["id"]: hier_attr["key"] - for hier_attr in hier_attrs - } - - values_per_entity_id = {} - for entity_id in all_ids_with_parents: - values_per_entity_id[entity_id] = {} - for key in hier_attrs_key_by_id.values(): - values_per_entity_id[entity_id][key] = None - - values = query_custom_attributes( - session, hier_attr_ids, all_ids_with_parents, True - ) - for item in values: - entity_id = item["entity_id"] - key = 
hier_attrs_key_by_id[item["configuration_id"]] - - values_per_entity_id[entity_id][key] = item["value"] - - output = {} - for entity_id in focus_entity_ids: - output[entity_id] = {} - for key in hier_attrs_key_by_id.values(): - value = values_per_entity_id[entity_id][key] - tried_ids = set() - if value is None: - tried_ids.add(entity_id) - _entity_id = entity_id - while value is None: - parent_id = parent_id_by_entity_id.get(_entity_id) - if not parent_id: - break - value = values_per_entity_id[parent_id][key] - if value is not None: - break - _entity_id = parent_id - tried_ids.add(parent_id) - - if value is not None: - for ent_id in tried_ids: - values_per_entity_id[ent_id][key] = value - - output[entity_id][key] = value - return output - def set_task_attr_values( self, session, hier_attrs, task_entity_ids, hier_values_by_entity_id, - parent_id_by_entity_id + parent_id_by_entity_id, + real_values_by_entity_id ): hier_attr_id_by_key = { attr["key"]: attr["id"] for attr in hier_attrs } + filtered_task_ids = set() for task_id in task_entity_ids: - parent_id = parent_id_by_entity_id.get(task_id) or {} + parent_id = parent_id_by_entity_id.get(task_id) parent_values = hier_values_by_entity_id.get(parent_id) - if not parent_values: - continue + if parent_values: + filtered_task_ids.add(task_id) + + if not filtered_task_ids: + return + for task_id in filtered_task_ids: + parent_id = parent_id_by_entity_id[task_id] + parent_values = hier_values_by_entity_id[parent_id] hier_values_by_entity_id[task_id] = {} + real_task_attr_values = real_values_by_entity_id[task_id] for key, value in parent_values.items(): hier_values_by_entity_id[task_id][key] = value + if value is None: + continue + configuration_id = hier_attr_id_by_key[key] _entity_key = collections.OrderedDict([ ("configuration_id", configuration_id), ("entity_id", task_id) ]) - - session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + op = None + if 
configuration_id not in real_task_attr_values: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + _entity_key, + {"value": value} + ) + elif real_task_attr_values[configuration_id] != value: + op = ftrack_api.operation.UpdateEntityOperation( + "CustomAttributeValue", _entity_key, "value", - ftrack_api.symbol.NOT_SET, + real_task_attr_values[configuration_id], value ) - ) - if len(session.recorded_operations) > 100: - session.commit() + + if op is not None: + session.recorded_operations.push(op) + if len(session.recorded_operations) > 100: + session.commit() session.commit() @@ -436,39 +483,68 @@ def push_values_to_entities( session, entities_by_obj_id, attrs_by_obj_id, - hier_values_by_entity_id + hier_values_by_entity_id, + real_values_by_entity_id ): + """Push values from hierarchical custom attributes to non-hierarchical. + + Args: + session (ftrack_api.Session): Session which queried entities, + values and which is used for change propagation. + entities_by_obj_id (dict[str, list[str]]): TypedContext + ftrack entity ids where the attributes are propagated by their + object ids. + attrs_by_obj_id (dict[str, ftrack_api.Entity]): Objects of + 'CustomAttributeConfiguration' by their ids. + hier_values_by_entity_id (dict[str, dict[str, Any]]): Attribute + values by entity id and by their keys. + real_values_by_entity_id (dict[str, dict[str, Any]]): Real attribute + values of entities. 
+ """ + for object_id, entity_ids in entities_by_obj_id.items(): attrs = attrs_by_obj_id.get(object_id) if not attrs or not entity_ids: continue - for attr in attrs: - for entity_id in entity_ids: - value = ( - hier_values_by_entity_id - .get(entity_id, {}) - .get(attr["key"]) - ) + for entity_id in entity_ids: + real_values = real_values_by_entity_id.get(entity_id) + hier_values = hier_values_by_entity_id.get(entity_id) + if hier_values is None: + continue + + for attr in attrs: + attr_id = attr["id"] + attr_key = attr["key"] + value = hier_values.get(attr_key) if value is None: continue _entity_key = collections.OrderedDict([ - ("configuration_id", attr["id"]), + ("configuration_id", attr_id), ("entity_id", entity_id) ]) - session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + op = None + if attr_id not in real_values: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + _entity_key, + {"value": value} + ) + elif real_values[attr_id] != value: + op = ftrack_api.operation.UpdateEntityOperation( + "CustomAttributeValue", _entity_key, "value", - ftrack_api.symbol.NOT_SET, + real_values[attr_id], value ) - ) - if len(session.recorded_operations) > 100: - session.commit() + + if op is not None: + session.recorded_operations.push(op) + if len(session.recorded_operations) > 100: + session.commit() session.commit() diff --git a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py index d160b7200db..f6899843a39 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py +++ b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py @@ -12,7 +12,7 @@ class TransferHierarchicalValues(ServerAction): - """Transfer values across hierarhcical attributes. + """Transfer values across hierarchical attributes. 
Aalso gives ability to convert types meanwhile. That is limited to conversions between numbers and strings @@ -67,7 +67,7 @@ def _selection_interface(self, session, event_values=None): "type": "label", "value": ( "Didn't found custom attributes" - " that can be transfered." + " that can be transferred." ) }] } diff --git a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py b/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py index a65ae46545c..a100c34f679 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py +++ b/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py @@ -279,7 +279,7 @@ def set_next_task_statuses( except Exception: session.rollback() self.log.warning( - "\"{}\" status couldnt be set to \"{}\"".format( + "\"{}\" status couldn't be set to \"{}\"".format( ent_path, new_status["name"] ), exc_info=True diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py index dc76920a57c..ed630ad59d9 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py @@ -1,6 +1,6 @@ import collections -import datetime import copy +from typing import Any import ftrack_api from openpype_modules.ftrack.lib import ( @@ -9,13 +9,30 @@ ) -class PushFrameValuesToTaskEvent(BaseEvent): +class PushHierValuesToNonHierEvent(BaseEvent): + """Push value changes between hierarchical and non-hierarchical attributes. + + Changes of non-hierarchical attributes are pushed to hierarchical and back. + The attributes must have same definition of custom attribute. + + Handler does not handle changes of hierarchical parents. So if entity does + not have explicitly set value of hierarchical attribute and any parent + would change it the change would not be propagated. 
+ + The handler also push the value to task entity on task creation + and movement. To push values between hierarchical & non-hierarchical + add 'Task' to entity types in settings. + + Todos: + Task attribute values push on create/move should be possible to + enabled by settings. + """ + # Ignore event handler by default cust_attrs_query = ( "select id, key, object_type_id, is_hierarchical, default" " from CustomAttributeConfiguration" - " where key in ({}) and" - " (object_type_id in ({}) or is_hierarchical is true)" + " where key in ({})" ) _cached_task_object_id = None @@ -26,35 +43,35 @@ class PushFrameValuesToTaskEvent(BaseEvent): settings_key = "sync_hier_entity_attributes" - def session_user_id(self, session): - if self._cached_user_id is None: - user = session.query( - "User where username is \"{}\"".format(session.api_user) - ).one() - self._cached_user_id = user["id"] - return self._cached_user_id + def filter_entities_info( + self, event: ftrack_api.event.base.Event + ) -> dict[str, list[dict[str, Any]]]: + """Basic entities filter info we care about. - def launch(self, session, event): - filtered_entities_info = self.filter_entities_info(event) - if not filtered_entities_info: - return + This filtering is first of many filters. This does not query anything + from ftrack nor use settings. - for project_id, entities_info in filtered_entities_info.items(): - self.process_by_project(session, event, project_id, entities_info) + Args: + event (ftrack_api.event.base.Event): Ftrack event with update + information. + + Returns: + dict[str, list[dict[str, Any]]]: Filtered entity changes by + project id. 
+ """ - def filter_entities_info(self, event): # Filter if event contain relevant data entities_info = event["data"].get("entities") if not entities_info: return - entities_info_by_project_id = {} + entities_info_by_project_id = collections.defaultdict(list) for entity_info in entities_info: - # Care only about tasks - if entity_info.get("entityType") != "task": + # Ignore removed entities + if entity_info.get("action") == "remove": continue - # Care only about changes of status + # Care only about information with changes of entities changes = entity_info.get("changes") if not changes: continue @@ -69,367 +86,287 @@ def filter_entities_info(self, event): if project_id is None: continue - # Skip `Task` entity type if parent didn't change - if entity_info["entity_type"].lower() == "task": - if ( - "parent_id" not in changes - or changes["parent_id"]["new"] is None - ): - continue - - if project_id not in entities_info_by_project_id: - entities_info_by_project_id[project_id] = [] entities_info_by_project_id[project_id].append(entity_info) return entities_info_by_project_id - def process_by_project(self, session, event, project_id, entities_info): - project_name = self.get_project_name_from_event( + def _get_attrs_configurations(self, session, interest_attributes): + """Get custom attribute configurations by name. + + Args: + session (ftrack_api.Session): Ftrack sesson. + interest_attributes (list[str]): Names of custom attributes + that should be synchronized. + + Returns: + tuple[dict[str, list], list]: Attributes by object id and + hierarchical attributes. 
+ """ + + attrs = session.query(self.cust_attrs_query.format( + self.join_query_keys(interest_attributes) + )).all() + + attrs_by_obj_id = collections.defaultdict(list) + hier_attrs = [] + for attr in attrs: + if attr["is_hierarchical"]: + hier_attrs.append(attr) + continue + obj_id = attr["object_type_id"] + attrs_by_obj_id[obj_id].append(attr) + return attrs_by_obj_id, hier_attrs + + def _get_handler_project_settings( + self, + session: ftrack_api.Session, + event: ftrack_api.event.base.Event, + project_id: str + ) -> tuple[set[str], set[str]]: + """Get handler settings based on the project. + + Args: + session (ftrack_api.Session): Ftrack session. + event (ftrack_api.event.base.Event): Ftrack event which triggered + the changes. + project_id (str): Project id where the current changes are handled. + + Returns: + tuple[set[str], set[str]]: Attribute names we care about and + entity types we care about. + """ + + project_name: str = self.get_project_name_from_event( session, event, project_id ) # Load settings - project_settings = self.get_project_settings_from_event( - event, project_name + project_settings: dict[str, Any] = ( + self.get_project_settings_from_event(event, project_name) ) # Load status mapping from presets - event_settings = ( + event_settings: dict[str, Any] = ( project_settings ["ftrack"] ["events"] - ["sync_hier_entity_attributes"] + [self.settings_key] ) # Skip if event is not enabled if not event_settings["enabled"]: self.log.debug("Project \"{}\" has disabled {}".format( project_name, self.__class__.__name__ )) - return + return set(), set() - interest_attributes = event_settings["interest_attributes"] + interest_attributes: list[str] = event_settings["interest_attributes"] if not interest_attributes: self.log.info(( "Project \"{}\" does not have filled 'interest_attributes'," " skipping." 
)) - return - interest_entity_types = event_settings["interest_entity_types"] + + interest_entity_types: list[str] = ( + event_settings["interest_entity_types"]) if not interest_entity_types: self.log.info(( "Project \"{}\" does not have filled 'interest_entity_types'," " skipping." )) - return - - interest_attributes = set(interest_attributes) - interest_entity_types = set(interest_entity_types) - # Separate value changes and task parent changes - _entities_info = [] - added_entities = [] - added_entity_ids = set() - task_parent_changes = [] - for entity_info in entities_info: - if entity_info["entity_type"].lower() == "task": - task_parent_changes.append(entity_info) - elif entity_info.get("action") == "add": - added_entities.append(entity_info) - added_entity_ids.add(entity_info["entityId"]) - else: - _entities_info.append(entity_info) - entities_info = _entities_info - - # Filter entities info with changes - interesting_data, changed_keys_by_object_id = self.filter_changes( - session, event, entities_info, interest_attributes - ) - self.interesting_data_for_added( - session, - added_entities, - interest_attributes, - interesting_data, - changed_keys_by_object_id - ) - if not interesting_data and not task_parent_changes: - return - - # Prepare object types - object_types = session.query("select id, name from ObjectType").all() - object_types_by_name = {} - for object_type in object_types: - name_low = object_type["name"].lower() - object_types_by_name[name_low] = object_type - - # NOTE it would be nice to check if `interesting_data` do not contain - # value changs of tasks that were created or moved - # - it is a complex way how to find out - if interesting_data: - self.process_attribute_changes( - session, - object_types_by_name, - interesting_data, - changed_keys_by_object_id, - interest_entity_types, - interest_attributes, - added_entity_ids - ) - - if task_parent_changes: - self.process_task_parent_change( - session, object_types_by_name, 
task_parent_changes, - interest_entity_types, interest_attributes - ) + # Unify possible issues from settings ('Asset Build' -> 'assetbuild') + interest_entity_types: set[str] = { + entity_type.replace(" ", "").lower() + for entity_type in interest_entity_types + } + return set(interest_attributes), interest_entity_types - def process_task_parent_change( + def _entities_filter_by_settings( self, - session, - object_types_by_name, - task_parent_changes, - interest_entity_types, - interest_attributes + entities_info: list[dict[str, Any]], + interest_attributes: set[str], + interest_entity_types: set[str] ): - """Push custom attribute values if task parent has changed. - - Parent is changed if task is created or if is moved under different - entity. We don't care about all task changes only about those that - have it's parent in interest types (from settings). + new_entities_info = [] + for entity_info in entities_info: + entity_type_low = entity_info["entity_type"].lower() - Tasks hierarchical value should be unset or set based on parents - real hierarchical value and non hierarchical custom attribute value - should be set to hierarchical value. - """ + changes = entity_info["changes"] + # SPECIAL CASE: Capture changes of task created/moved under + # interested entity type + if ( + entity_type_low == "task" + and "parent_id" in changes + ): + # Direct parent is always second item in 'parents' and 'Task' + # must have at least one parent + parent_info = entity_info["parents"][1] + parent_entity_type = ( + parent_info["entity_type"] + .replace(" ", "") + .lower() + ) + if parent_entity_type in interest_entity_types: + new_entities_info.append(entity_info) + continue - # Store task ids which were created or moved under parent with entity - # type defined in settings (interest_entity_types). - task_ids = set() - # Store parent ids of matching task ids - matching_parent_ids = set() - # Store all entity ids of all entities to be able query hierarchical - # values. 
- whole_hierarchy_ids = set() - # Store parent id of each entity id - parent_id_by_entity_id = {} - for entity_info in task_parent_changes: - # Ignore entities with less parents than 2 - # NOTE entity itself is also part of "parents" value - parents = entity_info.get("parents") or [] - if len(parents) < 2: + # Skip if entity type is not enabled for attr value sync + if entity_type_low not in interest_entity_types: continue - parent_info = parents[1] - # Check if parent has entity type we care about. - if parent_info["entity_type"] not in interest_entity_types: - continue + valid_attr_change = entity_info.get("action") == "add" + for attr_key in interest_attributes: + if valid_attr_change: + break - task_ids.add(entity_info["entityId"]) - matching_parent_ids.add(parent_info["entityId"]) + if attr_key not in changes: + continue - # Store whole hierarchi of task entity - prev_id = None - for item in parents: - item_id = item["entityId"] - whole_hierarchy_ids.add(item_id) + if changes[attr_key]["new"] is not None: + valid_attr_change = True - if prev_id is None: - prev_id = item_id - continue + if not valid_attr_change: + continue - parent_id_by_entity_id[prev_id] = item_id - if item["entityType"] == "show": - break - prev_id = item_id + new_entities_info.append(entity_info) - # Just skip if nothing is interesting for our settings - if not matching_parent_ids: - return + return new_entities_info - # Query object type ids of parent ids for custom attribute - # definitions query - entities = session.query( - "select object_type_id from TypedContext where id in ({})".format( - self.join_query_keys(matching_parent_ids) - ) - ) + def propagate_attribute_changes( + self, + session, + interest_attributes, + entities_info, + attrs_by_obj_id, + hier_attrs, + real_values_by_entity_id, + hier_values_by_entity_id, + ): + hier_attr_ids_by_key = { + attr["key"]: attr["id"] + for attr in hier_attrs + } + filtered_interest_attributes = { + attr_name + for attr_name in 
interest_attributes + if attr_name in hier_attr_ids_by_key + } + attrs_keys_by_obj_id = {} + for obj_id, attrs in attrs_by_obj_id.items(): + attrs_keys_by_obj_id[obj_id] = { + attr["key"]: attr["id"] + for attr in attrs + } - # Prepare task object id - task_object_id = object_types_by_name["task"]["id"] + op_changes = [] + for entity_info in entities_info: + entity_id = entity_info["entityId"] + obj_id = entity_info["objectTypeId"] + # Skip attributes sync if does not have object specific custom + # attribute + if obj_id not in attrs_keys_by_obj_id: + continue + attr_keys = attrs_keys_by_obj_id[obj_id] + real_values = real_values_by_entity_id[entity_id] + hier_values = hier_values_by_entity_id[entity_id] + + changes = copy.deepcopy(entity_info["changes"]) + obj_id_attr_keys = { + attr_key + for attr_key in filtered_interest_attributes + if attr_key in attr_keys + } + if not obj_id_attr_keys: + continue - # All object ids for which we're querying custom attribute definitions - object_type_ids = set() - object_type_ids.add(task_object_id) - for entity in entities: - object_type_ids.add(entity["object_type_id"]) + value_by_key = {} + is_new_entity = entity_info.get("action") == "add" + for attr_key in obj_id_attr_keys: + if ( + attr_key in changes + and changes[attr_key]["new"] is not None + ): + value_by_key[attr_key] = changes[attr_key]["new"] - attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, object_type_ids, interest_attributes - ) + if not is_new_entity: + continue - # Skip if all task attributes are not available - task_attrs = attrs_by_obj_id.get(task_object_id) - if not task_attrs: - return + hier_attr_id = hier_attr_ids_by_key[attr_key] + attr_id = attr_keys[attr_key] + if hier_attr_id in real_values or attr_id in real_values: + continue - # Skip attributes that is not in both hierarchical and nonhierarchical - # TODO be able to push values if hierarchical is available - for key in interest_attributes: - if key not in hier_attrs: - 
task_attrs.pop(key, None) + value_by_key[attr_key] = hier_values[hier_attr_id] - elif key not in task_attrs: - hier_attrs.pop(key) + for key, new_value in value_by_key.items(): + if new_value is None: + continue - # Skip if nothing remained - if not task_attrs: - return + hier_id = hier_attr_ids_by_key[key] + std_id = attr_keys[key] + real_hier_value = real_values.get(hier_id) + real_std_value = real_values.get(std_id) + hier_value = hier_values[hier_id] + # Get right type of value for conversion + # - values in event are strings + type_value = real_hier_value + if type_value is None: + type_value = real_std_value + if type_value is None: + type_value = hier_value + # Skip if current values are not set + if type_value is None: + continue - # Do some preparations for custom attribute values query - attr_key_by_id = {} - nonhier_id_by_key = {} - hier_attr_ids = [] - for key, attr_id in hier_attrs.items(): - attr_key_by_id[attr_id] = key - hier_attr_ids.append(attr_id) - - conf_ids = list(hier_attr_ids) - task_conf_ids = [] - for key, attr_id in task_attrs.items(): - attr_key_by_id[attr_id] = key - nonhier_id_by_key[key] = attr_id - conf_ids.append(attr_id) - task_conf_ids.append(attr_id) - - # Query custom attribute values - # - result does not contain values for all entities only result of - # query callback to ftrack server - result = query_custom_attributes( - session, list(hier_attr_ids), whole_hierarchy_ids, True - ) - result.extend( - query_custom_attributes( - session, task_conf_ids, whole_hierarchy_ids, False - ) - ) + try: + new_value = type(type_value)(new_value) + except Exception: + self.log.warning(( + "Couldn't convert from {} to {}." + " Skipping update values." 
+ ).format(type(new_value), type(type_value))) + continue - # Prepare variables where result will be stored - # - hierachical values should not contain attribute with value by - # default - hier_values_by_entity_id = { - entity_id: {} - for entity_id in whole_hierarchy_ids - } - # - real values of custom attributes - values_by_entity_id = { - entity_id: { - attr_id: None - for attr_id in conf_ids - } - for entity_id in whole_hierarchy_ids - } - for item in result: - attr_id = item["configuration_id"] - entity_id = item["entity_id"] - value = item["value"] - - values_by_entity_id[entity_id][attr_id] = value - - if attr_id in hier_attr_ids and value is not None: - hier_values_by_entity_id[entity_id][attr_id] = value - - # Prepare values for all task entities - # - going through all parents and storing first value value - # - store None to those that are already known that do not have set - # value at all - for task_id in tuple(task_ids): - for attr_id in hier_attr_ids: - entity_ids = [] - value = None - entity_id = task_id - while value is None: - entity_value = hier_values_by_entity_id[entity_id] - if attr_id in entity_value: - value = entity_value[attr_id] - if value is None: - break - - if value is None: - entity_ids.append(entity_id) - - entity_id = parent_id_by_entity_id.get(entity_id) - if entity_id is None: - break - - for entity_id in entity_ids: - hier_values_by_entity_id[entity_id][attr_id] = value - - # Prepare changes to commit - changes = [] - for task_id in tuple(task_ids): - parent_id = parent_id_by_entity_id[task_id] - for attr_id in hier_attr_ids: - attr_key = attr_key_by_id[attr_id] - nonhier_id = nonhier_id_by_key[attr_key] - - # Real value of hierarchical attribute on parent - # - If is none then should be unset - real_parent_value = values_by_entity_id[parent_id][attr_id] - # Current hierarchical value of a task - # - Will be compared to real parent value - hier_value = hier_values_by_entity_id[task_id][attr_id] - - # Parent value that can be 
inherited from it's parent entity - parent_value = hier_values_by_entity_id[parent_id][attr_id] - # Task value of nonhierarchical custom attribute - nonhier_value = values_by_entity_id[task_id][nonhier_id] - - if real_parent_value != hier_value: - changes.append({ - "new_value": real_parent_value, - "attr_id": attr_id, - "entity_id": task_id, - "attr_key": attr_key - }) - - if parent_value != nonhier_value: - changes.append({ - "new_value": parent_value, - "attr_id": nonhier_id, - "entity_id": task_id, - "attr_key": attr_key - }) - - self._commit_changes(session, changes) - - def _commit_changes(self, session, changes): - uncommited_changes = False - for idx, item in enumerate(changes): - new_value = item["new_value"] - old_value = item["old_value"] - attr_id = item["attr_id"] - entity_id = item["entity_id"] - attr_key = item["attr_key"] + real_std_value_is_same = new_value == real_std_value + real_hier_value_is_same = new_value == real_hier_value + # New value does not match anything in current entity values + if ( + not is_new_entity + and not real_std_value_is_same + and not real_hier_value_is_same + ): + continue - entity_key = collections.OrderedDict(( + if not real_std_value_is_same: + op_changes.append(( + std_id, + entity_id, + new_value, + real_values.get(std_id), + std_id in real_values + )) + + if not real_hier_value_is_same: + op_changes.append(( + hier_id, + entity_id, + new_value, + real_values.get(hier_id), + hier_id in real_values + )) + + for change in op_changes: + ( + attr_id, + entity_id, + new_value, + old_value, + do_update + ) = change + + entity_key = collections.OrderedDict([ ("configuration_id", attr_id), ("entity_id", entity_id) - )) - self._cached_changes.append({ - "attr_key": attr_key, - "entity_id": entity_id, - "value": new_value, - "time": datetime.datetime.now() - }) - old_value_is_set = ( - old_value is not ftrack_api.symbol.NOT_SET - and old_value is not None - ) - if new_value is None: - if not old_value_is_set: - continue - op 
= ftrack_api.operation.DeleteEntityOperation( - "CustomAttributeValue", - entity_key - ) - - elif old_value_is_set: + ]) + if do_update: op = ftrack_api.operation.UpdateEntityOperation( "CustomAttributeValue", entity_key, @@ -446,449 +383,116 @@ def _commit_changes(self, session, changes): ) session.recorded_operations.push(op) - self.log.info(( - "Changing Custom Attribute \"{}\" to value" - " \"{}\" on entity: {}" - ).format(attr_key, new_value, entity_id)) - - if (idx + 1) % 20 == 0: - uncommited_changes = False - try: - session.commit() - except Exception: - session.rollback() - self.log.warning( - "Changing of values failed.", exc_info=True - ) - else: - uncommited_changes = True - if uncommited_changes: - try: + if len(session.recorded_operations) > 100: session.commit() - except Exception: - session.rollback() - self.log.warning("Changing of values failed.", exc_info=True) + session.commit() - def process_attribute_changes( + def process_by_project( self, - session, - object_types_by_name, - interesting_data, - changed_keys_by_object_id, - interest_entity_types, - interest_attributes, - added_entity_ids + session: ftrack_api.Session, + event: ftrack_api.event.base.Event, + project_id: str, + entities_info: list[dict[str, Any]] ): - # Prepare task object id - task_object_id = object_types_by_name["task"]["id"] - - # Collect object type ids based on settings - interest_object_ids = [] - for entity_type in interest_entity_types: - _entity_type = entity_type.lower() - object_type = object_types_by_name.get(_entity_type) - if not object_type: - self.log.warning("Couldn't find object type \"{}\"".format( - entity_type - )) - - interest_object_ids.append(object_type["id"]) - - # Query entities by filtered data and object ids - entities = self.get_entities( - session, interesting_data, interest_object_ids - ) - if not entities: - return - - # Pop not found entities from interesting data - entity_ids = set( - entity["id"] - for entity in entities - ) - for entity_id 
in tuple(interesting_data.keys()): - if entity_id not in entity_ids: - interesting_data.pop(entity_id) - - # Add task object type to list - attr_obj_ids = list(interest_object_ids) - attr_obj_ids.append(task_object_id) - - attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, attr_obj_ids, interest_attributes - ) - - task_attrs = attrs_by_obj_id.get(task_object_id) - - changed_keys = set() - # Skip keys that are not both in hierachical and type specific - for object_id, keys in changed_keys_by_object_id.items(): - changed_keys |= set(keys) - object_id_attrs = attrs_by_obj_id.get(object_id) - for key in keys: - if key not in hier_attrs: - attrs_by_obj_id[object_id].pop(key) - continue + """Process changes in single project. + + Args: + session (ftrack_api.Session): Ftrack session. + event (ftrack_api.event.base.Event): Event which has all changes + information. + project_id (str): Project id related to changes. + entities_info (list[dict[str, Any]]): Changes of entities. + """ - if ( - (not object_id_attrs or key not in object_id_attrs) - and (not task_attrs or key not in task_attrs) - ): - hier_attrs.pop(key) - - # Clean up empty values - for key, value in tuple(attrs_by_obj_id.items()): - if not value: - attrs_by_obj_id.pop(key) - - if not attrs_by_obj_id: - self.log.warning(( - "There is not created Custom Attributes {} " - " for entity types: {}" - ).format( - self.join_query_keys(interest_attributes), - self.join_query_keys(interest_entity_types) - )) + ( + interest_attributes, + interest_entity_types + ) = self._get_handler_project_settings(session, event, project_id) + if not interest_attributes or not interest_entity_types: return - # Prepare task entities - task_entities = [] - # If task entity does not contain changed attribute then skip - if task_attrs: - task_entities = self.get_task_entities(session, interesting_data) - - task_entity_ids = set() - parent_id_by_task_id = {} - for task_entity in task_entities: - task_id = task_entity["id"] - 
task_entity_ids.add(task_id) - parent_id_by_task_id[task_id] = task_entity["parent_id"] - - self.finalize_attribute_changes( - session, - interesting_data, - changed_keys, - attrs_by_obj_id, - hier_attrs, - task_entity_ids, - parent_id_by_task_id, - added_entity_ids - ) - - def finalize_attribute_changes( - self, - session, - interesting_data, - changed_keys, - attrs_by_obj_id, - hier_attrs, - task_entity_ids, - parent_id_by_task_id, - added_entity_ids - ): - attr_id_to_key = {} - for attr_confs in attrs_by_obj_id.values(): - for key in changed_keys: - custom_attr_id = attr_confs.get(key) - if custom_attr_id: - attr_id_to_key[custom_attr_id] = key - - for key in changed_keys: - custom_attr_id = hier_attrs.get(key) - if custom_attr_id: - attr_id_to_key[custom_attr_id] = key - - entity_ids = ( - set(interesting_data.keys()) | task_entity_ids - ) - attr_ids = set(attr_id_to_key.keys()) - - current_values_by_id = self.get_current_values( - session, - attr_ids, - entity_ids, - task_entity_ids, - hier_attrs + entities_info: list[dict[str, Any]] = ( + self._entities_filter_by_settings( + entities_info, + interest_attributes, + interest_entity_types + ) ) - - changes = [] - for entity_id, current_values in current_values_by_id.items(): - parent_id = parent_id_by_task_id.get(entity_id) - if not parent_id: - parent_id = entity_id - values = interesting_data[parent_id] - - added_entity = entity_id in added_entity_ids - for attr_id, old_value in current_values.items(): - if added_entity and attr_id in hier_attrs: - continue - - attr_key = attr_id_to_key.get(attr_id) - if not attr_key: - continue - - # Convert new value from string - new_value = values.get(attr_key) - new_value_is_valid = ( - old_value is not ftrack_api.symbol.NOT_SET - and new_value is not None - ) - if added_entity and not new_value_is_valid: - continue - - if new_value is not None and new_value_is_valid: - try: - new_value = type(old_value)(new_value) - except Exception: - self.log.warning(( - "Couldn't 
convert from {} to {}." - " Skipping update values." - ).format(type(new_value), type(old_value))) - if new_value == old_value: - continue - - changes.append({ - "new_value": new_value, - "attr_id": attr_id, - "old_value": old_value, - "entity_id": entity_id, - "attr_key": attr_key - }) - self._commit_changes(session, changes) - - def filter_changes( - self, session, event, entities_info, interest_attributes - ): - session_user_id = self.session_user_id(session) - user_data = event["data"].get("user") - changed_by_session = False - if user_data and user_data.get("userid") == session_user_id: - changed_by_session = True - - current_time = datetime.datetime.now() - - interesting_data = {} - changed_keys_by_object_id = {} - - for entity_info in entities_info: - # Care only about changes if specific keys - entity_changes = {} - changes = entity_info["changes"] - for key in interest_attributes: - if key in changes: - entity_changes[key] = changes[key]["new"] - - entity_id = entity_info["entityId"] - if changed_by_session: - for key, new_value in tuple(entity_changes.items()): - for cached in tuple(self._cached_changes): - if ( - cached["entity_id"] != entity_id - or cached["attr_key"] != key - ): - continue - - cached_value = cached["value"] - try: - new_value = type(cached_value)(new_value) - except Exception: - pass - - if cached_value == new_value: - self._cached_changes.remove(cached) - entity_changes.pop(key) - break - - delta = (current_time - cached["time"]).seconds - if delta > self._max_delta: - self._cached_changes.remove(cached) - - if not entity_changes: - continue - - entity_id = entity_info["entityId"] - object_id = entity_info["objectTypeId"] - interesting_data[entity_id] = entity_changes - if object_id not in changed_keys_by_object_id: - changed_keys_by_object_id[object_id] = set() - changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) - - return interesting_data, changed_keys_by_object_id - - def interesting_data_for_added( - self, - 
session, - added_entities, - interest_attributes, - interesting_data, - changed_keys_by_object_id - ): - if not added_entities or not interest_attributes: + if not entities_info: return - object_type_ids = set() - entity_ids = set() - all_entity_ids = set() - object_id_by_entity_id = {} - project_id = None - entity_ids_by_parent_id = collections.defaultdict(set) - for entity_info in added_entities: - object_id = entity_info["objectTypeId"] - entity_id = entity_info["entityId"] - object_type_ids.add(object_id) - entity_ids.add(entity_id) - object_id_by_entity_id[entity_id] = object_id - - for item in entity_info["parents"]: - entity_id = item["entityId"] - all_entity_ids.add(entity_id) - parent_id = item["parentId"] - if not parent_id: - project_id = entity_id - else: - entity_ids_by_parent_id[parent_id].add(entity_id) - - hier_attrs = self.get_hierarchical_configurations( + attrs_by_obj_id, hier_attrs = self._get_attrs_configurations( session, interest_attributes ) - if not hier_attrs: + # Skip if attributes are not available + # - there is nothing to sync + if not attrs_by_obj_id or not hier_attrs: return - hier_attrs_key_by_id = { - attr_conf["id"]: attr_conf["key"] - for attr_conf in hier_attrs - } - default_values_by_key = { - attr_conf["key"]: attr_conf["default"] - for attr_conf in hier_attrs - } - - values = query_custom_attributes( - session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + entity_ids_by_parent_id = collections.defaultdict(set) + all_entity_ids = set() + for entity_info in entities_info: + entity_id = None + for item in entity_info["parents"]: + item_id = item["entityId"] + all_entity_ids.add(item_id) + if entity_id is not None: + entity_ids_by_parent_id[item_id].add(entity_id) + entity_id = item_id + + attr_ids = {attr["id"] for attr in hier_attrs} + for attrs in attrs_by_obj_id.values(): + attr_ids |= {attr["id"] for attr in attrs} + + # Query real custom attribute values + # - we have to know what are the real values, if are set 
and to what + # value + value_items = query_custom_attributes( + session, attr_ids, all_entity_ids, True ) - values_per_entity_id = {} - for entity_id in all_entity_ids: - values_per_entity_id[entity_id] = {} - for attr_name in interest_attributes: - values_per_entity_id[entity_id][attr_name] = None - - for item in values: + real_values_by_entity_id = collections.defaultdict(dict) + for item in value_items: entity_id = item["entity_id"] - key = hier_attrs_key_by_id[item["configuration_id"]] - values_per_entity_id[entity_id][key] = item["value"] - - fill_queue = collections.deque() - fill_queue.append((project_id, default_values_by_key)) - while fill_queue: - item = fill_queue.popleft() - entity_id, values_by_key = item - entity_values = values_per_entity_id[entity_id] - new_values_by_key = copy.deepcopy(values_by_key) - for key, value in values_by_key.items(): - current_value = entity_values[key] - if current_value is None: - entity_values[key] = value - else: - new_values_by_key[key] = current_value - - for child_id in entity_ids_by_parent_id[entity_id]: - fill_queue.append((child_id, new_values_by_key)) - - for entity_id in entity_ids: - entity_changes = {} - for key, value in values_per_entity_id[entity_id].items(): - if value is not None: - entity_changes[key] = value - - if not entity_changes: - continue - - interesting_data[entity_id] = entity_changes - object_id = object_id_by_entity_id[entity_id] - if object_id not in changed_keys_by_object_id: - changed_keys_by_object_id[object_id] = set() - changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) + attr_id = item["configuration_id"] + real_values_by_entity_id[entity_id][attr_id] = item["value"] - def get_current_values( - self, - session, - attr_ids, - entity_ids, - task_entity_ids, - hier_attrs - ): - current_values_by_id = {} - if not attr_ids or not entity_ids: - return current_values_by_id - - for entity_id in entity_ids: - current_values_by_id[entity_id] = {} - for attr_id in attr_ids: - 
current_values_by_id[entity_id][attr_id] = ( - ftrack_api.symbol.NOT_SET + hier_values_by_entity_id = {} + default_values = { + attr["id"]: attr["default"] + for attr in hier_attrs + } + hier_queue = collections.deque() + hier_queue.append((default_values, [project_id])) + while hier_queue: + parent_values, entity_ids = hier_queue.popleft() + for entity_id in entity_ids: + entity_values = copy.deepcopy(parent_values) + real_values = real_values_by_entity_id[entity_id] + for attr_id, value in real_values.items(): + entity_values[attr_id] = value + hier_values_by_entity_id[entity_id] = entity_values + hier_queue.append( + (entity_values, entity_ids_by_parent_id[entity_id]) ) - values = query_custom_attributes( - session, attr_ids, entity_ids, True + self.propagate_attribute_changes( + session, + interest_attributes, + entities_info, + attrs_by_obj_id, + hier_attrs, + real_values_by_entity_id, + hier_values_by_entity_id, ) - for item in values: - entity_id = item["entity_id"] - attr_id = item["configuration_id"] - if entity_id in task_entity_ids and attr_id in hier_attrs: - continue - - if entity_id not in current_values_by_id: - current_values_by_id[entity_id] = {} - current_values_by_id[entity_id][attr_id] = item["value"] - return current_values_by_id - - def get_entities(self, session, interesting_data, interest_object_ids): - return session.query(( - "select id from TypedContext" - " where id in ({}) and object_type_id in ({})" - ).format( - self.join_query_keys(interesting_data.keys()), - self.join_query_keys(interest_object_ids) - )).all() - - def get_task_entities(self, session, interesting_data): - return session.query( - "select id, parent_id from Task where parent_id in ({})".format( - self.join_query_keys(interesting_data.keys()) - ) - ).all() - - def attrs_configurations(self, session, object_ids, interest_attributes): - attrs = session.query(self.cust_attrs_query.format( - self.join_query_keys(interest_attributes), - self.join_query_keys(object_ids) - 
)).all() + def launch(self, session, event): + filtered_entities_info = self.filter_entities_info(event) + if not filtered_entities_info: + return - output = {} - hiearchical = {} - for attr in attrs: - if attr["is_hierarchical"]: - hiearchical[attr["key"]] = attr["id"] - continue - obj_id = attr["object_type_id"] - if obj_id not in output: - output[obj_id] = {} - output[obj_id][attr["key"]] = attr["id"] - return output, hiearchical - - def get_hierarchical_configurations(self, session, interest_attributes): - hier_attr_query = ( - "select id, key, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where key in ({}) and is_hierarchical is true" - ) - if not interest_attributes: - return [] - return list(session.query(hier_attr_query.format( - self.join_query_keys(interest_attributes), - )).all()) + for project_id, entities_info in filtered_entities_info.items(): + self.process_by_project(session, event, project_id, entities_info) def register(session): - PushFrameValuesToTaskEvent(session).register() + PushHierValuesToNonHierEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py b/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py index 99ad3aec374..358a8d2310d 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py +++ b/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py @@ -7,7 +7,7 @@ class RadioButtons(BaseEvent): ignore_me = True def launch(self, session, event): - '''Provides a readio button behaviour to any bolean attribute in + '''Provides a radio button behaviour to any boolean attribute in radio_button group.''' # start of event procedure ---------------------------------- diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 0058a428e33..0aa0b9f9f54 100644 --- 
a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -787,7 +787,7 @@ def launch(self, session, event): # Filter updates where name is changing for ftrack_id, ent_info in updated.items(): ent_keys = ent_info["keys"] - # Seprate update info from rename + # Separate update info from rename if "name" not in ent_keys: continue @@ -827,7 +827,7 @@ def launch(self, session, event): # 5.) Process updated self.process_updated() time_6 = time.time() - # 6.) Process changes in hierarchy or hier custom attribues + # 6.) Process changes in hierarchy or hier custom attributes self.process_hier_cleanup() time_7 = time.time() self.process_task_updates() @@ -1094,7 +1094,7 @@ def process_removed(self): def check_names_synchronizable(self, names): """Check if entities with specific names are importable. - This check should happend after removing entity or renaming entity. + This check should happen after removing entity or renaming entity. When entity was removed or renamed then it's name is possible to sync. """ joined_passed_names = ", ".join( @@ -1743,7 +1743,7 @@ def process_added(self): def process_moved(self): """ - Handles moved entities to different place in hiearchy. + Handles moved entities to different place in hierarchy. (Not tasks - handled separately.) """ if not self.ftrack_moved: @@ -1792,7 +1792,7 @@ def process_moved(self): self.log.warning("{} <{}>".format(error_msg, ent_path)) continue - # THIS MUST HAPPEND AFTER CREATING NEW ENTITIES !!!! + # THIS MUST HAPPEN AFTER CREATING NEW ENTITIES !!!! 
# - because may be moved to new created entity if "data" not in self.updates[mongo_id]: self.updates[mongo_id]["data"] = {} @@ -2323,7 +2323,7 @@ def process_hier_cleanup(self): items.append("{} - \"{}\"".format(ent_path, value)) self.report_items["error"][fps_msg] = items - # Get dictionary with not None hierarchical values to pull to childs + # Get dictionary with not None hierarchical values to pull to children project_values = {} for key, value in ( entities_dict[ftrack_project_id]["hier_attrs"].items() @@ -2460,7 +2460,7 @@ def process_task_updates(self): def update_entities(self): """ Update Avalon entities by mongo bulk changes. - Expects self.updates which are transfered to $set part of update + Expects self.updates which are transferred to $set part of update command. Resets self.updates afterwards. """ diff --git a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py b/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py index a0e039926e0..25fa3b0535b 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py +++ b/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py @@ -291,7 +291,7 @@ def process_by_project(self, session, event, project_id, entities_info): except Exception: session.rollback() self.log.warning( - "\"{}\" status couldnt be set to \"{}\"".format( + "\"{}\" status couldn't be set to \"{}\"".format( ent_path, new_status["name"] ), exc_info=True @@ -399,7 +399,7 @@ def new_status_by_remainders( # For cases there are multiple tasks in changes # - task status which match any new status item by order in the - # list `single_match` is preffered + # list `single_match` is preferred best_order = len(single_match) best_order_status = None for task_entity in task_entities: diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index 
c4e48b92f0d..9539a34f5e1 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -10,11 +10,11 @@ class UserAssigmentEvent(BaseEvent): """ - This script will intercept user assigment / de-assigment event and + This script will intercept user assignment / de-assignment event and run shell script, providing as much context as possible. It expects configuration file ``presets/ftrack/user_assigment_event.json``. - In it, you define paths to scripts to be run for user assigment event and + In it, you define paths to scripts to be run for user assignment event and for user-deassigment:: { "add": [ diff --git a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py b/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py index e36c3eecd98..fb40fd6417f 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py +++ b/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py @@ -102,7 +102,7 @@ def process_by_project(self, session, event, project_id, entities_info): asset_version_entities.append(asset_version) task_ids.add(asset_version["task_id"]) - # Skipt if `task_ids` are empty + # Skip if `task_ids` are empty if not task_ids: return diff --git a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py b/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py index c7fb1af98b9..06d572601dc 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py +++ b/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py @@ -7,7 +7,7 @@ class BatchTasksAction(BaseAction): '''Batch Tasks action - `label` a descriptive string identifing your action. + `label` a descriptive string identifying your action. 
`varaint` To group actions together, give them the same label and specify a unique variant per action. `identifier` a unique identifier for your action. diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py index c19cfd1502f..471a8c41824 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py @@ -82,9 +82,9 @@ write_security_roles/read_security_roles (array of strings) - default: ["ALL"] - strings should be role names (e.g.: ["API", "Administrator"]) - - if set to ["ALL"] - all roles will be availabled + - if set to ["ALL"] - all roles will be available - if first is 'except' - roles will be set to all except roles in array - - Warning: Be carefull with except - roles can be different by company + - Warning: Be careful with except - roles can be different by company - example: write_security_roles = ["except", "User"] read_security_roles = ["ALL"] # (User is can only read) @@ -500,7 +500,7 @@ def process_attr_data(self, cust_attr_data, event): data = {} # Get key, label, type data.update(self.get_required(cust_attr_data)) - # Get hierachical/ entity_type/ object_id + # Get hierarchical/ entity_type/ object_id data.update(self.get_entity_type(cust_attr_data)) # Get group, default, security roles data.update(self.get_optional(cust_attr_data)) diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index 9806f83773d..cbeff5343fb 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -51,7 +51,7 @@ def interface(self, session, entities, event): }, { "type": "label", - "value": "With all chilren entities" + "value": "With all children entities" }, { "name": 
"children_included", diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py index 03d029b0c18..72a5efbcfee 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py @@ -18,7 +18,7 @@ class DeleteAssetSubset(BaseAction): # Action label. label = "Delete Asset/Subsets" # Action description. - description = "Removes from Avalon with all childs and asset from Ftrack" + description = "Removes from Avalon with all children and asset from Ftrack" icon = statics_icon("ftrack", "action_icons", "DeleteAsset.svg") settings_key = "delete_asset_subset" diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index c543dc8834f..ec14c6918bf 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -27,7 +27,7 @@ class DeleteOldVersions(BaseAction): variant = "- Delete old versions" description = ( "Delete files from older publishes so project can be" - " archived with only lates versions." + " archived with only latest versions." 
) icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") @@ -307,7 +307,7 @@ def sort_func(ent): file_path, seq_path = self.path_from_represenation(repre, anatomy) if file_path is None: self.log.warning(( - "Could not format path for represenation \"{}\"" + "Could not format path for representation \"{}\"" ).format(str(repre))) continue diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index a400c8f5f03..559de3a24d7 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -601,7 +601,7 @@ def real_launch(self, session, entities, event): return self.report(report_items) def report(self, report_items): - """Returns dict with final status of delivery (succes, fail etc.).""" + """Returns dict with final status of delivery (success, fail etc.).""" items = [] for msg, _items in report_items.items(): diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index fb1cdf340e2..36d29db96b6 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -246,7 +246,7 @@ def in_job_process( project_name = project_entity["full_name"] - # Find matchin asset documents and map them by ftrack task entities + # Find matching asset documents and map them by ftrack task entities # - result stored to 'asset_docs_with_task_entities' is list with # tuple `(asset document, [task entitis, ...])` # Quety all asset documents diff --git a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py b/openpype/modules/ftrack/event_handlers_user/action_job_killer.py index f489c0c54c3..dd68c75f846 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py +++ 
b/openpype/modules/ftrack/event_handlers_user/action_job_killer.py @@ -54,14 +54,14 @@ def interface(self, session, entities, event): for job in jobs: try: data = json.loads(job["data"]) - desctiption = data["description"] + description = data["description"] except Exception: - desctiption = "*No description*" + description = "*No description*" user_id = job["user_id"] username = usernames_by_id.get(user_id) or "Unknown user" created = job["created_at"].strftime('%d.%m.%Y %H:%M:%S') label = "{} - {} - {}".format( - username, desctiption, created + username, description, created ) item_label = { "type": "label", diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index e8251981803..19d5701e084 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -24,7 +24,7 @@ class PrepareProjectLocal(BaseAction): settings_key = "prepare_project" - # Key to store info about trigerring create folder structure + # Key to store info about triggering create folder structure create_project_structure_key = "create_folder_structure" create_project_structure_identifier = "create.project.structure" item_splitter = {"type": "label", "value": "---"} @@ -146,7 +146,7 @@ def prepare_root_items(self, project_anatom_settings): root_items.append({ "type": "label", "value": ( - "

NOTE: Roots are crutial for path filling" + "

NOTE: Roots are crucial for path filling" " (and creating folder structure).

" ) }) diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index d05f0c47f6d..39cf33d6056 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -66,7 +66,7 @@ def preregister(self): def get_components_from_entity(self, session, entity, components): """Get components from various entity types. - The components dictionary is modifid in place, so nothing is returned. + The components dictionary is modified in place, so nothing is returned. Args: entity (Ftrack entity) diff --git a/openpype/modules/ftrack/event_handlers_user/action_seed.py b/openpype/modules/ftrack/event_handlers_user/action_seed.py index 4021d70c0ac..657cd07a9fd 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_seed.py +++ b/openpype/modules/ftrack/event_handlers_user/action_seed.py @@ -325,8 +325,8 @@ def create_assets(self, project, asset_count): ): index = 0 - self.log.debug("*** Commiting Assets") - self.log.debug("Commiting entities. {}/{}".format( + self.log.debug("*** Committing Assets") + self.log.debug("Committing entities. {}/{}".format( created_entities, to_create_length )) self.session.commit() @@ -414,8 +414,8 @@ def create_shots(self, project, seq_count, shots_count): ): index = 0 - self.log.debug("*** Commiting Shots") - self.log.debug("Commiting entities. {}/{}".format( + self.log.debug("*** Committing Shots") + self.log.debug("Committing entities. {}/{}".format( created_entities, to_create_length )) self.session.commit() @@ -423,7 +423,7 @@ def create_shots(self, project, seq_count, shots_count): def temp_commit(self, index, created_entities, to_create_length): if index < self.max_entities_created_at_one_commit: return False - self.log.debug("Commiting {} entities. {}/{}".format( + self.log.debug("Committing {} entities. 
{}/{}".format( index, created_entities, to_create_length )) self.session.commit() diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py index 8748f426bdc..c9e0901623b 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py @@ -184,7 +184,7 @@ def launch(self, session, entities, event): self.db_con.install() for entity in entities: - # Skip if entity is not AssetVersion (never should happend, but..) + # Skip if entity is not AssetVersion (should never happen, but..) if entity.entity_type.lower() != "assetversion": continue diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py index ad7ffd8e25f..77f479ee207 100644 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/openpype/modules/ftrack/ftrack_server/event_server_cli.py @@ -33,7 +33,7 @@ class MongoPermissionsError(Exception): """Is used when is created multiple objects of same RestApi class.""" def __init__(self, message=None): if not message: - message = "Exiting because have issue with acces to MongoDB" + message = "Exiting because have issue with access to MongoDB" super().__init__(message) @@ -340,7 +340,7 @@ def on_exit(processor_thread, storer_thread, statuser_thread): return 1 # ====== STORER ======= - # Run backup thread which does not requeire mongo to work + # Run backup thread which does not require mongo to work if storer_thread is None: if storer_failed_count < max_fail_count: storer_thread = socket_thread.SocketThread( @@ -399,7 +399,7 @@ def on_exit(processor_thread, storer_thread, statuser_thread): elif not processor_thread.is_alive(): if processor_thread.mongo_error: raise Exception( - "Exiting because have issue with acces to MongoDB" + "Exiting because have 
issue with access to MongoDB" ) processor_thread.join() processor_thread = None diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 0341c257178..8b4c4619a19 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -891,7 +891,7 @@ def filter_by_ignore_sync(self): parent_dict = self.entities_dict.get(parent_id, {}) for child_id in parent_dict.get("children", []): - # keep original `remove` value for all childs + # keep original `remove` value for all children _remove = (remove is True) if not _remove: if self.entities_dict[child_id]["avalon_attrs"].get( @@ -1191,8 +1191,8 @@ def set_hierarchical_attribute( avalon_hier = [] for item in items: value = item["value"] - # WARNING It is not possible to propage enumerate hierachical - # attributes with multiselection 100% right. Unseting all values + # WARNING It is not possible to propagate enumerate hierarchical + # attributes with multiselection 100% right. Unsetting all values # will cause inheritance from parent. if ( value is None @@ -1231,7 +1231,7 @@ def set_hierarchical_attribute( items.append("{} - \"{}\"".format(ent_path, value)) self.report_items["error"][fps_msg] = items - # Get dictionary with not None hierarchical values to pull to childs + # Get dictionary with not None hierarchical values to pull to children top_id = self.ft_project_id project_values = {} for key, value in self.entities_dict[top_id]["hier_attrs"].items(): @@ -1749,7 +1749,7 @@ def prepare_changes(self): # TODO logging ent_path = self.get_ent_path(ftrack_id) msg = ( - " It is not possible" + " It is not possible" " to change the hierarchy of an entity or it's parents," " if it already contained published data." 
) @@ -2584,8 +2584,8 @@ def delete_entities(self): # # ent_dict = self.entities_dict[found_by_name_id] - # TODO report - CRITICAL entity with same name alread exists in - # different hierarchy - can't recreate entity + # TODO report - CRITICAL entity with same name already exists + # in different hierarchy - can't recreate entity continue _vis_parent = deleted_entity["data"]["visualParent"] diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py index 2f538153680..3e40bb02f21 100644 --- a/openpype/modules/ftrack/lib/custom_attributes.py +++ b/openpype/modules/ftrack/lib/custom_attributes.py @@ -65,7 +65,7 @@ def get_openpype_attr(session, split_hierarchical=True, query_keys=None): cust_attrs_query = ( "select {}" " from CustomAttributeConfiguration" - # Kept `pype` for Backwards Compatiblity + # Kept `pype` for Backwards Compatibility " where group.name in (\"pype\", \"{}\")" ).format(", ".join(query_keys), CUST_ATTR_GROUP) all_avalon_attr = session.query(cust_attrs_query).all() diff --git a/openpype/modules/ftrack/lib/ftrack_action_handler.py b/openpype/modules/ftrack/lib/ftrack_action_handler.py index b24fe5f12ae..07b3a780a2b 100644 --- a/openpype/modules/ftrack/lib/ftrack_action_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_action_handler.py @@ -12,7 +12,7 @@ def statics_icon(*icon_statics_file_parts): class BaseAction(BaseHandler): '''Custom Action base class - `label` a descriptive string identifing your action. + `label` a descriptive string identifying your action. `varaint` To group actions together, give them the same label and specify a unique variant per action. 
diff --git a/openpype/modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/ftrack/lib/ftrack_base_handler.py index c0b03f8a417..55400c22aba 100644 --- a/openpype/modules/ftrack/lib/ftrack_base_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_base_handler.py @@ -30,7 +30,7 @@ def __init__(self, message=None): class BaseHandler(object): '''Custom Action base class -