diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py
index 2028ef2eb..cbab3e9ab 100644
--- a/omnigibson/action_primitives/starter_semantic_action_primitives.py
+++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py
@@ -8,7 +8,6 @@
 from functools import cached_property
 import inspect
 import logging
-import random
 from aenum import IntEnum, auto
 from math import ceil
 import cv2
@@ -647,7 +646,7 @@ def _grasp(self, obj):
 
         # Allow grasping from suboptimal extents if we've tried enough times.
         grasp_poses = get_grasp_poses_for_object_sticky(obj)
-        grasp_pose, object_direction = random.choice(grasp_poses)
+        grasp_pose, object_direction = grasp_poses[np.random.randint(len(grasp_poses))]
 
         # Prepare data for the approach later.
         approach_pos = grasp_pose[0] + object_direction * m.GRASP_APPROACH_DISTANCE
@@ -1648,8 +1647,8 @@ def _sample_position_on_aabb_side(target_obj):
     """
     aabb_center, aabb_extent = target_obj.aabb_center, target_obj.aabb_extent
     # We want to sample only from the side-facing faces.
-    face_normal_axis = random.choice([0, 1])
-    face_normal_direction = random.choice([-1, 1])
+    face_normal_axis = np.random.choice([0, 1])
+    face_normal_direction = np.random.choice([-1, 1])
     face_center = aabb_center + np.eye(3)[face_normal_axis] * aabb_extent * face_normal_direction
     face_lateral_axis = 0 if face_normal_axis == 1 else 1
     face_lateral_half_extent = np.eye(3)[face_lateral_axis] * aabb_extent / 2
diff --git a/omnigibson/maps/segmentation_map.py b/omnigibson/maps/segmentation_map.py
index bf981ba66..0517e5b41 100644
--- a/omnigibson/maps/segmentation_map.py
+++ b/omnigibson/maps/segmentation_map.py
@@ -3,6 +3,7 @@
 import numpy as np
 from PIL import Image
 
+# Accommodate large maps (e.g. 10k x 10k) while suppressing DecompressionBombError
 Image.MAX_IMAGE_PIXELS = None
 
 import omnigibson as og
diff --git a/omnigibson/maps/traversable_map.py b/omnigibson/maps/traversable_map.py
index 69ea4f185..603e3be43 100644
--- a/omnigibson/maps/traversable_map.py
+++ b/omnigibson/maps/traversable_map.py
@@ -3,6 +3,7 @@
 import cv2
 import numpy as np
 from PIL import Image
+# Accommodate large maps (e.g. 10k x 10k) while suppressing DecompressionBombError
 Image.MAX_IMAGE_PIXELS = None
 
 from omnigibson.maps.map_base import BaseMap
diff --git a/omnigibson/object_states/open_state.py b/omnigibson/object_states/open_state.py
index 3fbfbd1c1..d48b7a995 100644
--- a/omnigibson/object_states/open_state.py
+++ b/omnigibson/object_states/open_state.py
@@ -1,4 +1,4 @@
-import random
+import numpy as np
 
 from omnigibson.macros import create_module_macros
 from omnigibson.object_states.object_state_base import BooleanStateMixin, AbsoluteObjectState
@@ -216,12 +216,14 @@ def _set_value(self, new_value, fully=False):
         sides = [1, -1] if both_sides else [1]
 
         for _ in range(m.OPEN_SAMPLING_ATTEMPTS):
-            side = random.choice(sides)
+            side = np.random.choice(sides)
 
             # All joints are relevant if we are closing, but if we are opening let's sample a subset.
             if new_value and not fully:
-                num_to_open = random.randint(1, len(relevant_joints))
-                relevant_joints = random.sample(relevant_joints, num_to_open)
+                num_to_open = np.random.randint(1, len(relevant_joints) + 1)
+                random_indices = np.random.choice(range(len(relevant_joints)), size=num_to_open, replace=False)
+                relevant_joints = [relevant_joints[i] for i in random_indices]
+                joint_directions = [joint_directions[i] for i in random_indices]
 
             # Go through the relevant joints & set random positions.
             for joint, joint_direction in zip(relevant_joints, joint_directions):
@@ -241,7 +243,7 @@ def _set_value(self, new_value, fully=False):
                 high = max(joint_range)
 
                 # Sample a position.
-                joint_pos = random.uniform(low, high)
+                joint_pos = np.random.uniform(low, high)
 
                 # Save sampled position.
                 joint.set_pos(joint_pos)
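Note on the `random` → `np.random` port above: two behavioral differences are easy to miss, and both show up in this patch. A standalone sketch with toy data (joint names below are hypothetical, not OmniGibson API):

```python
import numpy as np

# 1) random.randint(a, b) includes b; np.random.randint(a, b) excludes it.
#    Hence the "+ 1" in open_state.py: np.random.randint(1, len(relevant_joints) + 1).
draws = [np.random.randint(1, 3 + 1) for _ in range(1000)]
assert set(draws) <= {1, 2, 3}  # 3 is reachable only because of the + 1

# 2) np.random.choice samples from 1-D arrays only, so random.choice on a list of
#    tuples (e.g. grasp_poses, a list of (pose, direction) pairs) has to become an
#    index draw, and random.sample becomes np.random.choice(..., replace=False).
#    Drawing indices also keeps two parallel lists aligned, as in open_state.py.
joints = ["drawer_joint", "door_joint", "lid_joint"]  # hypothetical names
directions = [1, -1, 1]
idx = np.random.choice(len(joints), size=2, replace=False)
sub_joints = [joints[i] for i in idx]
sub_directions = [directions[i] for i in idx]
```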
diff --git a/omnigibson/objects/object_base.py b/omnigibson/objects/object_base.py
index a5ee03608..d9cea7cd6 100644
--- a/omnigibson/objects/object_base.py
+++ b/omnigibson/objects/object_base.py
@@ -28,6 +28,9 @@
 m.HIGHLIGHT_RGB = [1.0, 0.1, 0.92]  # Default highlighting (R,G,B) color when highlighting objects
 m.HIGHLIGHT_INTENSITY = 10000.0  # Highlight intensity to apply, range [0, 10000)
 
+# Physics settings for objects -- see https://nvidia-omniverse.github.io/PhysX/physx/5.3.1/docs/RigidBodyDynamics.html?highlight=velocity%20iteration#solver-iterations
+m.DEFAULT_SOLVER_POSITION_ITERATIONS = 32
+m.DEFAULT_SOLVER_VELOCITY_ITERATIONS = 1
 
 class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
     """This is the interface that all OmniGibson objects must implement."""
@@ -185,6 +188,12 @@ def _post_load(self):
             lazy.pxr.PhysxSchema.PhysxArticulationAPI.Apply(root_prim)
             self.self_collisions = self._load_config["self_collisions"]
 
+        # Set position / velocity solver iterations if we're not cloth
+        if self._prim_type != PrimType.CLOTH:
+            self.solver_position_iteration_count = m.DEFAULT_SOLVER_POSITION_ITERATIONS
+            self.solver_velocity_iteration_count = m.DEFAULT_SOLVER_VELOCITY_ITERATIONS
+
         # Add semantics
         lazy.omni.isaac.core.utils.semantics.add_update_semantics(
             prim=self._prim,
             semantic_label=self.category,
diff --git a/omnigibson/prims/cloth_prim.py b/omnigibson/prims/cloth_prim.py
index 921b6fcca..996aae7f8 100644
--- a/omnigibson/prims/cloth_prim.py
+++ b/omnigibson/prims/cloth_prim.py
@@ -88,7 +88,7 @@ def _post_load(self):
             self.mass = self._load_config["mass"]
 
         # Clothify this prim, which is assumed to be a mesh
-        ClothPrim.cloth_system.clothify_mesh_prim(mesh_prim=self._prim)
+        ClothPrim.cloth_system.clothify_mesh_prim(mesh_prim=self._prim, remesh=self._load_config.get("remesh", True))
 
         # Track generated particle count
         positions = self.compute_particle_positions()
diff --git a/omnigibson/prims/entity_prim.py b/omnigibson/prims/entity_prim.py
index 33713ebce..c3294ba8e 100644
--- a/omnigibson/prims/entity_prim.py
+++ b/omnigibson/prims/entity_prim.py
@@ -239,6 +239,7 @@ def update_links(self):
             link_load_config = {
                 "kinematic_only": self._load_config.get("kinematic_only", False)
                 if link_name == self._root_link_name else False,
+                "remesh": self._load_config.get("remesh", True),
             }
             self._links[link_name] = link_cls(
                 prim_path=prim.GetPrimPath().__str__(),
diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py
index 18552e890..2fd6b7ca9 100644
--- a/omnigibson/simulator.py
+++ b/omnigibson/simulator.py
@@ -1067,9 +1067,8 @@ def clear(self) -> None:
         # Clear all materials
         MaterialPrim.clear()
 
-        # Clear all transition rules if being used
-        if gm.ENABLE_TRANSITION_RULES:
-            TransitionRuleAPI.clear()
+        # Clear all transition rules
+        TransitionRuleAPI.clear()
 
         # Clear uniquely named items and other internal states
         clear_pu()
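The new solver-iteration defaults above are module macros rather than hard-coded constants, so they can in principle be tuned per run. A minimal sketch, assuming the usual OmniGibson pattern where `create_module_macros` registers macros addressable by module path (the exact import and registry path below are assumptions, not confirmed by this patch):

```python
# Assumption: omnigibson.macros exposes a global `macros` registry keyed by module path.
from omnigibson.macros import macros

# These names mirror the new defaults defined in object_base.py above.
macros.objects.object_base.DEFAULT_SOLVER_POSITION_ITERATIONS = 16  # cheaper, but softer contacts
macros.objects.object_base.DEFAULT_SOLVER_VELOCITY_ITERATIONS = 1

# Objects loaded after this point would pick the values up in BaseObject._post_load();
# cloth prims are skipped, matching the PrimType.CLOTH guard in the diff.
```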
diff --git a/omnigibson/systems/macro_particle_system.py b/omnigibson/systems/macro_particle_system.py
index c707bf661..7518fa24a 100644
--- a/omnigibson/systems/macro_particle_system.py
+++ b/omnigibson/systems/macro_particle_system.py
@@ -202,11 +202,12 @@ def process_particle_object(cls):
         """
         # Update color if the particle object has any material
         color = np.ones(3)
-        if cls.particle_object.material.is_glass:
-            color = cls.particle_object.material.glass_color
-        else:
-            diffuse_texture = cls.particle_object.material.diffuse_texture
-            color = plt.imread(diffuse_texture).mean(axis=(0, 1)) if diffuse_texture else cls.particle_object.material.diffuse_color_constant
+        if cls.particle_object.has_material():
+            if cls.particle_object.material.is_glass:
+                color = cls.particle_object.material.glass_color
+            else:
+                diffuse_texture = cls.particle_object.material.diffuse_texture
+                color = plt.imread(diffuse_texture).mean(axis=(0, 1)) if diffuse_texture else cls.particle_object.material.diffuse_color_constant
         cls._color = color
 
     @classmethod
diff --git a/omnigibson/systems/micro_particle_system.py b/omnigibson/systems/micro_particle_system.py
index 524409cd2..675c14c3c 100644
--- a/omnigibson/systems/micro_particle_system.py
+++ b/omnigibson/systems/micro_particle_system.py
@@ -1567,8 +1567,13 @@ def clothify_mesh_prim(cls, mesh_prim, remesh=True, particle_distance=None):
                 cloth particles are roughly touching each other, given cls.particle_contact_offset and
                 @mesh_prim's scale
         """
-        # Possibly remesh if requested
-        if remesh:
+        has_uv_mapping = mesh_prim.GetAttribute("primvars:st").Get() is not None
+        if not remesh:
+            # We always load into trimesh to remove redundant particles (since natively omni redundantly represents
+            # the number of vertices as 6x the total unique number of vertices)
+            tm = mesh_prim_to_trimesh_mesh(mesh_prim=mesh_prim, include_normals=True, include_texcoord=True, world_frame=False)
+            texcoord = np.array(mesh_prim.GetAttribute("primvars:st").Get()) if has_uv_mapping else None
+        else:
             # We will remesh in pymeshlab, but it doesn't allow programmatic construction of a mesh with texcoords so
             # we convert our mesh into a trimesh mesh, then export it to a temp file, then load it into pymeshlab
             scaled_world_transform = PoseAPI.get_world_pose_with_scale(mesh_prim.GetPath().pathString)
@@ -1619,27 +1624,25 @@ def clothify_mesh_prim(cls, mesh_prim, remesh=True, particle_distance=None):
 
             # Re-write data to @mesh_prim
             new_faces = cm.face_matrix()
-            new_face_vertex_ids = new_faces.flatten()
-            new_texcoord = cm.wedge_tex_coord_matrix()
             new_vertices = cm.vertex_matrix()
             new_normals = cm.vertex_normal_matrix()
-            n_faces = len(cm.face_matrix())
-            new_face_vertex_counts = np.ones(n_faces, dtype=int) * 3
-
-            tm_new = trimesh.Trimesh(
+            texcoord = np.array(cm.wedge_tex_coord_matrix()) if has_uv_mapping else None
+            tm = trimesh.Trimesh(
                 vertices=new_vertices,
                 faces=new_faces,
                 vertex_normals=new_normals,
             )
             # Apply the inverse of the world transform to get the mesh back into its local frame
-            tm_new.apply_transform(np.linalg.inv(scaled_world_transform))
-
-            # Update the mesh prim
-            mesh_prim.GetAttribute("faceVertexCounts").Set(new_face_vertex_counts)
-            mesh_prim.GetAttribute("points").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm_new.vertices))
-            mesh_prim.GetAttribute("faceVertexIndices").Set(new_face_vertex_ids)
-            mesh_prim.GetAttribute("normals").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm_new.vertex_normals))
-            mesh_prim.GetAttribute("primvars:st").Set(lazy.pxr.Vt.Vec2fArray.FromNumpy(new_texcoord))
+            tm.apply_transform(np.linalg.inv(scaled_world_transform))
+
+        # Update the mesh prim
+        face_vertex_counts = np.array([len(face) for face in tm.faces], dtype=int)
+        mesh_prim.GetAttribute("faceVertexCounts").Set(face_vertex_counts)
+        mesh_prim.GetAttribute("points").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertices))
+        mesh_prim.GetAttribute("faceVertexIndices").Set(tm.faces.flatten())
+        mesh_prim.GetAttribute("normals").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertex_normals))
+        if has_uv_mapping:
+            mesh_prim.GetAttribute("primvars:st").Set(lazy.pxr.Vt.Vec2fArray.FromNumpy(texcoord))
 
         # Convert into particle cloth
         lazy.omni.physx.scripts.particleUtils.add_physx_particle_cloth(
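The comment in the new `not remesh` branch (Omni storing roughly 6x redundant face-vertices) is the reason the mesh still makes a round trip through trimesh even when remeshing is skipped. A toy illustration of that vertex merge, using plain trimesh rather than any OmniGibson API:

```python
import numpy as np
import trimesh

# Two triangles sharing an edge, stored USD-style with duplicated corner vertices.
vertices = np.array([
    [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0],  # triangle 1
    [0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0],  # triangle 2 repeats two corners
])
faces = np.array([[0, 1, 2], [3, 4, 5]])

# process=True (the default) merges duplicate vertices, so the cloth system ends up
# generating one particle per unique vertex instead of one per stored face-vertex.
tm = trimesh.Trimesh(vertices=vertices, faces=faces)
assert len(tm.vertices) == 4  # 6 stored corners collapse to 4 unique particles
```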
diff --git a/omnigibson/utils/bddl_utils.py b/omnigibson/utils/bddl_utils.py
index 47572afad..d664da08f 100644
--- a/omnigibson/utils/bddl_utils.py
+++ b/omnigibson/utils/bddl_utils.py
@@ -931,18 +931,24 @@ def _import_sampleable_objects(self):
             if obj_inst in self._inroom_object_instances:
                 continue
 
-            category = np.random.choice(categories)
+            # Shuffle categories and sample to find a valid model
+            np.random.shuffle(categories)
+            model_choices, category = set(), None
+            for category in categories:
+                # Get all available models that support all of its synset abilities
+                model_choices = set(get_all_object_category_models_with_abilities(
+                    category=category,
+                    abilities=OBJECT_TAXONOMY.get_abilities(OBJECT_TAXONOMY.get_synset_from_category(category)),
+                ))
+                if len(model_choices) > 0:
+                    break
 
-            # Get all available models that support all of its synset abilities
-            model_choices = get_all_object_category_models_with_abilities(
-                category=category,
-                abilities=OBJECT_TAXONOMY.get_abilities(OBJECT_TAXONOMY.get_synset_from_category(category)),
-            )
             if len(model_choices) == 0:
-                return f"Missing valid object models for category: {category}"
+                # We failed to find ANY valid model across ALL valid categories
+                return f"Missing valid object models for all categories: {categories}"
 
             # Randomly select an object model
-            model = np.random.choice(model_choices)
+            model = np.random.choice(list(model_choices))
 
             # create the object
             simulator_obj = DatasetObject(
diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py
index dc482ddf6..6b9bc41b9 100644
--- a/omnigibson/utils/grasping_planning_utils.py
+++ b/omnigibson/utils/grasping_planning_utils.py
@@ -1,5 +1,4 @@
 import numpy as np
-import random
 from scipy.spatial.transform import Rotation as R, Slerp
 from math import ceil
 from omnigibson.macros import create_module_macros
@@ -60,8 +59,8 @@ def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
     )
 
     # Pick an axis and a direction.
-    approach_axis = random.choice([0, 1, 2])
-    approach_direction = random.choice([-1, 1]) if approach_axis != 2 else 1
+    approach_axis = np.random.choice([0, 1, 2])
+    approach_direction = np.random.choice([-1, 1]) if approach_axis != 2 else 1
     constant_dimension_in_base_frame = approach_direction * bbox_extent_in_base_frame * np.eye(3)[approach_axis]
     randomizable_dimensions_in_base_frame = bbox_extent_in_base_frame - np.abs(constant_dimension_in_base_frame)
     random_dimensions_in_base_frame = np.random.uniform([-1, -1, 0], [1, 1, 1])  # note that we don't allow going below center
@@ -118,7 +117,7 @@ def get_grasp_position_for_open(robot, target_obj, should_open, relevant_joint=N
         raise ValueError("Cannot open/close object without relevant joints.")
 
     # Make sure what we got is an appropriately open/close joint.
-    random.shuffle(relevant_joints)
+    np.random.shuffle(relevant_joints)
     selected_joint = None
    for joint in relevant_joints:
         current_position = joint.get_state()[0][0]
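The `bddl_utils.py` change replaces a single unlucky category draw with shuffle-then-scan: every category gets a chance, and the error fires only when all of them lack usable models. The control flow in isolation, with a stand-in for `get_all_object_category_models_with_abilities` (data below is made up):

```python
import numpy as np

def pick_first_with_models(categories, get_models):
    """Try categories in random order; return the first with a non-empty model set."""
    categories = list(categories)
    np.random.shuffle(categories)  # in-place, same as the diff
    for category in categories:
        models = set(get_models(category))
        if models:
            # np.random.choice needs a sequence, hence list(models) in the diff
            return category, np.random.choice(list(models))
    # Reached only when *every* category had zero valid models.
    raise ValueError(f"Missing valid object models for all categories: {categories}")

# Stand-in data: one category has no valid models, the other does.
catalog = {"mug": [], "cup": ["cup_001", "cup_002"]}
category, model = pick_first_with_models(catalog, catalog.get)
```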
diff --git a/omnigibson/utils/ui_utils.py b/omnigibson/utils/ui_utils.py
index 0484f0e5d..2d5ef3e2b 100644
--- a/omnigibson/utils/ui_utils.py
+++ b/omnigibson/utils/ui_utils.py
@@ -17,7 +17,6 @@
 from scipy.spatial.transform import Rotation as R
 from scipy.interpolate import CubicSpline
 from scipy.integrate import quad
-import random
 import imageio
 from IPython import embed
 
@@ -282,7 +281,7 @@ def choose_from_options(options, name, random_selection=False):
             k = 0
             print("Input is not valid. Use {} by default.".format(list(options)[k]))
     else:
-        k = random.choice(range(len(options)))
+        k = np.random.choice(range(len(options)))
 
     # Return requested option
     return list(options)[k]
diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py
index 550e9e89b..e3f978b5e 100644
--- a/omnigibson/utils/usd_utils.py
+++ b/omnigibson/utils/usd_utils.py
@@ -638,9 +638,10 @@ def mesh_prim_mesh_to_trimesh_mesh(mesh_prim, include_normals=True, include_texc
     if include_normals:
         kwargs["vertex_normals"] = np.array(mesh_prim.GetAttribute("normals").Get())
 
-    raw_texture = mesh_prim.GetAttribute("primvars:st").Get()
-    if raw_texture is not None:
-        kwargs["visual"] = trimesh.visual.TextureVisuals(uv=np.array(raw_texture))
+    if include_texcoord:
+        raw_texture = mesh_prim.GetAttribute("primvars:st").Get()
+        if raw_texture is not None:
+            kwargs["visual"] = trimesh.visual.TextureVisuals(uv=np.array(raw_texture))
 
     return trimesh.Trimesh(**kwargs)
diff --git a/omnigibson/utils/vision_utils.py b/omnigibson/utils/vision_utils.py
index 5b64d5167..7e83e75cb 100644
--- a/omnigibson/utils/vision_utils.py
+++ b/omnigibson/utils/vision_utils.py
@@ -1,5 +1,4 @@
 import colorsys
-import random
 
 import numpy as np
 from PIL import Image
@@ -42,7 +41,7 @@ def __call__(self, img):
             PIL.Image: Rescaled image.
         """
 
-        size = random.randint(self.minsize, self.maxsize)
+        size = np.random.randint(self.minsize, self.maxsize + 1)
 
         if isinstance(size, int):
             w, h = img.size
diff --git a/tests/test_object_states.py b/tests/test_object_states.py
index 43565be58..ab9faa268 100644
--- a/tests/test_object_states.py
+++ b/tests/test_object_states.py
@@ -632,8 +632,8 @@ def test_toggled_on():
     stove = og.sim.scene.object_registry("name", "stove")
     robot = og.sim.scene.object_registry("name", "robot0")
 
-    stove.set_position_orientation([1.46, 0.3, 0.45], T.euler2quat([0, 0, -np.pi / 2.0]))
-    robot.set_position_orientation([0.0, 0.38, 0.01], [0, 0, 0, 1])
+    stove.set_position_orientation([1.48, 0.3, 0.443], T.euler2quat([0, 0, -np.pi / 2.0]))
+    robot.set_position_orientation([0.0, 0.38, 0.0], [0, 0, 0, 1])
 
     assert not stove.states[ToggledOn].get_value()
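With the `usd_utils.py` guard, meshes that lack a `primvars:st` attribute no longer break the trimesh conversion, and callers passing `include_texcoord=False` skip the attribute read entirely. A toy sketch of what the UV payload looks like on the trimesh side (data below is made up; only `trimesh.visual.TextureVisuals` is real API):

```python
import numpy as np
import trimesh

vertices = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])
uv = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # stand-in for primvars:st

kwargs = {"vertices": vertices, "faces": faces}
raw_texture = uv  # would be None for a mesh with no UV mapping
if raw_texture is not None:
    # One UV pair per vertex, mirroring how mesh_prim_mesh_to_trimesh_mesh builds it
    kwargs["visual"] = trimesh.visual.TextureVisuals(uv=np.array(raw_texture))

tm = trimesh.Trimesh(**kwargs, process=False)  # process=False keeps vertex/UV alignment
```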