
Commit

Merge branch 'main' into lgulich/ignore-articulation-when-loading-rigid-body

Signed-off-by: lgulich <[email protected]>
lgulich authored Nov 5, 2024
2 parents d5bba66 + d05c7bb commit 9836abb
Showing 52 changed files with 466 additions and 395 deletions.
2 changes: 1 addition & 1 deletion source/extensions/omni.isaac.lab/config/extension.toml
@@ -1,7 +1,7 @@
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "0.27.11"
version = "0.27.13"

# Description
title = "Isaac Lab framework for Robot Learning"
25 changes: 21 additions & 4 deletions source/extensions/omni.isaac.lab/docs/CHANGELOG.rst
@@ -1,17 +1,34 @@
Changelog
---------

0.27.12 (2024-11-04)
0.27.14 (2024-11-04)
~~~~~~~~~~~~~~~~~~~~

Added
^^^^^

* If a USD file that contains an articulation root is loaded through a
  :class:`~omni.isaac.lab.assets.RigidObject`, loading now fails unless the articulation root is
  explicitly disabled (see the sketch below).
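
For context, a minimal sketch of how such an asset might be spawned with the articulation root disabled. The prim path, USD path, and property values below are illustrative assumptions, not part of this commit:

    import omni.isaac.lab.sim as sim_utils
    from omni.isaac.lab.assets import RigidObject, RigidObjectCfg

    # Hypothetical asset and prim paths; the USD is assumed to contain an articulation root.
    object_cfg = RigidObjectCfg(
        prim_path="/World/Object",
        spawn=sim_utils.UsdFileCfg(
            usd_path="/path/to/asset_with_articulation_root.usd",
            # Explicitly disable the articulation root so the asset can be treated as a rigid body.
            articulation_props=sim_utils.ArticulationRootPropertiesCfg(articulation_enabled=False),
        ),
    )
    rigid_object = RigidObject(cfg=object_cfg)  # no longer raises once the root is disabled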


0.27.13 (2024-10-30)
~~~~~~~~~~~~~~~~~~~~

Added
^^^^^

* Added the attributes :attr:`~omni.isaac.lab.sim.converters.MeshConverterCfg.translation`, :attr:`~omni.isaac.lab.sim.converters.MeshConverterCfg.rotation`,
:attr:`~omni.isaac.lab.sim.converters.MeshConverterCfg.scale` to translate, rotate, and scale meshes
when importing them with :class:`~omni.isaac.lab.sim.converters.MeshConverter`.
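
A rough usage sketch of the new attributes; the file paths and transform values are placeholders:

    from omni.isaac.lab.sim.converters import MeshConverter, MeshConverterCfg

    cfg = MeshConverterCfg(
        asset_path="/path/to/mesh.obj",             # placeholder input mesh
        usd_dir="/path/to/output",                  # placeholder output directory
        translation=(0.0, 0.0, 0.5),                # shift the mesh 0.5 m along +Z
        rotation=(0.7071068, 0.0, 0.0, 0.7071068),  # quaternion (w, x, y, z): 90 degrees about Z
        scale=(0.01, 0.01, 0.01),                   # e.g. a mesh authored in centimeters
    )
    converter = MeshConverter(cfg)
    print(converter.usd_path)                       # location of the generated USD file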


0.27.12 (2024-11-04)
~~~~~~~~~~~~~~~~~~~~

Removed
^^^^^^^

* Removed TensorDict usage in favor of Python dictionaries in sensors.
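
In practice this means sensor outputs such as ``camera.data.output`` are now plain ``dict[str, torch.Tensor]`` objects rather than TensorDict instances. A sketch of the adjusted access pattern, where the ``camera`` handle and the ``"rgb"`` key are assumptions based on the requested data types:

    # Before: camera.data.output.sorted_keys / camera.data.output.to_dict()
    # Now: standard dictionary access on a dict keyed by annotator name.
    for name, tensor in camera.data.output.items():
        print(name, tuple(tensor.shape))  # e.g. (num_envs, height, width, channels)
    rgb = camera.data.output["rgb"]  # only valid if "rgb" was requested in the camera config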


0.27.11 (2024-10-31)
~~~~~~~~~~~~~~~~~~~~

@@ -9,7 +9,6 @@
import re
import torch
from collections.abc import Sequence
from tensordict import TensorDict
from typing import TYPE_CHECKING, Any, Literal

import carb
@@ -156,7 +155,7 @@ def __str__(self) -> str:
# message for class
return (
f"Camera @ '{self.cfg.prim_path}': \n"
f"\tdata types : {self.data.output.sorted_keys} \n"
f"\tdata types : {list(self.data.output.keys())} \n"
f"\tsemantic filter : {self.cfg.semantic_filter}\n"
f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n"
f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n"
@@ -497,7 +496,7 @@ def _update_buffers_impl(self, env_ids: Sequence[int]):
self._update_poses(env_ids)
# -- read the data from annotator registry
# check if buffer is called for the first time. If so then, allocate the memory
if len(self._data.output.sorted_keys) == 0:
if len(self._data.output) == 0:
# this is the first time buffer is called
# it allocates memory for all the sensors
self._create_annotator_data()
@@ -552,7 +551,7 @@ def _create_buffers(self):
# lazy allocation of data dictionary
# since the size of the output data is not known in advance, we leave it as None
# the memory will be allocated when the buffer() function is called for the first time.
self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device)
self._data.output = {}
self._data.info = [{name: None for name in self.cfg.data_types} for _ in range(self._view.count)]

def _update_intrinsic_matrices(self, env_ids: Sequence[int]):
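The buffer allocation itself now follows a plain lazy-dictionary pattern. A simplified, self-contained sketch of the idea (not the actual class code):

    import torch

    output: dict[str, torch.Tensor] = {}  # replaces the previous TensorDict container

    def store_annotator_frame(name: str, frame: torch.Tensor) -> None:
        """Allocate the buffer for this data type on first use, then copy the new data in."""
        # frame is assumed to have shape (num_envs, height, width, channels)
        if name not in output:
            output[name] = torch.zeros_like(frame)
        output[name].copy_(frame)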
@@ -5,7 +5,6 @@

import torch
from dataclasses import dataclass
from tensordict import TensorDict
from typing import Any

from omni.isaac.lab.utils.math import convert_camera_frame_orientation_convention
@@ -47,7 +46,7 @@ class CameraData:
Shape is (N, 3, 3) where N is the number of sensors.
"""

output: TensorDict = None
output: dict[str, torch.Tensor] = None
"""The retrieved sensor data with sensor types as key.
The format of the data is available in the `Replicator Documentation`_. For semantic-based data,
@@ -9,7 +9,6 @@
import numpy as np
import torch
from collections.abc import Sequence
from tensordict import TensorDict
from typing import TYPE_CHECKING, Any

import carb
@@ -106,7 +105,7 @@ def __str__(self) -> str:
# message for class
return (
f"Tiled Camera @ '{self.cfg.prim_path}': \n"
f"\tdata types : {self.data.output.sorted_keys} \n"
f"\tdata types : {list(self.data.output.keys())} \n"
f"\tsemantic filter : {self.cfg.semantic_filter}\n"
f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n"
f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n"
@@ -372,7 +371,7 @@ def _create_buffers(self):
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32
).contiguous()

self._data.output = TensorDict(data_dict, batch_size=self._view.count, device=self.device)
self._data.output = data_dict
self._data.info = dict()

def _tiled_image_shape(self) -> tuple[int, int]:
@@ -7,7 +7,6 @@

import torch
from collections.abc import Sequence
from tensordict import TensorDict
from typing import TYPE_CHECKING, ClassVar, Literal

import omni.isaac.core.utils.stage as stage_utils
@@ -347,7 +346,7 @@ def _create_buffers(self):
self._data.image_shape = self.image_shape
# -- output data
# create the buffers to store the annotator data.
self._data.output = TensorDict({}, batch_size=self._view.count, device=self.device)
self._data.output = {}
self._data.info = [{name: None for name in self.cfg.data_types}] * self._view.count
for name in self.cfg.data_types:
if name in ["distance_to_image_plane", "distance_to_camera"]:
@@ -54,6 +54,8 @@ def __init__(self, cfg: AssetConverterBaseCfg):
Raises:
ValueError: When provided asset file does not exist.
"""
# check that the config is valid
cfg.validate()
# check if the asset file exists
if not check_file_path(cfg.asset_path):
raise ValueError(f"The asset path does not exist: {cfg.asset_path}")
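The added cfg.validate() call surfaces unset required fields before any file handling. Roughly, for the URDF converter (paths below are placeholders, and the exact exception type follows the configclass implementation):

    from omni.isaac.lab.sim.converters import UrdfConverter, UrdfConverterCfg

    cfg = UrdfConverterCfg(asset_path="/path/to/robot.urdf", usd_dir="/tmp/robot_usd")
    # fix_base is declared as MISSING, so constructing the converter now fails early
    # until the field is explicitly assigned:
    cfg.fix_base = True
    converter = UrdfConverter(cfg)  # validation passes once all required fields are set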
@@ -10,7 +10,7 @@
import omni.kit.commands
import omni.usd
from omni.isaac.core.utils.extensions import enable_extension
from pxr import Tf, Usd, UsdGeom, UsdPhysics, UsdUtils
from pxr import Gf, Tf, Usd, UsdGeom, UsdPhysics, UsdUtils

from omni.isaac.lab.sim.converters.asset_converter_base import AssetConverterBase
from omni.isaac.lab.sim.converters.mesh_converter_cfg import MeshConverterCfg
@@ -64,12 +64,13 @@ def __init__(self, cfg: MeshConverterCfg):
def _convert_asset(self, cfg: MeshConverterCfg):
"""Generate USD from OBJ, STL or FBX.
It stores the asset in the following format:
The USD file has Y-up axis and is scaled to meters.
The asset hierarchy is arranged as follows:
/file_name (default prim)
|- /geometry <- Made instanceable if requested
|- /Looks
|- /mesh
.. code-block:: none
mesh_file_basename (default prim)
|- /geometry/Looks
|- /geometry/mesh
Args:
cfg: The configuration for conversion of mesh to USD.
@@ -93,15 +94,25 @@ def _convert_asset(self, cfg: MeshConverterCfg):

# Convert USD
asyncio.get_event_loop().run_until_complete(
self._convert_mesh_to_usd(
in_file=cfg.asset_path, out_file=self.usd_path, prim_path=f"/{mesh_file_basename}"
)
self._convert_mesh_to_usd(in_file=cfg.asset_path, out_file=self.usd_path)
)
# Create a new stage, set Z up and meters per unit
temp_stage = Usd.Stage.CreateInMemory()
UsdGeom.SetStageUpAxis(temp_stage, UsdGeom.Tokens.z)
UsdGeom.SetStageMetersPerUnit(temp_stage, 1.0)
UsdPhysics.SetStageKilogramsPerUnit(temp_stage, 1.0)
# Add mesh to stage
base_prim = temp_stage.DefinePrim(f"/{mesh_file_basename}", "Xform")
prim = temp_stage.DefinePrim(f"/{mesh_file_basename}/geometry", "Xform")
prim.GetReferences().AddReference(self.usd_path)
temp_stage.SetDefaultPrim(base_prim)
temp_stage.Export(self.usd_path)

# Open converted USD stage
# note: This opens a new stage and does not use the stage created earlier by the user
# create a new stage
stage = Usd.Stage.Open(self.usd_path)
# add USD to stage cache
# Need to reload the stage to get the new prim structure, otherwise it can be taken from the cache
stage.Reload()
# Add USD to stage cache
stage_id = UsdUtils.StageCache.Get().Insert(stage)
# Get the default prim (which is the root prim) -- "/{mesh_file_basename}"
xform_prim = stage.GetDefaultPrim()
@@ -121,6 +132,32 @@ def _convert_asset(self, cfg: MeshConverterCfg):
)
# Delete the old Xform and make the new Xform the default prim
stage.SetDefaultPrim(xform_prim)
# Apply default Xform rotation to mesh -> enable to set rotation and scale
omni.kit.commands.execute(
"CreateDefaultXformOnPrimCommand",
prim_path=xform_prim.GetPath(),
**{"stage": stage},
)

# Apply translation, rotation, and scale to the Xform
geom_xform = UsdGeom.Xform(geom_prim)
geom_xform.ClearXformOpOrder()

# Remove any existing rotation attributes
rotate_attr = geom_prim.GetAttribute("xformOp:rotateXYZ")
if rotate_attr:
geom_prim.RemoveProperty(rotate_attr.GetName())

# translation
translate_op = geom_xform.AddTranslateOp(UsdGeom.XformOp.PrecisionDouble)
translate_op.Set(Gf.Vec3d(*cfg.translation))
# rotation
orient_op = geom_xform.AddOrientOp(UsdGeom.XformOp.PrecisionDouble)
orient_op.Set(Gf.Quatd(*cfg.rotation))
# scale
scale_op = geom_xform.AddScaleOp(UsdGeom.XformOp.PrecisionDouble)
scale_op.Set(Gf.Vec3d(*cfg.scale))

# Handle instanceable
# Create a new Xform prim that will be the prototype prim
if cfg.make_instanceable:
@@ -158,40 +195,28 @@ def _convert_asset(self, cfg: MeshConverterCfg):
"""

@staticmethod
async def _convert_mesh_to_usd(
in_file: str, out_file: str, prim_path: str = "/World", load_materials: bool = True
) -> bool:
async def _convert_mesh_to_usd(in_file: str, out_file: str, load_materials: bool = True) -> bool:
"""Convert mesh from supported file types to USD.
This function uses the Omniverse Asset Converter extension to convert a mesh file to USD.
It is an asynchronous function and should be called using `asyncio.get_event_loop().run_until_complete()`.
The converted asset is stored in the USD format in the specified output file.
The USD file has Y-up axis and is scaled to meters.
The asset hierarchy is arranged as follows:
.. code-block:: none
prim_path (default prim)
|- /geometry/Looks
|- /geometry/mesh
The USD file has Y-up axis and is scaled to cm.
Args:
in_file: The file to convert.
out_file: The path to store the output file.
prim_path: The prim path of the mesh.
load_materials: Set to True to enable attaching materials defined in the input file
to the generated USD mesh. Defaults to True.
Returns:
True if the conversion succeeds.
"""
enable_extension("omni.kit.asset_converter")
enable_extension("omni.usd.metrics.assembler")

import omni.kit.asset_converter
import omni.usd
from omni.metrics.assembler.core import get_metrics_assembler_interface

# Create converter context
converter_context = omni.kit.asset_converter.AssetConverterContext()
@@ -212,29 +237,9 @@ async def _convert_mesh_to_usd(

# Create converter task
instance = omni.kit.asset_converter.get_instance()
out_file_non_metric = out_file.replace(".usd", "_non_metric.usd")
task = instance.create_converter_task(in_file, out_file_non_metric, None, converter_context)
task = instance.create_converter_task(in_file, out_file, None, converter_context)
# Start conversion task and wait for it to finish
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break

temp_stage = Usd.Stage.CreateInMemory()
UsdGeom.SetStageUpAxis(temp_stage, UsdGeom.Tokens.z)
UsdGeom.SetStageMetersPerUnit(temp_stage, 1.0)
UsdPhysics.SetStageKilogramsPerUnit(temp_stage, 1.0)

base_prim = temp_stage.DefinePrim(prim_path, "Xform")
prim = temp_stage.DefinePrim(f"{prim_path}/geometry", "Xform")
prim.GetReferences().AddReference(out_file_non_metric)
cache = UsdUtils.StageCache.Get()
cache.Insert(temp_stage)
stage_id = cache.GetId(temp_stage).ToLongInt()
get_metrics_assembler_interface().resolve_stage(stage_id)
temp_stage.SetDefaultPrim(base_prim)
temp_stage.Export(out_file)
success = await task.wait_until_finished()
if not success:
raise RuntimeError(f"Failed to convert {in_file} to USD. Error: {task.get_error_message()}")
return success
@@ -12,21 +12,21 @@
class MeshConverterCfg(AssetConverterBaseCfg):
"""The configuration class for MeshConverter."""

mass_props: schemas_cfg.MassPropertiesCfg = None
mass_props: schemas_cfg.MassPropertiesCfg | None = None
"""Mass properties to apply to the USD. Defaults to None.
Note:
If None, then no mass properties will be added.
"""

rigid_props: schemas_cfg.RigidBodyPropertiesCfg = None
rigid_props: schemas_cfg.RigidBodyPropertiesCfg | None = None
"""Rigid body properties to apply to the USD. Defaults to None.
Note:
If None, then no rigid body properties will be added.
"""

collision_props: schemas_cfg.CollisionPropertiesCfg = None
collision_props: schemas_cfg.CollisionPropertiesCfg | None = None
"""Collision properties to apply to the USD. Defaults to None.
Note:
@@ -42,3 +42,12 @@ class MeshConverterCfg(AssetConverterBaseCfg):
"none" causes no collision mesh to be added.
"""

translation: tuple[float, float, float] = (0.0, 0.0, 0.0)
"""The translation of the mesh to the origin. Defaults to (0.0, 0.0, 0.0)."""

rotation: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
"""The rotation of the mesh in quaternion format (w, x, y, z). Defaults to (1.0, 0.0, 0.0, 0.0)."""

scale: tuple[float, float, float] = (1.0, 1.0, 1.0)
"""The scale of the mesh. Defaults to (1.0, 1.0, 1.0)."""
@@ -30,7 +30,7 @@ class UrdfConverterCfg(AssetConverterBaseCfg):
"""Decompose a convex mesh into smaller pieces for a closer fit. Defaults to False."""

fix_base: bool = MISSING
"""Create a fix joint to the root/base link. Defaults to True."""
"""Create a fix joint to the root/base link."""

merge_fixed_joints: bool = False
"""Consolidate links that are connected by fixed joints. Defaults to False."""
6 changes: 3 additions & 3 deletions source/extensions/omni.isaac.lab/test/sensors/test_camera.py
@@ -121,7 +121,7 @@ def test_camera_init(self):
# update camera
camera.update(self.dt)
# check image data
for im_data in camera.data.output.to_dict().values():
for im_data in camera.data.output.values():
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width, 1))

def test_camera_init_offset(self):
@@ -228,7 +228,7 @@ def test_multi_camera_init(self):
cam_2.update(self.dt)
# check image data
for cam in [cam_1, cam_2]:
for im_data in cam.data.output.to_dict().values():
for im_data in cam.data.output.values():
self.assertEqual(im_data.shape, (1, self.camera_cfg.height, self.camera_cfg.width, 1))

def test_multi_camera_with_different_resolution(self):
@@ -705,7 +705,7 @@ def test_throughput(self):
with Timer(f"Time taken for writing data with shape {camera.image_shape} "):
# Pack data back into replicator format to save them using its writer
rep_output = {"annotators": {}}
camera_data = convert_dict_to_backend(camera.data.output[0].to_dict(), backend="numpy")
camera_data = convert_dict_to_backend({k: v[0] for k, v in camera.data.output.items()}, backend="numpy")
for key, data, info in zip(camera_data.keys(), camera_data.values(), camera.data.info[0].values()):
if info is not None:
rep_output["annotators"][key] = {"render_product": {"data": data, **info}}