
Merge pull request #650 from StanfordVL/fix-primitives-2
Fix primitives for navigate, grasp and place
cgokmen authored Mar 16, 2024
2 parents 4c5df20 + db89187 commit b22e60a
Showing 7 changed files with 129 additions and 136 deletions.
@@ -8,6 +8,7 @@
 from functools import cached_property
 import inspect
 import logging
+import random
 from aenum import IntEnum, auto
 from math import ceil
 import cv2
@@ -60,7 +61,7 @@
 m.MAX_CARTESIAN_HAND_STEP = 0.002
 m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500
 m.MAX_STEPS_FOR_HAND_MOVE_IK = 1000
-m.MAX_STEPS_FOR_GRASP_OR_RELEASE = 30
+m.MAX_STEPS_FOR_GRASP_OR_RELEASE = 250
 m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION = 500
 m.MAX_ATTEMPTS_FOR_OPEN_CLOSE = 20
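The larger grasp/release budget pairs with the action-frequency change in the config diff further down (60 Hz to 30 Hz). A back-of-envelope check, assuming one primitive step corresponds to one env.step() at the configured action frequency:

    old_budget = 30 / 60.0   # 30 steps at the old 60 Hz action rate -> 0.5 s
    new_budget = 250 / 30.0  # 250 steps at the new 30 Hz action rate -> ~8.3 s
    print(old_budget, round(new_budget, 1))

So the gripper now gets roughly 8 s rather than 0.5 s to finish closing or opening before the primitive times out.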

@@ -274,7 +275,8 @@ def __init__(self, env, add_context=False, enable_head_tracking=True, always_tra

     def _postprocess_action(self, action):
         """Postprocesses action by applying head tracking and adding context if necessary."""
-        action = self._overwrite_head_action(action)
+        if self._enable_head_tracking:
+            action = self._overwrite_head_action(action)

         if not self.add_context:
             return action
@@ -646,7 +648,7 @@ def _grasp(self, obj):

         # Allow grasping from suboptimal extents if we've tried enough times.
         grasp_poses = get_grasp_poses_for_object_sticky(obj)
-        grasp_pose, object_direction = np.random.choice(grasp_poses)
+        grasp_pose, object_direction = random.choice(grasp_poses)

         # Prepare data for the approach later.
         approach_pos = grasp_pose[0] + object_direction * m.GRASP_APPROACH_DISTANCE
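The switch to random.choice (together with the new import random above) fixes a real failure mode: np.random.choice() coerces its argument to an array and only accepts 1-D input, so it cannot sample from a list of (pose, direction) pairs. A minimal sketch with stand-in data, not the actual output of get_grasp_poses_for_object_sticky:

    import random
    import numpy as np

    # Stand-in for a list of (grasp_position, object_direction) pairs.
    candidates = [
        (np.zeros(3), np.array([1.0, 0.0, 0.0])),
        (np.ones(3), np.array([0.0, 1.0, 0.0])),
    ]

    try:
        np.random.choice(candidates)  # coerced to a (2, 2, 3) array first
    except ValueError as err:
        print("np.random.choice fails:", err)  # "a must be 1-dimensional"

    pose, direction = random.choice(candidates)  # indexes the list directly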
@@ -836,7 +838,7 @@ def _manipulation_control_idx(self):
             return np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]])

         # Otherwise just return the default arm control idx
-        return self.robot.arm_control_idx[self.arm]
+        return np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]])

     @cached_property
     def _manipulation_descriptor_path(self):
@@ -1051,7 +1053,7 @@ def _move_hand_direct_ik(self, target_pose, stop_on_contact=False, ignore_failur
         # make sure controller is InverseKinematicsController and in expected mode
         controller_config = self.robot._controller_config["arm_" + self.arm]
         assert controller_config["name"] == "InverseKinematicsController", "Controller must be InverseKinematicsController"
-        assert controller_config["mode"] == "pose_absolute_ori", "Controller must be in pose_delta_ori mode"
+        assert controller_config["mode"] == "pose_absolute_ori", "Controller must be in pose_absolute_ori mode"
         if in_world_frame:
             target_pose = self._get_pose_in_robot_frame(target_pose)
         target_pos = target_pose[0]
@@ -1325,7 +1327,7 @@ def _empty_action(self):
                 action[action_idx] = self.robot.get_joint_positions()[joint_idx]
             elif self.robot._controller_config[name]["name"] == "InverseKinematicsController":
                 # overwrite the goal orientation, since it is in absolute frame.
-                assert self.robot._controller_config["arm_" + self.arm]["mode"] == "pose_absolute_ori", "Controller must be in pose_delta_ori mode"
+                assert self.robot._controller_config["arm_" + self.arm]["mode"] == "pose_absolute_ori", "Controller must be in pose_absolute_ori mode"
                 current_quat = self.robot.get_relative_eef_orientation()
                 current_ori = T.quat2axisangle(current_quat)
                 control_idx = self.robot.controller_action_idx["arm_" + self.arm]
@@ -1363,7 +1365,7 @@ def _get_reset_eef_pose(self):
         if self.robot_model == "Tiago":
             return np.array([0.28493954, 0.37450749, 1.1512334]), np.array([-0.21533823, 0.05361032, -0.08631776, 0.97123871])
         else:
-            raise NotImplementedError
+            return np.array([0.48688125, -0.12507881, 0.97888719]), np.array([0.61324748, 0.61305553, -0.35266518, 0.35173529])

     def _get_reset_joint_pos(self):
         reset_pose_fetch = np.array(
@@ -1,6 +1,6 @@
 env:
-  action_frequency: 60 # (int): environment executes action at the action_frequency rate
-  physics_frequency: 60 # (int): physics frequency (1 / physics_timestep for physx)
+  action_frequency: 30 # (int): environment executes action at the action_frequency rate
+  physics_frequency: 120 # (int): physics frequency (1 / physics_timestep for physx)
   device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
   automatic_reset: false # (bool): whether to automatically reset after an episode finishes
   flatten_action_space: false # (bool): whether to flatten the action space as a single 1D-array
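The retuned rates trade a slower control loop for a finer physics step. Assuming, as the comments above suggest, that physics advances at 1 / physics_frequency and each environment step spans one action period:

    action_frequency = 30    # Hz, rate at which env.step() consumes actions
    physics_frequency = 120  # Hz, PhysX rate
    physics_steps_per_action = physics_frequency // action_frequency  # -> 4
    physics_timestep = 1.0 / physics_frequency                        # -> ~0.0083 s
    print(physics_steps_per_action, physics_timestep)

Each env.step() now spans four ~8.3 ms physics substeps instead of a single 16.7 ms step.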
@@ -31,8 +31,7 @@ scene:
   include_robots: false

 robots:
-  - name: tiago
-    type: Tiago
+  - type: Fetch
     obs_modalities: [scan, rgb, depth]
     scale: 1.0
     self_collisions: true
@@ -44,40 +43,24 @@ robots:
     default_arm_pose: diagonal30
     controller_config:
       base:
-        name: JointController
-        motor_type: velocity
-      arm_left:
-        name: InverseKinematicsController
-        motor_type: velocity
-        command_input_limits: null
-        command_output_limits: null
-        mode : pose_absolute_ori
-        kv: 3.0
-      arm_right:
+        name: DifferentialDriveController
+      arm_0:
         name: InverseKinematicsController
-        motor_type: velocity
-        command_input_limits: null
-        command_output_limits: null
-        mode : pose_absolute_ori
-        kv: 3.0
-      gripper_left:
-        name: JointController
-        motor_type: position
-        command_input_limits: [-1, 1]
-        command_output_limits: null
-        use_delta_commands: true
-      gripper_right:
+        command_input_limits: default
+        command_output_limits:
+          - [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]
+          - [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]
+        mode: pose_absolute_ori
+        kp: 300.0
+      gripper_0:
         name: JointController
         motor_type: position
         command_input_limits: [-1, 1]
         command_output_limits: null
         use_delta_commands: true
       camera:
         name: JointController
         motor_type: position
         command_input_limits: null
         command_output_limits: null
-        use_delta_commands: false
+        use_delta_commands: False

 objects: []
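One note on the new arm_0 block: the two rows under command_output_limits read as per-axis lower and upper bounds on the 6-DOF (dx, dy, dz, dax, day, daz) end-effector command. A minimal sketch of the usual normalized-input-to-output scaling such controllers apply (an illustration of the convention, not OmniGibson's exact controller code):

    import numpy as np

    lower = np.array([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5])  # first row above
    upper = np.array([ 0.2,  0.2,  0.2,  0.5,  0.5,  0.5])  # second row above

    def scale_command(cmd):
        # Affinely map a normalized command in [-1, 1] onto [lower, upper] per axis.
        cmd = np.clip(cmd, -1.0, 1.0)
        return lower + (cmd + 1.0) * 0.5 * (upper - lower)

    print(scale_command(np.ones(6)))  # full-scale command -> the upper bounds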
64 changes: 64 additions & 0 deletions omnigibson/examples/action_primitives/rs_int_example.py
@@ -0,0 +1,64 @@
+import os
+import yaml
+import numpy as np
+
+import omnigibson as og
+from omnigibson.macros import gm
+from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet
+
+# Don't use GPU dynamics and use flatcache for performance boost
+# gm.USE_GPU_DYNAMICS = True
+# gm.ENABLE_FLATCACHE = True
+
+def execute_controller(ctrl_gen, env):
+    for action in ctrl_gen:
+        env.step(action)
+
+def main():
+    """
+    Demonstrates how to use the action primitives to pick and place an object in a crowded scene.
+    It loads Rs_int with a Fetch robot, and the robot picks and places an apple.
+    """
+    # Load the config
+    config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
+    config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)
+
+    # Update it to run a grocery shopping task
+    config["scene"]["scene_model"] = "Rs_int"
+    config["scene"]["not_load_object_categories"] = ["ceilings"]
+    config["objects"] = [
+        {
+            "type": "DatasetObject",
+            "name": "apple",
+            "category": "apple",
+            "model": "agveuv",
+            "position": [-0.3, -1.1, 0.5],
+            "orientation": [0, 0, 0, 1]
+        },
+    ]
+
+    # Load the environment
+    env = og.Environment(configs=config)
+    scene = env.scene
+    robot = env.robots[0]
+
+    # Allow user to move camera more easily
+    og.sim.enable_viewer_camera_teleoperation()
+
+    controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)
+    cabinet = scene.object_registry("name", "bottom_cabinet_slgzfc_0")
+    apple = scene.object_registry("name", "apple")
+
+    # Grasp apple
+    print("Executing controller")
+    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, apple), env)
+    print("Finished executing grasp")
+
+    # Place on cabinet
+    print("Executing controller")
+    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, cabinet), env)
+    print("Finished executing place")
+
+if __name__ == "__main__":
+    main()
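Assuming a working OmniGibson install with the Rs_int scene assets downloaded, the new example should run directly, e.g. python omnigibson/examples/action_primitives/rs_int_example.py.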
26 changes: 7 additions & 19 deletions omnigibson/examples/action_primitives/solve_simple_task.py
@@ -1,6 +1,3 @@
-"""
-Example script demo'ing robot primitive to solve a task
-"""
 import os
 import yaml
 import numpy as np
@@ -17,22 +14,14 @@ def execute_controller(ctrl_gen, env):
     for action in ctrl_gen:
         env.step(action)

-def set_start_pose(robot):
-    reset_pose_tiago = np.array([
-        -1.78029833e-04, 3.20231302e-05, -1.85759447e-07, -1.16488536e-07,
-        4.55182843e-08, 2.36128806e-04, 1.50000000e-01, 9.40000000e-01,
-        -1.10000000e+00, 0.00000000e+00, -0.90000000e+00, 1.47000000e+00,
-        0.00000000e+00, 2.10000000e+00, 2.71000000e+00, 1.50000000e+00,
-        1.71000000e+00, 1.30000000e+00, -1.57000000e+00, -1.40000000e+00,
-        1.39000000e+00, 0.00000000e+00, 0.00000000e+00, 4.50000000e-02,
-        4.50000000e-02, 4.50000000e-02, 4.50000000e-02,
-    ])
-    robot.set_joint_positions(reset_pose_tiago)
-    og.sim.step()
-
 def main():
     """
     Demonstrates how to use the action primitives to pick and place an object in an empty scene.
     It loads Rs_int with a Fetch robot, and the robot picks and places a bottle of cologne.
     """
     # Load the config
-    config_filename = os.path.join(og.example_config_path, "tiago_primitives.yaml")
+    config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
     config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)

     # Update it to create a custom environment and run some actions
@@ -66,8 +55,7 @@ def main():
     # Allow user to move camera more easily
     og.sim.enable_viewer_camera_teleoperation()

-    controller = StarterSemanticActionPrimitives(env)
-    set_start_pose(robot)
+    controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)

     # Grasp of cologne
     grasp_obj = scene.object_registry("name", "cologne")
27 changes: 8 additions & 19 deletions omnigibson/examples/action_primitives/wip_solve_behavior_task.py
@@ -1,6 +1,3 @@
-"""
-Example script demo'ing robot primitive to solve a task
-"""
 import os
 import yaml
 import numpy as np
@@ -17,22 +14,15 @@ def execute_controller(ctrl_gen, env):
     for action in ctrl_gen:
         env.step(action)

-def set_start_pose(robot):
-    reset_pose_tiago = np.array([
-        -1.78029833e-04, 3.20231302e-05, -1.85759447e-07, -1.16488536e-07,
-        4.55182843e-08, 2.36128806e-04, 1.50000000e-01, 9.40000000e-01,
-        -1.10000000e+00, 0.00000000e+00, -0.90000000e+00, 1.47000000e+00,
-        0.00000000e+00, 2.10000000e+00, 2.71000000e+00, 1.50000000e+00,
-        1.71000000e+00, 1.30000000e+00, -1.57000000e+00, -1.40000000e+00,
-        1.39000000e+00, 0.00000000e+00, 0.00000000e+00, 4.50000000e-02,
-        4.50000000e-02, 4.50000000e-02, 4.50000000e-02,
-    ])
-    robot.set_joint_positions(reset_pose_tiago)
-    og.sim.step()
-
 def main():
     """
     Demonstrates how to use the action primitives to solve a simple BEHAVIOR-1K task.
     It loads Benevolence_1_int with a Fetch robot, and the robot attempts to solve the
     picking_up_trash task using a hardcoded sequence of primitives.
     """
     # Load the config
-    config_filename = os.path.join(og.example_config_path, "tiago_primitives.yaml")
+    config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
     config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader)

     # Update it to run a grocery shopping task
@@ -56,8 +46,7 @@ def main():
     # Allow user to move camera more easily
     og.sim.enable_viewer_camera_teleoperation()

-    controller = StarterSemanticActionPrimitives(env)
-    set_start_pose(robot)
+    controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)

     # Grasp can of soda
     grasp_obj = scene.object_registry("name", "can_of_soda_89")
11 changes: 5 additions & 6 deletions omnigibson/utils/motion_planning_utils.py
@@ -105,6 +105,9 @@ def get_angle_between_poses(p1, p2):
     return np.arctan2(segment[1], segment[0])

 def create_state(space, x, y, yaw):
+    x = float(x)
+    y = float(y)
+    yaw = float(yaw)
     state = ob.State(space)
     state().setX(x)
     state().setY(y)
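The float() casts look redundant but matter for the OMPL Python bindings: x, y, and yaw typically arrive here as numpy scalars, and bindings generated from C++ generally expect built-in Python floats. A minimal illustration of the type difference (that setX()/setY()/setYaw() reject numpy scalars is an assumption about the bindings, not something exercised here):

    import numpy as np

    x = np.float64(1.25)
    print(type(x))         # <class 'numpy.float64'>
    print(type(float(x)))  # <class 'float'>, safe to hand to setX()/setY()/setYaw()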
@@ -260,12 +263,8 @@ def state_valid_fn(q):
             end_conf[i] = joint.upper_limit
         if end_conf[i] < joint.lower_limit:
             end_conf[i] = joint.lower_limit
-        if joint.upper_limit - joint.lower_limit > 2 * np.pi:
-            bounds.setLow(i, 0.0)
-            bounds.setHigh(i, 2 * np.pi)
-        else:
-            bounds.setLow(i, float(joint.lower_limit))
-            bounds.setHigh(i, float(joint.upper_limit))
+        bounds.setLow(i, float(joint.lower_limit))
+        bounds.setHigh(i, float(joint.upper_limit))
     space.setBounds(bounds)

     # create a simple setup object
(diff for the seventh changed file not loaded in this view)
