-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathviperx.py
More file actions
444 lines (361 loc) · 15.8 KB
/
viperx.py
File metadata and controls
444 lines (361 loc) · 15.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
from typing import Dict, Optional, Tuple, Union
from gymnasium.spaces import Box
from gymnasium.envs.mujoco import MujocoEnv
import numpy as np
import mujoco
# Default viewer camera settings forwarded to MujocoEnv; only the
# camera-to-target distance is overridden here.
DEFAULT_CAMERA_CONFIG = {
    "distance": 4.0,
}
class ViperXEnv(MujocoEnv):
    """
    ViperXEnv uses the Trossen ViperX 300s robot arm to grab objects and move them in a scene.

    On initialization, a box is spawned at a location in the scene reachable by the
    robot arm. The robot arm should then grab the box to move it towards a target
    (goal) location, marked in the model by the site named ``goal_site``.

    Args:
        xml_file (str): The path to the xml file to load the environment from.
        frame_skip (int): Simulator timesteps advanced per environment step.
        default_camera_config (dict): Camera settings forwarded to ``MujocoEnv``.
        gripper_distance_reward_weight (float): Weight of the gripper-to-box distance penalty.
        box_distance_reward_weight (float): Weight of the box-to-goal distance penalty.
        grasp_reward_weight (float): Reward granted while the gripper contacts the box.
        success_reward (float): One-time reward when the box reaches the goal
            (the episode then terminates).
        time_penalty_weight (float): Weight of the elapsed-simulation-time penalty.
        collision_penalty_weight (float): Weight of the per-contact collision penalty.
        ctrl_cost_weight (float): Weight of the squared-action control cost.
        goal_tolerance (float): Box-to-goal distance under which the episode is a success.
        reset_noise_scale (float): Scale of the noise added to qpos/qvel on reset.
        contact_force_range (tuple): Clip range applied to external contact forces
            before they enter the observation.
        include_cfrc_ext_in_observation (bool): Whether to append the gripper's
            (clipped) external contact forces to the observation vector.
        randomize_box_position (bool): Whether to re-randomize the box spawn and the
            goal location on every reset.
    """

    metadata = {
        "render_modes": [
            "human",
            "rgb_array",
            "depth_array",
        ],
    }

    def __init__(
        self,
        xml_file: str = "trossen_vx300s/scene_box.xml",
        frame_skip: int = 5,  # each 'step' of the environment corresponds to 5 timesteps in the simulation
        default_camera_config: Dict[str, Union[float, int]] = DEFAULT_CAMERA_CONFIG,
        gripper_distance_reward_weight: float = 0.5,
        box_distance_reward_weight: float = 1,
        grasp_reward_weight: float = 2,
        success_reward: float = 100,
        time_penalty_weight: float = 0.0001,  # penalize long episodes
        collision_penalty_weight: float = 0.0,  # penalize collisions - set to 0 for now
        ctrl_cost_weight: float = 0.1,  # penalize large/jerky actions
        goal_tolerance: float = 0.015,  # tolerated distance from goal position to be considered as success
        reset_noise_scale: float = 0.005,  # noise scale for resetting the robot's position
        contact_force_range: Tuple[float, float] = (-1.0, 1.0),
        include_cfrc_ext_in_observation: bool = False,
        randomize_box_position: bool = False,
        **kwargs,
    ):
        # store reward/termination hyper-parameters
        self._gripper_distance_reward_weight = gripper_distance_reward_weight
        self._box_distance_reward_weight = box_distance_reward_weight
        self._grasp_reward_weight = grasp_reward_weight
        self._success_reward = success_reward
        self._time_penalty_weight = time_penalty_weight
        self._collision_penalty_weight = collision_penalty_weight
        self._ctrl_cost_weight = ctrl_cost_weight
        self._goal_tolerance = goal_tolerance
        self._reset_noise_scale = reset_noise_scale
        # clip range for rewards related to contact forces
        self._contact_force_range = contact_force_range
        self._include_cfrc_ext_in_observation = include_cfrc_ext_in_observation
        self._randomize_box_position = randomize_box_position
        # MujocoEnv validates render_mode against instance metadata during __init__
        self.metadata = ViperXEnv.metadata
        MujocoEnv.__init__(
            self,
            xml_file,
            frame_skip,
            observation_space=None,  # needs to be defined after the model is loaded
            default_camera_config=default_camera_config,
            **kwargs,
        )
        # required for MujocoEnv: render_fps can only be computed once self.dt exists
        self.metadata = {
            "render_modes": [
                "human",
                "rgb_array",
                "depth_array",
            ],
            "render_fps": int(np.round(1.0 / self.dt)),
        }
        self._reset_goal(np.array([0.0, 0.4, 0.025]))
        # For observations, we collect the model's qpos and qvel, and the goal position.
        # BUG FIX: use .size (total element count after flatten) rather than len():
        # gripper_cfrc_ext() is 2-D (n_bodies x 6), so len() only counted rows and the
        # declared observation_space shape disagreed with what _get_obs() produces.
        cfrc_size = (
            self.gripper_cfrc_ext().size if include_cfrc_ext_in_observation else 0
        )
        obs_size = self.data.qpos.size + self.data.qvel.size + 3 + cfrc_size
        # metadata for the final observation space
        self.observation_structure = {
            "qpos": self.data.qpos.size,
            "qvel": self.data.qvel.size,
            "goal_position": 3,
            "cfrc_ext": cfrc_size,
        }
        self.observation_space = Box(
            low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float64
        )

    def _generate_valid_location(self) -> np.ndarray:
        """Sample a random (x, y, z) point in an annulus around the robot base.

        Uses the environment's seeded RNG (``self.np_random``) instead of the
        global ``np.random`` so that ``reset(seed=...)`` is reproducible.
        """
        spawn_height = 0.025
        spawn_radius_lower = 0.3
        spawn_radius_upper = 0.5
        # BUG FIX: previously used np.random.*, which ignores the Gymnasium seed
        r = self.np_random.uniform(spawn_radius_lower, spawn_radius_upper)
        theta = self.np_random.uniform(0, 2 * np.pi)
        x = r * np.cos(theta)
        y = r * np.sin(theta)
        z = spawn_height
        return np.array([x, y, z])

    def _get_box_position(self):
        """Return the current world-frame position of the "box" body."""
        box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
        return self.data.xpos[box_id]

    def _spawn_box(self, spawn_position: Optional[np.ndarray] = None):
        """
        Spawns the box at the given position or at a random position near the origin.

        Parameters:
        - spawn_position: Optional np.array of shape (3,) specifying x, y, z coordinates.
        """
        if spawn_position is None:
            # Randomly generate spawn position within the limits
            spawn_position = self._generate_valid_location()
        else:
            spawn_position = np.array(spawn_position)
        self.spawn_position = spawn_position
        # Get the index of the box's free joint in qpos
        box_joint_name = "box_free_joint"
        box_joint_id = mujoco.mj_name2id(
            self.model, mujoco.mjtObj.mjOBJ_JOINT, box_joint_name
        )
        # Get the starting index of the box's free joint in qpos
        box_qpos_adr = self.model.jnt_qposadr[box_joint_id]
        # Set the position and orientation in qpos
        # For a free joint, qpos has 7 elements: [x, y, z, qw, qx, qy, qz]
        # We'll set orientation to no rotation (identity quaternion)
        self.data.qpos[box_qpos_adr : box_qpos_adr + 3] = spawn_position  # Set position
        self.data.qpos[box_qpos_adr + 3 : box_qpos_adr + 7] = np.array(
            [1, 0, 0, 0]
        )  # Set orientation (w, x, y, z)
        # Reset the box's velocities to zero (a free joint has 6 DoF velocities)
        box_qvel_adr = self.model.jnt_dofadr[box_joint_id]
        self.data.qvel[box_qvel_adr : box_qvel_adr + 6] = np.zeros(6)
        # Recompute positions and velocities
        mujoco.mj_forward(self.model, self.data)

    def _get_child_body_ids(self, body_id):
        """Recursively collect the ids of all descendant bodies of ``body_id``."""
        ids = []
        child_body_ids = np.where(self.model.body_parentid == body_id)[0]
        for child_body_id in child_body_ids:
            ids.append(child_body_id)
            ids.extend(self._get_child_body_ids(child_body_id))
        return ids

    @property
    def gripper_body_ids(self):
        # the ids of the bodies that are part of the gripper (gripper_link + descendants)
        gripper_body_id = mujoco.mj_name2id(
            self.model, mujoco.mjtObj.mjOBJ_BODY, "gripper_link"
        )
        return [gripper_body_id] + self._get_child_body_ids(gripper_body_id)

    @property
    def gripper_geom_ids(self):
        # the ids of the geoms that are part of the gripper
        gripper_body_ids = self.gripper_body_ids
        mask = np.isin(self.model.geom_bodyid, gripper_body_ids)
        return np.nonzero(mask)[0]

    @property
    def box_geom_ids(self):
        # BUG FIX: return ALL geoms attached to the box body, not just the first.
        # body_geomadr is the first geom index; body_geomnum is the geom count.
        box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
        geom_adr = self.model.body_geomadr[box_id]
        geom_num = self.model.body_geomnum[box_id]
        return np.arange(geom_adr, geom_adr + geom_num)

    def gripper_cfrc_ext(self):
        """Return the external contact forces on the gripper bodies, clipped
        to ``self._contact_force_range`` (shape: n_gripper_bodies x 6)."""
        # We only care about the contact forces acting on the gripper
        raw_contact_forces = self.data.cfrc_ext[self.gripper_body_ids]
        min_value, max_value = self._contact_force_range
        contact_forces = np.clip(raw_contact_forces, min_value, max_value)
        return contact_forces

    def is_grasping_object(self, object_geom_ids, gripper_geom_ids):
        """
        A loose sense of 'grasping' is defined as the gripper and the object having
        a contact point (in either geom order).
        """
        for contact in self.data.contact:
            geom1, geom2 = contact.geom1, contact.geom2
            if geom1 in object_geom_ids and geom2 in gripper_geom_ids:
                return True
            if geom2 in object_geom_ids and geom1 in gripper_geom_ids:
                return True
        return False

    def _is_box_at_goal(self):
        """True when the box center is within ``goal_tolerance`` of the goal."""
        box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
        box_position = self.data.xpos[box_id]
        return np.linalg.norm(box_position - self.goal_position) < self._goal_tolerance

    def _store_previous_state(self):
        """Snapshot the box pose before stepping the simulation."""
        box_body_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
        # BUG FIX: copy the arrays. data.xpos/xquat rows are views into the
        # simulation state; without .copy() the stored "previous" pose would
        # silently track the current pose after do_simulation().
        self.previous_state = {
            "box_position": self.data.xpos[box_body_id].copy(),
            "box_orientation": self.data.xquat[box_body_id].copy(),
        }

    # Reward 0: Gripper -> Box Distance Reward
    # This motivates the robot arm to move the gripper closer to the box
    @property
    def gripper_distance_reward(self):
        gripper_id = mujoco.mj_name2id(
            self.model, mujoco.mjtObj.mjOBJ_BODY, "gripper_link"
        )
        gripper_position = self.data.xpos[gripper_id]
        box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
        box_position = self.data.xpos[box_id]
        # An alternative scheme (rewarding the per-step *reduction* in distance,
        # using self.previous_state) was considered; the current scheme simply
        # penalizes the absolute gripper-to-box distance.
        distance_reward = -self._gripper_distance_reward_weight * np.linalg.norm(
            gripper_position - box_position
        )
        return distance_reward

    # Reward 1: Box -> Goal Distance Reward
    # This motivates the robot arm to move the box closer to the goal
    @property
    def box_distance_reward(self):
        box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
        box_position = self.data.xpos[box_id]
        # As above: penalize the absolute box-to-goal distance (a delta-based
        # scheme using self.previous_state is a possible alternative).
        distance_reward = -self._box_distance_reward_weight * np.linalg.norm(
            box_position - self.goal_position
        )
        return distance_reward

    # Reward 2: Grasp Reward
    # This motivates the robot arm to move closer to the box and grasp it
    @property
    def grasp_reward(self):
        box_geom_ids = self.box_geom_ids
        gripper_geom_ids = self.gripper_geom_ids
        # bool * weight -> weight when grasping, 0 otherwise
        grasp_cost = self._grasp_reward_weight * self.is_grasping_object(
            box_geom_ids, gripper_geom_ids
        )
        return grasp_cost

    # Reward 3: Success Reward
    # Note that once the success reward was given, the episode will be terminated
    @property
    def success_reward(self):
        if self._is_box_at_goal():
            return self._success_reward
        else:
            return 0

    # Reward 4: Time Penalty
    @property
    def time_penalty_reward(self):
        # grows linearly with elapsed simulation time
        penalty = -self.data.time * self._time_penalty_weight
        return penalty

    # Reward 5: Collision Penalty
    # Note that usually the grasp reward should be set high enough to outweigh the collision penalty
    @property
    def collision_penalty_reward(self):
        # ncon is the number of active contacts in the current sim state
        penalty = -self.data.ncon * self._collision_penalty_weight
        return penalty

    # Reward 6: Control Cost
    def control_penalty_reward(self, action):
        """Negative quadratic penalty on the commanded action magnitude."""
        control_cost = -self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost

    @property
    def distance_from_box(self):
        """Distance between the box and the goal (logged in ``info``)."""
        box_id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
        box_position = self.data.xpos[box_id]
        return np.linalg.norm(box_position - self.goal_position)

    def step(self, action):
        """Advance the simulation by ``frame_skip`` timesteps with ``action``.

        Returns the standard Gymnasium 5-tuple
        ``(observation, reward, terminated, truncated, info)``.
        """
        self._store_previous_state()
        self.do_simulation(action, self.frame_skip)
        observation = self._get_obs()
        reward, reward_info = self._get_rew(action)
        terminated = self._is_box_at_goal()
        info = {
            "distance_from_box": self.distance_from_box,
            **reward_info,
        }
        if self.render_mode == "human":
            self.render()
        # truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
        return observation, reward, terminated, False, info

    def _get_rew(self, action):
        """Compute the total reward and a per-component breakdown dict."""
        gripper_distance_reward = self.gripper_distance_reward
        box_distance_reward = self.box_distance_reward
        grasp_reward = self.grasp_reward
        success_reward = self.success_reward
        # the penalty rewards are negative in nature
        time_penalty = self.time_penalty_reward
        collision_penalty = self.collision_penalty_reward
        control_penalty = self.control_penalty_reward(action)
        total_reward = (
            gripper_distance_reward
            + box_distance_reward
            + grasp_reward
            + success_reward
            + time_penalty
            + collision_penalty
            + control_penalty
        )
        reward_info = {
            "gripper_distance_reward": gripper_distance_reward,
            "box_distance_reward": box_distance_reward,
            "grasp_reward": grasp_reward,
            "success_reward": success_reward,
            "time_penalty": time_penalty,
            "collision_penalty": collision_penalty,
            "control_penalty": control_penalty,
            "total_reward": total_reward,
        }
        return total_reward, reward_info

    def _get_obs(self):
        """Concatenate qpos, qvel, the goal position, and (optionally) the
        flattened gripper contact forces into one observation vector."""
        position = self.data.qpos.flatten()
        velocity = self.data.qvel.flatten()
        if self._include_cfrc_ext_in_observation:
            contact_force = self.gripper_cfrc_ext().flatten()
            return np.concatenate(
                (position, velocity, self.goal_position, contact_force)
            )
        else:
            return np.concatenate((position, velocity, self.goal_position))

    def _reset_goal(self, spawn_position: Optional[np.ndarray] = None):
        """Move the goal site to ``spawn_position`` (or a random valid location)."""
        goal_site_id = mujoco.mj_name2id(
            self.model, mujoco.mjtObj.mjOBJ_SITE, "goal_site"
        )
        if spawn_position is not None:
            self.goal_position = np.array(spawn_position)
        else:
            self.goal_position = self._generate_valid_location()
        self.model.site_pos[goal_site_id] = self.goal_position
        mujoco.mj_forward(self.model, self.data)

    def reset_model(self):
        """MujocoEnv hook: re-noise the robot state and re-place box and goal."""
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq
        )
        qvel = (
            self.init_qvel
            + self._reset_noise_scale * self.np_random.standard_normal(self.model.nv)
        )
        # TODO: check if this resets data.time
        self.set_state(qpos, qvel)
        if self._randomize_box_position:
            self._spawn_box()
            self._reset_goal()
        else:
            # keep the box and goal where they are (re-asserting the pose also
            # zeroes the box velocity after the qpos/qvel noise above)
            self._spawn_box(self._get_box_position())
            self._reset_goal(self.goal_position)
        observation = self._get_obs()
        return observation

    def _get_reset_info(self):
        """Info dict returned by ``reset()`` alongside the first observation."""
        return {"distance_from_box": self.distance_from_box}