# dynamicobstacles.py
from operator import add

from gymnasium.spaces import Discrete

from minigrid.minigrid import Ball, Goal, Grid, MiniGridEnv, MissionSpace
  4. class DynamicObstaclesEnv(MiniGridEnv):
  5. """
  6. ### Description
  7. This environment is an empty room with moving obstacles.
  8. The goal of the agent is to reach the green goal square without colliding
  9. with any obstacle. A large penalty is subtracted if the agent collides with
  10. an obstacle and the episode finishes. This environment is useful to test
  11. Dynamic Obstacle Avoidance for mobile robots with Reinforcement Learning in
  12. Partial Observability.
  13. ### Mission Space
  14. "get to the green goal square"
  15. ### Action Space
  16. | Num | Name | Action |
  17. |-----|--------------|--------------|
  18. | 0 | left | Turn left |
  19. | 1 | right | Turn right |
  20. | 2 | forward | Move forward |
  21. | 3 | pickup | Unused |
  22. | 4 | drop | Unused |
  23. | 5 | toggle | Unused |
  24. | 6 | done | Unused |
  25. ### Observation Encoding
  26. - Each tile is encoded as a 3 dimensional tuple:
  27. `(OBJECT_IDX, COLOR_IDX, STATE)`
  28. - `OBJECT_TO_IDX` and `COLOR_TO_IDX` mapping can be found in
  29. [minigrid/minigrid.py](minigrid/minigrid.py)
  30. - `STATE` refers to the door state with 0=open, 1=closed and 2=locked
  31. ### Rewards
  32. A reward of '1' is given for success, and '0' for failure. A '-1' penalty is
  33. subtracted if the agent collides with an obstacle.
  34. ### Termination
  35. The episode ends if any one of the following conditions is met:
  36. 1. The agent reaches the goal.
  37. 2. The agent collides with an obstacle.
  38. 3. Timeout (see `max_steps`).
  39. ### Registered Configurations
  40. - `MiniGrid-Dynamic-Obstacles-5x5-v0`
  41. - `MiniGrid-Dynamic-Obstacles-Random-5x5-v0`
  42. - `MiniGrid-Dynamic-Obstacles-6x6-v0`
  43. - `MiniGrid-Dynamic-Obstacles-Random-6x6-v0`
  44. - `MiniGrid-Dynamic-Obstacles-8x8-v0`
  45. - `MiniGrid-Dynamic-Obstacles-16x16-v0`
  46. """
  47. def __init__(
  48. self, size=8, agent_start_pos=(1, 1), agent_start_dir=0, n_obstacles=4, **kwargs
  49. ):
  50. self.agent_start_pos = agent_start_pos
  51. self.agent_start_dir = agent_start_dir
  52. # Reduce obstacles if there are too many
  53. if n_obstacles <= size / 2 + 1:
  54. self.n_obstacles = int(n_obstacles)
  55. else:
  56. self.n_obstacles = int(size / 2)
  57. mission_space = MissionSpace(
  58. mission_func=lambda: "get to the green goal square"
  59. )
  60. super().__init__(
  61. mission_space=mission_space,
  62. grid_size=size,
  63. max_steps=4 * size * size,
  64. # Set this to True for maximum speed
  65. see_through_walls=True,
  66. **kwargs
  67. )
  68. # Allow only 3 actions permitted: left, right, forward
  69. self.action_space = Discrete(self.actions.forward + 1)
  70. self.reward_range = (-1, 1)
  71. def _gen_grid(self, width, height):
  72. # Create an empty grid
  73. self.grid = Grid(width, height)
  74. # Generate the surrounding walls
  75. self.grid.wall_rect(0, 0, width, height)
  76. # Place a goal square in the bottom-right corner
  77. self.grid.set(width - 2, height - 2, Goal())
  78. # Place the agent
  79. if self.agent_start_pos is not None:
  80. self.agent_pos = self.agent_start_pos
  81. self.agent_dir = self.agent_start_dir
  82. else:
  83. self.place_agent()
  84. # Place obstacles
  85. self.obstacles = []
  86. for i_obst in range(self.n_obstacles):
  87. self.obstacles.append(Ball())
  88. self.place_obj(self.obstacles[i_obst], max_tries=100)
  89. self.mission = "get to the green goal square"
  90. def step(self, action):
  91. # Invalid action
  92. if action >= self.action_space.n:
  93. action = 0
  94. # Check if there is an obstacle in front of the agent
  95. front_cell = self.grid.get(*self.front_pos)
  96. not_clear = front_cell and front_cell.type != "goal"
  97. # Update obstacle positions
  98. for i_obst in range(len(self.obstacles)):
  99. old_pos = self.obstacles[i_obst].cur_pos
  100. top = tuple(map(add, old_pos, (-1, -1)))
  101. try:
  102. self.place_obj(
  103. self.obstacles[i_obst], top=top, size=(3, 3), max_tries=100
  104. )
  105. self.grid.set(old_pos[0], old_pos[1], None)
  106. except Exception:
  107. pass
  108. # Update the agent's position/direction
  109. obs, reward, terminated, truncated, info = super().step(action)
  110. # If the agent tried to walk over an obstacle or wall
  111. if action == self.actions.forward and not_clear:
  112. reward = -1
  113. terminated = True
  114. return obs, reward, terminated, truncated, info
  115. return obs, reward, terminated, truncated, info