# dynamicobstacles.py — DynamicObstaclesEnv: a MiniGrid environment with moving obstacles.
from operator import add

from gymnasium.spaces import Discrete

from minigrid.core.grid import Grid
from minigrid.core.mission import MissionSpace
from minigrid.core.world_object import Ball, Goal
from minigrid.minigrid_env import MiniGridEnv
  7. class DynamicObstaclesEnv(MiniGridEnv):
  8. """
  9. ### Description
  10. This environment is an empty room with moving obstacles.
  11. The goal of the agent is to reach the green goal square without colliding
  12. with any obstacle. A large penalty is subtracted if the agent collides with
  13. an obstacle and the episode finishes. This environment is useful to test
  14. Dynamic Obstacle Avoidance for mobile robots with Reinforcement Learning in
  15. Partial Observability.
  16. ### Mission Space
  17. "get to the green goal square"
  18. ### Action Space
  19. | Num | Name | Action |
  20. |-----|--------------|--------------|
  21. | 0 | left | Turn left |
  22. | 1 | right | Turn right |
  23. | 2 | forward | Move forward |
  24. | 3 | pickup | Unused |
  25. | 4 | drop | Unused |
  26. | 5 | toggle | Unused |
  27. | 6 | done | Unused |
  28. ### Observation Encoding
  29. - Each tile is encoded as a 3 dimensional tuple:
  30. `(OBJECT_IDX, COLOR_IDX, STATE)`
  31. - `OBJECT_TO_IDX` and `COLOR_TO_IDX` mapping can be found in
  32. [minigrid/minigrid.py](minigrid/minigrid.py)
  33. - `STATE` refers to the door state with 0=open, 1=closed and 2=locked
  34. ### Rewards
  35. A reward of '1' is given for success, and '0' for failure. A '-1' penalty is
  36. subtracted if the agent collides with an obstacle.
  37. ### Termination
  38. The episode ends if any one of the following conditions is met:
  39. 1. The agent reaches the goal.
  40. 2. The agent collides with an obstacle.
  41. 3. Timeout (see `max_steps`).
  42. ### Registered Configurations
  43. - `MiniGrid-Dynamic-Obstacles-5x5-v0`
  44. - `MiniGrid-Dynamic-Obstacles-Random-5x5-v0`
  45. - `MiniGrid-Dynamic-Obstacles-6x6-v0`
  46. - `MiniGrid-Dynamic-Obstacles-Random-6x6-v0`
  47. - `MiniGrid-Dynamic-Obstacles-8x8-v0`
  48. - `MiniGrid-Dynamic-Obstacles-16x16-v0`
  49. """
  50. def __init__(
  51. self, size=8, agent_start_pos=(1, 1), agent_start_dir=0, n_obstacles=4, **kwargs
  52. ):
  53. self.agent_start_pos = agent_start_pos
  54. self.agent_start_dir = agent_start_dir
  55. # Reduce obstacles if there are too many
  56. if n_obstacles <= size / 2 + 1:
  57. self.n_obstacles = int(n_obstacles)
  58. else:
  59. self.n_obstacles = int(size / 2)
  60. mission_space = MissionSpace(
  61. mission_func=lambda: "get to the green goal square"
  62. )
  63. super().__init__(
  64. mission_space=mission_space,
  65. grid_size=size,
  66. max_steps=4 * size * size,
  67. # Set this to True for maximum speed
  68. see_through_walls=True,
  69. **kwargs
  70. )
  71. # Allow only 3 actions permitted: left, right, forward
  72. self.action_space = Discrete(self.actions.forward + 1)
  73. self.reward_range = (-1, 1)
  74. def _gen_grid(self, width, height):
  75. # Create an empty grid
  76. self.grid = Grid(width, height)
  77. # Generate the surrounding walls
  78. self.grid.wall_rect(0, 0, width, height)
  79. # Place a goal square in the bottom-right corner
  80. self.grid.set(width - 2, height - 2, Goal())
  81. # Place the agent
  82. if self.agent_start_pos is not None:
  83. self.agent_pos = self.agent_start_pos
  84. self.agent_dir = self.agent_start_dir
  85. else:
  86. self.place_agent()
  87. # Place obstacles
  88. self.obstacles = []
  89. for i_obst in range(self.n_obstacles):
  90. self.obstacles.append(Ball())
  91. self.place_obj(self.obstacles[i_obst], max_tries=100)
  92. self.mission = "get to the green goal square"
  93. def step(self, action):
  94. # Invalid action
  95. if action >= self.action_space.n:
  96. action = 0
  97. # Check if there is an obstacle in front of the agent
  98. front_cell = self.grid.get(*self.front_pos)
  99. not_clear = front_cell and front_cell.type != "goal"
  100. # Update obstacle positions
  101. for i_obst in range(len(self.obstacles)):
  102. old_pos = self.obstacles[i_obst].cur_pos
  103. top = tuple(map(add, old_pos, (-1, -1)))
  104. try:
  105. self.place_obj(
  106. self.obstacles[i_obst], top=top, size=(3, 3), max_tries=100
  107. )
  108. self.grid.set(old_pos[0], old_pos[1], None)
  109. except Exception:
  110. pass
  111. # Update the agent's position/direction
  112. obs, reward, terminated, truncated, info = super().step(action)
  113. # If the agent tried to walk over an obstacle or wall
  114. if action == self.actions.forward and not_clear:
  115. reward = -1
  116. terminated = True
  117. return obs, reward, terminated, truncated, info
  118. return obs, reward, terminated, truncated, info