dynamicobstacles.py

from operator import add

import gym

from gym_minigrid.minigrid import Ball, Goal, Grid, MiniGridEnv


class DynamicObstaclesEnv(MiniGridEnv):
    """
    Single-room square grid environment with moving obstacles
    """

    def __init__(
        self, size=8, agent_start_pos=(1, 1), agent_start_dir=0, n_obstacles=4, **kwargs
    ):
        self.agent_start_pos = agent_start_pos
        self.agent_start_dir = agent_start_dir

        # Reduce obstacles if there are too many
        if n_obstacles <= size / 2 + 1:
            self.n_obstacles = int(n_obstacles)
        else:
            self.n_obstacles = int(size / 2)

        super().__init__(
            grid_size=size,
            max_steps=4 * size * size,
            # Set this to True for maximum speed
            see_through_walls=True,
            **kwargs
        )
        # Allow only 3 actions: left, right, forward
        self.action_space = gym.spaces.Discrete(self.actions.forward + 1)
        self.reward_range = (-1, 1)
    def _gen_grid(self, width, height):
        # Create an empty grid
        self.grid = Grid(width, height)

        # Generate the surrounding walls
        self.grid.wall_rect(0, 0, width, height)

        # Place a goal square in the bottom-right corner
        self.grid.set(width - 2, height - 2, Goal())

        # Place the agent
        if self.agent_start_pos is not None:
            self.agent_pos = self.agent_start_pos
            self.agent_dir = self.agent_start_dir
        else:
            self.place_agent()

        # Place obstacles
        self.obstacles = []
        for i_obst in range(self.n_obstacles):
            self.obstacles.append(Ball())
            self.place_obj(self.obstacles[i_obst], max_tries=100)

        self.mission = "get to the green goal square"
    def step(self, action):
        # Invalid actions fall back to action 0
        if action >= self.action_space.n:
            action = 0

        # Check if there is an obstacle in front of the agent
        front_cell = self.grid.get(*self.front_pos)
        not_clear = front_cell and front_cell.type != "goal"

        # Update obstacle positions: move each obstacle to a random free cell
        # in the 3x3 neighborhood around its old position; if no free cell is
        # found, leave it where it is
        for i_obst in range(len(self.obstacles)):
            old_pos = self.obstacles[i_obst].cur_pos
            top = tuple(map(add, old_pos, (-1, -1)))

            try:
                self.place_obj(
                    self.obstacles[i_obst], top=top, size=(3, 3), max_tries=100
                )
                self.grid.set(*old_pos, None)
            except Exception:
                pass

        # Update the agent's position/direction
        obs, reward, done, info = super().step(action)

        # If the agent tried to walk over an obstacle or wall
        if action == self.actions.forward and not_clear:
            reward = -1
            done = True
            return obs, reward, done, info

        return obs, reward, done, info
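

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal random-rollout example, assuming the old Gym API used above
# (reset() returning an observation and step() returning a 4-tuple). The
# environment is instantiated directly rather than via gym.make(), since
# this excerpt shows no register() calls.
if __name__ == "__main__":
    env = DynamicObstaclesEnv(size=8, n_obstacles=4)
    obs = env.reset()
    done = False
    total_reward = 0.0
    while not done:
        # Sample one of the three permitted actions: left, right, forward
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print("episode return:", total_reward)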