程序功能
这段代码实现了一个多智能体强化学习环境,其中两个智能体在5x5的网格上移动:
智能体目标:
Agent 1 从 (0, 0) 出发,目标是 (4, 4)。
Agent 2 从 (4, 4) 出发,目标是 (0, 0)。
动作空间:每个智能体有4个动作(上、下、左、右)。
奖励:到达目标位置获得 10 分,否则每步 -1 分。
终止条件:两个智能体都到达目标。
主程序中,两个智能体在随机动作下执行5个回合,并打印每一步的状态和奖励。
代码
import numpy as np
import gym
from gym import spaces# 定义多智能体环境
class MultiAgentEnv(gym.Env):
    """A two-agent grid world on a 5x5 grid.

    Agent 1 starts at (0, 0) and must reach (4, 4); Agent 2 starts at
    (4, 4) and must reach (0, 0).  Each agent chooses one of 4 discrete
    moves (up, down, left, right) per step.  On each step an agent earns
    +10 if it is standing on its goal after the move, otherwise -1.
    The episode terminates only when BOTH agents occupy their goals
    simultaneously.
    """

    def __init__(self):
        super(MultiAgentEnv, self).__init__()
        # Side length of the square grid world.
        self.grid_size = 5
        # Agents' starting positions (row, col).
        self.agent1_pos = np.array([0, 0])  # Agent 1 start
        self.agent2_pos = np.array([4, 4])  # Agent 2 start
        # Agents' goal positions.
        self.goal1 = np.array([4, 4])  # Agent 1 goal
        self.goal2 = np.array([0, 0])  # Agent 2 goal
        # Action space: 4 discrete moves (0=up, 1=down, 2=left, 3=right).
        self.action_space = spaces.Discrete(4)
        # BUG FIX: the original declared shape=(2,), but _get_obs() returns
        # a (2, 2) array — one (row, col) pair per agent.  The declared
        # observation space now matches the observations actually emitted.
        self.observation_space = spaces.Box(
            low=0, high=self.grid_size - 1, shape=(2, 2), dtype=np.int32
        )

    def reset(self):
        """Reset both agents to their starting corners and return the obs."""
        self.agent1_pos = np.array([0, 0])
        self.agent2_pos = np.array([4, 4])
        return self._get_obs()

    def step(self, actions):
        """Advance the environment by one joint step.

        Args:
            actions: pair ``(action1, action2)``, one discrete action per
                agent.

        Returns:
            Tuple ``(obs, [reward1, reward2], done, info)`` where ``done``
            is True only when both agents are on their goals after moving.
        """
        action1, action2 = actions
        # Move each agent independently (no collision handling).
        self.agent1_pos = self._move(self.agent1_pos, action1)
        self.agent2_pos = self._move(self.agent2_pos, action2)
        # +10 on a step that ends on the goal, -1 otherwise.
        done1 = np.array_equal(self.agent1_pos, self.goal1)
        done2 = np.array_equal(self.agent2_pos, self.goal2)
        reward1 = 10 if done1 else -1
        reward2 = 10 if done2 else -1
        done = done1 and done2
        return self._get_obs(), [reward1, reward2], done, {}

    def _move(self, position, action):
        """Apply ``action`` to ``position`` in place, clamping at borders.

        Actions: 0 = up, 1 = down, 2 = left, 3 = right.  A move that
        would leave the grid is a no-op.
        """
        if action == 0 and position[0] > 0:  # up
            position[0] -= 1
        elif action == 1 and position[0] < self.grid_size - 1:  # down
            position[0] += 1
        elif action == 2 and position[1] > 0:  # left
            position[1] -= 1
        elif action == 3 and position[1] < self.grid_size - 1:  # right
            position[1] += 1
        return position

    def _get_obs(self):
        """Return both agents' positions as a (2, 2) array."""
        return np.array([self.agent1_pos, self.agent2_pos])
if __name__ == '__main__':
    # Demo: run 5 episodes with uniformly random actions for both agents,
    # printing per-step positions and rewards.
    env = MultiAgentEnv()
    for ep_idx in range(5):
        print(f"Episode {ep_idx + 1}:")
        obs = env.reset()
        done = False
        step_count = 0
        while not done:
            # Sample one independent random action per agent.
            joint_action = [env.action_space.sample(), env.action_space.sample()]
            obs, rewards, done, info = env.step(joint_action)
            step_count += 1
            print(f" Step {step_count}:")
            print(f" Agent 1 Position: {obs[0]}, Reward: {rewards[0]}")
            print(f" Agent 2 Position: {obs[1]}, Reward: {rewards[1]}")
        print("Episode finished!\n")