e8b51240f9
Implement a complete DQN framework for training on Atari Space Invaders. Includes:
- QNetwork and DuelingQNetwork neural network architectures
- Experience replay buffers (standard and prioritized)
- A DQN agent with an ε-greedy policy and Double DQN
- Environment wrappers (grayscale conversion, resizing, frame stacking, etc.)
- Trainer, evaluation script, and plotting utilities
- Detailed project documentation and dependency configuration
170 lines
5.8 KiB
Python
"""Main training script for DQN on Space Invaders."""
|
|
import sys
|
|
import os
|
|
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
|
|
import argparse
|
|
import torch
|
|
import numpy as np
|
|
|
|
from src.network import QNetwork, DuelingQNetwork
|
|
from src.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
|
|
from src.agent import DQNAgent
|
|
from src.trainer import DQNTrainer
|
|
from src.utils import make_env, get_device
|
|
|
|
|
|
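
# Example invocation (hypothetical flag choices; every flag used here is
# defined in main() below):
#   python train.py --steps 2000000 --dueling --prioritized --seed 0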


def main():
    parser = argparse.ArgumentParser(description="DQN for Space Invaders")

    # Environment arguments
    parser.add_argument("--env", type=str, default="ALE/SpaceInvaders-v5",
                        help="Atari environment ID")

    # Training arguments
    parser.add_argument("--steps", type=int, default=2_000_000,
                        help="Total number of training steps")
    parser.add_argument("--lr", type=float, default=1e-4,
                        help="Learning rate")
    parser.add_argument("--gamma", type=float, default=0.99,
                        help="Discount factor")
    parser.add_argument("--batch-size", type=int, default=32,
                        help="Batch size")
    parser.add_argument("--buffer-size", type=int, default=100_000,
                        help="Replay buffer capacity")

    # Epsilon-greedy parameters
    parser.add_argument("--epsilon-start", type=float, default=1.0,
                        help="Initial epsilon")
    parser.add_argument("--epsilon-end", type=float, default=0.01,
                        help="Final epsilon")
    parser.add_argument("--epsilon-decay", type=int, default=1_000_000,
                        help="Number of steps over which epsilon decays")
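
    # The exploration schedule is assumed to be linear (the common choice;
    # the actual schedule lives in src.agent):
    #   epsilon(t) = max(epsilon_end,
    #                    epsilon_start - (epsilon_start - epsilon_end) * t / epsilon_decay)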

    # Network parameters
    parser.add_argument("--target-update", type=int, default=1000,
                        help="Target network update frequency (steps)")
    # BooleanOptionalAction also generates --no-double-dqn; a plain
    # store_true with default=True could never be switched off.
    parser.add_argument("--double-dqn", action=argparse.BooleanOptionalAction,
                        default=True,
                        help="Use Double DQN (disable with --no-double-dqn)")
    parser.add_argument("--dueling", action="store_true", default=False,
                        help="Use the Dueling DQN architecture")
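
    # With Double DQN the online network selects the next action and the
    # target network evaluates it:
    #   y = r + gamma * Q_target(s', argmax_a Q_online(s', a))
    # which reduces the overestimation bias of the vanilla target
    #   y = r + gamma * max_a Q_target(s', a).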

    # Evaluation parameters
    parser.add_argument("--eval-freq", type=int, default=10000,
                        help="Evaluation frequency (steps)")
    parser.add_argument("--eval-episodes", type=int, default=10,
                        help="Number of evaluation episodes")
    parser.add_argument("--save-freq", type=int, default=50000,
                        help="Checkpoint frequency (steps)")
    parser.add_argument("--warmup", type=int, default=10000,
                        help="Warmup steps before learning starts")
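
    # During warmup the agent presumably collects transitions without
    # gradient updates, so the replay buffer is reasonably full before the
    # first minibatch is sampled.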

    # Prioritized experience replay
    parser.add_argument("--prioritized", action="store_true", default=False,
                        help="Use prioritized experience replay")
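
    # Prioritized replay (Schaul et al., 2016) samples transition i with
    # probability P(i) = p_i^alpha / sum_k p_k^alpha, where p_i is usually
    # the absolute TD error, and corrects the sampling bias with importance
    # weights w_i = (N * P(i))^(-beta), normalized by max_j w_j.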

    # Miscellaneous
    parser.add_argument("--seed", type=int, default=42,
                        help="Random seed")
    parser.add_argument("--save-dir", type=str, default="models",
                        help="Directory for model checkpoints")
    parser.add_argument("--log-dir", type=str, default="logs",
                        help="Directory for logs")

    args = parser.parse_args()

    # Set the random seeds
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Select the compute device
    device = get_device()

    # Create the training and evaluation environments
    print(f"Creating environment: {args.env}")
    env = make_env(args.env, gray_scale=True, resize=True, frame_stack=4)
    eval_env = make_env(args.env, gray_scale=True, resize=True, frame_stack=4)
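
    # This is the standard Atari preprocessing from the DQN paper (Mnih et
    # al., 2015): grayscale conversion, an 84x84 resize, and a stack of 4
    # consecutive frames so the network can infer motion.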

    # Size of the action space
    num_actions = env.action_space.n
    print(f"Action space: {num_actions}")

    # Create the networks
    state_shape = (4, 84, 84)  # 4 stacked frames, 84x84 grayscale

    if args.dueling:
        print("Using the Dueling DQN architecture")
        q_network = DuelingQNetwork(state_shape, num_actions).to(device)
        target_network = DuelingQNetwork(state_shape, num_actions).to(device)
    else:
        print("Using the standard DQN architecture")
        q_network = QNetwork(state_shape, num_actions).to(device)
        target_network = QNetwork(state_shape, num_actions).to(device)
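
    # A dueling head (Wang et al., 2016) presumably decomposes the action
    # value into a state value plus a mean-centered advantage:
    #   Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a')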

    # Copy the initial weights into the target network
    target_network.load_state_dict(q_network.state_dict())
    target_network.eval()
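
    # The target network stays frozen between periodic syncs (every
    # --target-update steps), which keeps the bootstrap target from chasing
    # the online network's own updates.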

    print(f"Network parameters: {sum(p.numel() for p in q_network.parameters()):,}")

    # Create the replay buffer
    if args.prioritized:
        print("Using prioritized experience replay")
        replay_buffer = PrioritizedReplayBuffer(
            args.buffer_size, state_shape, device
        )
    else:
        print("Using standard experience replay")
        replay_buffer = ReplayBuffer(
            args.buffer_size, state_shape, device
        )

    # Create the agent
    agent = DQNAgent(
        q_network=q_network,
        target_network=target_network,
        replay_buffer=replay_buffer,
        device=device,
        num_actions=num_actions,
        gamma=args.gamma,
        lr=args.lr,
        epsilon_start=args.epsilon_start,
        epsilon_end=args.epsilon_end,
        epsilon_decay_steps=args.epsilon_decay,
        target_update_freq=args.target_update,
        batch_size=args.batch_size,
        double_dqn=args.double_dqn,
    )

    # Create the trainer
    trainer = DQNTrainer(
        agent=agent,
        env=env,
        eval_env=eval_env,
        log_dir=args.log_dir,
        save_dir=args.save_dir,
        eval_freq=args.eval_freq,
        save_freq=args.save_freq,
        num_eval_episodes=args.eval_episodes,
        warmup_steps=args.warmup,
    )

    # Print the configuration
    print("\nTraining configuration:")
    print(f"  Total steps: {args.steps:,}")
    print(f"  Learning rate: {args.lr}")
    print(f"  Discount factor: {args.gamma}")
    print(f"  Batch size: {args.batch_size}")
    print(f"  Buffer size: {args.buffer_size:,}")
    print(f"  Epsilon decay: {args.epsilon_start} -> {args.epsilon_end} over {args.epsilon_decay:,} steps")
    print(f"  Target network update: every {args.target_update} steps")
    print(f"  Double DQN: {args.double_dqn}")
    print(f"  Warmup steps: {args.warmup:,}")
    print("=" * 60)

    # Start training
    trainer.train(args.steps)


if __name__ == "__main__":
    main()