perf: add GPU optimizations for PPO and DQN (AMP mixed precision, pinned memory, torch.compile)

- PPO (CW1_id_name): add AMP GradScaler + autocast mixed-precision training, pinned memory to speed up CPU-to-GPU transfers, torch.compile JIT support, and default hyperparameters retuned for the RTX 5090 (a minimal sketch of the update step appears after the commit metadata below)
- DQN (Atari): add AMP mixed precision, a pinned-memory replay buffer, vectorized batch experience insertion (add_batch), and batched action selection (batch_select_actions), eliminating per-transition Python loops
- train_parallel.py: rewritten as an unbuffered script that integrates all of the above, with 64 parallel environments and 4 training updates per environment step

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
2026-05-05 00:50:16 +08:00
parent ed0822966b
commit d5c9baffe6
7 changed files with 495 additions and 883 deletions
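
The AMP change referenced in the bullets above follows the standard autocast + GradScaler pattern. Below is a minimal sketch of such a mixed-precision Double-DQN update; the function name, argument layout, and loss are illustrative assumptions, not the code of src/agent.py or the PPO trainer:

import torch
import torch.nn.functional as F

def amp_double_dqn_update(q_network, target_network, optimizer, scaler, batch, gamma=0.99):
    """One mixed-precision Double-DQN update (illustrative sketch)."""
    states, actions, rewards, next_states, dones = batch  # tensors already on the GPU
    # Run the forward pass and loss in float16 where safe.
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        q_values = q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        with torch.no_grad():
            next_actions = q_network(next_states).argmax(dim=1, keepdim=True)
            next_q = target_network(next_states).gather(1, next_actions).squeeze(1)
            targets = rewards + gamma * next_q * (1.0 - dones)
        loss = F.smooth_l1_loss(q_values, targets)
    optimizer.zero_grad(set_to_none=True)
    scaler.scale(loss).backward()      # scale the loss to avoid fp16 gradient underflow
    scaler.unscale_(optimizer)         # unscale before gradient clipping
    torch.nn.utils.clip_grad_norm_(q_network.parameters(), 10.0)
    scaler.step(optimizer)
    scaler.update()
    return loss.item()

Here `scaler` would be a `torch.cuda.amp.GradScaler()` created once per agent; the same scale/step/update pattern wraps the PPO policy and value losses.
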
@@ -1,31 +1,36 @@
"""并行环境 DQN 训练脚本 - 使用 AsyncVectorEnv 加速数据收集.
"""Dueling Double DQN - Space Invaders 并行训练脚本
每个训练迭代并行采集 N 个环境的转移,批量 GPU 推理,显著提升 FPS
适合在 AutoDL 等多核服务器+GPU 环境运行。
使用 AsyncVectorEnv 并行运行多个 Atari 环境,GPU 批量推理加速
适合在 AutoDL 等多核服务器环境运行。
与 notebooks/train_parallel.ipynb 内容一致,但使用 Python 脚本直接运行,
确保 stdout 实时输出(无缓冲)。
"""
import sys
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from collections import deque
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, PROJECT_ROOT)
from src.network import QNetwork, DuelingQNetwork
from src.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from src.agent import DQNAgent
from src.utils import make_env, get_device
# Force unbuffered output (reconfigure covers this process; the env var covers subprocesses)
if hasattr(sys.stdout, "reconfigure"):
    sys.stdout.reconfigure(line_buffering=True)
os.environ["PYTHONUNBUFFERED"] = "1"
print("Imports complete", flush=True)

# ── Environment factory (used by AsyncVectorEnv worker processes) ──
def _make_env_fn(env_id):
"""Environment factory; must live at module level so multiprocessing can pickle it."""
# AsyncVectorEnv worker processes need to register ALE themselves
try:
import ale_py
import gymnasium as gym
@@ -37,28 +42,16 @@ def _make_env_fn(env_id):
return make_env(env_id, gray_scale=True, resize=True, frame_stack=4)
return _make
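# Usage sketch (illustrative; mirrors how main() builds the vector env below):
#     env_fns = [_make_env_fn("ALE/SpaceInvaders-v5") for _ in range(8)]
#     envs = gymnasium.vector.AsyncVectorEnv(env_fns)  # or SyncVectorEnv(env_fns)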
print("环境工厂就绪", flush=True)
# ── 并行训练器 ──
class ParallelTrainer:
"""并行环境 DQN 训练器.
使用 AsyncVectorEnv 并行运行 N 个环境,
同时收集转移 + 批量推理,大幅提升训练速度。
"""
    def __init__(
        self, agent, envs, eval_env, num_envs,
        save_dir="models", eval_freq=10000, save_freq=50000,
        num_eval_episodes=10, warmup_steps=10000,
        train_steps_per_update=1,
    ):
        self.agent = agent
        self.envs = envs
@@ -69,256 +62,173 @@ class ParallelTrainer:
        self.save_freq = save_freq
        self.num_eval_episodes = num_eval_episodes
        self.warmup_steps = warmup_steps
        self.train_steps_per_update = train_steps_per_update
        self.episode_rewards = deque(maxlen=100)
        self.eval_rewards = []
        self.best_eval_reward = -float("inf")
    def evaluate(self):
        """Evaluate the agent."""
        rewards = []
        for _ in range(self.num_eval_episodes):
            state, _ = self.eval_env.reset()
            ep_reward = 0
            done = False
            while not done:
                action = self.agent.select_action(state, evaluate=True)
                state, reward, terminated, truncated, _ = self.eval_env.step(action)
                done = terminated or truncated
                ep_reward += reward
            rewards.append(ep_reward)
        return np.mean(rewards)
    def train(self, total_steps):
        """Main parallel training loop.

        Args:
            total_steps: total number of environment interaction steps.
        """
        n = self.num_envs
        device = self.agent.device
        envs = self.envs
        print(f"Starting training: {total_steps:,} steps, {n} parallel envs, "
              f"{self.train_steps_per_update} updates per step", flush=True)
        print("=" * 60, flush=True)
        states, _ = envs.reset()
        ep_rewards = np.zeros(n, dtype=np.float32)
        ep_count = 0
        start_time = time.time()
        step = 0
        while step < total_steps:
            if step < self.warmup_steps:
                actions = np.array([envs.single_action_space.sample() for _ in range(n)])
            else:
                actions = self.agent.batch_select_actions(states, self.agent.epsilon)
            next_states, rewards, terminateds, truncateds, _ = envs.step(actions)
            dones = np.logical_or(terminateds, truncateds)
            # Vectorized batch insertion of experience
            self.agent.replay_buffer.add_batch(states, actions, rewards, next_states, dones)
            ep_rewards += rewards
            for i in range(n):
                if dones[i]:
                    self.episode_rewards.append(ep_rewards[i])
                    ep_count += 1
                    ep_rewards[i] = 0
            step += n
            states = next_states
            if step >= self.warmup_steps:
                for _ in range(self.train_steps_per_update):
                    self.agent.train_step()
            if ep_count > 0 and ep_count % 20 == 0:
                avg_r = np.mean(self.episode_rewards) if self.episode_rewards else 0
                elapsed = time.time() - start_time
                fps = step / elapsed
                lr = self.agent.optimizer.param_groups[0]["lr"]
                print(f"Step:{step:>10,} | Ep:{ep_count:>5} | AvgR:{avg_r:>7.1f} | "
                      f"Eps:{self.agent.epsilon:.3f} | LR:{lr:.2e} | FPS:{fps:.0f}", flush=True)
            if step % self.eval_freq == 0 and step > 0:
                eval_r = self.evaluate()
                self.eval_rewards.append((step, eval_r))
                print(f"\n[Eval] Step:{step:>10,} | AvgReturn:{eval_r:.1f}\n", flush=True)
                if eval_r > self.best_eval_reward:
                    self.best_eval_reward = eval_r
                    self.agent.save(f"{self.save_dir}/dqn_best.pt")
                    print(f"Best model updated (return: {eval_r:.1f})", flush=True)
            # note: step grows by num_envs, so this fires only if save_freq is a multiple of num_envs
            if step % self.save_freq == 0 and step > 0:
                self.agent.save(f"{self.save_dir}/dqn_step_{step}.pt")
        total_time = time.time() - start_time
        print("\n" + "=" * 60, flush=True)
        print(f"Training finished! Total time: {total_time:.1f} s | FPS: {total_steps/total_time:.0f}", flush=True)
        print(f"Best eval return: {self.best_eval_reward:.1f}", flush=True)
        self.agent.save(f"{self.save_dir}/dqn_final.pt")
print("训练器就绪", flush=True)
# ── 主入口 ──
def main():
    # ── Hyperparameters ──
    ENV_ID = "ALE/SpaceInvaders-v5"
    N_ENVS = 64
    TOTAL_STEPS = 2_000_000
    LR = 1e-4
    GAMMA = 0.99
    BATCH_SIZE = 2048
    BUFFER_SIZE = 1_000_000
    EPSILON_START = 1.0
    EPSILON_END = 0.01
    EPSILON_DECAY = 4_000_000
    TARGET_UPDATE = 5000
    LR_DECAY_STEPS = 5_000_000
    LR_DECAY_FACTOR = 0.5
    WARMUP_STEPS = 50_000
    EVAL_FREQ = 200000
    EVAL_EPISODES = 10
    SAVE_FREQ = 500000
    SEED = 42
    SAVE_DIR = os.path.join(PROJECT_ROOT, "models")
    TRAIN_STEPS_PER_UPDATE = 4
    USE_AMP = True
    USE_COMPILE = True
    USE_DUELING = True
    USE_DOUBLE = True
    USE_PER = True

    os.makedirs(SAVE_DIR, exist_ok=True)

    print(f"Config: {TOTAL_STEPS/1e6:.0f}M steps, {N_ENVS} parallel envs", flush=True)
    print(f"{TRAIN_STEPS_PER_UPDATE} training updates per step, batch size {BATCH_SIZE}", flush=True)
    print(f"AMP: {USE_AMP}, torch.compile: {USE_COMPILE}", flush=True)
    print(f"Models saved to: {SAVE_DIR}", flush=True)

    # Seeds
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    import platform

    # Device
    device = get_device()
    print(f"Using device: {device}", flush=True)
    # Create parallel training environments
    from gymnasium.vector import SyncVectorEnv
    env_fns = [_make_env_fn(ENV_ID) for _ in range(N_ENVS)]
    envs = SyncVectorEnv(env_fns)
    print(f"SyncVectorEnv: {envs.num_envs} environments", flush=True)

    # Evaluation environment (single env)
    eval_env = make_env(ENV_ID, gray_scale=True, resize=True, frame_stack=4)
    num_actions = envs.single_action_space.n
    print(f"Action space: {num_actions}", flush=True)
    state_shape = (4, 84, 84)
    # Create networks
    if USE_DUELING:
        q_network = DuelingQNetwork(state_shape, num_actions).to(device)
        target_network = DuelingQNetwork(state_shape, num_actions).to(device)
        print(f"Dueling DQN: {sum(p.numel() for p in q_network.parameters()):,} parameters", flush=True)
    else:
        q_network = QNetwork(state_shape, num_actions).to(device)
        target_network = QNetwork(state_shape, num_actions).to(device)
        print(f"Standard DQN: {sum(p.numel() for p in q_network.parameters()):,} parameters", flush=True)

    if USE_COMPILE and hasattr(torch, 'compile'):
        print("Applying torch.compile...", flush=True)
        q_network = torch.compile(q_network)
        target_network = torch.compile(target_network)
        print("torch.compile done", flush=True)

    target_network.load_state_dict(q_network.state_dict())
    target_network.eval()

    # Replay buffer
    if USE_PER:
        replay_buffer = PrioritizedReplayBuffer(BUFFER_SIZE, state_shape, device)
        print("Prioritized experience replay (pinned memory)", flush=True)
    else:
        replay_buffer = ReplayBuffer(BUFFER_SIZE, state_shape, device)
        print("Standard experience replay (pinned memory)", flush=True)
    agent = DQNAgent(
        q_network=q_network,
@@ -326,45 +236,70 @@ def main():
        replay_buffer=replay_buffer,
        device=device,
        num_actions=num_actions,
        gamma=GAMMA,
        lr=LR,
        epsilon_start=EPSILON_START,
        epsilon_end=EPSILON_END,
        epsilon_decay_steps=EPSILON_DECAY,
        target_update_freq=TARGET_UPDATE,
        batch_size=BATCH_SIZE,
        double_dqn=USE_DOUBLE,
        lr_decay_steps=LR_DECAY_STEPS,
        lr_decay_factor=LR_DECAY_FACTOR,
        warmup_steps=WARMUP_STEPS,
        use_amp=USE_AMP,
    )
    print(f"Agent created (AMP: {USE_AMP})", flush=True)
    # Create the trainer
    trainer = ParallelTrainer(
        agent=agent,
        envs=envs,
        eval_env=eval_env,
        num_envs=N_ENVS,
        save_dir=SAVE_DIR,
        eval_freq=EVAL_FREQ,
        save_freq=SAVE_FREQ,
        num_eval_episodes=EVAL_EPISODES,
        warmup_steps=WARMUP_STEPS,
        train_steps_per_update=TRAIN_STEPS_PER_UPDATE,
    )
    # Print the configuration
    print("\n" + "=" * 60, flush=True)
    print(f"Starting {TOTAL_STEPS/1e6:.0f}M-step parallel training (fully optimized)", flush=True)
    print(f"  GPU: {device}", flush=True)
    print(f"  Parallel envs: {N_ENVS}", flush=True)
    print(f"  Batch size: {BATCH_SIZE}", flush=True)
    print(f"  Training updates per step: {TRAIN_STEPS_PER_UPDATE}", flush=True)
    print(f"  AMP mixed precision: {USE_AMP}", flush=True)
    print(f"  torch.compile: {USE_COMPILE}", flush=True)
    print(f"  Dueling: {USE_DUELING}", flush=True)
    print(f"  Double DQN: {USE_DOUBLE}", flush=True)
    print(f"  PER: {USE_PER}", flush=True)
    print("=" * 60 + "\n", flush=True)

    trainer.train(TOTAL_STEPS)
    # ── Evaluate the best model ──
    print("\nLoading best model...", flush=True)
    agent.load(f"{SAVE_DIR}/dqn_best.pt")
    print("\nEvaluating...", flush=True)
    all_rewards = []
    for i in range(20):
        state, _ = eval_env.reset()
        ep_r = 0
        done = False
        while not done:
            action = agent.select_action(state, evaluate=True)
            state, reward, terminated, truncated, _ = eval_env.step(action)
            done = terminated or truncated
            ep_r += reward
        all_rewards.append(ep_r)
        print(f"  Episode {i+1:>2}: {ep_r:.1f}", flush=True)
    print(f"\nResult: mean {np.mean(all_rewards):.2f} +/- {np.std(all_rewards):.2f}", flush=True)
    print(f"Best: {max(all_rewards):.1f} | Worst: {min(all_rewards):.1f}", flush=True)
    print(f"Median: {np.median(all_rewards):.1f}", flush=True)
if __name__ == "__main__":
    main()
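
The pinned-memory replay buffer and the vectorized add_batch mentioned in the commit message live in src/replay_buffer.py, which this excerpt does not show. Below is a minimal sketch of the idea, assuming uint8 NumPy frame batches and uniform (non-prioritized) sampling; the class name and signatures are illustrative, not the repository's actual API:

import numpy as np
import torch

class PinnedUniformReplayBuffer:
    """Illustrative uniform replay buffer with vectorized inserts and pinned-memory sampling."""

    def __init__(self, capacity, state_shape, device):
        self.capacity, self.device = capacity, device
        self.pos, self.size = 0, 0
        self.states = np.zeros((capacity, *state_shape), dtype=np.uint8)
        self.next_states = np.zeros((capacity, *state_shape), dtype=np.uint8)
        self.actions = np.zeros(capacity, dtype=np.int64)
        self.rewards = np.zeros(capacity, dtype=np.float32)
        self.dones = np.zeros(capacity, dtype=np.float32)

    def add_batch(self, states, actions, rewards, next_states, dones):
        # One vectorized write per array instead of a per-transition Python loop.
        n = len(states)
        idx = (self.pos + np.arange(n)) % self.capacity
        self.states[idx] = states
        self.actions[idx] = actions
        self.rewards[idx] = rewards
        self.next_states[idx] = next_states
        self.dones[idx] = dones
        self.pos = (self.pos + n) % self.capacity
        self.size = min(self.size + n, self.capacity)

    def sample(self, batch_size):
        idx = np.random.randint(0, self.size, size=batch_size)
        batch = (self.states[idx], self.actions[idx], self.rewards[idx],
                 self.next_states[idx], self.dones[idx])
        # Pinning the gathered batch lets the host-to-device copy run asynchronously.
        return tuple(torch.from_numpy(a).pin_memory().to(self.device, non_blocking=True)
                     for a in batch)

The repository's actual buffers (ReplayBuffer, PrioritizedReplayBuffer) take the same (capacity, state_shape, device) constructor arguments as shown in main() above, but their internals may differ from this sketch.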