chore: update project docs, dependencies, and training scripts

- Update requirements.txt: add opencv-python-headless and document the uv install option
- Normalize line endings in the CSV files (CRLF → LF)
- Update TASK_PROGRESS.md to document the parallel-training implementation and WSL support
- Clean up the formatting of train_improved.py, removing redundant blank lines and comments
- Fix the character encoding of the course assignment requirements document
- Add new TensorBoard log files and trained models
2026-05-01 09:26:23 +08:00
parent 6b929e9790
commit d6860f1f15
16 changed files with 25712 additions and 25680 deletions
+38 -17
@@ -26,6 +26,9 @@
| ✅ Env preprocessing | Grayscale + resize (84×84) + 4-frame stacking wrappers | [src/utils.py](src/utils.py) |
| ✅ Evaluation script | Rendered rollout test + multi-episode mean score | [src/evaluate.py](src/evaluate.py) |
| ✅ Training entry point | Main training loop, TensorBoard logging, model checkpointing | [train.py](train.py) |
| ✅ Parallel training | Multi-env parallel rollout collection + WSL support | [train_parallel.py](train_parallel.py) |
| ✅ WSL scripts | Environment setup + launch scripts | [setup_wsl.sh](setup_wsl.sh), [run_wsl.sh](run_wsl.sh), [start_wsl_training.bat](start_wsl_training.bat) |
| ✅ Test script | Quick sanity check of the parallel envs and networks | [test_parallel.py](test_parallel.py) |
**Core algorithm implementation notes**
- Policy network: 3-layer CNN + FC(512) → μ, σ (Gaussian policy, tanh activation); a minimal sketch follows below
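A minimal, illustrative sketch of such a Gaussian policy head, assuming a 512-dim feature vector and a tanh-squashed mean; the repo's actual `Actor` (see the train_improved.py diff below) differs in details such as LeakyReLU features and action clamping:
```python
import torch
import torch.nn as nn

class GaussianPolicyHead(nn.Module):
    """Illustrative policy head: 512-dim features -> (mu, std) for 3 continuous actions."""

    def __init__(self, feat_dim: int = 512, action_dim: int = 3):
        super().__init__()
        self.mu_head = nn.Linear(feat_dim, action_dim)
        self.log_std_head = nn.Linear(feat_dim, action_dim)

    def forward(self, feat: torch.Tensor):
        mu = torch.tanh(self.mu_head(feat))              # keep means in [-1, 1]
        log_std = self.log_std_head(feat).clamp(-5, 2)   # bound std for numerical stability
        return mu, log_std.exp()

# Sampling an action from the resulting diagonal Gaussian:
#   mu, std = head(features)
#   dist = torch.distributions.Normal(mu, std)
#   action = dist.sample().clamp(-1, 1)
```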
@@ -60,36 +63,54 @@
│ ├── trainer.py # PPO update logic
│ ├── utils.py # environment preprocessing wrappers
│ └── evaluate.py # evaluation script
├── train.py # training entry point
├── train.py # single-process training entry point
├── train_parallel.py # multi-env parallel training (recommended)
├── setup_wsl.sh # WSL environment setup
├── run_wsl.sh # WSL training launch script
├── start_wsl_training.bat # one-click WSL training launcher for Windows
├── test_parallel.py # parallel training test
├── requirements.txt
├── README.md
└── TASK_PROGRESS.md # this document
├── WSL_README.md # WSL training guide
└── TASK_PROGRESS.md # this document
```
---
## 4. Hyperparameter configuration
| Parameter | Value |
|------|-----|
| Learning rate | 3e-4 |
| Gamma | 0.99 |
| GAE lambda | 0.95 |
| Clip epsilon | 0.2 |
| PPO epochs | 4 |
| Mini-batch size | 64 |
| Rollout steps | 2048 |
| Entropy coefficient | 0.01 |
| Value coefficient | 0.5 |
| Max gradient norm | 0.5 |
| State shape | (84, 84, 4) |
| Action dim | 3 (continuous: steer, gas, brake) |
| Parameter | train.py (single-process) | train_parallel.py (parallel) |
|------|-------------------|--------------------------|
| Learning rate | 3e-4 | 3e-4 |
| Gamma | 0.99 | 0.99 |
| GAE lambda | 0.95 | 0.98 |
| Clip epsilon | 0.2 | 0.1 |
| PPO epochs | 4 | 10 |
| Mini-batch size | 64 | 128 |
| Rollout steps | 2048 | 2048 |
| Entropy coefficient | 0.01 | 0.005 |
| Value coefficient | 0.5 | 0.75 |
| Max gradient norm | 0.5 | 0.5 |
| Total steps | 500,000 | 2,000,000 |
| Num envs | 1 | 4 |
| Estimated duration | ~8 h | ~5 h (4x) |
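For reference, a minimal sketch of how four parallel CarRacing environments could be created with gymnasium's vector API; this is an assumption for illustration, since the actual wrapper stack lives in train_parallel.py, which is not shown in this diff:
```python
import gymnasium as gym

def make_env():
    # train_parallel.py presumably adds its own preprocessing wrappers here
    return lambda: gym.make("CarRacing-v3", continuous=True)

if __name__ == "__main__":
    # 4 worker processes, one CarRacing instance each
    envs = gym.vector.AsyncVectorEnv([make_env() for _ in range(4)])
    obs, info = envs.reset(seed=0)
    print(obs.shape)  # (4, 96, 96, 3) before any preprocessing
    envs.close()
```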
---
## 5. Next steps
### Immediate actions
### Option A: WSL parallel training (recommended)
```bash
# On Windows, double-click start_wsl_training.bat
# or manually:
wsl
cd "/mnt/d/Code/doing_exercises/programs/外教作业外快/强化学习个人项目报告"
chmod +x setup_wsl.sh run_wsl.sh
./setup_wsl.sh # first run only
./run_wsl.sh # start training
```
### Option B: Windows single-process training
```bash
# 1. Install dependencies
uv pip install --system -r requirements.txt
@@ -2,4 +2,9 @@ torch
gymnasium[box2d]
numpy
matplotlib
tensorboard
tensorboard
opencv-python-headless
# Optional: install with uv instead:
# curl -LsSf https://astral.sh/uv/install.sh | sh
# uv pip install -r requirements.txt
+136 -130
@@ -1,4 +1,5 @@
"""Improved training script with reward shaping and better hyperparameters."""
"""Improved training script for CarRacing-v3 PPO with reward shaping."""
import os
import time
import argparse
@@ -12,36 +13,34 @@ import cv2
class RewardShapingWrapper(gym.Wrapper):
"""Add reward shaping for better learning."""
def __init__(self, env):
super().__init__(env)
self.steps_on_track = 0
def reset(self, **kwargs):
obs, info = self.env.reset(**kwargs)
self.steps_on_track = 0
return obs, info
def step(self, action):
obs, reward, terminated, truncated, info = self.env.step(action)
done = terminated or truncated
shaped_reward = reward
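# Shaping bonuses below read optional info keys: the speed and lap bonuses only fire
# when the wrapped env actually provides 'speed'/'lap_complete'; a missing 'offtrack'
# key is treated as "on track" (+0.1 per step).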
if info.get('speed', 0) > 0.1:
shaped_reward += info['speed'] * 0.1
if not info.get('offtrack', False):
if info.get("speed", 0) > 0.1:
shaped_reward += info["speed"] * 0.1
if not info.get("offtrack", False):
shaped_reward += 0.1
self.steps_on_track += 1
else:
shaped_reward -= 0.5
self.steps_on_track = 0
if info.get('lap_complete', False):
if info.get("lap_complete", False):
shaped_reward += 100
return obs, shaped_reward, terminated, truncated, info
@@ -70,9 +69,7 @@ class FrameStackWrapper(gym.ObservationWrapper):
self.frames = deque(maxlen=num_stack)
obs_shape = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0, high=255,
shape=(num_stack, *obs_shape[-2:]),
dtype=np.uint8
low=0, high=255, shape=(num_stack, *obs_shape[-2:]), dtype=np.uint8
)
def reset(self, **kwargs):
@@ -115,7 +112,7 @@ class Actor(nn.Module):
def __init__(self, state_shape=(84, 84, 4), action_dim=3):
super().__init__()
c, h, w = state_shape[2], state_shape[0], state_shape[1]
self.conv = nn.Sequential(
nn.Conv2d(c, 32, kernel_size=8, stride=4),
nn.LeakyReLU(0.2),
@@ -126,28 +123,28 @@ class Actor(nn.Module):
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.LeakyReLU(0.2),
)
out_h = (h - 8) // 4 + 1
out_h = (out_h - 4) // 2 + 1
out_h = (out_h - 3) // 1 + 1
feat_size = 64 * out_h * out_h
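# With 84x84 inputs: 84 -> 20 -> 9 -> 7 per conv layer, so feat_size = 64 * 7 * 7 = 3136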
self.fc = nn.Sequential(
nn.Linear(feat_size, 512),
nn.LeakyReLU(0.2),
)
self.mu_head = nn.Linear(512, action_dim)
self.log_std_head = nn.Linear(512, action_dim)
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.orthogonal_(self.mu_head.weight, gain=0.01)
nn.init.orthogonal_(self.log_std_head.weight, gain=0.01)
def forward(self, x):
x = x / 255.0
x = self.conv(x)
@@ -162,7 +159,7 @@ class Critic(nn.Module):
def __init__(self, state_shape=(84, 84, 4)):
super().__init__()
c, h, w = state_shape[2], state_shape[0], state_shape[1]
self.conv = nn.Sequential(
nn.Conv2d(c, 32, kernel_size=8, stride=4),
nn.LeakyReLU(0.2),
@@ -173,24 +170,20 @@ class Critic(nn.Module):
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.LeakyReLU(0.2),
)
out_h = (h - 8) // 4 + 1
out_h = (out_h - 4) // 2 + 1
out_h = (out_h - 3) // 1 + 1
feat_size = 64 * out_h * out_h
self.fc = nn.Sequential(
nn.Linear(feat_size, 512),
nn.LeakyReLU(0.2),
nn.Linear(512, 1)
)
self.fc = nn.Sequential(nn.Linear(feat_size, 512), nn.LeakyReLU(0.2), nn.Linear(512, 1))
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = x / 255.0
x = self.conv(x)
@@ -203,14 +196,14 @@ class RolloutBuffer:
self.buffer_size = buffer_size
self.ptr = 0
self.size = 0
self.states = np.zeros((buffer_size, *state_shape), dtype=np.uint8)
self.actions = np.zeros((buffer_size, action_dim), dtype=np.float32)
self.rewards = np.zeros(buffer_size, dtype=np.float32)
self.dones = np.zeros(buffer_size, dtype=np.bool_)
self.values = np.zeros(buffer_size, dtype=np.float32)
self.log_probs = np.zeros(buffer_size, dtype=np.float32)
def add(self, state, action, reward, done, value, log_prob):
self.states[self.ptr] = state
self.actions[self.ptr] = action
@@ -220,34 +213,34 @@ class RolloutBuffer:
self.log_probs[self.ptr] = log_prob
self.ptr = (self.ptr + 1) % self.buffer_size
self.size = min(self.size + 1, self.buffer_size)
def compute_returns(self, last_value, gamma=0.99, gae_lambda=0.98):
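# Generalized Advantage Estimation, computed backwards over the rollout:
#   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
#   A_t     = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}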
advantages = np.zeros(self.size, dtype=np.float32)
last_gae = 0
for t in reversed(range(self.size)):
if t == self.size - 1:
next_value = last_value
else:
next_value = self.values[t + 1]
delta = self.rewards[t] + gamma * next_value * (1 - self.dones[t]) - self.values[t]
last_gae = delta + gamma * gae_lambda * (1 - self.dones[t]) * last_gae
advantages[t] = last_gae
returns = advantages + self.values[:self.size]
returns = advantages + self.values[: self.size]
return returns, advantages
def get(self):
return (
self.states[:self.size],
self.actions[:self.size],
self.rewards[:self.size],
self.dones[:self.size],
self.values[:self.size],
self.log_probs[:self.size],
self.states[: self.size],
self.actions[: self.size],
self.rewards[: self.size],
self.dones[: self.size],
self.values[: self.size],
self.log_probs[: self.size],
)
def reset(self):
self.ptr = 0
self.size = 0
@@ -282,55 +275,53 @@ class PPOTrainer:
self.max_grad_norm = max_grad_norm
self.ppo_epochs = ppo_epochs
self.mini_batch_size = mini_batch_size
self.actor_optim = torch.optim.Adam(actor.parameters(), lr=lr, eps=1e-5)
self.critic_optim = torch.optim.Adam(critic.parameters(), lr=lr, eps=1e-5)
self.total_updates = 0
def update(self, last_value):
states, actions, rewards, dones, values, log_probs_old = self.buffer.get()
returns, advantages = self.buffer.compute_returns(last_value, self.gamma, self.gae_lambda)
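# Normalize advantages over the whole rollout so the policy-gradient scale stays stable.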
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
states_t = torch.from_numpy(states).float().permute(0, 3, 1, 2).to(self.device)
actions_t = torch.from_numpy(actions).float().to(self.device)
log_probs_old_t = torch.from_numpy(log_probs_old).float().to(self.device)
returns_t = torch.from_numpy(returns).float().to(self.device)
advantages_t = torch.from_numpy(advantages).float().to(self.device)
dataset = torch.utils.data.TensorDataset(
states_t, actions_t, log_probs_old_t, returns_t, advantages_t
)
loader = torch.utils.data.DataLoader(dataset, batch_size=self.mini_batch_size, shuffle=True)
total_actor_loss = 0
total_critic_loss = 0
total_entropy = 0
count = 0
for _ in range(self.ppo_epochs):
for batch in loader:
s, a, log_pi_old, ret, adv = batch
mu, std = self.actor(s)
dist = torch.distributions.Normal(mu, std)
log_pi = dist.log_prob(a).sum(dim=-1)
entropy = dist.entropy().sum(dim=-1)
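# PPO clipped surrogate: maximize min(ratio * A, clip(ratio, 1 - eps, 1 + eps) * A)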
ratio = torch.exp(log_pi - log_pi_old)
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1 - self.clip_eps, 1 + self.clip_eps) * adv
actor_loss = -torch.min(surr1, surr2).mean()
value = self.critic(s)
critic_loss = nn.MSELoss()(value.squeeze(), ret)
loss = actor_loss + self.vf_coef * critic_loss - self.ent_coef * entropy.mean()
self.actor_optim.zero_grad()
self.critic_optim.zero_grad()
loss.backward()
@@ -338,18 +329,16 @@ class PPOTrainer:
nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm)
self.actor_optim.step()
self.critic_optim.step()
total_actor_loss += actor_loss.item()
total_critic_loss += critic_loss.item()
total_entropy += entropy.mean().item()
count += 1
self.total_updates += 1
avg_actor = total_actor_loss / count
avg_critic = total_critic_loss / count
avg_entropy = total_entropy / count
self.buffer.reset()
return avg_actor, avg_critic, avg_entropy
@@ -357,10 +346,10 @@ class PPOTrainer:
def collect_rollout(actor, critic, env, buffer, device, rollout_steps):
obs, _ = env.reset()
obs = np.transpose(obs, (1, 2, 0))
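# Observations are kept as (H, W, C) in the buffer and permuted to (N, C, H, W) for the networks.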
for step in range(rollout_steps):
obs_t = torch.from_numpy(obs).float().unsqueeze(0).permute(0, 3, 1, 2).to(device)
with torch.no_grad():
mu, std = actor(obs_t)
dist = torch.distributions.Normal(mu, std)
@@ -368,27 +357,27 @@ def collect_rollout(actor, critic, env, buffer, device, rollout_steps):
action = torch.clamp(action, -1, 1)
log_prob = dist.log_prob(action).sum(dim=-1)
value = critic(obs_t).squeeze(0).item()
action_np = action.squeeze(0).cpu().numpy()
log_prob_np = log_prob.squeeze(0).cpu().numpy()
next_obs, reward, terminated, truncated, _ = env.step(action_np)
done = terminated or truncated
next_obs_stored = np.transpose(next_obs, (1, 2, 0))
buffer.add(obs.copy(), action_np, reward, done, value, log_prob_np)
obs = next_obs_stored
if done:
obs, _ = env.reset()
obs = np.transpose(obs, (1, 2, 0))
return obs
def train_improved(
def train(
total_steps=2000000,
rollout_steps=2048,
eval_interval=10,
@@ -397,22 +386,22 @@ def train_improved(
):
if device is None:
device = get_device()
env = make_env()
eval_env = make_env()
state_shape = (84, 84, 4)
action_dim = 3
actor = Actor(state_shape=state_shape, action_dim=action_dim).to(device)
critic = Critic(state_shape=state_shape).to(device)
buffer = RolloutBuffer(
buffer_size=rollout_steps,
state_shape=state_shape,
action_dim=action_dim,
)
trainer = PPOTrainer(
actor=actor,
critic=critic,
@@ -428,46 +417,48 @@ def train_improved(
ppo_epochs=10,
mini_batch_size=128,
)
log_dir = os.path.join("logs", "tensorboard", f"run_improved_{int(time.time())}")
writer = SummaryWriter(log_dir)
print(f"Training on {device}")
print(f"Log directory: {log_dir}")
print("Improvements: LeakyReLU, BatchNorm, He init, Reward shaping, LR decay, More epochs")
print("Improvements: LeakyReLU, BatchNorm, He init, Reward shaping, More epochs")
episode = 0
total_timesteps = 0
episode_rewards = []
best_eval = -float('inf')
best_eval = -float("inf")
while total_timesteps < total_steps:
obs = collect_rollout(actor, critic, env, buffer, device, rollout_steps)
with torch.no_grad():
obs_t = torch.from_numpy(obs).float().unsqueeze(0).permute(0, 3, 1, 2).to(device)
last_value = critic(obs_t).squeeze(0).item()
actor_loss, critic_loss, entropy = trainer.update(last_value)
writer.add_scalar("Loss/Actor", actor_loss, total_timesteps)
writer.add_scalar("Loss/Critic", critic_loss, total_timesteps)
writer.add_scalar("Loss/Entropy", entropy, total_timesteps)
total_timesteps += rollout_steps
episode += 1
ep_reward = buffer.rewards[:buffer.size].sum()
ep_reward = buffer.rewards[: buffer.size].sum()
episode_rewards.append(ep_reward)
recent_rewards = episode_rewards[-10:] if len(episode_rewards) >= 10 else episode_rewards
avg_reward = np.mean(recent_rewards)
writer.add_scalar("Reward/Episode", ep_reward, total_timesteps)
writer.add_scalar("Reward/AvgLast10", avg_reward, total_timesteps)
print(f"Episode {episode}, steps {total_timesteps}, ep_reward={ep_reward:.1f}, avg_10={avg_reward:.1f}")
print(
f"Episode {episode}, steps {total_timesteps}, ep_reward={ep_reward:.1f}, avg_10={avg_reward:.1f}"
)
if episode % eval_interval == 0:
eval_returns = []
for _ in range(5):
@@ -475,54 +466,69 @@ def train_improved(
eval_obs = np.transpose(eval_obs, (1, 2, 0))
eval_reward = 0
done = False
while not done:
with torch.no_grad():
eval_obs_t = torch.from_numpy(eval_obs).float().unsqueeze(0).permute(0, 3, 1, 2).to(device)
eval_obs_t = (
torch.from_numpy(eval_obs)
.float()
.unsqueeze(0)
.permute(0, 3, 1, 2)
.to(device)
)
mu, std = actor(eval_obs_t)
action = torch.clamp(mu, -1, 1).squeeze(0).cpu().numpy()
eval_obs, reward, terminated, truncated, _ = eval_env.step(action)
eval_obs = np.transpose(eval_obs, (1, 2, 0))
eval_reward += reward
done = terminated or truncated
eval_returns.append(eval_reward)
mean_eval = np.mean(eval_returns)
writer.add_scalar("Eval/MeanReturn", mean_eval, episode)
print(f" Eval: mean_return={mean_eval:.2f}")
if mean_eval > best_eval:
best_eval = mean_eval
os.makedirs("models", exist_ok=True)
torch.save({
torch.save(
{
"actor": actor.state_dict(),
"critic": critic.state_dict(),
"episode": episode,
"timesteps": total_timesteps,
"best_eval": best_eval,
},
os.path.join("models", "ppo_improved_best.pt"),
)
print(f" New best model saved! eval={best_eval:.2f}")
if episode % save_interval == 0:
os.makedirs("models", exist_ok=True)
torch.save(
{
"actor": actor.state_dict(),
"critic": critic.state_dict(),
"episode": episode,
"timesteps": total_timesteps,
"best_eval": best_eval,
}, os.path.join("models", "ppo_improved_best.pt"))
print(f" New best model saved! eval={best_eval:.2f}")
if episode % save_interval == 0:
os.makedirs("models", exist_ok=True)
torch.save({
"actor": actor.state_dict(),
"critic": critic.state_dict(),
"episode": episode,
"timesteps": total_timesteps,
}, os.path.join("models", f"ppo_improved_ep{episode}.pt"))
},
os.path.join("models", f"ppo_improved_ep{episode}.pt"),
)
print(f" Saved model at episode {episode}")
os.makedirs("models", exist_ok=True)
torch.save({
"actor": actor.state_dict(),
"critic": critic.state_dict(),
"episode": episode,
"timesteps": total_timesteps,
"best_eval": best_eval,
}, os.path.join("models", "ppo_improved_final.pt"))
torch.save(
{
"actor": actor.state_dict(),
"critic": critic.state_dict(),
"episode": episode,
"timesteps": total_timesteps,
"best_eval": best_eval,
},
os.path.join("models", "ppo_improved_final.pt"),
)
writer.close()
env.close()
eval_env.close()
@@ -534,6 +540,6 @@ if __name__ == "__main__":
parser.add_argument("--steps", type=int, default=2000000, help="Total training steps")
parser.add_argument("--rollout", type=int, default=2048, help="Rollout buffer size")
args = parser.parse_args()
device = get_device()
train_improved(total_steps=args.steps, rollout_steps=args.rollout, device=device)
train(total_steps=args.steps, rollout_steps=args.rollout, device=device)