Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Algorithm] Update TD3 Example #1523

Merged
merged 35 commits into from
Oct 3, 2023
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
6339a07
update executable
BY571 Sep 6, 2023
9e890b3
fix objective
BY571 Sep 7, 2023
117c477
fix objective
BY571 Sep 7, 2023
d2b3ad4
Update initial frames and general structure
BY571 Sep 12, 2023
9c6c358
fixes
BY571 Sep 12, 2023
1adbff5
Merge branch 'main' into td3_benchmark
BY571 Sep 12, 2023
2422ef8
naming fix
BY571 Sep 12, 2023
0e67de2
single step td3
BY571 Sep 13, 2023
1fc0847
small fixes
BY571 Sep 14, 2023
7a02b83
fix
BY571 Sep 14, 2023
243d712
add update counter
BY571 Sep 14, 2023
af31bd9
naming fixes
BY571 Sep 14, 2023
1122808
update logging and small fixes
BY571 Sep 15, 2023
b4df32b
no eps
BY571 Sep 18, 2023
13f367a
update tests
BY571 Sep 19, 2023
72ddf7e
update objective
BY571 Sep 20, 2023
c830891
set gym backend
BY571 Sep 20, 2023
1a2f08e
Merge branch 'main' into td3_benchmark
vmoens Sep 21, 2023
4cdbb3b
update tests
BY571 Sep 21, 2023
76dcdeb
update fix max episode steps
BY571 Sep 22, 2023
68d4c26
Merge branch 'main' into td3_benchmark
BY571 Sep 26, 2023
ec8b089
fix
BY571 Sep 27, 2023
bcc3bc6
fix
BY571 Sep 27, 2023
42748e0
amend
vmoens Sep 28, 2023
0052cd9
Merge remote-tracking branch 'BY571/td3_benchmark' into td3_benchmark
vmoens Sep 28, 2023
e2c28c8
amend
vmoens Sep 28, 2023
bb496ef
update scratch_dir, frame skip, config
BY571 Sep 28, 2023
9b4704b
Merge branch 'main' into td3_benchmark
BY571 Oct 2, 2023
e622bf7
merge main
BY571 Oct 2, 2023
57bc54a
merge main
BY571 Oct 2, 2023
29977df
step counter
BY571 Oct 2, 2023
854e2a2
merge main
BY571 Oct 3, 2023
619f2ea
small fixes
BY571 Oct 3, 2023
8d36787
solve logger issue
vmoens Oct 3, 2023
a24ab8d
reset notensordict test
vmoens Oct 3, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
update logging and small fixes
  • Loading branch information
BY571 committed Sep 15, 2023
commit 112280874d747102b8bdf524d7e64766b85261e9
4 changes: 2 additions & 2 deletions examples/td3/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
env:
name: HalfCheetah-v3
task: ""
exp_name: "HalfCheetah-TD3-ICLR"
exp_name: "HalfCheetah-TD3"
library: gym
frame_skip: 1
seed: 42
Expand All @@ -12,7 +12,7 @@ collector:
total_frames: 3000000
init_random_frames: 25_000
init_env_steps: 1000
frames_per_batch: 1
frames_per_batch: 1000
max_frames_per_traj: 1000
async_collection: 1
collector_device: cpu
Expand Down
32 changes: 16 additions & 16 deletions examples/td3/td3.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@

from torchrl.record.loggers import generate_exp_name, get_logger
from utils import (
log_metrics,
make_collector,
make_environment,
make_loss_module,
Expand Down Expand Up @@ -116,7 +117,11 @@ def main(cfg: "DictConfig"): # noqa: F821
q_losses,
) = ([], [])
for _ in range(num_updates):

# Update actor every delayed_updates
update_counter += 1
update_actor = update_counter % delayed_updates == 0

# Sample from replay buffer
sampled_tensordict = replay_buffer.sample().clone()

Expand All @@ -128,7 +133,6 @@ def main(cfg: "DictConfig"): # noqa: F821

# Update critic
optimizer_critic.zero_grad()
update_actor = update_counter % delayed_updates == 0
q_loss.backward(retain_graph=update_actor)
optimizer_critic.step()
q_losses.append(q_loss.item())
Expand All @@ -154,27 +158,22 @@ def main(cfg: "DictConfig"): # noqa: F821
]

# Logging
metrics_to_log = {}
if len(episode_rewards) > 0:
episode_length = tensordict["next", "step_count"][
tensordict["next", "done"]
]
logger.log_scalar(
"train/reward", episode_rewards.mean().item(), collected_frames
)
logger.log_scalar(
"train/episode_length",
episode_length.sum().item() / len(episode_length),
collected_frames,
metrics_to_log["train/reward"] = episode_rewards.mean().item()
metrics_to_log["train/episode_length"] = episode_length.sum().item() / len(
episode_length
)

if collected_frames >= init_random_frames:
logger.log_scalar("train/q_loss", np.mean(q_losses), step=collected_frames)
metrics_to_log["train/q_loss"] = np.mean(q_losses)
if update_actor:
logger.log_scalar(
"train/a_loss", np.mean(actor_losses), step=collected_frames
)
logger.log_scalar("train/sampling_time", sampling_time, collected_frames)
logger.log_scalar("train/training_time", training_time, collected_frames)
metrics_to_log["train/a_loss"] = np.mean(actor_losses)
metrics_to_log["train/sampling_time"] = sampling_time
metrics_to_log["train/training_time"] = training_time

# Evaluation
if abs(collected_frames % eval_iter) < frames_per_batch * frame_skip:
Expand All @@ -188,9 +187,10 @@ def main(cfg: "DictConfig"): # noqa: F821
)
eval_time = time.time() - eval_start
eval_reward = eval_rollout["next", "reward"].sum(-2).mean().item()
logger.log_scalar("eval/reward", eval_reward, step=collected_frames)
logger.log_scalar("eval/time", eval_time, step=collected_frames)
metrics_to_log["eval/reward"] = eval_reward
metrics_to_log["eval/time"] = eval_time

log_metrics(logger, metrics_to_log, collected_frames)
sampling_start = time.time()

collector.shutdown()
Expand Down
32 changes: 21 additions & 11 deletions examples/td3/utils.py
BY571 marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -131,17 +131,6 @@ def make_replay_buffer(
# -----


def get_activation(cfg):
if cfg.network.activation == "relu":
return nn.ReLU
elif cfg.network.activation == "tanh":
return nn.Tanh
elif cfg.network.activation == "leaky_relu":
return nn.LeakyReLU
else:
raise NotImplementedError


def make_td3_agent(cfg, train_env, eval_env, device):
"""Make TD3 agent."""
# Define Actor Network
Expand Down Expand Up @@ -253,3 +242,24 @@ def make_optimizer(cfg, loss_module):
weight_decay=cfg.optim.weight_decay,
)
return optimizer_actor, optimizer_critic


# ====================================================================
# General utils
# ---------


def log_metrics(logger, metrics, step):
    """Log every metric in ``metrics`` to ``logger`` at the given ``step``.

    Args:
        logger: Object exposing ``log_scalar(name, value, step)`` (e.g. a
            torchrl logger returned by ``get_logger``).
        metrics: Mapping from metric name to scalar value.
        step: Global step (e.g. collected frames) to tag each scalar with.
    """
    for name in metrics:
        logger.log_scalar(name, metrics[name], step)


def get_activation(cfg):
    """Return the activation-module class selected by ``cfg.network.activation``.

    Args:
        cfg: Config object exposing ``cfg.network.activation`` as one of
            ``"relu"``, ``"tanh"`` or ``"leaky_relu"``.

    Returns:
        The corresponding ``torch.nn`` module class (not an instance).

    Raises:
        NotImplementedError: If the configured activation is not supported;
            the message names the offending value to ease debugging.
    """
    # Lookup table is simpler than an if/elif chain and trivially extensible.
    activations = {
        "relu": nn.ReLU,
        "tanh": nn.Tanh,
        "leaky_relu": nn.LeakyReLU,
    }
    try:
        return activations[cfg.network.activation]
    except KeyError:
        raise NotImplementedError(
            f"Unsupported activation {cfg.network.activation!r}; "
            f"expected one of {sorted(activations)}."
        ) from None