[BugFix] Allow for composite action distributions in PPO/A2C losses #2391

Merged · 19 commits · Sep 4, 2024
format
albertbou92 committed Aug 14, 2024
commit 8f00828b2b10946e15f8dfdb9fba28675d3961db
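
This commit belongs to a PR whose goal is to let the PPO and A2C losses work with actors that emit a composite action distribution, i.e. one distribution object spanning several action entries. As a point of reference, the sketch below shows what such a distribution looks like with `tensordict.nn.CompositeDistribution`. It is illustrative only, not part of the diff: the key names and shapes are assumptions, and the API shown matches tensordict as of mid-2024.

```python
# Illustrative sketch, not part of this commit: building and sampling a
# composite action distribution. Key names and shapes are assumptions;
# the API shown matches tensordict as of mid-2024.
import torch
import torch.distributions as d
from tensordict import TensorDict
from tensordict.nn import CompositeDistribution

params = TensorDict(
    {
        "action1": {"loc": torch.zeros(4, 2), "scale": torch.ones(4, 2)},
        "action2": {"logits": torch.zeros(4, 3)},
    },
    batch_size=[4],
)
dist = CompositeDistribution(
    params,
    distribution_map={"action1": d.Normal, "action2": d.Categorical},
)
sample = dist.sample()  # a TensorDict with "action1" and "action2" entries
sample = dist.log_prob(sample)  # writes per-key and aggregate log-prob entries
```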
test/test_cost.py (14 changes: 11 additions & 3 deletions)
@@ -8392,7 +8392,12 @@ def test_ppo_tensordict_keys_run(
     @pytest.mark.parametrize("reward_key", ["reward", "reward2"])
     @pytest.mark.parametrize("done_key", ["done", "done2"])
     @pytest.mark.parametrize("terminated_key", ["terminated", "terminated2"])
-    @pytest.mark.parametrize("composite_action_dist", [False, ])
+    @pytest.mark.parametrize(
+        "composite_action_dist",
+        [
+            False,
+        ],
+    )
     def test_ppo_notensordict(
         self,
         loss_class,
@@ -8457,6 +8462,7 @@ def test_ppo_notensordict(
         loss_val = loss(**kwargs)
         torch.manual_seed(self.seed)
         if beta is not None:
+
             loss.beta = beta.clone()
         loss_val_td = loss(td)

@@ -9232,19 +9238,21 @@ def test_a2c_tensordict_keys_run(
     @pytest.mark.parametrize("reward_key", ["reward", "reward2"])
     @pytest.mark.parametrize("done_key", ["done", "done2"])
     @pytest.mark.parametrize("terminated_key", ["terminated", "terminated2"])
+    @pytest.mark.parametrize("composite_action_dist", [False, ])
     def test_a2c_notensordict(
-        self, action_key, observation_key, reward_key, done_key, terminated_key
+        self, action_key, observation_key, reward_key, done_key, terminated_key, composite_action_dist
     ):
         torch.manual_seed(self.seed)

-        actor = self._create_mock_actor(observation_key=observation_key)
+        actor = self._create_mock_actor(observation_key=observation_key, composite_action_dist=composite_action_dist)
         value = self._create_mock_value(observation_key=observation_key)
         td = self._create_seq_mock_data_a2c(
             action_key=action_key,
             observation_key=observation_key,
             reward_key=reward_key,
             done_key=done_key,
             terminated_key=terminated_key,
+            composite_action_dist=composite_action_dist,
         )

         loss = A2CLoss(actor, value)
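
The `_create_mock_actor(..., composite_action_dist=...)` helper exercised above switches the mock policy between a regular and a composite distribution. Below is a hedged sketch of how such an actor can be wired up; the actual helper in `test_cost.py` may differ, and the key names (`"params"`, `("action", "action1")`), the tiny network, and the shapes are all illustrative assumptions.

```python
# Hedged sketch of a composite-distribution actor in the spirit of the
# parametrized tests above; key names, shapes, and the network are
# illustrative assumptions, not copied from test_cost.py.
import functools

import torch
import torch.distributions as d
import torch.nn as nn
from tensordict import TensorDict
from tensordict.nn import CompositeDistribution, TensorDictModule
from torchrl.modules import ProbabilisticActor


class LocScale(nn.Module):
    def __init__(self, obs_dim: int, act_dim: int):
        super().__init__()
        self.loc = nn.Linear(obs_dim, act_dim)
        self.scale = nn.Linear(obs_dim, act_dim)

    def forward(self, obs):
        # Positive scale via softplus so Normal(loc, scale) stays valid.
        return self.loc(obs), nn.functional.softplus(self.scale(obs))


module = TensorDictModule(
    LocScale(obs_dim=3, act_dim=2),
    in_keys=["observation"],
    out_keys=[("params", "action1", "loc"), ("params", "action1", "scale")],
)
actor = ProbabilisticActor(
    module,
    in_keys=["params"],
    out_keys=[("action", "action1")],
    distribution_class=functools.partial(
        CompositeDistribution,
        distribution_map={"action1": d.Normal},
        name_map={"action1": ("action", "action1")},
    ),
    return_log_prob=True,
)

td = actor(TensorDict({"observation": torch.randn(4, 3)}, batch_size=[4]))
```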
torchrl/objectives/ppo.py (1 change: 0 additions & 1 deletion)
@@ -465,7 +465,6 @@ def _log_weight(
             self.actor_network
         ) if self.functional else contextlib.nullcontext():
             dist = self.actor_network.get_dist(tensordict)
-            # dist = TransformedDistribution(dist, ExpTransform())

         def check_requires_grad(tensor, key=self.tensor_keys.action):
             if tensor.requires_grad:
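
For orientation, `_log_weight` is where PPO forms the importance ratio log pi_new(a|s) - log pi_old(a|s). With a composite distribution the action is itself a TensorDict, so the log-prob comes back as tensordict entries rather than a single tensor. A hedged sketch of that branch follows; the `"sample_log_prob"` key and the return type of `log_prob` follow tensordict conventions as of mid-2024 and are assumptions, not quotes from this diff.

```python
# Hedged sketch of the core of PPO's _log_weight with composite-distribution
# support; key names and log_prob semantics are assumptions based on
# tensordict as of mid-2024, not code copied from this PR.
import torch
from tensordict import TensorDictBase
from tensordict.nn import CompositeDistribution


def log_weight_sketch(dist, tensordict: TensorDictBase) -> torch.Tensor:
    # Log-prob of the stored action under the behavior (data-collection) policy.
    prev_log_prob = tensordict.get("sample_log_prob")
    if isinstance(dist, CompositeDistribution):
        # The sample is a TensorDict; log_prob returns a TensorDict carrying
        # an aggregated "sample_log_prob" entry.
        log_prob = dist.log_prob(tensordict).get("sample_log_prob")
    else:
        log_prob = dist.log_prob(tensordict.get("action"))
    # log(pi_new / pi_old): exponentiated later to form the PPO ratio.
    return log_prob - prev_log_prob
```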