Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Feature] SAC compatibility with composite distributions. #2447

Merged
merged 8 commits into from
Oct 11, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
test fixes
  • Loading branch information
albertbou92 authored and vmoens committed Oct 10, 2024
commit c936df85b5da558ddb50ab01347556bd7fe37566
56 changes: 41 additions & 15 deletions test/test_cost.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@
from mocking_classes import ContinuousActionConvMockEnv

# from torchrl.data.postprocs.utils import expand_as_right
from tensordict import assert_allclose_td, TensorDict
from tensordict import assert_allclose_td, TensorDict, TensorDictBase
from tensordict.nn import NormalParamExtractor, TensorDictModule
from tensordict.nn.utils import Buffer
from tensordict.utils import unravel_key
Expand Down Expand Up @@ -3465,9 +3465,6 @@ def _create_mock_actor(
distribution_map={
"action1": TanhNormal,
},
name_map={
"action1": (action_key, "action1"),
},
)
module_out_keys = [
("params", "action1", "loc"),
Expand Down Expand Up @@ -3505,7 +3502,7 @@ def __init__(self):
self.linear = nn.Linear(obs_dim + action_dim, 1)

def forward(self, obs, act):
    """Score an (observation, action) pair with a single linear layer.

    Args:
        obs: observation tensor, shape ``(..., obs_dim)``.
        act: either an action tensor of shape ``(..., action_dim)`` or a
            tensordict holding the composite action under key ``"action1"``.

    Returns:
        The Q-value estimate, shape ``(..., 1)``.
    """
    # The diff left both the old (TensorDict) and new (TensorDictBase) checks
    # stacked; keep the broader TensorDictBase check so any tensordict
    # subclass (e.g. LazyStackedTensorDict) is unwrapped correctly.
    if isinstance(act, TensorDictBase):
        act = act.get("action1")
    return self.linear(torch.cat([obs, act], -1))

Expand Down Expand Up @@ -3535,7 +3532,13 @@ def _create_mock_value(
return value.to(device)

def _create_mock_common_layer_setup(
self, n_obs=3, n_act=4, ncells=4, batch=2, n_hidden=2, composite_action_dist=False,
self,
n_obs=3,
n_act=4,
ncells=4,
batch=2,
n_hidden=2,
composite_action_dist=False,
):
common = MLP(
num_cells=ncells,
Expand Down Expand Up @@ -3653,7 +3656,14 @@ def _create_mock_data_sac(
return td

def _create_seq_mock_data_sac(
self, batch=8, T=4, obs_dim=3, action_dim=4, atoms=None, device="cpu", composite_action_dist=False
self,
batch=8,
T=4,
obs_dim=3,
action_dim=4,
atoms=None,
device="cpu",
composite_action_dist=False,
):
# create a tensordict
total_obs = torch.randn(batch, T + 1, obs_dim, device=device)
Expand Down Expand Up @@ -3710,9 +3720,13 @@ def test_sac(
pytest.skip("incompatible config")

torch.manual_seed(self.seed)
td = self._create_mock_data_sac(device=device, composite_action_dist=composite_action_dist)
td = self._create_mock_data_sac(
device=device, composite_action_dist=composite_action_dist
)

actor = self._create_mock_actor(device=device, composite_action_dist=composite_action_dist)
actor = self._create_mock_actor(
device=device, composite_action_dist=composite_action_dist
)
qvalue = self._create_mock_qvalue(device=device)
if version == 1:
value = self._create_mock_value(device=device)
Expand Down Expand Up @@ -3878,7 +3892,9 @@ def test_sac_state_dict(

torch.manual_seed(self.seed)

actor = self._create_mock_actor(device=device, composite_action_dist=composite_action_dist)
actor = self._create_mock_actor(
device=device, composite_action_dist=composite_action_dist
)
qvalue = self._create_mock_qvalue(device=device)
if version == 1:
value = self._create_mock_value(device=device)
Expand Down Expand Up @@ -3924,7 +3940,9 @@ def test_sac_separate_losses(
n_act=4,
):
torch.manual_seed(self.seed)
actor, qvalue, common, td = self._create_mock_common_layer_setup(n_act=n_act, composite_action_dist=composite_action_dist)
actor, qvalue, common, td = self._create_mock_common_layer_setup(
n_act=n_act, composite_action_dist=composite_action_dist
)

loss_fn = SACLoss(
actor_network=actor,
Expand Down Expand Up @@ -4025,9 +4043,13 @@ def test_sac_batcher(
if (delay_actor or delay_qvalue) and not delay_value:
pytest.skip("incompatible config")
torch.manual_seed(self.seed)
td = self._create_seq_mock_data_sac(device=device, composite_action_dist=composite_action_dist)
td = self._create_seq_mock_data_sac(
device=device, composite_action_dist=composite_action_dist
)

actor = self._create_mock_actor(device=device, composite_action_dist=composite_action_dist)
actor = self._create_mock_actor(
device=device, composite_action_dist=composite_action_dist
)
qvalue = self._create_mock_qvalue(device=device)
if version == 1:
value = self._create_mock_value(device=device)
Expand Down Expand Up @@ -4372,8 +4394,12 @@ def test_sac_reduction(self, reduction, version, composite_action_dist):
if torch.cuda.device_count() == 0
else torch.device("cuda")
)
td = self._create_mock_data_sac(device=device, composite_action_dist=composite_action_dist)
actor = self._create_mock_actor(device=device, composite_action_dist=composite_action_dist)
td = self._create_mock_data_sac(
device=device, composite_action_dist=composite_action_dist
)
actor = self._create_mock_actor(
device=device, composite_action_dist=composite_action_dist
)
qvalue = self._create_mock_qvalue(device=device)
if version == 1:
value = self._create_mock_value(device=device)
Expand Down
6 changes: 3 additions & 3 deletions torchrl/objectives/sac.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,21 +45,21 @@ def new_func(self, *args, **kwargs):

return new_func


def compute_log_prob(action_dist, action_or_tensordict, tensor_key):
    """Return the log-probability of an action under ``action_dist``.

    Supports both plain-tensor actions and composite actions carried in a
    tensordict (the SAC composite-distribution feature this PR adds).

    Args:
        action_dist: a distribution exposing ``log_prob``. For composite
            distributions, ``log_prob`` may return a tensordict instead of a
            tensor.
        action_or_tensordict: the sampled action — either a ``torch.Tensor``
            or a tensordict containing the composite action fields.
        tensor_key: key under which the aggregated log-prob is stored when
            ``log_prob`` returns a tensordict.

    Returns:
        torch.Tensor: the log-probability of the action.
    """
    if isinstance(action_or_tensordict, torch.Tensor):
        return action_dist.log_prob(action_or_tensordict)
    # Composite case: log_prob is called on the tensordict action itself.
    # (The diffed version called it on an undefined ``tensordict`` name and
    # left ``import ipdb; ipdb.set_trace()`` debugger traps behind — both
    # removed here.)
    maybe_log_prob = action_dist.log_prob(action_or_tensordict)
    if isinstance(maybe_log_prob, torch.Tensor):
        return maybe_log_prob
    # Some composite distributions return a tensordict of per-key log-probs;
    # the aggregated value lives under ``tensor_key``.
    return maybe_log_prob.get(tensor_key)


class SACLoss(LossModule):
"""TorchRL implementation of the SAC loss.

Expand Down