[Feature] Add Dropout to MLP module (pytorch#988)
Co-authored-by: vmoens <vincentmoens@gmail.com>
BY571 and vmoens authored Mar 24, 2023
1 parent 69ff6a6 commit 0d1823c
Showing 2 changed files with 19 additions and 5 deletions.
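
Before the diff, a minimal usage sketch (not part of the commit) of the argument this change introduces, assuming the public torchrl.modules.MLP entry point and otherwise default settings:

    import torch
    from torchrl.modules import MLP

    # Two hidden layers of 64 units; dropout=0.5 asks the MLP to insert
    # nn.Dropout(p=0.5) modules into the hidden blocks it builds.
    mlp = MLP(in_features=32, out_features=10, num_cells=[64, 64], dropout=0.5)

    x = torch.randn(8, 32)
    y = mlp(x)   # dropout is active in train mode
    mlp.eval()   # and disabled after .eval(), like any nn.Dropout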
test/test_modules.py (9 changes: 8 additions & 1 deletion)
@@ -57,8 +57,13 @@ def double_prec_fixture():
 )
 @pytest.mark.parametrize(
     "norm_class, norm_kwargs",
-    [(nn.LazyBatchNorm1d, {}), (nn.BatchNorm1d, {"num_features": 32})],
+    [
+        (nn.LazyBatchNorm1d, {}),
+        (nn.BatchNorm1d, {"num_features": 32}),
+        (nn.LayerNorm, {"normalized_shape": 32}),
+    ],
 )
+@pytest.mark.parametrize("dropout", [0.0, 0.5])
 @pytest.mark.parametrize("bias_last_layer", [True, False])
 @pytest.mark.parametrize("single_bias_last_layer", [True, False])
 @pytest.mark.parametrize("layer_class", [nn.Linear, NoisyLinear])
@@ -70,6 +75,7 @@ def test_mlp(
     num_cells,
     activation_class,
     activation_kwargs,
+    dropout,
     bias_last_layer,
     norm_class,
     norm_kwargs,
@@ -89,6 +95,7 @@
         activation_kwargs=activation_kwargs,
         norm_class=norm_class,
         norm_kwargs=norm_kwargs,
+        dropout=dropout,
         bias_last_layer=bias_last_layer,
         single_bias_last_layer=False,
         layer_class=layer_class,
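
A stand-alone sketch of what the new "dropout" parametrization exercises (hypothetical test name; the real test above covers many more argument combinations):

    import pytest
    import torch
    from torchrl.modules import MLP

    @pytest.mark.parametrize("dropout", [0.0, 0.5])
    def test_mlp_dropout_smoke(dropout):
        # Build and run the MLP for both dropout settings; output shape must be unchanged.
        net = MLP(in_features=32, out_features=8, num_cells=[32, 32], dropout=dropout)
        out = net(torch.randn(4, 32))
        assert out.shape == (4, 8)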
torchrl/modules/models/models.py (15 changes: 11 additions & 4 deletions)
@@ -49,6 +49,8 @@ class MLP(nn.Sequential):
         activation_kwargs (dict, optional): kwargs to be used with the activation class;
         norm_class (Type, optional): normalization class, if any.
         norm_kwargs (dict, optional): kwargs to be used with the normalization layers;
+        dropout (float, optional): dropout probability. Defaults to ``None`` (no
+            dropout);
         bias_last_layer (bool): if ``True``, the last Linear layer will have a bias parameter.
             default: True;
         single_bias_last_layer (bool): if ``True``, the last dimension of the bias of the last layer will be a singleton
@@ -147,6 +149,7 @@ def __init__(
         activation_kwargs: Optional[dict] = None,
         norm_class: Optional[Type[nn.Module]] = None,
         norm_kwargs: Optional[dict] = None,
+        dropout: Optional[float] = None,
         bias_last_layer: bool = True,
         single_bias_last_layer: bool = False,
         layer_class: Type[nn.Module] = nn.Linear,
@@ -178,6 +181,7 @@
         )
         self.norm_class = norm_class
         self.norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
+        self.dropout = dropout
         self.bias_last_layer = bias_last_layer
         self.single_bias_last_layer = single_bias_last_layer
         self.layer_class = layer_class
@@ -235,15 +239,18 @@ def _make_net(self, device: Optional[DEVICE_TYPING]) -> List[nn.Module]:
                 )

             if i < self.depth or self.activate_last_layer:
+                if self.dropout is not None:
+                    layers.append(create_on_device(nn.Dropout, device, p=self.dropout))
+                if self.norm_class is not None:
+                    layers.append(
+                        create_on_device(self.norm_class, device, **self.norm_kwargs)
+                    )
                 layers.append(
                     create_on_device(
                         self.activation_class, device, **self.activation_kwargs
                     )
                 )
-                if self.norm_class is not None:
-                    layers.append(
-                        create_on_device(self.norm_class, device, **self.norm_kwargs)
-                    )

         return layers

     def forward(self, *inputs: Tuple[torch.Tensor]) -> torch.Tensor:
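
A quick way to inspect the per-block ordering produced by the _make_net hunk above (a sketch, assuming nn.LayerNorm over the 64-unit hidden cells and the MLP's default Tanh activation):

    from torch import nn
    from torchrl.modules import MLP

    net = MLP(
        in_features=16,
        out_features=4,
        num_cells=[64, 64],
        dropout=0.1,
        norm_class=nn.LayerNorm,
        norm_kwargs={"normalized_shape": 64},
    )
    print(net)  # MLP subclasses nn.Sequential, so this shows where nn.Dropout landed in each block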
