[Versioning] Deprecations for 0.4 #2109

Merged: 12 commits, Apr 25, 2024
Changes from 1 commit
amend
vmoens committed Apr 24, 2024
commit 49b015df5a5413a9344cf725048c8a4d42da6dde
test/test_shared.py (6 changes: 3 additions & 3 deletions)
@@ -64,7 +64,7 @@ def test_shared(self, indexing_method):
                 batch_size=[],
             ).share_memory_()
         elif indexing_method == 1:
-            subtd = td.get_sub_tensordict(0)
+            subtd = td._get_sub_tensordict(0)
         elif indexing_method == 2:
             subtd = td[0]
         else:
@@ -182,14 +182,14 @@ def test_memmap(idx, dtype, large_scale=False):
     torchrl_logger.info("\nTesting writing to TD")
     for i in range(2):
         t0 = time.time()
-        sub_td_sm = td_sm.get_sub_tensordict(idx)
+        sub_td_sm = td_sm._get_sub_tensordict(idx)
         sub_td_sm.update_(td_to_copy)
         if i == 1:
             torchrl_logger.info(f"sm td: {time.time() - t0:4.4f} sec")
         torch.testing.assert_close(sub_td_sm.get("a"), td_to_copy.get("a"))

         t0 = time.time()
-        sub_td_sm = td_memmap.get_sub_tensordict(idx)
+        sub_td_sm = td_memmap._get_sub_tensordict(idx)
         sub_td_sm.update_(td_to_copy)
         if i == 1:
             torchrl_logger.info(f"memmap td: {time.time() - t0:4.4f} sec")
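Note on these two hunks: the tests switch from the public get_sub_tensordict to the private _get_sub_tensordict, suggesting the public name is deprecated in tensordict for 0.4. A minimal sketch of the public alternative, assuming tensordict's standard indexing semantics (the key "a" and the shapes are illustrative):

import torch
from tensordict import TensorDict

td = TensorDict({"a": torch.zeros(4, 3)}, batch_size=[4]).share_memory_()
subtd = td[0]                    # public indexing returns a view into shared memory
subtd.set_("a", torch.ones(3))   # in-place write, visible through the parent
assert (td["a"][0] == 1).all()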
test/test_transforms.py (45 changes: 21 additions & 24 deletions)
@@ -785,7 +785,7 @@ def test_transform_env_clone(self):

     @pytest.mark.parametrize("dim", [-1])
     @pytest.mark.parametrize("N", [3, 4])
-    @pytest.mark.parametrize("padding", ["zeros", "constant", "same"])
+    @pytest.mark.parametrize("padding", ["constant", "same"])
     def test_transform_model(self, dim, N, padding):
         # test equivalence between transforms within an env and within a rb
         key1 = "observation"
@@ -838,7 +838,7 @@ def test_transform_model(self, dim, N, padding):

     @pytest.mark.parametrize("dim", [-1])
     @pytest.mark.parametrize("N", [3, 4])
-    @pytest.mark.parametrize("padding", ["same", "zeros", "constant"])
+    @pytest.mark.parametrize("padding", ["same", "constant"])
     @pytest.mark.parametrize("rbclass", [ReplayBuffer, TensorDictReplayBuffer])
     def test_transform_rb(self, dim, N, padding, rbclass):
         # test equivalence between transforms within an env and within a rb
@@ -870,7 +870,7 @@ def test_transform_rb(self, dim, N, padding, rbclass):

     @pytest.mark.parametrize("dim", [-1])
     @pytest.mark.parametrize("N", [3, 4])
-    @pytest.mark.parametrize("padding", ["same", "zeros", "constant"])
+    @pytest.mark.parametrize("padding", ["same", "constant"])
     def test_transform_as_inverse(self, dim, N, padding):
         # test equivalence between transforms within an env and within a rb
         in_keys = ["observation", ("next", "observation")]
@@ -987,7 +987,7 @@ def test_transform_no_env(self, device, d, batch_size, dim, N):
         assert v1 is not v2

     @pytest.mark.skipif(not _has_gym, reason="gym required for this test")
-    @pytest.mark.parametrize("padding", ["zeros", "constant", "same"])
+    @pytest.mark.parametrize("padding", ["constant", "same"])
     @pytest.mark.parametrize("envtype", ["gym", "conv"])
     def test_tranform_offline_against_online(self, padding, envtype):
         torch.manual_seed(0)
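These four hunks drop "zeros" from the padding parametrizations, leaving "constant" and "same". A hedged sketch of the surviving spelling, assuming CatFrames is the transform under test and that "constant" pairs with a padding_value argument (both inferred from the test names, not stated in this diff):

from torchrl.envs.transforms import CatFrames

# Assumption: "constant" plus an explicit padding_value of 0.0 reproduces
# what the deprecated padding="zeros" used to do.
cat = CatFrames(
    N=4,
    dim=-1,
    padding="constant",
    padding_value=0.0,
    in_keys=["observation"],
    out_keys=["observation_cat"],
)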
@@ -1027,10 +1027,7 @@ def test_tranform_offline_against_online(self, padding, envtype):
     @pytest.mark.parametrize("device", get_default_devices())
     @pytest.mark.parametrize("batch_size", [(), (1,), (1, 2)])
     @pytest.mark.parametrize("d", range(2, 3))
-    @pytest.mark.parametrize(
-        "dim",
-        [-3],
-    )
+    @pytest.mark.parametrize("dim", [-3])
     @pytest.mark.parametrize("N", [2, 4])
     def test_transform_compose(self, device, d, batch_size, dim, N):
         key1 = "first key"
@@ -4177,11 +4174,11 @@ def test_observationnorm(
         )
         observation_spec = on.transform_observation_spec(observation_spec)
         if standard_normal:
-            assert (observation_spec.space.minimum == -loc / scale).all()
-            assert (observation_spec.space.maximum == (1 - loc) / scale).all()
+            assert (observation_spec.space.low == -loc / scale).all()
+            assert (observation_spec.space.high == (1 - loc) / scale).all()
         else:
-            assert (observation_spec.space.minimum == loc).all()
-            assert (observation_spec.space.maximum == scale + loc).all()
+            assert (observation_spec.space.low == loc).all()
+            assert (observation_spec.space.high == scale + loc).all()

     else:
         observation_spec = CompositeSpec(
@@ -5097,9 +5094,9 @@ def test_keys_length_errors(self, in_keys, reset_keys, out_keys, batch=10):
                 f"Could not match the env reset_keys {reset_keys} with the in_keys {in_keys}"
             ),
         ):
-            t.reset(td)
+            t._reset(td, td.empty())
         else:
-            t.reset(td)
+            t._reset(td, td.empty())


 class TestReward2Go(TransformBase):
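The reset hook changes shape here: t.reset(td) becomes t._reset(td, td.empty()). A minimal, self-contained sketch of the assumed new signature, where the hook receives the step's tensordict plus a fresh tensordict to hold the reset result; ObservationNorm is used purely as a stand-in transform:

import torch
from tensordict import TensorDict
from torchrl.envs.transforms import ObservationNorm

t = ObservationNorm(loc=0.0, scale=1.0, in_keys=["observation"])
td = TensorDict({"observation": torch.randn(3)}, batch_size=[])
# td.empty() keeps the batch size and device but carries no keys yet;
# _reset is assumed to return the populated reset tensordict.
td_reset = t._reset(td, td.empty())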
@@ -6149,8 +6146,8 @@ def test_transform_no_env(self, keys, batch, device):
             observation_spec
         )
         assert observation_spec.shape == torch.Size([3, 16, 16])
-        assert (observation_spec.space.minimum == 0).all()
-        assert (observation_spec.space.maximum == 1).all()
+        assert (observation_spec.space.low == 0).all()
+        assert (observation_spec.space.high == 1).all()
     else:
         observation_spec = CompositeSpec(
             {
@@ -6198,8 +6195,8 @@ def test_transform_compose(self, keys, batch, device):
             observation_spec
         )
         assert observation_spec.shape == torch.Size([3, 16, 16])
-        assert (observation_spec.space.minimum == 0).all()
-        assert (observation_spec.space.maximum == 1).all()
+        assert (observation_spec.space.low == 0).all()
+        assert (observation_spec.space.high == 1).all()
     else:
         observation_spec = CompositeSpec(
             {
@@ -8039,14 +8036,14 @@ def test_independent_reward_specs_from_shared_env(self):
         t1_reward_spec = t1.reward_spec
         t2_reward_spec = t2.reward_spec

-        assert t1_reward_spec.space.minimum == 0
-        assert t1_reward_spec.space.maximum == 4
+        assert t1_reward_spec.space.low == 0
+        assert t1_reward_spec.space.high == 4

-        assert t2_reward_spec.space.minimum == -2
-        assert t2_reward_spec.space.maximum == 2
+        assert t2_reward_spec.space.low == -2
+        assert t2_reward_spec.space.high == 2

-        assert base_env.reward_spec.space.minimum == -np.inf
-        assert base_env.reward_spec.space.maximum == np.inf
+        assert base_env.reward_spec.space.low == -np.inf
+        assert base_env.reward_spec.space.high == np.inf

     def test_allow_done_after_reset(self):
         base_env = ContinuousActionVecMockEnv(allow_done_after_reset=True)
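All of the space.minimum / space.maximum assertions above move to space.low / space.high, the 0.4 names for the bounds of a bounded spec's space. A short sketch of the renamed accessors, using BoundedTensorSpec's low/high constructor arguments (shape and bounds here are illustrative):

import torch
from torchrl.data import BoundedTensorSpec

spec = BoundedTensorSpec(low=0.0, high=1.0, shape=(3,), dtype=torch.float32)
assert (spec.space.low == 0).all()
assert (spec.space.high == 1).all()
# Assumption: the old .minimum/.maximum names survive as deprecated aliases
# during 0.4; this diff itself does not show that.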
torchrl/envs/libs/dm_control.py (4 changes: 2 additions & 2 deletions)
@@ -80,8 +80,8 @@ def _dmcontrol_to_torchrl_spec_transform(
             shape = torch.Size([1])
         return BoundedTensorSpec(
             shape=shape,
-            low=spec.minimum,
-            high=spec.maximum,
+            low=spec.low,
+            high=spec.high,
             dtype=dtype,
             device=device,
         )
torchrl/envs/libs/jumanji.py (4 changes: 2 additions & 2 deletions)
@@ -67,8 +67,8 @@ def _jumanji_to_torchrl_spec_transform(
         dtype = numpy_to_torch_dtype_dict[spec.dtype]
         return BoundedTensorSpec(
             shape=shape,
-            low=np.asarray(spec.minimum),
-            high=np.asarray(spec.maximum),
+            low=np.asarray(spec.low),
+            high=np.asarray(spec.high),
             dtype=dtype,
             device=device,
         )
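Both converters above now read the bounds off the source spec as spec.low / spec.high when building the torchrl BoundedTensorSpec. A hedged, self-contained sketch of that conversion pattern, with a hand-written stand-in for the third-party spec (FakeBoundedSpec and its attribute names are illustrative, chosen to mirror this diff):

import numpy as np
import torch
from torchrl.data import BoundedTensorSpec


class FakeBoundedSpec:
    # stand-in for a dm_control / jumanji bounded array spec
    shape = (6,)
    dtype = np.float32
    low = np.full(6, -1.0, dtype=np.float32)
    high = np.full(6, 1.0, dtype=np.float32)


src = FakeBoundedSpec()
spec = BoundedTensorSpec(
    shape=torch.Size(src.shape),
    low=np.asarray(src.low),
    high=np.asarray(src.high),
    dtype=torch.float32,
)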