Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Feature] Jumanji from_pixels=True #2129

Merged
merged 9 commits into from
Apr 30, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
amend
  • Loading branch information
vmoens committed Apr 29, 2024
commit 700e92a5aced92e5db52819aa2276e47064f13df
2 changes: 1 addition & 1 deletion .github/unittest/linux_libs/scripts_jumanji/run_test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,6 @@ export MAGNUM_LOG=verbose MAGNUM_GPU_VALIDATION=ON
# this workflow only tests the libs
python -c "import jumanji"

python .github/unittest/helpers/coverage_run_parallel.py -m pytest test/test_libs.py --instafail -v --durations 200 --capture no -k TestJumanji --error-for-skips
python .github/unittest/helpers/coverage_run_parallel.py -m pytest test/test_libs.py --instafail -v --durations 200 --capture no -k TestJumanji --error-for-skips --runslow
coverage combine
coverage xml -i
24 changes: 17 additions & 7 deletions test/test_libs.py
Original file line number Diff line number Diff line change
Expand Up @@ -1327,14 +1327,15 @@ def test_habitat_render(self, envname, from_pixels):
assert "pixels" in rollout.keys()


def _jumanji_envs():
    """Return a small slice of Jumanji environment names to parametrize tests.

    When jumanji is not installed, an empty tuple is returned so that the
    ``pytest.mark.parametrize`` consuming this helper generates no test cases.
    """
    if _has_jumanji:
        return JumanjiEnv.available_envs[-10:-5]
    return ()


@pytest.mark.skipif(not _has_jumanji, reason="jumanji not installed")
@pytest.mark.parametrize(
"envname",
[
"TSP-v1",
"Snake-v1",
],
)
@pytest.mark.slow
@pytest.mark.parametrize("envname", _jumanji_envs())
class TestJumanji:
def test_jumanji_seeding(self, envname):
final_seed = []
Expand Down Expand Up @@ -1413,6 +1414,15 @@ def test_jumanji_consistency(self, envname, batch_size):
t2 = torch.tensor(onp.asarray(t2)).view_as(t1)
torch.testing.assert_close(t1, t2)

def test_jumanji_rendering(self, envname):
    """Check that pixel rendering works for a batched Jumanji environment."""
    # Build the env with a non-trivial batch size to exercise batched rendering.
    env = JumanjiEnv(envname, from_pixels=True, batch_size=[3])
    env.set_seed(0)
    check_env_specs(env)
    rollout = env.rollout(10)
    pixels = rollout["pixels"]
    # Rendered frames must be uint8 and not a constant image.
    assert pixels.dtype == torch.uint8
    assert pixels.unique().numel() > 1


ENVPOOL_CLASSIC_CONTROL_ENVS = [
PENDULUM_VERSIONED(),
Expand Down
21 changes: 17 additions & 4 deletions torchrl/record/recorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,14 @@
from __future__ import annotations

import importlib.util
import math
from copy import copy
from typing import Callable, List, Optional, Sequence, Union

import numpy as np
import torch

from tensordict import NonTensorData, TensorDict, TensorDictBase
from tensordict import NonTensorData, TensorDictBase

from tensordict.utils import NestedKey

Expand Down Expand Up @@ -216,7 +217,10 @@ def _apply_transform(self, observation: torch.Tensor) -> torch.Tensor:
)
from torchvision.utils import make_grid

observation_trsf = make_grid(observation_trsf.flatten(0, -4))
obs_flat = observation_trsf.flatten(0, -4)
observation_trsf = make_grid(
obs_flat, nrow=int(math.ceil(math.sqrt(obs_flat.shape[0])))
)
self.obs.append(observation_trsf.to(torch.uint8))
elif observation_trsf.ndimension() >= 4:
self.obs.extend(observation_trsf.to(torch.uint8).flatten(0, -4))
Expand Down Expand Up @@ -346,6 +350,8 @@ class PixelRenderTransform(Transform):
thereby relaxing the shape requirements. If not provided, it will be inferred automatically from the
input data type and shape.
render_method (str, optional): the name of the render method. Defaults to ``"render"``.
pass_tensordict (bool, optional): if ``True``, the input tensordict will be passed to the
render method. This enables rendering for stateless environments. Defaults to ``False``.
**kwargs: additional keyword arguments to pass to the render function (e.g. ``mode="rgb_array"``).

Examples:
Expand Down Expand Up @@ -422,6 +428,7 @@ def __init__(
] = None,
as_non_tensor: bool = None,
render_method: str = "render",
pass_tensordict: bool = False,
**kwargs,
) -> None:
if out_keys is None:
Expand All @@ -439,6 +446,7 @@ def __init__(
self.kwargs = kwargs
self.render_method = render_method
self._enabled = True
self.pass_tensordict = pass_tensordict
super().__init__(in_keys=[], out_keys=out_keys)

def _reset(
Expand All @@ -450,7 +458,12 @@ def _call(self, tensordict: TensorDictBase) -> TensorDictBase:
if not self._enabled:
return tensordict

array = getattr(self.parent, self.render_method)(**self.kwargs)
method = getattr(self.parent, self.render_method)
if not self.pass_tensordict:
array = method(**self.kwargs)
else:
array = method(tensordict, **self.kwargs)

if self.preproc:
array = self.preproc(array)
if self.as_non_tensor is None:
Expand Down Expand Up @@ -489,7 +502,7 @@ def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec
switch = True
self.switch()
parent = self.parent
td_in = TensorDict({}, batch_size=parent.batch_size, device=parent.device)
td_in = parent.reset()
self._call(td_in)
obs = td_in.get(self.out_keys[0])
if isinstance(obs, NonTensorData):
Expand Down
Loading