diff --git a/.github/unittest/linux_libs/scripts_meltingpot/environment.yml b/.github/unittest/linux_libs/scripts_meltingpot/environment.yml new file mode 100644 index 00000000000..31e8c01407f --- /dev/null +++ b/.github/unittest/linux_libs/scripts_meltingpot/environment.yml @@ -0,0 +1,15 @@ +channels: + - pytorch + - defaults +dependencies: + - pip + - pip: + - cloudpickle + - torch + - pytest + - pytest-cov + - pytest-mock + - pytest-instafail + - pytest-rerunfailures + - pytest-error-for-skips + - expecttest diff --git a/.github/unittest/linux_libs/scripts_meltingpot/install.sh b/.github/unittest/linux_libs/scripts_meltingpot/install.sh new file mode 100755 index 00000000000..7c13fbf54d1 --- /dev/null +++ b/.github/unittest/linux_libs/scripts_meltingpot/install.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +unset PYTORCH_VERSION +# For unittest, nightly PyTorch is used as the following section, +# so no need to set PYTORCH_VERSION. +# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. 
+ +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env + +if [ "${CU_VERSION:-}" == cpu ] ; then + version="cpu" +else + if [[ ${#CU_VERSION} -eq 4 ]]; then + CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}" + elif [[ ${#CU_VERSION} -eq 5 ]]; then + CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}" + fi + echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION ($CU_VERSION)" + version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" +fi + +# submodules +git submodule sync && git submodule update --init --recursive + +printf "Installing PyTorch with cu121" +if [[ "$TORCH_VERSION" == "nightly" ]]; then + if [ "${CU_VERSION:-}" == cpu ] ; then + pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu -U + else + pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121 -U + fi +elif [[ "$TORCH_VERSION" == "stable" ]]; then + if [ "${CU_VERSION:-}" == cpu ] ; then + pip3 install torch --index-url https://download.pytorch.org/whl/cpu + else + pip3 install torch --index-url https://download.pytorch.org/whl/cu121 + fi +else + printf "Failed to install pytorch" + exit 1 +fi + +# install tensordict +if [[ "$RELEASE" == 0 ]]; then + pip3 install git+https://github.com/pytorch/tensordict.git +else + pip3 install tensordict +fi + +# smoke test +python -c "import tensordict" + +printf "* Installing torchrl\n" +python setup.py develop +python -c "import torchrl" + +conda install conda-forge::jq -y +# Install meltingpot from git +#pip3 install dmlab2d +LATEST_TAG=$(curl "https://api.github.com/repos/google-deepmind/meltingpot/tags" | jq -r '.[0].name') + +echo $(ldd --version) + +pip3 install git+https://github.com/google-deepmind/meltingpot@${LATEST_TAG} diff --git a/.github/unittest/linux_libs/scripts_meltingpot/post_process.sh b/.github/unittest/linux_libs/scripts_meltingpot/post_process.sh new file mode 100755 index 00000000000..e97bf2a7b1b --- /dev/null +++ 
b/.github/unittest/linux_libs/scripts_meltingpot/post_process.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env diff --git a/.github/unittest/linux_libs/scripts_meltingpot/run-clang-format.py b/.github/unittest/linux_libs/scripts_meltingpot/run-clang-format.py new file mode 100755 index 00000000000..5783a885d86 --- /dev/null +++ b/.github/unittest/linux_libs/scripts_meltingpot/run-clang-format.py @@ -0,0 +1,356 @@ +#!/usr/bin/env python +""" +MIT License + +Copyright (c) 2017 Guillaume Papin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +A wrapper script around clang-format, suitable for linting multiple files +and to use for continuous integration. + +This is an alternative API for the clang-format command line. +It runs over multiple files and directories in parallel. +A diff output is produced and a sensible exit code is returned. 
+ +""" + +import argparse +import difflib +import fnmatch +import multiprocessing +import os +import signal +import subprocess +import sys +import traceback +from functools import partial + +try: + from subprocess import DEVNULL # py3k +except ImportError: + DEVNULL = open(os.devnull, "wb") + + +DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu" + + +class ExitStatus: + SUCCESS = 0 + DIFF = 1 + TROUBLE = 2 + + +def list_files(files, recursive=False, extensions=None, exclude=None): + if extensions is None: + extensions = [] + if exclude is None: + exclude = [] + + out = [] + for file in files: + if recursive and os.path.isdir(file): + for dirpath, dnames, fnames in os.walk(file): + fpaths = [os.path.join(dirpath, fname) for fname in fnames] + for pattern in exclude: + # os.walk() supports trimming down the dnames list + # by modifying it in-place, + # to avoid unnecessary directory listings. + dnames[:] = [ + x + for x in dnames + if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern) + ] + fpaths = [x for x in fpaths if not fnmatch.fnmatch(x, pattern)] + for f in fpaths: + ext = os.path.splitext(f)[1][1:] + if ext in extensions: + out.append(f) + else: + out.append(file) + return out + + +def make_diff(file, original, reformatted): + return list( + difflib.unified_diff( + original, + reformatted, + fromfile=f"{file}\t(original)", + tofile=f"{file}\t(reformatted)", + n=3, + ) + ) + + +class DiffError(Exception): + def __init__(self, message, errs=None): + super().__init__(message) + self.errs = errs or [] + + +class UnexpectedError(Exception): + def __init__(self, message, exc=None): + super().__init__(message) + self.formatted_traceback = traceback.format_exc() + self.exc = exc + + +def run_clang_format_diff_wrapper(args, file): + try: + ret = run_clang_format_diff(args, file) + return ret + except DiffError: + raise + except Exception as e: + raise UnexpectedError(f"{file}: {e.__class__.__name__}: {e}", e) + + +def run_clang_format_diff(args, 
file): + try: + with open(file, encoding="utf-8") as f: + original = f.readlines() + except OSError as exc: + raise DiffError(str(exc)) + invocation = [args.clang_format_executable, file] + + # Use of utf-8 to decode the process output. + # + # Hopefully, this is the correct thing to do. + # + # It's done due to the following assumptions (which may be incorrect): + # - clang-format will returns the bytes read from the files as-is, + # without conversion, and it is already assumed that the files use utf-8. + # - if the diagnostics were internationalized, they would use utf-8: + # > Adding Translations to Clang + # > + # > Not possible yet! + # > Diagnostic strings should be written in UTF-8, + # > the client can translate to the relevant code page if needed. + # > Each translation completely replaces the format string + # > for the diagnostic. + # > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation + + try: + proc = subprocess.Popen( + invocation, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + encoding="utf-8", + ) + except OSError as exc: + raise DiffError( + f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}" + ) + proc_stdout = proc.stdout + proc_stderr = proc.stderr + + # hopefully the stderr pipe won't get full and block the process + outs = list(proc_stdout.readlines()) + errs = list(proc_stderr.readlines()) + proc.wait() + if proc.returncode: + raise DiffError( + "Command '{}' returned non-zero exit status {}".format( + subprocess.list2cmdline(invocation), proc.returncode + ), + errs, + ) + return make_diff(file, original, outs), errs + + +def bold_red(s): + return "\x1b[1m\x1b[31m" + s + "\x1b[0m" + + +def colorize(diff_lines): + def bold(s): + return "\x1b[1m" + s + "\x1b[0m" + + def cyan(s): + return "\x1b[36m" + s + "\x1b[0m" + + def green(s): + return "\x1b[32m" + s + "\x1b[0m" + + def red(s): + return "\x1b[31m" + s + "\x1b[0m" + + for line in diff_lines: + if line[:4] 
in ["--- ", "+++ "]: + yield bold(line) + elif line.startswith("@@ "): + yield cyan(line) + elif line.startswith("+"): + yield green(line) + elif line.startswith("-"): + yield red(line) + else: + yield line + + +def print_diff(diff_lines, use_color): + if use_color: + diff_lines = colorize(diff_lines) + sys.stdout.writelines(diff_lines) + + +def print_trouble(prog, message, use_colors): + error_text = "error:" + if use_colors: + error_text = bold_red(error_text) + print(f"{prog}: {error_text} {message}", file=sys.stderr) + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--clang-format-executable", + metavar="EXECUTABLE", + help="path to the clang-format executable", + default="clang-format", + ) + parser.add_argument( + "--extensions", + help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})", + default=DEFAULT_EXTENSIONS, + ) + parser.add_argument( + "-r", + "--recursive", + action="store_true", + help="run recursively over directories", + ) + parser.add_argument("files", metavar="file", nargs="+") + parser.add_argument("-q", "--quiet", action="store_true") + parser.add_argument( + "-j", + metavar="N", + type=int, + default=0, + help="run N clang-format jobs in parallel (default number of cpus + 1)", + ) + parser.add_argument( + "--color", + default="auto", + choices=["auto", "always", "never"], + help="show colored diff (default: auto)", + ) + parser.add_argument( + "-e", + "--exclude", + metavar="PATTERN", + action="append", + default=[], + help="exclude paths matching the given glob-like pattern(s) from recursive search", + ) + + args = parser.parse_args() + + # use default signal handling, like diff return SIGINT value on ^C + # https://bugs.python.org/issue14229#msg156446 + signal.signal(signal.SIGINT, signal.SIG_DFL) + try: + signal.SIGPIPE + except AttributeError: + # compatibility, SIGPIPE does not exist on Windows + pass + else: + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + 
colored_stdout = False + colored_stderr = False + if args.color == "always": + colored_stdout = True + colored_stderr = True + elif args.color == "auto": + colored_stdout = sys.stdout.isatty() + colored_stderr = sys.stderr.isatty() + + version_invocation = [args.clang_format_executable, "--version"] + try: + subprocess.check_call(version_invocation, stdout=DEVNULL) + except subprocess.CalledProcessError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + return ExitStatus.TROUBLE + except OSError as e: + print_trouble( + parser.prog, + f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}", + use_colors=colored_stderr, + ) + return ExitStatus.TROUBLE + + retcode = ExitStatus.SUCCESS + files = list_files( + args.files, + recursive=args.recursive, + exclude=args.exclude, + extensions=args.extensions.split(","), + ) + + if not files: + return + + njobs = args.j + if njobs == 0: + njobs = multiprocessing.cpu_count() + 1 + njobs = min(len(files), njobs) + + if njobs == 1: + # execute directly instead of in a pool, + # less overhead, simpler stacktraces + it = (run_clang_format_diff_wrapper(args, file) for file in files) + pool = None + else: + pool = multiprocessing.Pool(njobs) + it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files) + while True: + try: + outs, errs = next(it) + except StopIteration: + break + except DiffError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + retcode = ExitStatus.TROUBLE + sys.stderr.writelines(e.errs) + except UnexpectedError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + sys.stderr.write(e.formatted_traceback) + retcode = ExitStatus.TROUBLE + # stop at the first unexpected error, + # something could be very wrong, + # don't process all files unnecessarily + if pool: + pool.terminate() + break + else: + sys.stderr.writelines(errs) + if outs == []: + continue + if not args.quiet: + print_diff(outs, 
use_color=colored_stdout) + if retcode == ExitStatus.SUCCESS: + retcode = ExitStatus.DIFF + return retcode + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/.github/unittest/linux_libs/scripts_meltingpot/run_test.sh b/.github/unittest/linux_libs/scripts_meltingpot/run_test.sh new file mode 100755 index 00000000000..6f7ec265f74 --- /dev/null +++ b/.github/unittest/linux_libs/scripts_meltingpot/run_test.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env +apt-get update && apt-get install -y git wget + + +export PYTORCH_TEST_WITH_SLOW='1' +export LAZY_LEGACY_OP=False +python -m torch.utils.collect_env +# Avoid error: "fatal: unsafe repository" +git config --global --add safe.directory '*' + +root_dir="$(git rev-parse --show-toplevel)" +env_dir="${root_dir}/env" +lib_dir="${env_dir}/lib" + +# solves ImportError: /lib64/libstdc++.so.6: version `GLIBCXX_3.4.21' not found +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$lib_dir +export MKL_THREADING_LAYER=GNU +# more logging +export MAGNUM_LOG=verbose MAGNUM_GPU_VALIDATION=ON + +# this workflow only tests the libs +python -c "import meltingpot" + +python .github/unittest/helpers/coverage_run_parallel.py -m pytest test/test_libs.py --instafail -v --durations 200 --capture no -k TestMeltingpot --error-for-skips +coverage combine +coverage xml -i diff --git a/.github/unittest/linux_libs/scripts_meltingpot/setup_env.sh b/.github/unittest/linux_libs/scripts_meltingpot/setup_env.sh new file mode 100755 index 00000000000..b342c57f099 --- /dev/null +++ b/.github/unittest/linux_libs/scripts_meltingpot/setup_env.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +# This script is for setting up environment in which unit test is ran. +# To speed up the CI time, the resulting environment is cached. +# +# Do not install PyTorch and torchvision here, otherwise they also get cached. 
+ +set -e -v + +apt-get update && apt-get upgrade -y +apt-get install -y git wget g++ gcc libglfw3 libgl1-mesa-glx libosmesa6 libglew-dev libglvnd0 libgl1 libglx0 libegl1 libgles2 curl +apt-get upgrade -y libstdc++6 + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +# Avoid error: "fatal: unsafe repository" +git config --global --add safe.directory '*' +root_dir="$(git rev-parse --show-toplevel)" +conda_dir="${root_dir}/conda" +env_dir="${root_dir}/env" + + +cd "${root_dir}" + +case "$(uname -s)" in + Darwin*) os=MacOSX;; + *) os=Linux +esac + +# 1. Install conda at ./conda +if [ ! -d "${conda_dir}" ]; then + printf "* Installing conda\n" + wget -O miniconda.sh "https://repo.continuum.io/miniconda/Miniconda3-latest-${os}-x86_64.sh" + bash ./miniconda.sh -b -f -p "${conda_dir}" +fi +eval "$(${conda_dir}/bin/conda shell.bash hook)" + +# 2. Create test environment at ./env +printf "python: ${PYTHON_VERSION}\n" +if [ ! -d "${env_dir}" ]; then + printf "* Creating a test environment\n" + conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION" +fi +conda activate "${env_dir}" + +# 4. 
Install Conda dependencies +printf "* Installing dependencies (except PyTorch)\n" +echo " - python=${PYTHON_VERSION}" >> "${this_dir}/environment.yml" +cat "${this_dir}/environment.yml" + +pip install pip --upgrade + +conda env update --file "${this_dir}/environment.yml" --prune diff --git a/.github/workflows/test-linux-libs.yml b/.github/workflows/test-linux-libs.yml index 91b7dc8c742..9e1875cac18 100644 --- a/.github/workflows/test-linux-libs.yml +++ b/.github/workflows/test-linux-libs.yml @@ -259,6 +259,40 @@ jobs: bash .github/unittest/linux_libs/scripts_jumanji/run_test.sh bash .github/unittest/linux_libs/scripts_jumanji/post_process.sh + unittests-meltingpot: + uses: pytorch/test-infra/.github/workflows/linux_job.yml@main + if: ${{ github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'Environments') }} + with: + repository: pytorch/rl + runner: "linux.g5.4xlarge.nvidia.gpu" + gpu-arch-type: cuda + gpu-arch-version: "12.1" + docker-image: "nvidia/cuda:12.4.1-runtime-ubuntu22.04" + timeout: 120 + script: | + if [[ "${{ github.ref }}" =~ release/* ]]; then + export RELEASE=1 + export TORCH_VERSION=stable + else + export RELEASE=0 + export TORCH_VERSION=nightly + fi + + set -euo pipefail + export PYTHON_VERSION="3.11" + export CU_VERSION="12.1" + export TAR_OPTIONS="--no-same-owner" + export UPLOAD_CHANNEL="nightly" + export TF_CPP_MIN_LOG_LEVEL=0 + export BATCHED_PIPE_TIMEOUT=60 + + nvidia-smi + + bash .github/unittest/linux_libs/scripts_meltingpot/setup_env.sh + bash .github/unittest/linux_libs/scripts_meltingpot/install.sh + bash .github/unittest/linux_libs/scripts_meltingpot/run_test.sh + bash .github/unittest/linux_libs/scripts_meltingpot/post_process.sh + unittests-minari: strategy: matrix: diff --git a/docs/source/reference/envs.rst b/docs/source/reference/envs.rst index f24b31c71d3..519865d1d00 100644 --- a/docs/source/reference/envs.rst +++ b/docs/source/reference/envs.rst @@ -891,6 +891,8 @@ the following function will return 
``1`` when queried: IsaacGymWrapper JumanjiEnv JumanjiWrapper + MeltingpotEnv + MeltingpotWrapper MOGymEnv MOGymWrapper MultiThreadedEnv diff --git a/setup.py b/setup.py index 44e772528a7..3c844400306 100644 --- a/setup.py +++ b/setup.py @@ -224,7 +224,7 @@ def _main(argv): "h5py", "pillow", ], - "marl": ["vmas>=1.2.10", "pettingzoo>=1.24.1"], + "marl": ["vmas>=1.2.10", "pettingzoo>=1.24.1", "dm-meltingpot"], } extra_requires["all"] = set() for key in list(extra_requires.keys()): diff --git a/test/test_libs.py b/test/test_libs.py index 067da7b4af7..2861e24c3f6 100644 --- a/test/test_libs.py +++ b/test/test_libs.py @@ -108,6 +108,7 @@ ) from torchrl.envs.libs.habitat import _has_habitat, HabitatEnv from torchrl.envs.libs.jumanji import _has_jumanji, JumanjiEnv +from torchrl.envs.libs.meltingpot import MeltingpotEnv, MeltingpotWrapper from torchrl.envs.libs.openml import OpenMLEnv from torchrl.envs.libs.pettingzoo import _has_pettingzoo, PettingZooEnv from torchrl.envs.libs.robohive import _has_robohive, RoboHiveEnv @@ -144,6 +145,8 @@ assert gym_backend() is gym +_has_meltingpot = importlib.util.find_spec("meltingpot") is not None + def get_gym_pixel_wrapper(): try: @@ -3508,6 +3511,54 @@ def test_collector(self): collector.shutdown() +@pytest.mark.skipif(not _has_meltingpot, reason="Meltingpot not found") +class TestMeltingpot: + @pytest.mark.parametrize("substrate", MeltingpotWrapper.available_envs) + def test_all_envs(self, substrate): + env = MeltingpotEnv(substrate=substrate) + check_env_specs(env) + + def test_passing_config(self, substrate="commons_harvest__open"): + from meltingpot import substrate as mp_substrate + + substrate_config = mp_substrate.get_config(substrate) + env_torchrl = MeltingpotEnv(substrate_config) + env_torchrl.rollout(max_steps=5) + + def test_wrapper(self, substrate="commons_harvest__open"): + from meltingpot import substrate as mp_substrate + + substrate_config = mp_substrate.get_config(substrate) + mp_env = 
mp_substrate.build_from_config( + substrate_config, roles=substrate_config.default_player_roles + ) + env_torchrl = MeltingpotWrapper(env=mp_env) + env_torchrl.rollout(max_steps=5) + + @pytest.mark.parametrize("max_steps", [1, 5]) + def test_max_steps(self, max_steps): + env = MeltingpotEnv(substrate="commons_harvest__open", max_steps=max_steps) + td = env.rollout(max_steps=100, break_when_any_done=True) + assert td.batch_size[0] == max_steps + + @pytest.mark.parametrize("categorical_actions", [True, False]) + def test_categorical_actions(self, categorical_actions): + env = MeltingpotEnv( + substrate="commons_harvest__open", categorical_actions=categorical_actions + ) + check_env_specs(env) + + @pytest.mark.parametrize("rollout_steps", [1, 3]) + def test_render(self, rollout_steps): + env = MeltingpotEnv(substrate="commons_harvest__open") + td = env.rollout(2) + rollout_penultimate_image = td[-1].get("RGB") + rollout_last_image = td[-1].get(("next", "RGB")) + image_from_env = env.get_rgb_image() + assert torch.equal(rollout_last_image, image_from_env) + assert not torch.equal(rollout_penultimate_image, image_from_env) + + if __name__ == "__main__": args, unknown = argparse.ArgumentParser().parse_known_args() pytest.main([__file__, "--capture", "no", "--exitfirst"] + unknown) diff --git a/torchrl/envs/__init__.py b/torchrl/envs/__init__.py index 297445f1dd2..e8f7fbe3ff2 100644 --- a/torchrl/envs/__init__.py +++ b/torchrl/envs/__init__.py @@ -20,6 +20,8 @@ IsaacGymWrapper, JumanjiEnv, JumanjiWrapper, + MeltingpotEnv, + MeltingpotWrapper, MOGymEnv, MOGymWrapper, MultiThreadedEnv, diff --git a/torchrl/envs/libs/__init__.py b/torchrl/envs/libs/__init__.py index 9121ea4c677..e322c2cbf01 100644 --- a/torchrl/envs/libs/__init__.py +++ b/torchrl/envs/libs/__init__.py @@ -17,6 +17,7 @@ from .habitat import HabitatEnv from .isaacgym import IsaacGymEnv, IsaacGymWrapper from .jumanji import JumanjiEnv, JumanjiWrapper +from .meltingpot import MeltingpotEnv, MeltingpotWrapper 
from .openml import OpenMLEnv from .pettingzoo import PettingZooEnv, PettingZooWrapper from .robohive import RoboHiveEnv diff --git a/torchrl/envs/libs/dm_control.py b/torchrl/envs/libs/dm_control.py index 9293dd195a0..3e1aac917e0 100644 --- a/torchrl/envs/libs/dm_control.py +++ b/torchrl/envs/libs/dm_control.py @@ -19,6 +19,7 @@ BoundedTensorSpec, CompositeSpec, DiscreteTensorSpec, + OneHotDiscreteTensorSpec, TensorSpec, UnboundedContinuousTensorSpec, UnboundedDiscreteTensorSpec, @@ -43,15 +44,34 @@ def _dmcontrol_to_torchrl_spec_transform( spec, dtype: Optional[torch.dtype] = None, device: DEVICE_TYPING = None, + categorical_discrete_encoding: bool = False, ) -> TensorSpec: import dm_env - if isinstance(spec, collections.OrderedDict): + if isinstance(spec, collections.OrderedDict) or isinstance(spec, Dict): spec = { - k: _dmcontrol_to_torchrl_spec_transform(item, device=device) + k: _dmcontrol_to_torchrl_spec_transform( + item, + device=device, + categorical_discrete_encoding=categorical_discrete_encoding, + ) for k, item in spec.items() } return CompositeSpec(**spec) + elif isinstance(spec, dm_env.specs.DiscreteArray): + # DiscreteArray is a type of BoundedArray so this block needs to go first + action_space_cls = ( + DiscreteTensorSpec + if categorical_discrete_encoding + else OneHotDiscreteTensorSpec + ) + if dtype is None: + dtype = ( + numpy_to_torch_dtype_dict[spec.dtype] + if categorical_discrete_encoding + else torch.long + ) + return action_space_cls(spec.num_values, device=device, dtype=dtype) elif isinstance(spec, dm_env.specs.BoundedArray): if dtype is None: dtype = numpy_to_torch_dtype_dict[spec.dtype] @@ -77,7 +97,6 @@ def _dmcontrol_to_torchrl_spec_transform( ) else: return UnboundedDiscreteTensorSpec(shape=shape, dtype=dtype, device=device) - else: raise NotImplementedError(type(spec)) diff --git a/torchrl/envs/libs/meltingpot.py b/torchrl/envs/libs/meltingpot.py new file mode 100644 index 00000000000..446b3dac292 --- /dev/null +++ 
b/torchrl/envs/libs/meltingpot.py @@ -0,0 +1,603 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +from __future__ import annotations + +import importlib + +from typing import Dict, List, Mapping, Sequence + +import torch + +from tensordict import TensorDict, TensorDictBase + +from torchrl.data import CompositeSpec, DiscreteTensorSpec, TensorSpec +from torchrl.envs.common import _EnvWrapper +from torchrl.envs.libs.dm_control import _dmcontrol_to_torchrl_spec_transform +from torchrl.envs.utils import _classproperty, check_marl_grouping, MarlGroupMapType + +_has_meltingpot = importlib.util.find_spec("meltingpot") is not None + +PLAYER_STR_FORMAT = "player_{index}" +_WORLD_PREFIX = "WORLD." + + +def _get_envs(): + if not _has_meltingpot: + raise ImportError("meltingpot is not installed in your virtual environment.") + from meltingpot.configs import substrates as substrate_configs + + return list(substrate_configs.SUBSTRATES) + + +def _filter_global_state_from_dict(obs_dict: Dict, world: bool) -> Dict: # noqa + return { + key: value + for key, value in obs_dict.items() + if ((_WORLD_PREFIX not in key) if not world else (_WORLD_PREFIX in key)) + } + + +def _remove_world_observations_from_obs_spec( + observation_spec: Sequence[Mapping[str, "dm_env.specs.Array"]], # noqa +) -> Sequence[Mapping[str, "dm_env.specs.Array"]]: # noqa + return [ + _filter_global_state_from_dict(agent_obs, world=False) + for agent_obs in observation_spec + ] + + +def _global_state_spec_from_obs_spec( + observation_spec: Sequence[Mapping[str, "dm_env.specs.Array"]] # noqa +) -> Mapping[str, "dm_env.specs.Array"]: # noqa + # We only look at agent 0 since world entries are the same for all agents + world_entries = _filter_global_state_from_dict(observation_spec[0], world=True) + if len(world_entries) != 1 and _WORLD_PREFIX + "RGB" not in world_entries: + raise 
ValueError( + f"Expected only one world entry named {_WORLD_PREFIX}RGB in observation_spec, but got {world_entries}" + ) + return _remove_world_prefix(world_entries) + + +def _remove_world_prefix(world_entries: Dict) -> Dict: + return {key[len(_WORLD_PREFIX) :]: value for key, value in world_entries.items()} + + +class MeltingpotWrapper(_EnvWrapper): + """Meltingpot environment wrapper. + + GitHub: https://github.com/google-deepmind/meltingpot + + Paper: https://arxiv.org/abs/2211.13746 + + Melting Pot assesses generalization to novel social situations involving both familiar and unfamiliar individuals, + and has been designed to test a broad range of social interactions such as: cooperation, competition, deception, + reciprocation, trust, stubbornness and so on. Melting Pot offers researchers a set of over 50 multi-agent + reinforcement learning substrates (multi-agent games) on which to train agents, and over 256 unique test scenarios + on which to evaluate these trained agents. + + Args: + env (``meltingpot.utils.substrates.substrate.Substrate``): the meltingpot substrate to wrap. + + Keyword Args: + max_steps (int, optional): Horizon of the task. Defaults to ``None`` (infinite horizon). + Each Meltingpot substrate can + be terminating or not. If ``max_steps`` is specified, + the scenario is also terminated (and the ``"terminated"`` flag is set) whenever this horizon is reached. + Unlike gym's ``TimeLimit`` transform or torchrl's :class:`~torchrl.envs.transforms.StepCounter`, + this argument will not set the ``"truncated"`` entry in the tensordict. + categorical_actions (bool, optional): if the environment actions are discrete, whether to transform + them to categorical or one-hot. Defaults to ``True``. + group_map (MarlGroupMapType or Dict[str, List[str]], optional): how to group agents in tensordicts for + input/output. By default, they will be all put + in one group named ``"agents"``. 
+ Otherwise, a group map can be specified or selected from some premade options. + See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info. + + Attributes: + group_map (Dict[str, List[str]]): how to group agents in tensordicts for + input/output. See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info. + agent_names (list of str): names of the agent in the environment + agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the environment + available_envs (List[str]): the list of the scenarios available to build. + + .. warning:: + Meltingpot returns a single ``done`` flag which does not distinguish between + when the env reached ``max_steps`` and termination. + If you deem the ``truncation`` signal necessary, set ``max_steps`` to + ``None`` and use a :class:`~torchrl.envs.transforms.StepCounter` transform. + + Examples: + >>> from meltingpot import substrate + >>> from torchrl.envs.libs.meltingpot import MeltingpotWrapper + >>> substrate_config = substrate.get_config("commons_harvest__open") + >>> mp_env = substrate.build_from_config( + ... substrate_config, roles=substrate_config.default_player_roles + ... 
) + >>> env_torchrl = MeltingpotWrapper(env=mp_env) + >>> print(env_torchrl.rollout(max_steps=5)) + TensorDict( + fields={ + RGB: Tensor(shape=torch.Size([5, 144, 192, 3]), device=cpu, dtype=torch.uint8, is_shared=False), + agents: TensorDict( + fields={ + action: Tensor(shape=torch.Size([5, 7]), device=cpu, dtype=torch.int64, is_shared=False), + observation: TensorDict( + fields={ + COLLECTIVE_REWARD: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + READY_TO_SHOOT: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + RGB: Tensor(shape=torch.Size([5, 7, 88, 88, 3]), device=cpu, dtype=torch.uint8, is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False), + done: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, is_shared=False), + next: TensorDict( + fields={ + RGB: Tensor(shape=torch.Size([5, 144, 192, 3]), device=cpu, dtype=torch.uint8, is_shared=False), + agents: TensorDict( + fields={ + observation: TensorDict( + fields={ + COLLECTIVE_REWARD: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + READY_TO_SHOOT: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + RGB: Tensor(shape=torch.Size([5, 7, 88, 88, 3]), device=cpu, dtype=torch.uint8, is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False), + reward: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False), + done: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, is_shared=False), + terminated: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, is_shared=False)}, + batch_size=torch.Size([5]), + device=cpu, + is_shared=False), + terminated: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, 
is_shared=False)}, + batch_size=torch.Size([5]), + device=cpu, + is_shared=False) + + """ + + git_url = "https://github.com/google-deepmind/meltingpot" + libname = "meltingpot" + + @property + def lib(self): + import meltingpot + + return meltingpot + + @_classproperty + def available_envs(cls): + if not _has_meltingpot: + return [] + return _get_envs() + + def __init__( + self, + env: "meltingpot.utils.substrates.substrate.Substrate" = None, # noqa + categorical_actions: bool = True, + group_map: MarlGroupMapType + | Dict[str, List[str]] = MarlGroupMapType.ALL_IN_ONE_GROUP, + max_steps: int = None, + **kwargs, + ): + if env is not None: + kwargs["env"] = env + self.group_map = group_map + self.categorical_actions = categorical_actions + self.max_steps = max_steps + self.num_cycles = 0 + super().__init__(**kwargs) + + def _build_env( + self, + env: "meltingpot.utils.substrates.substrate.Substrate", # noqa + ): + return env + + def _make_group_map(self): + if isinstance(self.group_map, MarlGroupMapType): + self.group_map = self.group_map.get_group_map(self.agent_names) + check_marl_grouping(self.group_map, self.agent_names) + + def _make_specs( + self, env: "meltingpot.utils.substrates.substrate.Substrate" # noqa + ) -> None: + mp_obs_spec = self._env.observation_spec() # List of dict of arrays + mp_obs_spec_no_world = _remove_world_observations_from_obs_spec( + mp_obs_spec + ) # List of dict of arrays + mp_global_state_spec = _global_state_spec_from_obs_spec( + mp_obs_spec + ) # Dict of arrays + mp_act_spec = self._env.action_spec() # List of discrete arrays + mp_rew_spec = self._env.reward_spec() # List of arrays + + torchrl_agent_obs_specs = [ + _dmcontrol_to_torchrl_spec_transform(agent_obs_spec) + for agent_obs_spec in mp_obs_spec_no_world + ] + torchrl_agent_act_specs = [ + _dmcontrol_to_torchrl_spec_transform( + agent_act_spec, categorical_discrete_encoding=self.categorical_actions + ) + for agent_act_spec in mp_act_spec + ] + torchrl_state_spec = 
_dmcontrol_to_torchrl_spec_transform(mp_global_state_spec) + torchrl_rew_spec = [ + _dmcontrol_to_torchrl_spec_transform(agent_rew_spec) + for agent_rew_spec in mp_rew_spec + ] + + # Create and check group map + _num_players = len(torchrl_rew_spec) + self.agent_names = [ + PLAYER_STR_FORMAT.format(index=index) for index in range(_num_players) + ] + self.agent_names_to_indices_map = { + agent_name: i for i, agent_name in enumerate(self.agent_names) + } + self._make_group_map() + + action_spec = CompositeSpec() + observation_spec = CompositeSpec() + reward_spec = CompositeSpec() + + for group in self.group_map.keys(): + ( + group_observation_spec, + group_action_spec, + group_reward_spec, + ) = self._make_group_specs( + group, + torchrl_agent_obs_specs, + torchrl_agent_act_specs, + torchrl_rew_spec, + ) + action_spec[group] = group_action_spec + observation_spec[group] = group_observation_spec + reward_spec[group] = group_reward_spec + + observation_spec.update(torchrl_state_spec) + self.done_spec = CompositeSpec( + { + "done": DiscreteTensorSpec( + n=2, shape=torch.Size((1,)), dtype=torch.bool + ), + }, + ) + self.action_spec = action_spec + self.observation_spec = observation_spec + self.reward_spec = reward_spec + + def _make_group_specs( + self, + group: str, + torchrl_agent_obs_specs: List[TensorSpec], + torchrl_agent_act_specs: List[TensorSpec], + torchrl_rew_spec: List[TensorSpec], + ): + # Agent specs + action_specs = [] + observation_specs = [] + reward_specs = [] + + for agent_name in self.group_map[group]: + agent_index = self.agent_names_to_indices_map[agent_name] + action_specs.append( + CompositeSpec( + { + "action": torchrl_agent_act_specs[ + agent_index + ] # shape = (n_actions_per_agent,) + }, + ) + ) + observation_specs.append( + CompositeSpec( + { + "observation": torchrl_agent_obs_specs[ + agent_index + ] # shape = (n_obs_per_agent,) + }, + ) + ) + reward_specs.append( + CompositeSpec({"reward": torchrl_rew_spec[agent_index]}) # shape = (1,) + ) + 
+ # Create multi-agent specs + group_action_spec = torch.stack( + action_specs, dim=0 + ) # shape = (n_agents_in_group, n_actions_per_agent) + group_observation_spec = torch.stack( + observation_specs, dim=0 + ) # shape = (n_agents_in_group, n_obs_per_agent) + group_reward_spec = torch.stack( + reward_specs, dim=0 + ) # shape = (n_agents_in_group, 1) + return ( + group_observation_spec, + group_action_spec, + group_reward_spec, + ) + + def _check_kwargs(self, kwargs: Dict): + meltingpot = self.lib + + if "env" not in kwargs: + raise TypeError("Could not find environment key 'env' in kwargs.") + env = kwargs["env"] + if not isinstance(env, meltingpot.utils.substrates.substrate.Substrate): + raise TypeError( + "env is not of type 'meltingpot.utils.substrates.substrate.Substrate'." + ) + + def _init_env(self): + # Caching + self.cached_full_done_spec_zero = self.full_done_spec.zero() + + def _set_seed(self, seed: int | None): + raise NotImplementedError( + "It is currently unclear how to set a seed in Meltingpot" + "see https://github.com/google-deepmind/meltingpot/issues/129 to track the issue." 
+ ) + + def _reset( + self, tensordict: TensorDictBase | None = None, **kwargs + ) -> TensorDictBase: + self.num_cycles = 0 + timestep = self._env.reset() + obs = timestep.observation + + td = self.cached_full_done_spec_zero.clone() + + for group, agent_names in self.group_map.items(): + agent_tds = [] + for index_in_group, agent_name in enumerate(agent_names): + global_index = self.agent_names_to_indices_map[agent_name] + agent_obs = self.observation_spec[group, "observation"][ + index_in_group + ].encode(_filter_global_state_from_dict(obs[global_index], world=False)) + agent_td = TensorDict( + source={ + "observation": agent_obs, + }, + batch_size=self.batch_size, + device=self.device, + ) + + agent_tds.append(agent_td) + agent_tds = torch.stack(agent_tds, dim=0) + td.set(group, agent_tds) + + # Global state + td.update( + _remove_world_prefix(_filter_global_state_from_dict(obs[0], world=True)) + ) + + tensordict_out = TensorDict( + source=td, + batch_size=self.batch_size, + device=self.device, + ) + return tensordict_out + + def _step( + self, + tensordict: TensorDictBase, + ) -> TensorDictBase: + action_dict = {} + for group, agents in self.group_map.items(): + group_action = tensordict.get((group, "action")) + group_action_np = self.full_action_spec[group, "action"].to_numpy( + group_action + ) + for index, agent in enumerate(agents): + action_dict[agent] = group_action_np[index] + + actions = [action_dict[agent] for agent in self.agent_names] + timestep = self._env.step(actions) + self.num_cycles += 1 + + rewards = timestep.reward + done = timestep.last() or ( + (self.num_cycles >= self.max_steps) if self.max_steps is not None else False + ) + obs = timestep.observation + + td = TensorDict( + { + "done": self.full_done_spec["done"].encode(done), + "terminated": self.full_done_spec["terminated"].encode(done), + }, + batch_size=self.batch_size, + ) + # Global state + td.update( + _remove_world_prefix(_filter_global_state_from_dict(obs[0], world=True)) + ) + + 
        # Per-group observations and rewards for the post-step tensordict.
        for group, agent_names in self.group_map.items():
            agent_tds = []
            for index_in_group, agent_name in enumerate(agent_names):
                global_index = self.agent_names_to_indices_map[agent_name]
                agent_obs = self.observation_spec[group, "observation"][
                    index_in_group
                ].encode(_filter_global_state_from_dict(obs[global_index], world=False))
                agent_reward = self.full_reward_spec[group, "reward"][
                    index_in_group
                ].encode(rewards[global_index])
                agent_td = TensorDict(
                    source={
                        "observation": agent_obs,
                        "reward": agent_reward,
                    },
                    batch_size=self.batch_size,
                    device=self.device,
                )

                agent_tds.append(agent_td)
            agent_tds = torch.stack(agent_tds, dim=0)
            td.set(group, agent_tds)

        return td

    def get_rgb_image(self) -> torch.Tensor:
        """Returns an RGB image of the environment.

        Returns:
            a ``torch.Tensor`` with the substrate's ``WORLD.RGB`` observation,
            in HWC format (height, width, channels) — e.g. ``(144, 192, 3)``
            for ``commons_harvest__open`` as shown in the class docstring example.

        """
        return torch.from_numpy(self._env.observation()[0][_WORLD_PREFIX + "RGB"])


class MeltingpotEnv(MeltingpotWrapper):
    """Meltingpot environment wrapper.

    GitHub: https://github.com/google-deepmind/meltingpot

    Paper: https://arxiv.org/abs/2211.13746

    Melting Pot assesses generalization to novel social situations involving both familiar and unfamiliar individuals,
    and has been designed to test a broad range of social interactions such as: cooperation, competition, deception,
    reciprocation, trust, stubbornness and so on. Melting Pot offers researchers a set of over 50 multi-agent
    reinforcement learning substrates (multi-agent games) on which to train agents, and over 256 unique test scenarios
    on which to evaluate these trained agents.

    Args:
        substrate (str or ml_collections.config_dict.ConfigDict): the meltingpot substrate to build.
            Can be a string from :attr:`~.available_envs` or a ConfigDict for the substrate

    Keyword Args:
        max_steps (int, optional): Horizon of the task. Defaults to ``None`` (infinite horizon).
            Each Meltingpot substrate can
            be terminating or not.
            If ``max_steps`` is specified,
            the scenario is also terminated (and the ``"terminated"`` flag is set) whenever this horizon is reached.
            Unlike gym's ``TimeLimit`` transform or torchrl's :class:`~torchrl.envs.transforms.StepCounter`,
            this argument will not set the ``"truncated"`` entry in the tensordict.
        categorical_actions (bool, optional): if the environment actions are discrete, whether to transform
            them to categorical or one-hot. Defaults to ``True``.
        group_map (MarlGroupMapType or Dict[str, List[str]], optional): how to group agents in tensordicts for
            input/output. By default, they will be all put
            in one group named ``"agents"``.
            Otherwise, a group map can be specified or selected from some premade options.
            See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.


    Attributes:
        group_map (Dict[str, List[str]]): how to group agents in tensordicts for
            input/output. See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
        agent_names (list of str): names of the agent in the environment
        agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the environment
        available_envs (List[str]): the list of the scenarios available to build.

    .. warning::
        Meltingpot returns a single ``done`` flag which does not distinguish between
        when the env reached ``max_steps`` and termination.
        If you deem the ``truncation`` signal necessary, set ``max_steps`` to
        ``None`` and use a :class:`~torchrl.envs.transforms.StepCounter` transform.
+ + Examples: + >>> from torchrl.envs.libs.meltingpot import MeltingpotEnv + >>> env_torchrl = MeltingpotEnv("commons_harvest__open") + >>> print(env_torchrl.rollout(max_steps=5)) + TensorDict( + fields={ + RGB: Tensor(shape=torch.Size([5, 144, 192, 3]), device=cpu, dtype=torch.uint8, is_shared=False), + agents: TensorDict( + fields={ + action: Tensor(shape=torch.Size([5, 7]), device=cpu, dtype=torch.int64, is_shared=False), + observation: TensorDict( + fields={ + COLLECTIVE_REWARD: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + READY_TO_SHOOT: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + RGB: Tensor(shape=torch.Size([5, 7, 88, 88, 3]), device=cpu, dtype=torch.uint8, is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False), + done: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, is_shared=False), + next: TensorDict( + fields={ + RGB: Tensor(shape=torch.Size([5, 144, 192, 3]), device=cpu, dtype=torch.uint8, is_shared=False), + agents: TensorDict( + fields={ + observation: TensorDict( + fields={ + COLLECTIVE_REWARD: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + READY_TO_SHOOT: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False), + RGB: Tensor(shape=torch.Size([5, 7, 88, 88, 3]), device=cpu, dtype=torch.uint8, is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False), + reward: Tensor(shape=torch.Size([5, 7, 1]), device=cpu, dtype=torch.float64, is_shared=False)}, + batch_size=torch.Size([5, 7]), + device=cpu, + is_shared=False), + done: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, is_shared=False), + terminated: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, is_shared=False)}, + batch_size=torch.Size([5]), + device=cpu, + 
                is_shared=False),
                terminated: Tensor(shape=torch.Size([5, 1]), device=cpu, dtype=torch.bool, is_shared=False)},
            batch_size=torch.Size([5]),
            device=cpu,
            is_shared=False)


    """

    def __init__(
        self,
        substrate: str | "ml_collections.config_dict.ConfigDict",  # noqa
        *,
        max_steps: int | None = None,
        categorical_actions: bool = True,
        group_map: MarlGroupMapType
        | Dict[str, List[str]] = MarlGroupMapType.ALL_IN_ONE_GROUP,
        **kwargs,
    ):
        # Fail fast with a pointer to the upstream repo when the meltingpot
        # package is missing; everything below requires it.
        if not _has_meltingpot:
            raise ImportError(
                f"meltingpot python package was not found. Please install this dependency. "
                f"More info: {self.git_url}."
            )
        super().__init__(
            substrate=substrate,
            max_steps=max_steps,
            categorical_actions=categorical_actions,
            group_map=group_map,
            **kwargs,
        )

    def _check_kwargs(self, kwargs: Dict):
        # Unlike the parent wrapper, this env is built from a substrate name
        # or config rather than a pre-built Substrate object.
        if "substrate" not in kwargs:
            raise TypeError("Could not find environment key 'substrate' in kwargs.")

    def _build_env(
        self,
        substrate: str | "ml_collections.config_dict.ConfigDict",  # noqa
    ) -> "meltingpot.utils.substrates.substrate.Substrate":  # noqa
        from meltingpot import substrate as mp_substrate

        # A string is resolved to its packaged config; a ConfigDict is used
        # as-is.
        if isinstance(substrate, str):
            substrate_config = mp_substrate.get_config(substrate)
        else:
            substrate_config = substrate

        return super()._build_env(
            env=mp_substrate.build_from_config(
                substrate_config, roles=substrate_config.default_player_roles
            )
        )
diff --git a/torchrl/envs/libs/vmas.py b/torchrl/envs/libs/vmas.py
index 51d3970fded..cb1d6294a2d 100644
--- a/torchrl/envs/libs/vmas.py
+++ b/torchrl/envs/libs/vmas.py
@@ -141,7 +141,7 @@ class VmasWrapper(_EnvWrapper):
         group_map (Dict[str, List[str]]): how to group agents in tensordicts for
             input/output. See :class:`~torchrl.envs.utils.MarlGroupMapType` for more info.
agent_names (list of str): names of the agent in the environment - agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the enviornment + agent_names_to_indices_map (Dict[str, int]): dictionary mapping agent names to their index in the environment unbatched_action_spec (TensorSpec): version of the spec without the vectorized dimension unbatched_observation_spec (TensorSpec): version of the spec without the vectorized dimension unbatched_reward_spec (TensorSpec): version of the spec without the vectorized dimension