diff --git a/.gitignore b/.gitignore
index aed00b64..9971006e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -147,3 +147,18 @@
 checklink/cookies.txt
 # .gitconfig is now autogenerated
 .gitconfig
+
+# Additions for satip
+*.tif
+*.xml
+*.grb
+log.txt
+rss.zarr
+cloud.zarr
+hrv.zarr
+hrv_RSS.png
+hrv_UK.png
+rss_RSS.png
+rss_UK.png
+cloud_mask_RSS.png
+cloud_mask_UK.png
diff --git a/README.md b/README.md
index c916ee65..e3d6af09 100644
--- a/README.md
+++ b/README.md
@@ -36,3 +36,7 @@ If you plan to work on the development of Satip then also consider installing the
 conda install pytest flake8 jedi mypy black pre-commit
 pre-commit install
 ```
+
+## Operation
+
+* `scripts/convert_native_to_zarr.py` converts EUMETSAT `.nat` files to Zarr datasets, using very mild lossy [JPEG-XL](https://en.wikipedia.org/wiki/JPEG_XL) compression. (JPEG-XL is the "new kid on the block" of image compression algorithms.) JPEG-XL makes the files about a quarter of the size of the equivalent `bz2`-compressed files, whilst the images are visually indistinguishable. JPEG-XL cannot represent NaNs, and it only understands float32 values in the range `[0, 1]`, so NaNs are encoded as the value `0.025`, and all "real" values are in the range `[0.075, 1]`. We leave a gap between "NaNs" and "real values" because there is very slight "ringing" around areas of constant value (see [this comment for more details](https://github.com/openclimatefix/Satip/issues/67#issuecomment-1036456502)). Use `satip.jpeg_xl_float_with_nans.JpegXlFloatWithNaNs` to decode the satellite data; this class will reconstruct the NaNs and rescale the data to the range `[0, 1]`.
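
A usage sketch to make the README's decoding instructions concrete (illustrative, not part of the patch; the store path and variable name are the ones `scripts/generate_test_plots.py` happens to use):

```python
import xarray as xr

# Importing the module registers the "imagecodecs_jpegxl_float_with_nans"
# codec with numcodecs, so Zarr can find it when decoding chunks.
import satip.jpeg_xl_float_with_nans  # noqa: F401

# Decoding reconstructs the NaNs and rescales real values back to [0, 1].
dataset = xr.open_zarr("rss.zarr", consolidated=True)
ir_016 = dataset["data"].isel(time=0).sel(variable="IR_016")
```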
diff --git a/environment.yml b/environment.yml
index 2c4f0a85..af09faa4 100644
--- a/environment.yml
+++ b/environment.yml
@@ -3,6 +3,7 @@ channels:
   - conda-forge
 dependencies:
   - python>=3.9
+  - pip
   - numpy>=1.19.4
   - pandas>=1.1.4
   - requests
@@ -14,3 +15,7 @@ dependencies:
   - tqdm
   - matplotlib
   - cartopy
+  - pip:
+      # The conda version of imagecodecs doesn't currently include libjxl,
+      # which is required for JPEG-XL compression. So we must install using pip:
+      - imagecodecs
diff --git a/requirements.txt b/requirements.txt
index 68a6d3df..bf597c3b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,3 +8,4 @@ satpy[seviri_l2_grib]>=0.30.1
 tqdm
 xarray
 zarr
+imagecodecs
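
A quick sanity check (a sketch, not part of the patch) that the installed `imagecodecs` build really ships JPEG-XL support:

```python
import numpy as np

try:
    # A build without libjxl may fail at import time or only when the codec
    # is first used, so exercise a tiny encode as well as the import.
    from imagecodecs import jpegxl_encode

    jpegxl_encode(np.zeros((16, 16), dtype=np.uint8))
except Exception as error:
    raise RuntimeError(
        "imagecodecs appears to lack JPEG-XL (libjxl) support; "
        "install it from pip rather than conda"
    ) from error
```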
diff --git a/satip/intermediate.py b/satip/intermediate.py
index 97cda10b..f2a44867 100644
--- a/satip/intermediate.py
+++ b/satip/intermediate.py
@@ -89,22 +89,20 @@ def split_per_month(
         save_dataset_to_zarr(
             dataset,
             zarr_path=month_zarr_path,
+            compressor_name="jpeg-xl",
             zarr_mode="w",
             x_size_per_chunk=spatial_chunk_size,
             y_size_per_chunk=spatial_chunk_size,
             timesteps_per_chunk=temporal_chunk_size,
-            channel_chunk_size=11,
-            dtype="int16",
         )
         save_dataset_to_zarr(
             hrv_dataset,
             zarr_path=hrv_month_zarr_path,
+            compressor_name="jpeg-xl",
             zarr_mode="w",
             x_size_per_chunk=spatial_chunk_size,
             y_size_per_chunk=spatial_chunk_size,
             timesteps_per_chunk=temporal_chunk_size,
-            channel_chunk_size=1,
-            dtype="int16",
         )
     print(dirs)
     print(zarrs)
@@ -182,11 +180,10 @@ def cloudmask_split_per_month(
         save_dataset_to_zarr(
             dataset,
             zarr_path=month_zarr_path,
+            compressor_name="bz2",
             x_size_per_chunk=spatial_chunk_size,
             y_size_per_chunk=spatial_chunk_size,
             timesteps_per_chunk=temporal_chunk_size,
-            channel_chunk_size=1,
-            dtype="int8",
             zarr_mode="w",
         )
     print(dirs)
@@ -259,11 +256,10 @@ def create_or_update_zarr_with_cloud_mask_files(
             save_dataset_to_zarr(
                 dataset,
                 zarr_path=zarr_path,
+                compressor_name="bz2",
                 x_size_per_chunk=spatial_chunk_size,
                 y_size_per_chunk=spatial_chunk_size,
                 timesteps_per_chunk=temporal_chunk_size,
-                channel_chunk_size=1,
-                dtype="int8",
             )
         except Exception as e:
             print(f"Failed with: {e}")
@@ -324,18 +320,18 @@ def create_or_update_zarr_with_native_files(
             save_dataset_to_zarr(
                 dataset,
                 zarr_path=zarr_path,
+                compressor_name="jpeg-xl",
                 x_size_per_chunk=spatial_chunk_size,
                 y_size_per_chunk=spatial_chunk_size,
                 timesteps_per_chunk=temporal_chunk_size,
-                channel_chunk_size=11,
             )
             save_dataset_to_zarr(
                 hrv_dataset,
                 zarr_path=hrv_zarr_path,
+                compressor_name="jpeg-xl",
                 x_size_per_chunk=spatial_chunk_size,
                 y_size_per_chunk=spatial_chunk_size,
                 timesteps_per_chunk=temporal_chunk_size,
-                channel_chunk_size=1,
             )
         except Exception as e:
             print(f"Failed with: {e}")
diff --git a/satip/jpeg_xl_float_with_nans.py b/satip/jpeg_xl_float_with_nans.py
new file mode 100644
index 00000000..1b812de4
--- /dev/null
+++ b/satip/jpeg_xl_float_with_nans.py
@@ -0,0 +1,182 @@
+"""Thin wrapper around imagecodecs.JpegXl.
+"""
+from typing import Optional
+
+import numpy as np
+from imagecodecs.numcodecs import JpegXl
+from numcodecs.registry import register_codec
+
+# See the docs in `encode_nans` for an explanation of what these consts do.
+LOWER_BOUND_FOR_REAL_PIXELS = 0.075
+NAN_THRESHOLD = 0.05
+NAN_VALUE = 0.025
+
+
+class JpegXlFloatWithNaNs(JpegXl):
+    """Thin wrapper around imagecodecs.JpegXl for floats with NaNs."""
+
+    codec_id = "imagecodecs_jpegxl_float_with_nans"
+
+    def __init__(
+        self,
+        lossless: Optional[bool] = None,
+        distance: Optional[float] = None,
+        level: Optional[int] = None,
+        decodingspeed: Optional[float] = None,
+        *args,
+        **kwargs,
+    ):
+        """Initialise.
+
+        This __init__ function is a simple hack to make the JpegXl compressor in the currently
+        released version of imagecodecs (version 2021.11.20) look like the version in development.
+        (We can't simply use the version in development because the imagecodecs author does not
+        develop on GitHub. The imagecodecs author just uses GitHub as one of several mechanisms
+        to release imagecodecs.)
+
+        See https://github.com/cgohlke/imagecodecs/issues/31#issuecomment-1026179413
+
+        Args:
+            lossless: Set to True to enable lossless compression.
+            distance: Lowest settings are 0.00 or 0.01. If 0.0 then also set lossless to True.
+                To quote the cjxl man page:
+                The preferred way to specify quality. It is specified in multiples of a
+                just-noticeable difference. That is, -d 0 is mathematically lossless,
+                -d 1 should be visually lossless, and higher distances yield denser and
+                denser files with lower and lower fidelity.
+            effort: To quote the cjxl man page:
+                Controls the amount of effort that goes into producing an "optimal" file in
+                terms of quality/size. That is to say, all other parameters being equal,
+                a higher effort should yield a file that is at least as dense and possibly
+                denser, and with at least as high and possibly higher quality.
+                1 is fastest. 9 is slowest.
+            level: DON'T SET THIS WITH THIS JpegXlFuture wrapper!
+                In imagecodecs version 2021.11.20, level is mapped (incorrectly) to the decoding
+                speed tier. Minimum is 0 (highest quality), and maximum is 4 (lowest quality).
+                Default is 0.
+            decodingspeed: DON'T SET THIS WITH THIS JpegXlFuture wrapper!
+        """
+        assert decodingspeed is None
+        if lossless is not None:
+            if lossless:
+                assert (
+                    level is None
+                )  # level must be None to enable lossless in imagecodecs 2021.11.20.
+                assert distance is None or distance == 0
+            else:
+                # Enable lossy compression.
+                # level must be set to 0, 1, 2, 3, or 4 to enable lossy
+                # compression in imagecodecs 2021.11.20.
+                level = 0
+        super().__init__(level=level, distance=distance, *args, **kwargs)
+
+    def encode(self, buf: np.ndarray) -> bytes:
+        """Encode `buf` with JPEG-XL.
+
+        Under the hood, NaNs will be encoded as NAN_VALUE, and all "real"
+        values will be in the range [LOWER_BOUND_FOR_REAL_PIXELS, 1]. But
+        this is all taken care of by encode and decode.
+
+        Args:
+            buf: The input array to compress as JPEG-XL.
+                Expects buf to be of shape (n_timesteps, y, x, n_channels).
+                n_timesteps and n_channels must == 1.
+                buf can be float16 or float32.
+                All values must be in the range [0, 1].
+                Use as much of the range [0, 1] as possible. 0 is black and 1 is white.
+                If all the information is squished into, say, the range [0, 0.1] then
+                JPEG-XL will interpret the image as very dark, and will aggressively
+                compress the data because JPEG-XL assumes that human viewers do not
+                notice if detail is lost in the shadows.
+        """
+        assert buf.dtype in (
+            np.float16,
+            np.float32,
+        ), f"dtype must be float16 or float32, not {buf.dtype}"
+
+        # Even if the original DataArray doesn't have NaNs,
+        # when Zarr saves chunks at the edges of the image, the image data for that chunk
+        # might be smaller than the chunk. In that case, `buf` will be the same shape
+        # as the nominal chunk size, but will include NaNs. Those NaNs must be encoded
+        # with a floating point value in the range [0, 1] because JPEG-XL cannot
+        # handle NaN values.
+        buf = encode_nans(buf)
+
+        # Sanity checks.
+        assert np.all(np.isfinite(buf))
+        assert np.amin(buf) >= 0
+        assert np.amax(buf) <= 1
+
+        # In the future, JPEG-XL will be able to encode multiple images per file.
+        # But, for now, it can only compress one image at a time. See:
+        # https://github.com/cgohlke/imagecodecs/issues/32
+        n_timesteps = buf.shape[0]
+        n_channels = buf.shape[-1]
+        assert n_timesteps == 1
+        assert n_channels == 1
+
+        return super().encode(buf)
+
+    def decode(self, buf, out=None) -> np.ndarray:
+        """Decode JPEG-XL `buf`.
+
+        Reconstruct the NaNs encoded by encode_nans.
+        """
+        out = super().decode(buf, out)
+        out = decode_nans(out)
+        return out
+
+
+def encode_nans(data: np.ndarray) -> np.ndarray:
+    """Encode NaNs as the value NAN_VALUE.
+
+    Encode all other values in the range [LOWER_BOUND_FOR_REAL_PIXELS, 1].
+
+    JPEG-XL does not understand "NaN" values. JPEG-XL only understands floating
+    point values in the range [0, 1]. So we must encode NaN values
+    as real values in the range [0, 1].
+
+    After lossy JPEG-XL compression, there is slight "ringing" around the edges
+    of regions filled with a constant value. In experiments, when NAN_VALUE = 0.025,
+    it appears that the values at the inner edges of a "NaN region" vary in the range
+    [0.0227, 0.0280]. But, to be safe, we use a nice wide margin: We don't set
+    the value of "NaNs" to be 0.00 because the ringing would cause the values
+    to drop below zero, which is illegal for JPEG-XL images.
+
+    After decompression, reconstruct regions of NaNs using "image < NAN_THRESHOLD"
+    to find NaNs.
+
+    See this comment for more info:
+    https://github.com/openclimatefix/Satip/issues/67#issuecomment-1036456502
+
+    Args:
+        data: The input data. All values must already
+            be in the range [0, 1]. The original data is modified in place.
+
+    Returns:
+        The returned array. "Real" values will be shifted to
+        the range [LOWER_BOUND_FOR_REAL_PIXELS, 1].
+        NaNs will be encoded as NAN_VALUE.
+    """
+    assert issubclass(
+        data.dtype.type, np.floating
+    ), f"dataarray.dtype must be floating point not {data.dtype}!"
+
+    # Shift all the "real" values up to the range [0.075, 1].
+    data *= 1 - LOWER_BOUND_FOR_REAL_PIXELS
+    data += LOWER_BOUND_FOR_REAL_PIXELS
+    data = np.nan_to_num(data, nan=NAN_VALUE)
+    return data
+
+
+def decode_nans(data: np.ndarray) -> np.ndarray:
+    """Reconstruct the NaNs encoded by encode_nans."""
+    assert np.all(np.isfinite(data))
+    assert issubclass(data.dtype.type, np.floating)
+    data[data <= NAN_THRESHOLD] = np.NaN
+    data -= LOWER_BOUND_FOR_REAL_PIXELS
+    data /= 1 - LOWER_BOUND_FOR_REAL_PIXELS
+    return data
+
+
+register_codec(JpegXlFloatWithNaNs)
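
To make the constants concrete, a small round-trip sketch (illustrative, not part of the patch). Both helpers modify their input in place, hence the `.copy()` calls:

```python
import numpy as np

from satip.jpeg_xl_float_with_nans import decode_nans, encode_nans

original = np.array([[0.0, 0.5, 1.0, np.nan]], dtype=np.float32)

encoded = encode_nans(original.copy())
# "Real" values now lie in [0.075, 1] and the NaN has become NAN_VALUE:
# array([[0.075 , 0.5375, 1.    , 0.025 ]], dtype=float32)

decoded = decode_nans(encoded.copy())
# Values at or below NAN_THRESHOLD (0.05) become NaN again; everything else
# is rescaled back to [0, 1]. assert_allclose treats matching NaNs as equal.
np.testing.assert_allclose(decoded, original, atol=1e-6)
```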
diff --git a/satip/compression.py b/satip/scale_to_zero_to_one.py
similarity index 63%
rename from satip/compression.py
rename to satip/scale_to_zero_to_one.py
index 46144e9c..ba1727b7 100644
--- a/satip/compression.py
+++ b/satip/scale_to_zero_to_one.py
@@ -1,15 +1,11 @@
-"""Module to compress data and check its sanity.
-
-Class which handles compression of a XArray Dataset by forcing it to use
-10-bit-int and of a cloud mask to clip values. Data completeness and serialization
-are also checked.
+"""Module to scale values to the range [0, 1] and check if the data is sane.
 
 Usage Example:
-    from satip.compression import Compressor
-    compressor = Compressor(bits_per_pixel, mins_array, maxs_array, variable_order)
-    compressor.fit(dataset, dims)  # Optional, if you want to set new limits based on `dataset`
-    data_array = compressor.compress(data_array)
-    mask = compressor.compress_maks(mask)
+    from satip.scale_to_zero_to_one import ScaleToZeroToOne
+    scaler = ScaleToZeroToOne(mins_array, maxs_array, variable_order)
+    scaler.fit(dataset, dims)  # Optional, if you want to set new limits based on `dataset`
+    data_array = scaler.rescale(data_array)
+    mask = scaler.compress_mask(mask)
 """
 
 from typing import Iterable, Union
@@ -20,12 +16,11 @@
 from satip.serialize import serialize_attrs
 
 
-class Compressor:
-    """Compressor class which handles compression of dataarrays and masks."""
+class ScaleToZeroToOne:
+    """ScaleToZeroToOne: rescales dataarrays so all values lie in the range [0, 1]."""
 
     def __init__(
         self,
-        bits_per_pixel=10,
         mins=np.array(
             [
                 -1.2278595,
@@ -73,16 +68,13 @@ def __init__(
                 "WV_073",
             ],
         ):
-        """Initial setting for Compressor class.
+        """Initial setting for ScaleToZeroToOne class.
 
         Args:
-            bits_per_pixel: integer bit-length into which XArray Datasets will be compressed.
            mins: Initial setting of min-values.
            maxs: Intial setting of max-values.
            variable_order: Order in which variables will appear in the compressed XArray Dataset.
        """
-
-        self.bits_per_pixel = bits_per_pixel
        self.mins = mins
        self.maxs = maxs
        self.variable_order = variable_order
@@ -105,15 +97,18 @@ def fit(self, dataset: xr.Dataset, dims: Iterable = ("time", "y", "x")) -> None:
         print(f"The maxs are: {self.maxs}")
         print(f"The variable order is: {self.variable_order}")
 
-    def compress(self, dataarray: xr.DataArray) -> Union[xr.DataArray, None]:
+    def rescale(self, dataarray: xr.DataArray) -> Union[xr.DataArray, None]:
         """
-        Compress Xarray DataArray to use 10-bit integers
+        Rescale Xarray DataArray so all values lie in the range [0, 1].
+
+        Warning: The original `dataarray` will be modified in-place.
 
         Args:
-            dataarray: DataArray to compress
+            dataarray: DataArray to rescale.
 
         Returns:
-            The compressed DataArray
+            The DataArray rescaled to [0, 1]. NaNs in the original `dataarray` will still
+            be present in the returned dataarray. The returned DataArray will be float32.
         """
         for attr in ["mins", "maxs"]:
             assert (
@@ -124,34 +119,37 @@ def compress(self, dataarray: xr.DataArray) -> Union[xr.DataArray, None]:
             "time", "y_geostationary", "x_geostationary", "variable"
         )
 
-        upper_bound = (2 ** self.bits_per_pixel) - 1
-        new_max = self.maxs - self.mins
-
+        range = self.maxs - self.mins
         dataarray -= self.mins
-        dataarray /= new_max
-        dataarray *= upper_bound
-        dataarray = dataarray.round().clip(min=0, max=upper_bound).astype(np.int16)
+        dataarray /= range
+        dataarray = dataarray.clip(min=0, max=1)
+        dataarray = dataarray.astype(np.float32)
         dataarray.attrs = serialize_attrs(dataarray.attrs)  # Must be serializable
-
         return dataarray
 
     def compress_mask(self, dataarray: xr.DataArray) -> Union[xr.DataArray, None]:
-        """
-        Compresses Cloud masks DataArrays
+        """Compresses Cloud masks DataArrays. See compress_mask docstring."""
+        dataarray = dataarray.reindex({"variable": self.variable_order})
+        return compress_mask(dataarray)
 
-        Args:
-            dataarray: DataArray to compress
 
-        Returns:
-            The compressed DataArray
-        """
-        dataarray = dataarray.reindex({"variable": self.variable_order}).transpose(
-            "time", "y_geostationary", "x_geostationary", "variable"
-        )
-        dataarray = dataarray.round().clip(min=0, max=3).astype(np.int16)
-        dataarray.attrs = serialize_attrs(dataarray.attrs)  # Must be serializable
+def compress_mask(dataarray: xr.DataArray) -> xr.DataArray:
+    """
+    Compresses Cloud masks DataArrays.
 
-        return dataarray
+    Args:
+        dataarray: DataArray to compress
+
+    Returns:
+        The compressed DataArray. The returned array will be an int8 array,
+        with NaNs represented as -1.
+    """
+    dataarray = dataarray.transpose("time", "y_geostationary", "x_geostationary", "variable")
+    dataarray = dataarray.round().clip(min=0, max=3)
+    dataarray = dataarray.fillna(-1)
+    dataarray = dataarray.astype(np.int8)
+    dataarray.attrs = serialize_attrs(dataarray.attrs)  # Must be serializable
+    return dataarray
 
 
 def is_dataset_clean(dataarray: xr.DataArray) -> bool:
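
A brief calling-pattern sketch (hypothetical toy data; the HRV min/max are the values `satip/utils.py` passes):

```python
import numpy as np
import xarray as xr

from satip.scale_to_zero_to_one import ScaleToZeroToOne

scaler = ScaleToZeroToOne(
    mins=np.array([-1.2278595]),
    maxs=np.array([103.90016]),
    variable_order=["HRV"],
)

# A toy DataArray standing in for real HRV imagery.
dataarray = xr.DataArray(
    np.random.uniform(-1.2278595, 103.90016, size=(1, 4, 4, 1)).astype(np.float32),
    dims=("time", "y_geostationary", "x_geostationary", "variable"),
    coords={"variable": ["HRV"]},
)
dataarray = scaler.rescale(dataarray)  # float32, everything in [0, 1], NaNs kept.
```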
diff --git a/satip/utils.py b/satip/utils.py
index 8bc522a7..c8cf70ea 100644
--- a/satip/utils.py
+++ b/satip/utils.py
@@ -22,9 +22,9 @@
 import xarray as xr
 from satpy import Scene
 
-from satip.compression import Compressor, is_dataset_clean
 from satip.geospatial import GEOGRAPHIC_BOUNDS, lat_lon_to_osgb
-from satip.serialize import serialize_attrs
+from satip.jpeg_xl_float_with_nans import JpegXlFloatWithNaNs
+from satip.scale_to_zero_to_one import ScaleToZeroToOne, compress_mask, is_dataset_clean
 
 warnings.filterwarnings("ignore", message="divide by zero encountered in true_divide")
 warnings.filterwarnings("ignore", message="invalid value encountered in sin")
@@ -90,10 +90,10 @@ def load_native_to_dataset(
     Returns:
         Returns Xarray DataArray if script worked, else returns None
     """
-    hrv_compressor = Compressor(
+    hrv_scaler = ScaleToZeroToOne(
         variable_order=["HRV"], maxs=np.array([103.90016]), mins=np.array([-1.2278595])
     )
-    compressor = Compressor(
+    scaler = ScaleToZeroToOne(
         mins=np.array(
             [
                 -2.5118103,
@@ -184,8 +184,8 @@ def load_native_to_dataset(
     os.remove(decompressed_filename)
 
     # Compress and return
-    dataarray = compressor.compress(dataarray)
-    hrv_dataarray = hrv_compressor.compress(hrv_dataarray)
+    dataarray = scaler.rescale(dataarray)
+    hrv_dataarray = hrv_scaler.rescale(hrv_dataarray)
 
     return dataarray, hrv_dataarray
@@ -214,19 +214,12 @@ def load_cloudmask_to_dataset(
         ]
     )
     try:
-        # Selected bounds empirically for have no NaN values from off disk image,
+        # Selected bounds empirically that have no NaN values from off disk image,
         # and are covering the UK + a bit
         dataarray: xr.DataArray = convert_scene_to_dataarray(
             scene, band="cloud_mask", area=area, calculate_osgb=calculate_osgb
         )
-
-        # Compress and return
-        dataarray = dataarray.transpose("time", "y_geostationary", "x_geostationary", "variable")
-        dataarray = dataarray.round().clip(min=0, max=3).astype(np.int8)
-        dataarray.attrs = serialize_attrs(dataarray.attrs)
-        # Convert 3's to NaNs as they should be No Data/Space
-        dataarray = dataarray.where(dataarray["variable"] != 3)
-        return dataarray
+        return compress_mask(dataarray)
     except Exception:
         return None
@@ -304,12 +297,12 @@ def convert_scene_to_dataarray(
 def save_dataset_to_zarr(
     dataarray: xr.DataArray,
     zarr_path: str,
+    compressor_name: str,
     zarr_mode: str = "a",
     timesteps_per_chunk: int = 1,
     y_size_per_chunk: int = 256,
     x_size_per_chunk: int = 256,
-    channel_chunk_size: int = 12,
-    dtype="int16",
+    channel_chunk_size: int = 1,
 ) -> None:
     """
     Save an Xarray DataArray into a Zarr file
@@ -317,12 +310,14 @@ def save_dataset_to_zarr(
     Args:
         dataarray: DataArray to save
         zarr_path: Filename of the Zarr dataset
+        compressor_name: The name of the compression algorithm to use. Must be 'bz2' or 'jpeg-xl'.
         zarr_mode: Mode to write to the filename, either 'w' for write, or 'a' to append
         timesteps_per_chunk: Timesteps per Zarr chunk
         y_size_per_chunk: Y pixels per Zarr chunk
         x_size_per_chunk: X pixels per Zarr chunk
-        channel_chunk_size: Chunk size for the Dask arrays
-        dtype: Data type of the Xarray DataArray generated
+        channel_chunk_size: Chunk size for the Dask arrays. Must be 1 whilst using JPEG-XL
+            (at least until imagecodecs implements decompressing JPEG-XL files with multiple
+            images per file).
     """
 
     dataarray = dataarray.transpose("time", "y_geostationary", "x_geostationary", "variable")
@@ -334,19 +329,23 @@ def save_dataset_to_zarr(
         channel_chunk_size,
     )
     dataarray = dataarray.chunk(chunks)
-    dataarray = dataarray.fillna(-1)  # Fill NaN with -1, even if none should exist
     if not is_dataset_clean(dataarray):
         # One last check again just incase chunking causes any issues
         print("Failing clean check after chunking")
         return
-    dataarray = xr.Dataset({"stacked_eumetsat_data": dataarray}).astype(dtype)
+
+    compression_algos = {
+        "bz2": numcodecs.get_codec(dict(id="bz2", level=5)),
+        "jpeg-xl": JpegXlFloatWithNaNs(lossless=False, distance=0.4, effort=8),
+    }
+    compression_algo = compression_algos[compressor_name]
 
     zarr_mode_to_extra_kwargs = {
         "a": {"append_dim": "time"},
         "w": {
             "encoding": {
-                "stacked_eumetsat_data": {
-                    "compressor": numcodecs.get_codec(dict(id="bz2", level=5)),
+                "data": {
+                    "compressor": compression_algo,
                     "chunks": chunks,
                 },
                 "time": {"units": "nanoseconds since 1970-01-01"},
@@ -357,7 +356,8 @@ def save_dataset_to_zarr(
     assert zarr_mode in ["a", "w"], "`zarr_mode` must be one of: `a`, `w`"
     extra_kwargs = zarr_mode_to_extra_kwargs[zarr_mode]
 
-    dataarray.to_zarr(zarr_path, mode=zarr_mode, consolidated=True, compute=True, **extra_kwargs)
+    dataset = dataarray.to_dataset(name="data")
+    dataset.to_zarr(zarr_path, mode=zarr_mode, consolidated=True, compute=True, **extra_kwargs)
 
 
 def add_constant_coord_to_dataarray(
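
Because `zarr_mode_to_extra_kwargs` only passes `encoding` for `"w"`, the compressor is fixed when the store is created, and appends simply extend the `time` dimension. A hedged sketch of the intended write-then-append pattern (`dataarray_t0` and `dataarray_t1` are hypothetical):

```python
from satip.utils import save_dataset_to_zarr

# The first write creates the store and records the compressor in the encoding.
save_dataset_to_zarr(
    dataarray_t0, zarr_path="rss.zarr", compressor_name="jpeg-xl", zarr_mode="w"
)

# Later writes append along `time`; Zarr reuses the encoding recorded at
# creation, so compressor_name has no further effect in append mode.
save_dataset_to_zarr(
    dataarray_t1, zarr_path="rss.zarr", compressor_name="jpeg-xl", zarr_mode="a"
)
```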
diff --git a/scripts/generate_test_plots.py b/scripts/generate_test_plots.py
index 9f941d14..922b8c6b 100644
--- a/scripts/generate_test_plots.py
+++ b/scripts/generate_test_plots.py
@@ -121,44 +121,40 @@ def _plot_dataset(dataset: xr.DataArray, name: str, area: str) -> None:
     save_dataset_to_zarr(
         cloudmask_dataset,
         zarr_path=os.path.join(os.getcwd(), "cloud.zarr"),
-        channel_chunk_size=1,
-        dtype="int8",
+        compressor_name="bz2",
         zarr_mode="w",
     )
+    del cloudmask_dataset
+
     save_dataset_to_zarr(
         rss_dataset,
         zarr_path=os.path.join(os.getcwd(), "rss.zarr"),
-        channel_chunk_size=11,
-        dtype="int16",
+        compressor_name="jpeg-xl",
         zarr_mode="w",
     )
+    del rss_dataset
+
     save_dataset_to_zarr(
         hrv_dataset,
         zarr_path=os.path.join(os.getcwd(), "hrv.zarr"),
-        channel_chunk_size=1,
-        dtype="int16",
+        compressor_name="jpeg-xl",
        zarr_mode="w",
     )
+    del hrv_dataset
 
     # Load them from Zarr to ensure its the same as the output from satip
     cloudmask_dataset = (
-        xr.open_zarr(os.path.join(os.getcwd(), "cloud.zarr"), consolidated=True)[
-            "stacked_eumetsat_data"
-        ]
+        xr.open_zarr(os.path.join(os.getcwd(), "cloud.zarr"), consolidated=True)["data"]
         .isel(time=0)
         .sel(variable="cloud_mask")
     )
     rss_dataset = (
-        xr.open_zarr(os.path.join(os.getcwd(), "rss.zarr"), consolidated=True)[
-            "stacked_eumetsat_data"
-        ]
+        xr.open_zarr(os.path.join(os.getcwd(), "rss.zarr"), consolidated=True)["data"]
         .isel(time=0)
         .sel(variable="IR_016")
     )
     hrv_dataset = (
-        xr.open_zarr(os.path.join(os.getcwd(), "hrv.zarr"), consolidated=True)[
-            "stacked_eumetsat_data"
-        ]
+        xr.open_zarr(os.path.join(os.getcwd(), "hrv.zarr"), consolidated=True)["data"]
         .isel(time=0)
         .sel(variable="HRV")
     )
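
The script could go one step further and check numerically that the mildly lossy JPEG-XL round trip stayed close to the original. A sketch: `original_ir016` is a hypothetical copy of the IR_016 slice taken before the `del rss_dataset` above, and the `0.01` tolerance is an assumption rather than a measured bound:

```python
import numpy as np

# `rss_dataset` is the IR_016 slice just reloaded from rss.zarr.
np.testing.assert_allclose(
    rss_dataset.values,
    original_ir016.values,  # hypothetical pre-save copy of the same slice
    atol=0.01,  # distance=0.4 is mildly lossy, so avoid exact comparison
)
```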