diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..460aa0e
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+./data
\ No newline at end of file
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..1fd4893
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 160
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..adbb97d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+data/
\ No newline at end of file
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..43511d3
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,43 @@
+stages:
+ - lint
+ - build
+
+code-lint:
+ stage: lint
+ tags:
+ - linux
+ image: python:3.12-slim
+ script:
+ - pip install flake8
+ - flake8 . --exclude=venv
+
+docker-build:
+ stage: build
+ tags:
+ - linux
+ image:
+ name: docker:stable
+ services:
+ - name: docker:dind
+ alias: docker
+
+ variables:
+ DOCKER_HOST: tcp://docker:2375/
+ DOCKER_DRIVER: overlay2
+ DOCKER_TLS_CERTDIR: ""
+
+ before_script:
+ - echo "$CI_REGISTRY_PASSWORD" | docker login --username $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
+ - echo "$DOCKERHUB_PASSWORD" | docker login --username $DOCKERHUB_USERNAME --password-stdin
+ script:
+ - docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG -t $CI_REGISTRY_IMAGE:latest .
+ - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
+ - docker push $CI_REGISTRY_IMAGE:latest
+
+ # Pushing to Docker Hub
+ - docker tag $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG $DOCKERHUB_USERNAME/$CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
+ - docker tag $CI_REGISTRY_IMAGE:latest $DOCKERHUB_USERNAME/$CI_REGISTRY_IMAGE:latest
+ - docker push $DOCKERHUB_USERNAME/$CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
+ - docker push $DOCKERHUB_USERNAME/$CI_REGISTRY_IMAGE:latest
+ only:
+ - tags
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..ecbadbd
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,6 @@
+FROM python:3.9-slim
+
+VOLUME ["/data"]
+COPY ./app /app
+RUN pip install --no-cache-dir --upgrade -r /app/requirements.txt
+CMD ["python", "/app/main.py"]
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d53a016
--- /dev/null
+++ b/README.md
@@ -0,0 +1,96 @@
+
+
+
+
+
+# Unraid to Home Assistant
+This Docker container parses and forwards all WebSocket messages from your Unraid server to Home Assistant using the MQTT protocol. This enables you to create dashboards that provide a superior overview compared to Unraid's native capabilities.
+
+
+
+
+
+
+
+## Prerequisites
+Ensure that Home Assistant and MQTT are correctly configured before proceeding.
+
+
+## Getting started
+
+I haven't created a Unraid template as I personally utilize docker-compose. However, setting this up shouldn't pose significant challenges.
+
+Generate a config.yaml and store it in a directory named `data`. For instance, I'll demonstrate by configuring two servers named 'Kaya' and 'Hisa'. Ensure to adjust the MQTT settings accordingly:
+```
+unraid:
+ - name: Kaya
+ host: 192.168.1.10
+ port: 80
+ ssl: false
+ username: root
+ password: PASSWORD
+ scan_interval: 30
+
+ - name: Hisa
+ host: 192.168.1.20
+ port: 80
+ ssl: false
+ username: root
+ password: PASSWORD
+ scan_interval: 30
+
+mqtt:
+ host: 192.168.1.100
+ port: 1883
+ username: USERNAME
+ password: PASSWORD
+```
+
+Now we can run our container either using `docker run` or `docker-compose`.
+
+Docker run:
+```
+docker run -d \
+ --name hass-unraid \
+ --network bridge \
+ --restart always \
+ -e TZ=Europe/Oslo \
+ -v $(pwd)/data:/data \
+ registry.idmedia.no/idmedia/docker/hass-unraid:latest
+```
+
+Docker-compose:
+```
+version: '3'
+
+services:
+ hass-unraid:
+ container_name: hass-unraid
+ network_mode: bridge
+ restart: always
+ environment:
+ - TZ=Europe/Oslo
+ volumes:
+ - './data:/data'
+ image: registry.idmedia.no/idmedia/docker/hass-unraid:latest
+```
+
+The container should now connect to your Unraid server(s) and automatically create an entry in Home Assistant. To verify, navigate to Settings->Devices & Services->MQTT. If no device is created, make sure to check the container logs using `docker logs hass-unraid`.
+
+
+## Lovelace
+
+This lovelace example is a bit complex and requires these modules in Home Assistant to work properly:
+ * [button-card](https://github.com/custom-cards/button-card)
+ * [vertical-stack-in-card](https://github.com/ofekashery/vertical-stack-in-card)
+ * [auto-entities](https://github.com/thomasloven/lovelace-auto-entities)
+
+
+Please check out the `lovelace` folder. That's where I've placed two button-card templates and the main setup for showing the server named `Kaya` just like you see in the screenshot.
+
+
+### Feel free to contribute, report issues, or suggest improvements! If you find this repository useful, don't forget to star it :)
+
+
+
+
\ No newline at end of file
diff --git a/app/main.py b/app/main.py
new file mode 100644
index 0000000..ea18776
--- /dev/null
+++ b/app/main.py
@@ -0,0 +1,286 @@
+import os
+import re
+import sys
+import time
+import json
+import httpx
+import signal
+import asyncio
+import logging
+import parsers
+import websockets
+from lxml import etree
+from utils import load_file, normalize_str, handle_sigterm
+from gmqtt import Client as MQTTClient, Message
+
+
class UnRAIDServer(object):
    """Bridge between one Unraid server and Home Assistant via MQTT.

    Logs in to the Unraid web UI, subscribes to its nchan WebSocket
    channels, and republishes the parsed payloads as MQTT discovery
    configs plus state/attribute topics under `homeassistant/` and
    `unraid/<server>/...`.
    """

    def __init__(self, mqtt_config, unraid_config):
        """Derive URLs/credentials from the config dicts, kick off the
        MQTT connection task, and set up a per-server logger.

        mqtt_config / unraid_config are the corresponding sections of
        data/config.yaml (see README).
        """
        # Unraid config
        unraid_host = unraid_config.get('host')
        unraid_port = unraid_config.get('port')
        unraid_ssl = unraid_config.get('ssl', False)
        unraid_address = f'{unraid_host}:{unraid_port}'
        unraid_protocol = 'https://' if unraid_ssl else 'http://'

        self.unraid_version = ''  # filled in by ws_connect() after scraping /Dashboard
        self.unraid_name = unraid_config.get('name')
        self.unraid_username = unraid_config.get('username')
        self.unraid_password = unraid_config.get('password')
        self.unraid_url = f'{unraid_protocol}{unraid_address}'
        self.unraid_ws = f'wss://{unraid_address}' if unraid_ssl else f'ws://{unraid_address}'
        self.scan_interval = unraid_config.get('scan_interval', 30)
        self.share_parser_lastrun = 0
        self.share_parser_interval = 3600  # share sizing scrapes the web UI; recompute at most hourly
        self.csrf_token = ''
        self.unraid_cookie = ''

        # MQTT client
        self.mqtt_connected = False
        unraid_id = normalize_str(self.unraid_name)
        # Last-will marks the server unavailable if this bridge dies uncleanly
        will_message = Message(f'unraid/{unraid_id}/connectivity/state', 'OFF', retain=True)
        self.mqtt_client = MQTTClient(self.unraid_name, will_message=will_message)
        asyncio.ensure_future(self.mqtt_connect(mqtt_config))

        # Logger (one handler per server so log lines carry the server name)
        self.logger = logging.getLogger(self.unraid_name)
        self.logger.setLevel(logging.INFO)
        unraid_logger = logging.StreamHandler(sys.stdout)
        unraid_logger_formatter = logging.Formatter(f'%(asctime)s [%(levelname)s] [{self.unraid_name}] %(message)s')
        unraid_logger.setFormatter(unraid_logger_formatter)
        self.logger.addHandler(unraid_logger)

    def on_connect(self, client, flags, rc, properties):
        """gmqtt callback: broker connection established — publish the
        availability sensor and start the Unraid WebSocket task."""
        self.logger.info('Successfully connected to mqtt server')

        # Create and subscribe to Mover button
        # mover_payload = { 'name': 'Mover' }
        # self.mqtt_publish(mover_payload, 'button', state_value='OFF', create_config=True)

        self.mqtt_connected = True
        self.mqtt_status(connected=True, create_config=True)
        self.unraid_task = asyncio.ensure_future(self.ws_connect())

    def on_message(self, client, topic, payload, qos, properties):
        """gmqtt callback: log inbound messages (button commands)."""
        self.logger.info(f'Message received: {topic}')

    def on_disconnect(self, client, packet, exc=None):
        """gmqtt callback: broker connection lost — flag offline so the
        ws_connect() loop winds down."""
        self.logger.error('Disconnected from mqtt server')
        self.mqtt_status(connected=False)
        self.mqtt_connected = False

    def mqtt_status(self, connected, create_config=False):
        """Publish the 'Connectivity' binary sensor (ON/OFF)."""
        # Update status
        status_payload = {
            'name': 'Connectivity',
            'device_class': 'connectivity'
        }
        state_value = 'ON' if connected else 'OFF'
        self.mqtt_publish(status_payload, 'binary_sensor', state_value, create_config=create_config)

    def mqtt_publish(self, payload, sensor_type, state_value, json_attributes=None, create_config=False, retain=False):
        """Publish one entity: optionally its HA discovery config, then
        its state and (optionally) a JSON attributes document.

        payload        : dict with at least 'name'; extra keys become
                         part of the discovery config.
        sensor_type    : HA platform ('sensor', 'binary_sensor', 'button').
        state_value    : value for the state topic; None skips the state push.
        json_attributes: dict published to the attributes topic, if given.
        create_config  : when True, (re)publish the retained discovery config.
        retain         : retain flag for state/attribute topics.
        """
        # Make clean variables
        unraid_id = normalize_str(self.unraid_name)
        sensor_id = normalize_str(payload["name"])
        unraid_sensor_id = f'{unraid_id}_{sensor_id}'

        # Create config
        if create_config:

            # Create device
            device = {
                'name': self.unraid_name,
                'identifiers': f'unraid_{unraid_id}'.lower(),
                'model': 'Unraid',
                'manufacturer': 'Lime Technology'
            }
            if self.unraid_version:
                device['sw_version'] = self.unraid_version

            # Update payload with default fields
            # NOTE(review): re-binds create_config to the caller's payload
            # dict and mutates it in place — confirm no caller reuses the
            # payload dict after this call.
            create_config = payload

            if state_value is not None:
                create_config['state_topic'] = f'unraid/{unraid_id}/{sensor_id}/state'
            if json_attributes:
                create_config['json_attributes_topic'] = f'unraid/{unraid_id}/{sensor_id}/attributes'
            if sensor_type == 'button':
                create_config['command_topic'] = f'unraid/{unraid_id}/{sensor_id}/commands'

            # Expire all sensors except binary_sensor (connectivity)
            if not sensor_id.startswith(('connectivity', 'share_', 'disk_')):
                expire_in_seconds = self.scan_interval * 4
                # HA marks the entity unavailable after this many seconds
                # without a state update; floor of 120s
                create_config['expire_after'] = expire_in_seconds if expire_in_seconds > 120 else 120

            # Append extra fields
            config_fields = {
                'name': f'{payload["name"]}',
                'attribution': 'Data provided by UNRAID',
                'unique_id': unraid_sensor_id,
                'device': device
            }
            create_config.update(config_fields)

            # Create config (retained so HA re-discovers after restart)
            self.mqtt_client.publish(f'homeassistant/{sensor_type}/{unraid_sensor_id}/config', json.dumps(create_config), retain=True)

        # Push state update
        if state_value is not None:
            self.mqtt_client.publish(f'unraid/{unraid_id}/{sensor_id}/state', state_value, retain=retain)

        # Push attributes update
        if json_attributes:
            self.mqtt_client.publish(f'unraid/{unraid_id}/{sensor_id}/attributes', json.dumps(json_attributes), retain=retain)

        # Subscribe to buttons
        if sensor_type == 'button':
            self.mqtt_client.subscribe(f'unraid/{unraid_id}/{sensor_id}/commands', qos=0, retain=retain)

    async def mqtt_connect(self, mqtt_config):
        """Configure the gmqtt client and connect to the broker,
        retrying every 30s until it succeeds."""
        # MQTT config
        mqtt_host = mqtt_config.get('host')
        mqtt_port = mqtt_config.get('port', 1883)
        mqtt_username = mqtt_config.get('username')
        mqtt_password = mqtt_config.get('password')

        self.mqtt_history = {}  # per-channel timestamp of the last parsed message
        self.share_parser_lastrun = 0
        self.mqtt_client.on_connect = self.on_connect
        self.mqtt_client.on_message = self.on_message
        self.mqtt_client.on_disconnect = self.on_disconnect
        self.mqtt_client.set_auth_credentials(mqtt_username, mqtt_password)

        while True:
            try:
                self.logger.info('Connecting to mqtt server...')
                await self.mqtt_client.connect(mqtt_host, mqtt_port)
                break

            except ConnectionRefusedError:
                self.logger.error('Failed to connect to mqtt server because the connection was refused...')
                await asyncio.sleep(30)
            except Exception:
                self.logger.exception('Failed to connect to mqtt server due to an exception...')
                await asyncio.sleep(30)

    async def ws_connect(self):
        """Log in to the Unraid web UI, open the multiplexed nchan
        WebSocket, and dispatch each frame to its channel parser.

        Reconnects (with a 30s back-off) for as long as the MQTT side
        is up. Each frame is rate-limited per channel by scan_interval.
        """
        while self.mqtt_connected:
            self.logger.info('Connecting to unraid...')
            last_msg = ''

            try:
                # Get Unraid auth key
                payload = {
                    'username': self.unraid_username,
                    'password': self.unraid_password
                }

                async with httpx.AsyncClient() as http:
                    r = await http.post(f'{self.unraid_url}/login', data=payload, timeout=120)
                    self.unraid_cookie = r.headers.get('set-cookie')

                    # Scrape the Unraid version out of the Dashboard logo block
                    r = await http.get(f'{self.unraid_url}/Dashboard', follow_redirects=True, timeout=120)
                    tree = etree.HTML(r.text)
                    version_elem = tree.xpath('.//div[@class="logo"]/text()[preceding-sibling::a]')
                    self.unraid_version = ''.join(c for c in ''.join(version_elem) if c.isdigit() or c == '.')

                # Connect to WS
                headers = {'Cookie': self.unraid_cookie}
                subprotocols = ['ws+meta.nchan']

                # Channel name -> parser; order matters: the nchan meta
                # header lists message ids in this same channel order.
                sub_channels = {
                    'var': parsers.var,
                    'session': parsers.session,
                    'cpuload': parsers.cpuload,
                    # 'diskload': parsers.default,
                    'disks': parsers.disks,
                    'parity': parsers.parity,
                    'shares': parsers.shares,
                    'update1': parsers.update1,
                    'update3': parsers.update3,
                    # 'dockerload': parsers.default,
                    'temperature': parsers.temperature
                }

                websocket_url = f'{self.unraid_ws}/sub/{",".join(sub_channels)}'
                async with websockets.connect(websocket_url, subprotocols=subprotocols, extra_headers=headers) as websocket:
                    self.logger.info('Successfully connected to unraid')

                    # Docker channel needs to be triggered
                    # await session.get(f'{self.url}/Docker')

                    # Listen for messages
                    while self.mqtt_connected:
                        data = await asyncio.wait_for(websocket.recv(), timeout=120)

                        # Store last message (only used for error reporting)
                        last_msg = data

                        # Parse message id and content: frame = meta header,
                        # blank line, then the channel payload. The meta id
                        # list has one entry per subscribed channel; the
                        # entry starting with '[' marks the channel this
                        # frame belongs to.
                        msg_data = data.replace('\00', ' ').split('\n\n', 1)[1]
                        msg_ids = re.findall(r'([-\[\d\],]+,[-\[\d\],]*)|$', data)[0].split(',')
                        sub_channel = next(sub for (sub, msg) in zip(sub_channels, msg_ids) if msg.startswith('['))
                        msg_parser = sub_channels.get(sub_channel, parsers.default)

                        # Skip share calculation if within time limit as it's resource intensive
                        if sub_channel == 'shares':
                            current_time = time.time()
                            time_passed = current_time - self.share_parser_lastrun
                            if time_passed <= self.share_parser_interval:
                                # seconds_left = int(self.share_parser_interval - time_passed)
                                # self.logger.info(f'Ignoring data for shares (rate-limited for {seconds_left} more seconds)')
                                continue

                            self.share_parser_lastrun = current_time

                        # Create config (first frame seen on this channel)
                        if sub_channel not in self.mqtt_history:
                            self.logger.info(f'Create config for {sub_channel}')
                            self.mqtt_history[sub_channel] = (time.time() - self.scan_interval)
                            msg_parser(self, msg_data, create_config=True)

                        # Parse content, at most once per scan_interval per channel
                        if self.scan_interval <= (time.time() - self.mqtt_history.get(sub_channel, time.time())):
                            self.logger.info(f'Parse data for {sub_channel}')
                            self.mqtt_history[sub_channel] = time.time()
                            msg_parser(self, msg_data, create_config=False)

            except (httpx.ConnectTimeout, httpx.ConnectError):
                self.logger.error('Failed to connect to unraid due to a timeout or connection issue...')
                self.mqtt_status(connected=False)
                await asyncio.sleep(30)

            except Exception:
                self.logger.exception('Failed to connect to unraid due to an exception...')
                self.logger.error('Last message received:')
                self.logger.error(last_msg)
                self.mqtt_status(connected=False)
                await asyncio.sleep(30)
+
+
if __name__ == '__main__':
    # Allow keyboard interrupts: translate SIGTERM (docker stop) into
    # KeyboardInterrupt so the process exits cleanly
    signal.signal(signal.SIGTERM, handle_sigterm)

    # Disable gmqtt log (it is chatty; per-server loggers are used instead)
    loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict if name.startswith(('gmqtt'))]
    for log in loggers:
        logging.getLogger(log.name).disabled = True

    # Read config file
    # NOTE(review): resolved relative to the CWD; inside the container the
    # process starts at '/', so '../data' collapses to '/data' (the mounted
    # volume) — confirm the CWD when running outside Docker.
    data_path = '../data'
    config = load_file(os.path.join(data_path, 'config.yaml'))

    # Required by the MQTT client on Windows
    if os.name == 'nt':
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    # Get event loop
    # NOTE(review): asyncio.get_event_loop() outside a running loop is
    # deprecated since Python 3.10 — consider new_event_loop()+set_event_loop().
    loop = asyncio.get_event_loop()

    # Create unraid instances (each schedules its own MQTT/WS tasks)
    for unraid_config in config.get('unraid'):
        UnRAIDServer(config.get('mqtt'), unraid_config)

    # Loop forever
    loop.run_forever()
diff --git a/app/parsers.py b/app/parsers.py
new file mode 100644
index 0000000..f0e8047
--- /dev/null
+++ b/app/parsers.py
@@ -0,0 +1,321 @@
+import re
+import math
+import requests
+from lxml import etree
+from utils import Preferences
+from humanfriendly import parse_size
+
+
def default(self, msg_data, create_config):
    """No-op parser for channels whose payload is intentionally ignored."""
+
+
def session(self, msg_data, create_config):
    """Remember the CSRF token pushed on the 'session' channel; later
    web-UI requests (see shares()) must echo it back."""
    self.csrf_token = msg_data
+
+
def cpuload(self, msg_data, create_config):
    """Publish host CPU utilization (percent) as an HA sensor."""
    host_cpu = int(Preferences(msg_data).as_dict()['cpu']['host'])

    sensor_config = {
        'name': 'CPU Utilization',
        'unit_of_measurement': '%',
        'icon': 'mdi:chip',
        'state_class': 'measurement',
    }

    self.mqtt_publish(sensor_config, 'sensor', host_cpu, create_config=create_config)
+
+
def disks(self, msg_data, create_config):
    """Publish one temperature sensor per array disk, attaching the raw
    disk record as entity attributes."""
    for entry in Preferences(msg_data).as_dict().values():
        raw_name = entry['name']
        # Disks in standby report a non-numeric temp ('*'); publish 0
        temp_value = int(entry['temp']) if str(entry['temp']).isnumeric() else 0

        # Split trailing digits off the device name ('disk3' -> 'Disk 3',
        # 'parity2' -> 'Parity 2'); the bare 'disk' prefix is dropped.
        match = re.match(r'([a-z_]+)([0-9]+)', raw_name, re.I)
        if match:
            prefix = None if match[1] == 'disk' else match[1]
            raw_name = ' '.join(filter(None, [prefix, match[2]]))
        display_name = raw_name.title().replace('_', ' ')

        sensor_config = {
            'name': f'Disk {display_name}',
            'unit_of_measurement': '°C',
            'device_class': 'temperature',
            'icon': 'mdi:harddisk',
            'state_class': 'measurement'
        }

        self.mqtt_publish(sensor_config, 'sensor', temp_value, entry, create_config=create_config, retain=True)
+
+
def shares(self, msg_data, create_config):
    """Publish a usage-percent sensor for every non-empty user share.

    For cache-enabled shares the used/free figures are refined by
    scraping the web UI's ShareList page (endpoint differs between
    unRAID 6.11 and 6.12+). This scraping is expensive, which is why
    the caller rate-limits this parser via share_parser_interval.
    """
    prefs = Preferences(msg_data)
    shares = prefs.as_dict()

    for n in shares:
        share = shares[n]
        share_name = share['name']
        share_disk_count = len(share['include'].split(','))
        share_floor_size = share['floor']
        share_nameorig = share['nameorig']
        share_use_cache = share['usecache']
        share_cachepool = share['cachepool']

        if share_use_cache in ['no', 'yes', 'prefer']:

            # unRAID 6.11
            if self.unraid_version.startswith('6.11'):

                # Auth header
                headers = {'Cookie': self.unraid_cookie + ';ssz=ssz'}

                # Calculate used space (fire-and-forget; result is read below)
                params = {
                    'cmd': '/webGui/scripts/share_size',
                    'arg1': share_nameorig,
                    'arg2': 'ssz1',
                    'arg3': share_cachepool,
                    'csrf_token': self.csrf_token
                }
                requests.get(f'{self.unraid_url}/update.htm', params=params, headers=headers)

                # Read result
                params = {
                    'compute': 'no',
                    'path': 'Shares',
                    'scale': 1,
                    'fill': 'ssz',
                    'number': '.'
                }

                r = requests.get(f'{self.unraid_url}/webGui/include/ShareList.php', params=params, headers=headers)

            # unRAID 6.12+
            else:

                # Auth header
                headers = {'Cookie': self.unraid_cookie}

                # Read result
                data = {
                    'compute': share_nameorig,
                    'path': 'Shares',
                    'all': 1,
                    'csrf_token': self.csrf_token
                }

                # NOTE(review): sends `data` (a request body) with GET — the
                # server may expect query params; confirm against the 6.12
                # webGui ShareList.php handler.
                r = requests.get(f'{self.unraid_url}/webGui/include/ShareList.php', data=data, headers=headers)

            if r.ok:
                tree = etree.HTML(r.text)

                # ShareList table: column 6 = used, column 7 = free
                size_total_used = tree.xpath(f'//td/a[text()="{share_nameorig}"]/ancestor::tr[1]/td[6]/text()')
                size_total_used = next(iter(size_total_used or []), '0').strip()
                size_total_used = parse_size(size_total_used)

                size_total_free = tree.xpath(f'//td/a[text()="{share_nameorig}"]/ancestor::tr[1]/td[7]/text()')
                size_total_free = next(iter(size_total_free or []), '0').strip()
                size_total_free = parse_size(size_total_free)

                # Row right after the share (when not a "Disk ..." row)
                # carries the cache-pool totals for that share
                size_cache_used = tree.xpath(f'//td/a[text()="{share_nameorig}"]/following::tr[1]/td[1][not(contains(text(), "Disk "))]/../td[6]/text()')
                size_cache_used = next(iter(size_cache_used or []), '0').strip()
                size_cache_used = parse_size(size_cache_used)

                size_cache_free = tree.xpath(f'//td/a[text()="{share_nameorig}"]/following::tr[1]/td[1][not(contains(text(), "Disk "))]/../td[7]/text()')
                size_cache_free = next(iter(size_cache_free or []), '0').strip()
                size_cache_free = parse_size(size_cache_free)

                # # Debug
                # from humanfriendly import format_size
                # print(f'Share: {share_nameorig}')
                # print(f'Used (total): {format_size(size_total_used)} Free (total): {format_size(size_total_free)}')
                # print(f'Used (cache): {format_size(size_cache_used)} Free (total): {format_size(size_cache_free)}')

                # Recalculate used and free space, converted from bytes to kbytes
                share['used'] = int(size_total_used / 1000)
                share['free'] = int((size_total_free - size_cache_free - size_cache_used) / 1000)

        # Skip empty shares
        if share['used'] == 0:
            continue

        # If the drives is exclusive we change the share_disk_count to 1
        if share.get('exclusive') in ['yes']:
            share_disk_count = 1

        # Reserve the configured per-disk floor space before computing usage
        share_size_floor = share_disk_count * share_floor_size
        share['free'] -= share_size_floor

        share_size_total = share['used'] + share['free']
        share_used_pct = math.ceil((share['used'] / (share_size_total) * 100))

        payload = {
            'name': f'Share {share_name.title()} Usage',
        'unit_of_measurement': '%',
            'icon': 'mdi:folder-network',
            'state_class': 'measurement'
        } if False else {
            'name': f'Share {share_name.title()} Usage',
            'unit_of_measurement': '%',
            'icon': 'mdi:folder-network',
            'state_class': 'measurement'
        }

        json_attributes = share
        self.mqtt_publish(payload, 'sensor', share_used_pct, json_attributes, create_config=create_config, retain=True)
+
+
def temperature(self, msg_data, create_config):
    """Publish fan-speed (RPM) and temperature (°C) sensors scraped from
    the HTML fragment pushed on the 'temperature' channel."""
    html = etree.HTML(msg_data)

    for span in html.xpath('.//span[@title]'):
        label = span.get('title')
        raw_text = ''.join(span.itertext())
        numeric = ''.join(ch for ch in raw_text if ch.isdigit() or ch == '.')

        # Spans without a numeric reading are skipped entirely
        if not numeric:
            continue

        if 'rpm' in raw_text:
            # Fan reading: drop a redundant 'fan' from the label
            label = re.sub('fan', '', label, flags=re.IGNORECASE).strip()
            reading = int(numeric)
            sensor_config = {
                'name': f'Fan {label} Speed',
                'unit_of_measurement': 'RPM',
                'icon': 'mdi:fan',
                'state_class': 'measurement'
            }
        else:
            reading = float(numeric)
            sensor_config = {
                'name': f'{label} Temperature',
                'unit_of_measurement': '°C',
                'icon': 'mdi:thermometer',
                'state_class': 'measurement',
                'device_class': 'temperature'
            }

        self.mqtt_publish(sensor_config, 'sensor', reading, create_config=create_config)
+
+
def update1(self, msg_data, create_config):
    """Publish memory-usage (%) and fan-speed (RPM) sensors parsed from
    the 'update1' dashboard text blob.

    Percentages are assigned positionally to RAM/Flash/Log/Docker in the
    order they appear; fans are numbered by order of appearance.
    """
    labels = ['RAM', 'Flash', 'Log', 'Docker']
    for label, usage_text in zip(labels, re.findall(re.compile(r'(\d+%)'), msg_data)):
        digits = ''.join(ch for ch in usage_text if ch.isdigit())
        if not digits:
            continue

        sensor_config = {
            'name': f'{label} Usage',
            'unit_of_measurement': '%',
            'icon': 'mdi:memory',
            'state_class': 'measurement'
        }
        self.mqtt_publish(sensor_config, 'sensor', int(digits), create_config=create_config)

    for index, rpm_text in enumerate(re.findall(re.compile(r'(\d+ RPM)'), msg_data)):
        digits = ''.join(ch for ch in rpm_text if ch.isdigit())
        if not digits:
            continue

        sensor_config = {
            'name': f'Fan {index} Speed',
            'unit_of_measurement': 'RPM',
            'icon': 'mdi:fan',
            'state_class': 'measurement'
        }
        self.mqtt_publish(sensor_config, 'sensor', int(digits), create_config=create_config)
+
+
def update3(self, msg_data, create_config):
    """Publish aggregate network throughput sensors from the 'update3'
    channel (one line per interface: name, download value+unit,
    upload value+unit, ...).

    Throughput of all 'eth*' interfaces is summed and published as one
    download and one upload sensor, in Mbit/s.
    """
    # Payloads are loop-invariant; previously they were (re)built inside
    # the loop and left UNBOUND when no 'eth*' line was present, making
    # the publish calls below raise NameError.
    payload_download = {
        'name': 'Download Throughput',
        'unit_of_measurement': 'Mbit/s',
        'icon': 'mdi:download',
        'state_class': 'measurement'
    }
    payload_upload = {
        'name': 'Upload Throughput',
        'unit_of_measurement': 'Mbit/s',
        'icon': 'mdi:upload',  # was 'mdi:download' (copy-paste bug)
        'state_class': 'measurement'
    }

    network_download = 0
    network_upload = 0

    for line in msg_data.splitlines():
        network = [n.strip() for n in line.split(' ')]

        # Only ethernet interfaces are counted
        if not network[0].startswith('eth'):
            continue

        # network[1:3] = download value + unit, network[3:5] = upload value + unit
        network_download_text = ' '.join(network[1:3])
        network_download += round(parse_size(network_download_text) / 1000 / 1000, 1)

        network_upload_text = ' '.join(network[3:5])
        network_upload += round(parse_size(network_upload_text) / 1000 / 1000, 1)

    # Publish even when no interface matched (0 throughput)
    self.mqtt_publish(payload_download, 'sensor', network_download, create_config=create_config)
    self.mqtt_publish(payload_upload, 'sensor', network_upload, create_config=create_config)
+
+
def parity(self, msg_data, create_config):
    """Publish parity-check progress (percent) with size/speed/ETA
    attributes.

    msg_data is a ';'-separated record:
    total_size;elapsed;position (pct);speed;finish;sync_errors
    """
    data = msg_data.split(';')

    # Six fields are consumed below (index 5 = sync errors), so require
    # all six; the previous `< 5` guard let len == 5 through and crashed
    # on data[5].
    if len(data) < 6:
        return

    # data[2] is expected to look like "600 GB (43.1 %)": strip the
    # parenthesised percent to get the position size, and extract the
    # percent digits from inside the parentheses.
    position_size = re.sub(r'\([^)]*\)', '', data[2])
    position_pct = data[2][data[2].find('(') + 1:data[2].find(')')]
    position_pct = ''.join(c for c in position_pct if c.isdigit() or c == '.')

    # No parenthesised percent -> nothing sensible to publish
    # (previously float('') raised ValueError)
    if not position_pct:
        return

    state_value = float(position_pct)

    payload = {
        'name': 'Parity Check',
        'unit_of_measurement': '%',
        'icon': 'mdi:database-eye',
        'state_class': 'measurement'
    }

    json_attributes = {
        'total_size': parse_size(data[0]),
        'elapsed_time': data[1],
        'current_position': parse_size(position_size),
        'estimated_speed': parse_size(data[3]),
        'estimated_finish': data[4],
        'sync_errors_corrected': data[5]
    }

    self.mqtt_publish(payload, 'sensor', state_value, json_attributes, create_config=create_config)
+
+
def var(self, msg_data, create_config):
    """Publish the array running state as a binary sensor, attaching the
    full 'var' record as entity attributes."""
    # The channel payload is a bare INI body; prepend a section header
    # so configparser accepts it.
    parsed = Preferences(f'[var]\n{msg_data}').as_dict()['var']

    array_state = 'ON' if 'started' in parsed['mdstate'].lower() else 'OFF'

    sensor_config = {
        'name': 'Array',
        'device_class': 'running'
    }

    self.mqtt_publish(sensor_config, 'binary_sensor', array_state, parsed, create_config=create_config, retain=True)
diff --git a/app/requirements.txt b/app/requirements.txt
new file mode 100644
index 0000000..c5d2083
--- /dev/null
+++ b/app/requirements.txt
@@ -0,0 +1,17 @@
+anyio==3.6.1
+asyncio-mqtt==0.12.1
+certifi==2022.6.15
+gmqtt==0.6.11
+h11==0.12.0
+httpcore==0.15.0
+httpx==0.23.0
+humanfriendly==10.0
+idna==3.3
+lxml==4.9.1
+paho-mqtt==1.6.1
+pyreadline3==3.4.1
+PyYAML==6.0.1
+rfc3986==1.5.0
+sniffio==1.2.0
+websockets==10.3
+requests==2.28.1
\ No newline at end of file
diff --git a/app/utils.py b/app/utils.py
new file mode 100644
index 0000000..d80055c
--- /dev/null
+++ b/app/utils.py
@@ -0,0 +1,80 @@
+import os
+import json
+import yaml
+import configparser
+
+
class Preferences:
    """Parse an INI-formatted string (Unraid state dump) into a plain dict.

    Uses the module-level `remove_quotes` helper to strip the double
    quotes Unraid places around values and to int-convert numeric ones.
    """

    def __init__(self, string_ini):
        self.config = configparser.ConfigParser()
        self.config.read_string(string_ini)
        self.d = self.to_dict(self.config._sections)

    def as_dict(self):
        """Return the parsed configuration as a plain dict."""
        return self.d

    def to_dict(self, config):
        """Convert configparser's nested OrderedDict into a plain dict,
        unquote keys/values, and drop empty sections."""
        # Round-trip through JSON flattens OrderedDicts to plain dicts
        plain = json.loads(json.dumps(config))
        plain = remove_quotes(plain)
        return {section: body for section, body in plain.items() if body}
+
+
def handle_sigterm(*args):
    """Signal handler: translate SIGTERM into KeyboardInterrupt so the
    normal shutdown path runs when the container is stopped."""
    raise KeyboardInterrupt
+
+
def normalize_str(string):
    """Normalize *string* into an MQTT/HA-safe identifier: lower-case,
    spaces to underscores, all other non-alphanumerics removed."""
    lowered = string.lower().replace(' ', '_')
    kept = [ch for ch in lowered if ch.isalpha() or ch.isdigit() or ch == '_']
    return ''.join(kept).rstrip()
+
+
def remove_quotes(config):
    """Recursively strip surrounding double quotes from the keys and
    string values of *config* (mutated in place), converting all-digit
    values to int. Returns the same dict for chaining."""
    for original_key, original_value in list(config.items()):

        # Re-insert the entry under the unquoted key
        cleaned_key = original_key.strip('"')
        config[cleaned_key] = config.pop(original_key)

        if isinstance(original_value, str):
            cleaned = config[cleaned_key].strip('"')

            # Integer-looking values become ints; everything else stays a string
            try:
                cleaned = int(cleaned)
            except ValueError:
                pass

            config[cleaned_key] = cleaned
        if isinstance(original_value, dict):
            config[cleaned_key] = remove_quotes(original_value)

    return config
+
+
def load_file(path_to_file):
    """Read *path_to_file* as JSON or YAML (chosen by extension) and
    return the parsed content; return {} when the file is missing or
    cannot be parsed (best-effort config loading)."""
    if not os.path.isfile(path_to_file):
        return {}

    _, extension = os.path.splitext(path_to_file)
    with open(path_to_file) as handle:
        try:
            if 'json' in extension:
                return json.load(handle)
            return yaml.safe_load(handle)
        except Exception:
            # A malformed config behaves like a missing one
            return {}
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..6651daa
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,14 @@
+version: '3'
+
+services:
+ hass-unraid:
+ container_name: hass-unraid
+ network_mode: bridge
+ restart: always
+ environment:
+ - TZ=Europe/Oslo
+ volumes:
+ - './data:/data'
+ labels:
+ - 'net.unraid.docker.icon=/mnt/user/docker/docker-icons/data/hass-unraid.png'
+ image: registry.idmedia.no/idmedia/docker/hass-unraid:latest
diff --git a/extras/logo.png b/extras/logo.png
new file mode 100644
index 0000000..cf84ef5
Binary files /dev/null and b/extras/logo.png differ
diff --git a/extras/screenshot.png b/extras/screenshot.png
new file mode 100644
index 0000000..caa6365
Binary files /dev/null and b/extras/screenshot.png differ
diff --git a/lovelace/server_kaya.yaml b/lovelace/server_kaya.yaml
new file mode 100644
index 0000000..bccd707
--- /dev/null
+++ b/lovelace/server_kaya.yaml
@@ -0,0 +1,261 @@
+type: custom:vertical-stack-in-card
+card_mod:
+ style: |
+ ha-card {
+ padding: 10px;
+ }
+ .card-header {
+ padding: 0;
+ }
+cards:
+ - type: custom:vertical-stack-in-card
+ card_mod:
+ style:
+ .: |
+ ha-card { border-style: none !important; }
+ hui-grid-card$: |
+ h1 {
+ padding: 0 !important;
+ line-height: 48px;
+ }
+ #root {
+ grid-template-columns: 100px auto !important;
+ grid-gap: 20px !important;
+ align-items: center;
+ min-height: 130px;
+ }
+ hui-picture-card { align-self: center; }
+ cards:
+ - type: grid
+ title: Kaya
+ columns: 2
+ cards:
+ - type: picture
+ card_mod:
+ style: |
+ ha-card { border-style: none !important; border-radius: unset !important; }
+ image: /local/norco.svg
+ tap_action:
+ action: none
+ - type: vertical-stack
+ cards:
+ - type: custom:button-card
+ entity: sensor.kaya_cpu_utilization
+ template: button_simple_bar
+ name: CPU
+ - type: custom:button-card
+ entity: sensor.kaya_ram_usage
+ template: button_simple_bar
+ name: RAM
+ - type: custom:button-card
+ entity: sensor.kaya_fan_speed
+ template: button_simple_bar
+ name: FAN
+ variables:
+ warning_from_state: 80
+ critical_from_state: 90
+ - type: custom:button-card
+ entity: sensor.kaya_power_power
+ template: button_simple_bar
+ name: PWR
+ variables:
+ max_value: 500
+ warning_from_state: 70
+ critical_from_state: 90
+
+ - type: custom:auto-entities
+ show_empty: false
+ card:
+ type: markdown
+ card_mod:
+ style:
+ .: |
+ ha-card {
+ padding: 0 16px 20px 16px !important;
+ border-style: none !important;
+ }
+ ha-markdown {
+ padding: 0px !important;
+ }
+ content: |
+
+ Parity-Check in progress
+ Current position: {{ state_attr("sensor.kaya_parity_check", "current_position") | int(0) | filesizeformat() }}
+ ({{ states("sensor.kaya_parity_check") | float(0)}} %) @
+ {{ state_attr("sensor.kaya_parity_check", "estimated_speed") | int(0) | filesizeformat() }}/sec
+ Estimated finish: {{ state_attr("sensor.kaya_parity_check", "estimated_finish") }}
+
+ filter:
+ template: |
+ {% set parity_status = states("sensor.kaya_parity_check") | float(default=-1) %}
+ {% if parity_status >= 0 and parity_status < 100 %}
+ sensor.kaya_parity_check,
+ {% endif %}
+
+ - type: custom:auto-entities
+ card:
+ type: grid
+ columns: 4
+ square: false
+ card_param: cards
+ filter:
+ template: >-
+ {% set disk_order = [
+ 'sensor.kaya_disk_parity',
+ 'sensor.kaya_disk_parity_2',
+ 'sensor.kaya_disk_cache_array',
+ 'sensor.kaya_disk_cache_array_2',
+ 'sensor.kaya_disk_1',
+ 'sensor.kaya_disk_2',
+ 'sensor.kaya_disk_3',
+ 'sensor.kaya_disk_4',
+ 'sensor.kaya_disk_5',
+ 'sensor.kaya_disk_6',
+ 'sensor.kaya_disk_7',
+ 'sensor.kaya_disk_8',
+ 'sensor.kaya_disk_9',
+ 'sensor.kaya_disk_10',
+ 'sensor.kaya_disk_11',
+ 'sensor.kaya_disk_12',
+ 'sensor.kaya_disk_13',
+ 'sensor.kaya_disk_14',
+ 'sensor.kaya_disk_15',
+ 'sensor.kaya_disk_16',
+ 'sensor.kaya_disk_17',
+ 'sensor.kaya_disk_18',
+ 'sensor.kaya_disk_19',
+ 'sensor.kaya_disk_20',
+ ]
+ %}
+
+ {% set ns = namespace(disk_array=[])%}
+ {% for disk in disk_order %}
+ {% set ns.disk_array = ns.disk_array + expand(disk) %}
+ {% endfor %}
+
+ {%- for disk in ns.disk_array -%}
+ {{
+ {
+ 'type': 'custom:button-card',
+ 'entity': disk.entity_id,
+ 'template': 'button_unraid_disk',
+ 'variables': {
+ 'connectivity_entity': 'sensor.kaya_cpu_utilization'
+ },
+ 'name': disk.attributes.friendly_name | replace("Kaya", "") | replace("Disk Parity", "Parity") | replace("Disk Cache Array", "Cache")
+ }
+ }},
+ {%- endfor %}
+
+ - type: custom:auto-entities
+ card:
+ type: grid
+ columns: 2
+ square: false
+ card_param: cards
+ filter:
+ template: >-
+ {% set disk_order = [
+ 'sensor.kaya_disk_cache_docker',
+ 'sensor.kaya_disk_cache_docker_2',
+ ]
+ %}
+
+ {% set ns = namespace(disk_array=[])%}
+ {% for disk in disk_order %}
+ {% set ns.disk_array = ns.disk_array + expand(disk) %}
+ {% endfor %}
+
+ {%- for disk in ns.disk_array -%}
+ {{
+ {
+ 'type': 'custom:button-card',
+ 'entity': disk.entity_id,
+ 'template': 'button_unraid_disk',
+ 'variables': {
+ 'connectivity_entity': 'sensor.kaya_cpu_utilization'
+ },
+ 'card_mod': {
+ 'style': 'ha-card { margin-top: 15px; }'
+ },
+ 'name': disk.attributes.friendly_name | replace("Disk Parity", "Parity") | replace("Disk Cache", "Cache") | replace("Kaya", "") | trim
+ }
+ }},
+ {%- endfor %}
+
+ - type: custom:auto-entities
+ card:
+ type: grid
+ columns: 2
+ square: false
+ card_param: cards
+ filter:
+ template: >-
+ {% set disk_order = [
+ 'sensor.kaya_disk_cache_system',
+ 'sensor.kaya_disk_cache_system_2',
+ ]
+ %}
+
+ {% set ns = namespace(disk_array=[])%}
+ {% for disk in disk_order %}
+ {% set ns.disk_array = ns.disk_array + expand(disk) %}
+ {% endfor %}
+
+ {%- for disk in ns.disk_array -%}
+ {{
+ {
+ 'type': 'custom:button-card',
+ 'entity': disk.entity_id,
+ 'template': 'button_unraid_disk',
+ 'variables': {
+ 'connectivity_entity': 'sensor.kaya_cpu_utilization'
+ },
+ 'card_mod': {
+ 'style': 'ha-card { margin-top: 15px; }'
+ },
+ 'name': disk.attributes.friendly_name | replace("Disk Parity", "Parity") | replace("Disk Cache", "Cache") | replace("Kaya", "") | trim
+ }
+ }},
+ {%- endfor %}
+
+ - type: custom:vertical-stack-in-card
+ title: Shares
+ card_mod:
+ style:
+ .: |
+ ha-card {
+ padding: 10px 0;
+ border-style: none !important;
+ }
+ $: |
+ .card-header {
+ padding: 0 !important;
+ line-height: 48px;
+ }
+ cards:
+ - type: custom:auto-entities
+ card:
+ type: grid
+ columns: 3
+ square: false
+ card_param: cards
+ filter:
+ template: >-
+ {% set share_list = states.sensor
+ | selectattr('entity_id','search', 'kaya_share')
+ | rejectattr('attributes.nameorig', 'in', ['appdata', 'backup', 'system', 'vms'])
+ | sort(reverse=true,attribute='state')
+ | map(attribute='entity_id')
+ | list
+ %}
+
+ {%- for share in share_list -%}
+ {{
+ {
+ 'type': 'custom:button-card',
+ 'template': 'button_network_share',
+ 'entity': share,
+ }
+ }},
+ {%- endfor %}
\ No newline at end of file
diff --git a/lovelace/templates/simple_bar.yaml b/lovelace/templates/simple_bar.yaml
new file mode 100644
index 0000000..301f069
--- /dev/null
+++ b/lovelace/templates/simple_bar.yaml
@@ -0,0 +1,50 @@
+button_simple_bar:
+ show_icon: false
+ show_state: true
+ variables:
+ name_padding: 50px
+ state_padding: 50px
+ max_value: 100
+ warning_from_state: 50
+ critical_from_state: 80
+ bar_color_normal: '#69B34C'
+ bar_color_warning: '#FAB733'
+ bar_color_critical: '#FF4E11'
+ bar_color_background: '#E5E6E8'
+ state_display: >
+ [[[
+ return (isNaN(entity.state)) ? '-' : `${entity.state}${entity.attributes.unit_of_measurement}`;
+ ]]]
+ styles:
+ grid:
+ - grid-template-areas: '"n bar s"'
+ - grid-template-columns: auto 1fr auto
+ - grid-template-rows: auto
+ card:
+ - color: '#000'
+ - font-size: 1em
+ - margin: 5px 0
+ - padding: 0
+ - border-style: none
+ - --mdc-ripple-color: none
+ name:
+ - text-align: left
+ - min-width: '[[[ return variables.name_padding ]]]'
+ state:
+ - font-weight: 500
+ - text-align: right
+ - min-width: '[[[ return variables.state_padding ]]]'
+ custom_fields:
+ bar: >
+ [[[
+ let pctUsed = (isNaN(entity.state)) ? 100 : (100*(entity.state / variables.max_value));
+ let barColor = variables.bar_color_normal;
+
+ barColor = (pctUsed > variables.warning_from_state ) ? variables.bar_color_warning : barColor;
+ barColor = (pctUsed > variables.critical_from_state ) ? variables.bar_color_critical : barColor;
+ barColor = (isNaN(entity.state)) ? variables.bar_color_background : barColor;
+
+      return `<div style="display: flex; height: 10px; border-radius: 4px; background-color: ${variables.bar_color_background};">
+        <div style="width: ${pctUsed}%; border-radius: 4px; background-color: ${barColor};"></div>
+      </div>`;
+ ]]]
\ No newline at end of file
diff --git a/lovelace/templates/unraid_disk.yaml b/lovelace/templates/unraid_disk.yaml
new file mode 100644
index 0000000..145f5a6
--- /dev/null
+++ b/lovelace/templates/unraid_disk.yaml
@@ -0,0 +1,179 @@
+button_unraid_disk:
+ variables:
+ connectivity_entity: ''
+
+ max_temp_normal_ata: 40
+ max_temp_warning_ata: 45
+ max_temp_normal_nvme: 50
+ max_temp_warning_nvme: 60
+
+ color_disk_empty: 'transparent'
+ color_disk_standby: '#D2D2D2'
+ color_disk_unavailable: '#F2F2F2'
+ color_disk_eror: '#FF4E11'
+ color_temp_normal: '#69B34C'
+ color_temp_warning: '#FAB733'
+ color_temp_critical: '#FF4E11'
+
+ show_icon: false
+ tap_action:
+ action: >
+ [[[
+ return (entity.attributes.transport) ? 'more-info' : 'none';
+ ]]]
+ styles:
+ card:
+ - padding: 0
+ - padding-top: 2px
+ - color: '#000'
+ - font-size: 1em
+ - border-radius: unset
+ - --mdc-ripple-color: none
+ - background-color: >
+ [[[
+ // Server is offline
+ if (variables.connectivity_entity !== '' && isNaN(states[variables.connectivity_entity].state)) {
+ return variables.color_disk_unavailable;
+ }
+
+ // Define temperature ranges
+ let activeTempRange = [variables.max_temp_normal_ata, variables.max_temp_warning_ata]
+
+ if (entity.attributes.transport === 'nvme') {
+ activeTempRange = [variables.max_temp_normal_nvme, variables.max_temp_warning_nvme]
+ }
+
+ // Parse state and change background-color accordingly
+ if (entity.state == 0 && entity.attributes.size == 0) return variables.color_disk_empty;
+ else if (entity.attributes.color && !entity.attributes.color.startsWith('green')) return variables.color_disk_eror;
+ else if (entity.state == 0) return variables.color_disk_standby;
+ else if (entity.state <= activeTempRange[0]) return variables.color_temp_normal;
+ else if (entity.state <= activeTempRange[1]) return variables.color_temp_warning;
+ else return variables.color_temp_critical;
+ ]]]
+ grid:
+ - grid-template-areas: '"icon n temp"
+ "info info used"
+ "shares shares shares"
+ "bar bar bar"'
+ - grid-template-columns: min-content 1fr
+ - grid-template-rows: 1fr
+ name:
+ - justify-self: start
+ - font-weight: 500
+ - color: '#000'
+ - padding-left: 5px
+ custom_fields:
+ icon:
+ - justify-self: start
+ - padding-left: 5px
+ - font-weight: bold
+ temp:
+ - grid-area: temp
+ - justify-self: end
+ - padding-right: 5px
+ - color: '#000'
+ - font-size: 0.8em
+ info:
+ - grid-area: info
+ - justify-self: start
+ - font-size: 0.8em
+ - padding-left: 5px
+ - text-transform: uppercase
+ shares:
+ - grid-area: shares
+ - justify-self: start
+ - font-size: 0.8em
+ - padding: 2px 0 4px 5px
+ used:
+ - grid-area: used
+ - justify-self: end
+ - padding-right: 5px
+ - color: '#000'
+ - font-size: 0.8em
+ bar:
+ - grid-area: bar
+ - font-size: 0.8em
+ - color: '#fff'
+ - width: 100%
+ custom_fields:
+ icon: >
+ [[[
+ if (entity.attributes.color) {
+ if (entity.attributes.color.startsWith('yellow')) { return '⚠'; }
+ else if (entity.attributes.color.startsWith('red')) { return '✖'; }
+ }
+ else {
+ return '?';
+ }
+ ]]]
+ temp: >
+ [[[
+ if (entity.state !== 'unknown' && entity.attributes.size > 0) return `${entity.state}°C`;
+ ]]]
+ info: >
+ [[[
+ function formatBytes(a,b=2){if(!+a)return"0 Bytes";const c=0>b?0:b,d=Math.floor(Math.log(a)/Math.log(1000));return`${parseFloat((a/Math.pow(1000,d)).toFixed(c))} ${["Bytes", "KB","MB","GB","TB","PB","EB","ZB","YB"][d]}`}
+
+ let diskSize = formatBytes(entity.attributes.size * 1024, 0);
+ let diskType = (entity.attributes.rotational) ? 'HDD' : 'SSD';
+ let diskTransport = entity.attributes.transport;
+ let diskStatus = (!isNaN(entity.state)) ? 'Empty' : 'Unavailable'
+ let diskInfo = (diskTransport) ? `${diskSize} ${diskType} ${diskTransport}` : diskStatus;
+
+ return diskInfo;
+ ]]]
+ shares: >
+ [[[
+ let regex = entity.entity_id.match(/disk_(\d+)/g) || [""];
+ let diskId = regex[0].replace("_", "")
+
+ if (!diskId)
+ return ' ';
+
+ let serverName = entity.entity_id.substring(entity.entity_id.indexOf(".") + 1, entity.entity_id.indexOf("_"));
+ let diskShares = Object.entries(states)
+ .filter(([k]) => k.includes(`${serverName}_share`))
+ .filter(([k, v]) => typeof v.attributes.include !== 'undefined')
+ .filter(([k, v]) => v.attributes.include.split(',').indexOf(`${diskId}`) !== -1)
+ .map(([k, v]) => { return v.attributes.nameorig });
+
+ let shares = (diskShares.length > 0) ? diskShares.join(', ') : ' ';
+
+ return shares;
+ ]]]
+ used: >
+ [[[
+ if(entity.attributes.fssize) return Math.ceil((100 * entity.attributes.fsused) / entity.attributes.fssize) + '%';
+ ]]]
+ bar: >
+ [[[
+ let hideProgress;
+ let percentageUsed = -1;
+ let progressColor = variables.color_temp_critical;
+
+ if(entity.attributes.fssize) percentageUsed = Math.ceil((100 * entity.attributes.fsused) / entity.attributes.fssize);
+ if (percentageUsed == -1) hideProgress = "visibility:hidden";
+
+ let activeTempRange = [variables.max_temp_normal_ata, variables.max_temp_warning_ata]
+ if (entity.attributes.transport === 'nvme') {
+ activeTempRange = [variables.max_temp_normal_nvme, variables.max_temp_warning_nvme]
+ }
+ if (entity.state == 0) progressColor = variables.color_disk_standby;
+ else if (entity.state <= activeTempRange[0]) progressColor = variables.color_temp_normal;
+ else if (entity.state <= activeTempRange[1]) progressColor = variables.color_temp_warning;
+ else progressColor = variables.color_temp_critical;
+
+ if (entity.attributes.color && !entity.attributes.color.startsWith('green')) {
+ progressColor = variables.color_disk_eror;
+ }
+
+ // Server is offline
+ if (variables.connectivity_entity !== '' && isNaN(states[variables.connectivity_entity].state)) {
+ progressColor = variables.color_disk_unavailable;
+ }
+
+      return `<div style="background-color: ${variables.color_disk_standby}; ${hideProgress || ''}">
+        <div style="width: ${percentageUsed}%; background-color: ${progressColor}; text-align: center;">${percentageUsed}%</div>
+      </div>`;
+ ]]]
\ No newline at end of file