import logging
import traceback
import json
import asyncio
from asyncio import StreamReader, StreamWriter
from config import Config
from inverter import Inverter
from gen3.connection_g3 import ConnectionG3
from aiomqtt import MqttCodeError
from infos import Infos

# import gc

# logger = logging.getLogger('conn')
logger_mqtt = logging.getLogger('mqtt')


class InverterG3(Inverter, ConnectionG3):
    '''Class InverterG3 is a derivation of an Async_Stream.

    The class has some class methods for managing common resources, like
    the connection to the MQTT broker or the proxy error counters, which
    are shared by all inverter connections.

    Instances of the class are connections to an inverter and can have an
    optional link to a remote connection to the TSUN cloud. A remote
    connection dies with the inverter connection.

    class methods:
        class_init():  Initialize the common resources of the proxy (MQTT
                       broker, proxy DB, etc.). Must be called before the
                       first inverter instance can be created.
        class_close(): Release the common resources of the proxy. Should
                       not be called before all instances of the class are
                       destroyed.

    methods:
        server_loop(addr): Async loop method for receiving messages from
                           the inverter (server-side).
        client_loop(addr): Async loop method for receiving messages from
                           the TSUN cloud (client-side).
        async_create_remote(): Establish a client connection to the TSUN
                               cloud.
        async_publ_mqtt(): Publish data to the MQTT broker.
        close(): Release method which must be called before an instance
                 can be destroyed (see the usage sketch at the end of
                 this module).
    '''

    def __init__(self, reader: StreamReader, writer: StreamWriter, addr):
        super().__init__(reader, writer, addr, None, True)
        self.__ha_restarts = -1

    async def async_create_remote(self) -> None:
        '''Establish a client connection to the TSUN cloud'''
        tsun = Config.get('tsun')
        host = tsun['host']
        port = tsun['port']
        addr = (host, port)

        try:
            logging.info(f'[{self.node_id}] Connect to {addr}')
            connect = asyncio.open_connection(host, port)
            reader, writer = await connect
            self.remoteStream = ConnectionG3(reader, writer, addr, self,
                                             False, self.id_str)
            logging.info(f'[{self.remoteStream.node_id}:'
                         f'{self.remoteStream.conn_no}] '
                         f'Connected to {addr}')
            asyncio.create_task(self.client_loop(addr))

        except (ConnectionRefusedError, TimeoutError) as error:
            logging.info(f'{error}')
        except Exception:
            self.inc_counter('SW_Exception')
            logging.error(
                f"Inverter: Exception for {addr}:\n"
                f"{traceback.format_exc()}")

    async def async_publ_mqtt(self) -> None:
        '''Publish data to the MQTT broker'''
        # register at Home Assistant when new inverter or collector infos
        # are available, or when Home Assistant has changed its status
        # back to online
        try:
            if (('inverter' in self.new_data and self.new_data['inverter'])
                    or ('collector' in self.new_data and
                        self.new_data['collector'])
                    or self.mqtt.ha_restarts != self.__ha_restarts):
                await self._register_proxy_stat_home_assistant()
                await self.__register_home_assistant()
                self.__ha_restarts = self.mqtt.ha_restarts

            for key in self.new_data:
                await self.__async_publ_mqtt_packet(key)
            for key in Infos.new_stat_data:
                await self._async_publ_mqtt_proxy_stat(key)

        except MqttCodeError as error:
            logging.error(f'Mqtt except: {error}')
        except Exception:
            self.inc_counter('SW_Exception')
            logging.error(
                "Inverter: Exception:\n"
                f"{traceback.format_exc()}")

    async def __async_publ_mqtt_packet(self, key):
        db = self.db.db
        if key in db and self.new_data[key]:
            data_json = json.dumps(db[key])
            node_id = self.node_id
            logger_mqtt.debug(f'{key}: {data_json}')
            await self.mqtt.publish(f'{self.entity_prfx}{node_id}{key}', data_json)  # noqa: E501
            self.new_data[key] = False

    async def __register_home_assistant(self) -> None:
        '''Register all our topics at Home Assistant'''
        for data_json, component, node_id, id in self.db.ha_confs(
                self.entity_prfx, self.node_id, self.unique_id,
                self.sug_area):
            logger_mqtt.debug(f"MQTT Register: cmp:'{component}'"
                              f" node_id:'{node_id}' {data_json}")
            await self.mqtt.publish(f"{self.discovery_prfx}{component}"
                                    f"/{node_id}{id}/config", data_json)

        self.db.reg_clr_at_midnight(f'{self.entity_prfx}{self.node_id}')

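    # Example of a resulting discovery topic (illustrative values only;
    # the prefix and ids below are assumptions, not taken from this
    # module): with discovery_prfx='homeassistant/', component='sensor',
    # node_id='inverter_1/' and id='out_power_123', the publish above
    # sends the config payload to:
    #
    #   homeassistant/sensor/inverter_1/out_power_123/config
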
    def close(self) -> None:
        logging.debug(f'InverterG3.close() l{self.l_addr} | r{self.r_addr}')
        super().close()  # call the close handler in the parent class
        # logging.info(f'Inverter refs: {gc.get_referrers(self)}')

    def __del__(self):
        logging.debug("InverterG3.__del__")
        super().__del__()
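
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original proxy): one
# InverterG3 instance is created per inbound inverter connection, as the
# class docstring describes. The handler names, the listen port, and the
# shutdown sequence below are assumptions derived from that docstring,
# not from the proxy's real entry point.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    async def _handle_inverter(reader: StreamReader,
                               writer: StreamWriter) -> None:
        addr = writer.get_extra_info('peername')
        inverter = InverterG3(reader, writer, addr)
        try:
            # receive and dispatch inverter messages until the
            # connection dies
            await inverter.server_loop(addr)
        finally:
            # close() must be called before the instance is destroyed
            inverter.close()

    async def _main() -> None:
        # NOTE: per the docstring, class_init() must initialize the shared
        # MQTT/proxy resources before the first instance is created; its
        # exact signature is not shown in this module, so the call is
        # omitted here.
        server = await asyncio.start_server(_handle_inverter,
                                            '0.0.0.0', 5005)
        async with server:
            await server.serve_forever()

    asyncio.run(_main())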