Dev 0.9 (#115)
* make timestamp handling stateless * adapt tests for stateless timestamp handling * initial version * add more type annotations * add more type annotations * fix Generator annotation for ha_proxy_confs * fix names of issue branches * add more type annotations * don't use deprecated vars anymore * don't mark all tests as async * fix imports * fix solarman unit tests - fake Mqtt class * print image build time during proxy start * update changelog * fix pytest collect warning * cleanup msg_get_time handler * adapt unit test * label debug images with debug * dump dropped packages * fix warnings * add systemtest with invalid start byte * update changelog * update changelog * add exposed ports and healthcheck * add wget for healthcheck * add aiohttp * use config validation for healthcheck * add http server for healthcheck * calculate msg processing time * add healthy check methods * fix typo * log ConfigErr with DEBUG level * Update async_stream.py - check if processing time is < 5 sec * add a close handler to release internal resources * call modbus close handler on a close call * add exception handling for forward handler * update changelog * isolate Modbus fix * cleanup * update changelog * add healthy handler * log unreleased references * add healthcheck * complete exposed port list * add wget for healthcheck * add aiohttp * use Enum class for State * calc processing time for healthcheck * add HTTP server for healthcheck * cleanup * Update CHANGELOG.md * update changelog * add docstrings to state enum * set new state State.received * add healthy method * log healthcheck info with DEBUG level * update changelog * S allius/issue100 (#101) * detect dead connections - disconnect connection on Msg receive timeout - improve connection trace (add connection id) * update changelog * fix merge conflict * fix unittests * S allius/issue108 (#109) * add more data types * adapt unittests * improve test coverage * fix linter warning * update changelog * S allius/issue102 (#110) * 
hotfix: don't send two MODBUS commands together * fix unit tests * remove read loop * optional sleep between msg read and sending rsp * wait after read 0.5s before sending a response * add pending state * fix state definitions * determine the connection timeout by the conn state * avoid sending MODBUS cmds in the inverter's reporting phase * update changelog * S allius/issue111 (#112) Synchronize regular MODBUS commands with the status of the inverter to prevent the inverter from crashing due to unexpected packets. * initial checkin * remove crontab entry for regular MODBUS cmds * add timer for regular MODBUS polling * fix Stop method call for already stopped timer * optimize MB_START_TIMEOUT value * cleanup * update changelog * fix buildx warnings * fix timer cleanup * fix Config.class_init() - return error string or None - release Schema structure after building the config * add quiet flag to docker push * fix timeout calculation * rename python to debugpy * add asyncio log * cleanup shutdown - stop webserver on shutdown - enable asyncio debug mode for debug versions * update changelog * update changelog * fix exception in MODBUS timeout callback * update changelog
This commit is contained in:
@@ -1,44 +1,77 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import traceback
|
||||
from messages import hex_dump_memory
|
||||
import time
|
||||
from asyncio import StreamReader, StreamWriter
|
||||
from messages import hex_dump_memory, State
|
||||
from typing import Self
|
||||
from itertools import count
|
||||
|
||||
import gc
|
||||
logger = logging.getLogger('conn')
|
||||
|
||||
|
||||
class AsyncStream():
|
||||
_ids = count(0)
|
||||
MAX_PROC_TIME = 2
|
||||
'''maximum processing time for a received msg in sec'''
|
||||
MAX_START_TIME = 400
|
||||
'''maximum time without a received msg in sec'''
|
||||
MAX_INV_IDLE_TIME = 90
|
||||
'''maximum time without a received msg from the inverter in sec'''
|
||||
MAX_CLOUD_IDLE_TIME = 360
|
||||
'''maximum time without a received msg from cloud side in sec'''
|
||||
|
||||
def __init__(self, reader, writer, addr) -> None:
|
||||
def __init__(self, reader: StreamReader, writer: StreamWriter,
|
||||
addr) -> None:
|
||||
logger.debug('AsyncStream.__init__')
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.addr = addr
|
||||
self.r_addr = ''
|
||||
self.l_addr = ''
|
||||
self.conn_no = next(self._ids)
|
||||
self.proc_start = None # start processing start timestamp
|
||||
self.proc_max = 0
|
||||
|
||||
async def server_loop(self, addr):
|
||||
def __timeout(self) -> int:
|
||||
if self.state == State.init:
|
||||
to = self.MAX_START_TIME
|
||||
else:
|
||||
if self.server_side:
|
||||
to = self.MAX_INV_IDLE_TIME
|
||||
else:
|
||||
to = self.MAX_CLOUD_IDLE_TIME
|
||||
return to
|
||||
|
||||
async def server_loop(self, addr: str) -> None:
|
||||
'''Loop for receiving messages from the inverter (server-side)'''
|
||||
logging.info(f'[{self.node_id}] Accept connection from {addr}')
|
||||
logger.info(f'[{self.node_id}:{self.conn_no}] '
|
||||
f'Accept connection from {addr}')
|
||||
self.inc_counter('Inverter_Cnt')
|
||||
await self.loop()
|
||||
self.dec_counter('Inverter_Cnt')
|
||||
logging.info(f'[{self.node_id}] Server loop stopped for'
|
||||
f' r{self.r_addr}')
|
||||
logger.info(f'[{self.node_id}:{self.conn_no}] Server loop stopped for'
|
||||
f' r{self.r_addr}')
|
||||
|
||||
# if the server connection closes, we also have to disconnect
|
||||
# the connection to te TSUN cloud
|
||||
if self.remoteStream:
|
||||
logging.debug("disconnect client connection")
|
||||
logger.info(f'[{self.node_id}:{self.conn_no}] disc client '
|
||||
f'connection: [{self.remoteStream.node_id}:'
|
||||
f'{self.remoteStream.conn_no}]')
|
||||
await self.remoteStream.disc()
|
||||
try:
|
||||
await self._async_publ_mqtt_proxy_stat('proxy')
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
async def client_loop(self, addr):
|
||||
async def client_loop(self, addr: str) -> None:
|
||||
'''Loop for receiving messages from the TSUN cloud (client-side)'''
|
||||
clientStream = await self.remoteStream.loop()
|
||||
logging.info(f'[{self.node_id}] Client loop stopped for'
|
||||
f' l{clientStream.l_addr}')
|
||||
logger.info(f'[{clientStream.node_id}:{clientStream.conn_no}] '
|
||||
'Client loop stopped for'
|
||||
f' l{clientStream.l_addr}')
|
||||
|
||||
# if the client connection closes, we don't touch the server
|
||||
# connection. Instead we erase the client connection stream,
|
||||
@@ -54,28 +87,45 @@ class AsyncStream():
|
||||
# than erase client connection
|
||||
self.remoteStream = None
|
||||
|
||||
async def loop(self):
|
||||
async def loop(self) -> Self:
|
||||
"""Async loop handler for precessing all received messages"""
|
||||
self.r_addr = self.writer.get_extra_info('peername')
|
||||
self.l_addr = self.writer.get_extra_info('sockname')
|
||||
|
||||
self.proc_start = time.time()
|
||||
while True:
|
||||
try:
|
||||
await self.__async_read()
|
||||
proc = time.time() - self.proc_start
|
||||
if proc > self.proc_max:
|
||||
self.proc_max = proc
|
||||
self.proc_start = None
|
||||
dead_conn_to = self.__timeout()
|
||||
await asyncio.wait_for(self.__async_read(),
|
||||
dead_conn_to)
|
||||
|
||||
if self.unique_id:
|
||||
await self.async_write()
|
||||
await self.__async_forward()
|
||||
await self.async_publ_mqtt()
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
logger.warning(f'[{self.node_id}:{self.conn_no}] Dead '
|
||||
f'connection timeout ({dead_conn_to}s) '
|
||||
f'for {self.l_addr}')
|
||||
await self.disc()
|
||||
self.close()
|
||||
return self
|
||||
|
||||
except OSError as error:
|
||||
logger.error(f'[{self.node_id}] {error} for l{self.l_addr} | '
|
||||
logger.error(f'[{self.node_id}:{self.conn_no}] '
|
||||
f'{error} for l{self.l_addr} | '
|
||||
f'r{self.r_addr}')
|
||||
await self.disc()
|
||||
self.close()
|
||||
return self
|
||||
|
||||
except RuntimeError as error:
|
||||
logger.info(f"[{self.node_id}] {error} for {self.l_addr}")
|
||||
logger.info(f'[{self.node_id}:{self.conn_no}] '
|
||||
f'{error} for {self.l_addr}')
|
||||
await self.disc()
|
||||
self.close()
|
||||
return self
|
||||
@@ -86,31 +136,8 @@ class AsyncStream():
|
||||
f"Exception for {self.addr}:\n"
|
||||
f"{traceback.format_exc()}")
|
||||
|
||||
async def disc(self) -> None:
|
||||
if self.writer.is_closing():
|
||||
return
|
||||
logger.debug(f'AsyncStream.disc() l{self.l_addr} | r{self.r_addr}')
|
||||
self.writer.close()
|
||||
await self.writer.wait_closed()
|
||||
|
||||
def close(self):
|
||||
if self.writer.is_closing():
|
||||
return
|
||||
logger.debug(f'AsyncStream.close() l{self.l_addr} | r{self.r_addr}')
|
||||
self.writer.close()
|
||||
|
||||
'''
|
||||
Our private methods
|
||||
'''
|
||||
async def __async_read(self) -> None:
|
||||
data = await self.reader.read(4096)
|
||||
if data:
|
||||
self._recv_buffer += data
|
||||
self.read() # call read in parent class
|
||||
else:
|
||||
raise RuntimeError("Peer closed.")
|
||||
|
||||
async def async_write(self, headline='Transmit to ') -> None:
|
||||
async def async_write(self, headline: str = 'Transmit to ') -> None:
|
||||
"""Async write handler to transmit the send_buffer"""
|
||||
if self._send_buffer:
|
||||
hex_dump_memory(logging.INFO, f'{headline}{self.addr}:',
|
||||
self._send_buffer, len(self._send_buffer))
|
||||
@@ -118,8 +145,57 @@ class AsyncStream():
|
||||
await self.writer.drain()
|
||||
self._send_buffer = bytearray(0) # self._send_buffer[sent:]
|
||||
|
||||
async def disc(self) -> None:
|
||||
"""Async disc handler for graceful disconnect"""
|
||||
if self.writer.is_closing():
|
||||
return
|
||||
logger.debug(f'AsyncStream.disc() l{self.l_addr} | r{self.r_addr}')
|
||||
self.writer.close()
|
||||
await self.writer.wait_closed()
|
||||
|
||||
def close(self) -> None:
|
||||
"""close handler for a no waiting disconnect
|
||||
|
||||
hint: must be called before releasing the connection instance
|
||||
"""
|
||||
self.reader.feed_eof() # abort awaited read
|
||||
if self.writer.is_closing():
|
||||
return
|
||||
logger.debug(f'AsyncStream.close() l{self.l_addr} | r{self.r_addr}')
|
||||
self.writer.close()
|
||||
|
||||
def healthy(self) -> bool:
|
||||
elapsed = 0
|
||||
if self.proc_start is not None:
|
||||
elapsed = time.time() - self.proc_start
|
||||
if self.state == State.closed or elapsed > self.MAX_PROC_TIME:
|
||||
logging.debug(f'[{self.node_id}:{self.conn_no}:'
|
||||
f'{type(self).__name__}]'
|
||||
f' act:{round(1000*elapsed)}ms'
|
||||
f' max:{round(1000*self.proc_max)}ms')
|
||||
logging.debug(f'Healthy()) refs: {gc.get_referrers(self)}')
|
||||
return elapsed < 5
|
||||
|
||||
'''
|
||||
Our private methods
|
||||
'''
|
||||
async def __async_read(self) -> None:
|
||||
"""Async read handler to read received data from TCP stream"""
|
||||
data = await self.reader.read(4096)
|
||||
if data:
|
||||
self.proc_start = time.time()
|
||||
self._recv_buffer += data
|
||||
wait = self.read() # call read in parent class
|
||||
if wait > 0:
|
||||
await asyncio.sleep(wait)
|
||||
else:
|
||||
raise RuntimeError("Peer closed.")
|
||||
|
||||
async def __async_forward(self) -> None:
|
||||
if self._forward_buffer:
|
||||
"""forward handler transmits data over the remote connection"""
|
||||
if not self._forward_buffer:
|
||||
return
|
||||
try:
|
||||
if not self.remoteStream:
|
||||
await self.async_create_remote()
|
||||
if self.remoteStream:
|
||||
@@ -136,6 +212,30 @@ class AsyncStream():
|
||||
await self.remoteStream.writer.drain()
|
||||
self._forward_buffer = bytearray(0)
|
||||
|
||||
except OSError as error:
|
||||
if self.remoteStream:
|
||||
rmt = self.remoteStream
|
||||
self.remoteStream = None
|
||||
logger.error(f'[{rmt.node_id}:{rmt.conn_no}] Fwd: {error} for '
|
||||
f'l{rmt.l_addr} | r{rmt.r_addr}')
|
||||
await rmt.disc()
|
||||
rmt.close()
|
||||
|
||||
except RuntimeError as error:
|
||||
if self.remoteStream:
|
||||
rmt = self.remoteStream
|
||||
self.remoteStream = None
|
||||
logger.info(f'[{rmt.node_id}:{rmt.conn_no}] '
|
||||
f'Fwd: {error} for {rmt.l_addr}')
|
||||
await rmt.disc()
|
||||
rmt.close()
|
||||
|
||||
except Exception:
|
||||
self.inc_counter('SW_Exception')
|
||||
logger.error(
|
||||
f"Fwd Exception for {self.addr}:\n"
|
||||
f"{traceback.format_exc()}")
|
||||
|
||||
def __del__(self):
|
||||
logger.debug(
|
||||
f"AsyncStream.__del__ l{self.l_addr} | r{self.r_addr}")
|
||||
|
||||
Reference in New Issue
Block a user