Dev 0.9 (#115)
* make timestamp handling stateless
* adapt tests for stateless timestamp handling
* initial version
* add more type annotations
* add more type annotations
* fix Generator annotation for ha_proxy_confs
* fix names of issue branches
* add more type annotations
* don't use deprecated varn anymore
* don't mark all tests as async
* fix imports
* fix solarman unit tests - fake Mqtt class
* print image build time during proxy start
* update changelog
* fix pytest collect warning
* cleanup msg_get_time handler
* adapt unit test
* label debug images with debug
* dump dropped packages
* fix warnings
* add systemtest with invalid start byte
* update changelog
* update changelog
* add exposed ports and healthcheck
* add wget for healthcheck
* add aiohttp
* use config validation for healthcheck
* add http server for healthcheck
* calculate msg processing time
* add healthy check methods
* fix typo
* log ConfigErr with DEBUG level
* Update async_stream.py - check if processing time is < 5 sec
* add a close handler to release internal resources
* call modbus close handler on a close call
* add exception handling for forward handler
* update changelog
* isolate Modbus fix
* cleanup
* update changelog
* add healthy handler
* log unreleased references
* add healthcheck
* complete exposed port list
* add wget for healthcheck
* add aiohttp
* use Enum class for State
* calc processing time for healthcheck
* add HTTP server for healthcheck
* cleanup
* Update CHANGELOG.md
* update changelog
* add docstrings to state enum
* set new state State.received
* add healthy method
* log healthcheck infos with DEBUG level
* update changelog
* SAllius/issue100 (#101)
* detect dead connections
  - disconnect connection on Msg receive timeout
  - improve connection trace (add connection id)
* update changelog
* fix merge conflict
* fix unittests
* SAllius/issue108 (#109)
* add more data types
* adapt unittests
* improve test coverage
* fix linter warning
* update changelog
* SAllius/issue102 (#110)
* hotfix: don't send two MODBUS commands together
* fix unit tests
* remove read loop
* optional sleep between msg read and sending rsp
* wait 0.5 s after a read before sending a response
* add pending state
* fix state definitions
* determine the connection timeout by the conn state
* avoid sending MODBUS cmds in the inverter's reporting phase
* update changelog
* SAllius/issue111 (#112): Synchronize regular MODBUS commands with the status of the inverter to prevent the inverter from crashing due to unexpected packets.
* initial checkin
* remove crontab entry for regular MODBUS cmds
* add timer for regular MODBUS polling
* fix Stop method call for already stopped timer
* optimize MB_START_TIMEOUT value
* cleanup
* update changelog
* fix buildx warnings
* fix timer cleanup
* fix Config.class_init()
  - return error string or None
  - release Schema structure after building the config
* add quiet flag to docker push
* fix timeout calculation
* rename python to debugpy
* add asyncio log
* cleanup shutdown
  - stop webserver on shutdown
  - enable asyncio debug mode for debug versions
* update changelog
* update changelog
* fix exception in MODBUS timeout callback
* update changelog
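Several of these entries ('use Enum class for State', 'add pending state', 'set new state State.received') describe a small per-connection state machine that the MODBUS scheduling keys off. The commit itself only names the received and pending states; the sketch below is a hypothetical reconstruction, with the remaining member names and values invented for illustration:

    from enum import Enum

    class State(Enum):
        '''Connection state used to gate regular MODBUS commands'''
        init = 0      # hypothetical: connection opened, nothing received yet
        received = 1  # a message has been received from the inverter
        pending = 2   # a command is in flight; don't send another one
        closed = 3    # hypothetical: connection has been shut down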
@@ -2,6 +2,8 @@ import logging
 import asyncio
 import signal
 import os
+from asyncio import StreamReader, StreamWriter
+from aiohttp import web
 from logging import config  # noqa F401
 from messages import Message
 from inverter import Inverter
@@ -10,25 +12,83 @@ from gen3plus.inverter_g3p import InverterG3P
 from scheduler import Schedule
 from config import Config
 
+routes = web.RouteTableDef()
+proxy_is_up = False
 
-async def handle_client(reader, writer):
+
+@routes.get('/')
+async def hello(request):
+    return web.Response(text="Hello, world")
+
+
+@routes.get('/-/ready')
+async def ready(request):
+    if proxy_is_up:
+        status = 200
+        text = 'Is ready'
+    else:
+        status = 503
+        text = 'Not ready'
+    return web.Response(status=status, text=text)
+
+
+@routes.get('/-/healthy')
+async def healthy(request):
+
+    if proxy_is_up:
+        # logging.info('web request healthy()')
+        for stream in Message:
+            try:
+                res = stream.healthy()
+                if not res:
+                    return web.Response(status=503, text="I have a problem")
+            except Exception as err:
+                logging.info(f'Exception:{err}')
+
+    return web.Response(status=200, text="I'm fine")
+
+
+async def webserver(addr, port):
+    '''coro running our webserver'''
+    app = web.Application()
+    app.add_routes(routes)
+    runner = web.AppRunner(app)
+
+    await runner.setup()
+    site = web.TCPSite(runner, addr, port)
+    await site.start()
+    logging.info(f'HTTP server listen on port: {port}')
+
+    try:
+        # Normal interaction with aiohttp
+        while True:
+            await asyncio.sleep(3600)  # sleep forever
+    except asyncio.CancelledError:
+        logging.info('HTTP server cancelled')
+        await runner.cleanup()
+        logging.debug('HTTP cleanup done')
+
+
+async def handle_client(reader: StreamReader, writer: StreamWriter):
     '''Handles a new incoming connection and starts an async loop'''
 
     addr = writer.get_extra_info('peername')
     await InverterG3(reader, writer, addr).server_loop(addr)
 
 
-async def handle_client_v2(reader, writer):
+async def handle_client_v2(reader: StreamReader, writer: StreamWriter):
     '''Handles a new incoming connection and starts an async loop'''
 
     addr = writer.get_extra_info('peername')
     await InverterG3P(reader, writer, addr).server_loop(addr)
 
 
-async def handle_shutdown(loop):
+async def handle_shutdown(web_task):
     '''Close all TCP connections and stop the event loop'''
 
     logging.info('Shutdown due to SIGTERM')
+    global proxy_is_up
+    proxy_is_up = False
 
     #
     # first, disc all open TCP connections gracefully
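Both probe endpoints can be exercised against port 8127, the port the main block further down hands to webserver(). A minimal standalone sketch using only the Python standard library; the status codes and bodies mirror the handlers above, while localhost is an assumption (inside Docker the mapped address applies):

    import urllib.request
    from urllib.error import HTTPError

    def probe(path):
        '''Return (status, body) of a proxy probe endpoint'''
        url = f'http://localhost:8127{path}'
        try:
            with urllib.request.urlopen(url, timeout=2) as rsp:
                return rsp.status, rsp.read().decode()
        except HTTPError as err:
            # a 503 answer arrives as HTTPError but is a valid probe result
            return err.code, err.read().decode()

    print(probe('/-/ready'))    # (200, 'Is ready') once the proxy is up
    print(probe('/-/healthy'))  # (200, "I'm fine") while all streams are healthy

This matches the wget-based Docker healthcheck mentioned in the changelog, which only needs the HTTP status code.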
@@ -38,7 +98,7 @@ async def handle_shutdown(loop):
             await asyncio.wait_for(stream.disc(), 2)
         except Exception:
             pass
-    logging.info('Disconnecting done')
+    logging.info('Proxy disconnecting done')
 
     #
     # second, close all open TCP connections
@@ -46,12 +106,20 @@ async def handle_shutdown(loop):
     for stream in Message:
         stream.close()
 
-    #
-    # at last, we stop the loop
-    #
-    loop.stop()
     await asyncio.sleep(0.1)  # give time for closing
     logging.info('Proxy closing done')
 
     logging.info('Shutdown complete')
+    #
+    # third, cancel the web server
+    #
+    web_task.cancel()
+    await web_task
+
+    #
+    # at last, start a coro for stopping the loop
+    #
+    logging.debug("Stop event loop")
+    loop.stop()
 
 
 def get_log_level() -> int:
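The cancellation hand-off above deserves a note: web_task.cancel() raises asyncio.CancelledError inside webserver(), whose except branch runs runner.cleanup() and returns normally, so the following await web_task completes instead of re-raising. A minimal sketch of that cancel-and-await pattern, independent of aiohttp:

    import asyncio

    async def worker():
        try:
            while True:
                await asyncio.sleep(3600)   # idle, like the webserver coro
        except asyncio.CancelledError:
            print('cancelled, releasing resources')   # cleanup happens here

    async def main():
        task = asyncio.create_task(worker())
        await asyncio.sleep(0)   # give the worker a chance to start
        task.cancel()
        await task               # returns once the except branch has finished

    asyncio.run(main())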
@@ -84,17 +152,28 @@ if __name__ == "__main__":
     logging.getLogger('conn').setLevel(log_level)
     logging.getLogger('data').setLevel(log_level)
     logging.getLogger('tracer').setLevel(log_level)
+    logging.getLogger('asyncio').setLevel(log_level)
     # logging.getLogger('mqtt').setLevel(log_level)
 
-    # read config file
-    Config.class_init()
-
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
 
+    # read config file
+    ConfigErr = Config.class_init()
+    if ConfigErr is not None:
+        logging.info(f'ConfigErr: {ConfigErr}')
     Inverter.class_init()
     Schedule.start()
+
+    #
+    # Create tasks for our listening servers. These must be tasks! If we call
+    # start_server directly out of our main task, the eventloop will be blocked
+    # and we can't receive and handle the UNIX signals!
+    #
+    loop.create_task(asyncio.start_server(handle_client, '0.0.0.0', 5005))
+    loop.create_task(asyncio.start_server(handle_client_v2, '0.0.0.0', 10000))
+    web_task = loop.create_task(webserver('0.0.0.0', 8127))
 
     #
     # Register some UNIX signal handlers for a graceful server shutdown
     # on Docker restart and stop
@@ -102,22 +181,18 @@ if __name__ == "__main__":
     for signame in ('SIGINT', 'SIGTERM'):
         loop.add_signal_handler(getattr(signal, signame),
                                 lambda loop=loop: asyncio.create_task(
-                                    handle_shutdown(loop)))
-
-    #
-    # Create taska for our listening servera. These must be tasks! If we call
-    # start_server directly out of our main task, the eventloop will be blocked
-    # and we can't receive and handle the UNIX signals!
-    #
-    loop.create_task(asyncio.start_server(handle_client, '0.0.0.0', 5005))
-    loop.create_task(asyncio.start_server(handle_client_v2, '0.0.0.0', 10000))
+                                    handle_shutdown(web_task)))
+
+    loop.set_debug(log_level == logging.DEBUG)
     try:
+        if ConfigErr is None:
+            proxy_is_up = True
         loop.run_forever()
     except KeyboardInterrupt:
         pass
     finally:
         logging.info("Event loop is stopped")
         Inverter.class_close(loop)
-        logging.info('Close event loop')
+        logging.debug('Close event loop')
         loop.close()
         logging.info(f'Finally, exit Server "{serv_name}"')
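For reference, the signal wiring in a minimal, self-contained form (Unix only, since loop.add_signal_handler is not available on Windows). The handler runs inside the event loop and must not block, so it merely schedules the shutdown coroutine as a task; a trivial coroutine stands in for handle_shutdown() here:

    import asyncio
    import signal

    async def shutdown(loop):
        print('shutting down')
        loop.stop()

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    for signame in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(getattr(signal, signame),
                                lambda: loop.create_task(shutdown(loop)))
    loop.run_forever()   # Ctrl-C now schedules shutdown() instead of raising
    loop.close()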