tsun-gen3-proxy/app/src/server.py
Stefan Allius b364fb3f8e Dev 0.10 (#151)
* S allius/issue117 (#118)

* add shutdown flag

* add more register definitions

* add start command for client-side connections

* add first support for port 8899

* fix shutdown

* add client_mode configuration

* read client_mode config to setup inverter connections

* add client_mode connections over port 8899

* add preview build

* Update README.md

describe the new client-mode over port 8899 for GEN3PLUS
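
  As an illustration only (not the project's actual API), such a
  client-mode link can be dialed out with asyncio; the helper name
  connect_inverter and the reuse of server_loop() are assumptions:

      import asyncio
      from gen3plus.inverter_g3p import InverterG3P

      async def connect_inverter(host: str, port: int = 8899):
          '''Open an outbound connection to a GEN3PLUS inverter
             and run the usual message loop on it.'''
          reader, writer = await asyncio.open_connection(host, port)
          addr = writer.get_extra_info('peername')
          await InverterG3P(reader, writer, addr).server_loop(addr)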

* MODBUS: the last digit of the inverter version is a hexadecimal number (#121)

* S allius/issue117 (#122)

* add shutdown flag

* add more register definitions

* add start command for client-side connections

* add first support for port 8899

* fix shutdown

* add client_mode configuration

* read client_mode config to setup inverter connections

* add client_mode connections over port 8899

* add preview build

* add documentation for client_mode

* catch OS errors and log them at DEBUG level

* update changelog

* make the maximum output coefficient configurable (#124)

* S allius/issue120 (#126)

* add config option to disable the modbus polling

* read more modbus regs in polling mode

* extend connection timeouts if polling mode is disabled

* update changelog

* S allius/issue125 (#127)

* fix linter warning

* move sequence diagram to wiki

* catch asyncio.CancelledError

* S allius/issue128 (#130)

* set Register.NO_INPUTS to a fixed value of 4 for GEN3PLUS

* don't set Register.NO_INPUTS per MODBUS

* fix unit tests

* register OUTPUT_COEFFICIENT at HA

* update changelog

* Home Assistant: improve inverter status value texts

* GEN3: add inverter status

* on closing send outstanding MQTT data to the broker

* force MQTT publish on every conn open and close

* reset inverter state on close

- workaround that resets the inverter status to
  offline when the inverter reports very low
  output power on connection close

* improve client mode
- reduce the polling cadence to 30s
- set controller statistics for HA

* client mode: set controller IP for HA

* S allius/issue131 (#132)

* Make __publish_outstanding_mqtt public

* update proxy counter

- on client-mode connection establishment or
  disconnect, update the connection counter

* Update README.md (#133)

* reset inverter state on close

- workaround that resets the inverter status to
  offline when the inverter reports very low
  output power on connection close

* S allius/issue134 (#135)

* add polling interval and method ha_remove()

* add client_mode arg to constructors

- add PollingInterval

* hide some topics in client mode

- we hide topics in HA by publishing an empty
  registration message on the MQTT discovery topic
  during HA auto configuration
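
  For illustration, hiding one sensor could look like the sketch
  below; the helper and topic layout are assumptions (the topic
  follows Home Assistant's default discovery scheme), but the
  mechanism itself is standard: HA removes a discovered entity when
  an empty retained payload arrives on its config topic.

      import aiomqtt

      async def hide_entity(client: aiomqtt.Client,
                            node_id: str, object_id: str):
          '''Hide a sensor by clearing its HA discovery config topic.'''
          topic = f'homeassistant/sensor/{node_id}/{object_id}/config'
          await client.publish(topic, payload='', retain=True)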

* add client_mode value

* update class diagram

* fix modbus close handler

- fix empty call and clean up the queue
- add unit test

* don't send an initial 1710 msg in client mode

* change HA icon for inverter status

* increase test coverage

* accelerate timer tests

* bump aiomqtt and schema to latest release (#137)

* MQTT timestamps and protocol improvements (#140)

* add TS_INPUT, TS_GRID and TS_TOTAL

* prepare MQTT timestamps

- add _set_mqtt_timestamp method
- fix hexdump printing

* push dev and debug images to docker.io

* add Unix epoch timestamp for MQTT packets

* set timezone for unit tests

* set a name for the set-timezone step

* trigger new action

* GEN3 and GEN3PLUS: handle multiple messages

- read: iterate over the receive buffer
- forward: append messages to the forward buffer
- _update_header: iterate over the forward buffer
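
  A minimal sketch of this buffer iteration (the 16-bit length field
  and frame layout are illustrative, not the real GEN3/GEN3PLUS wire
  format):

      def split_frames(buf: bytearray) -> list[bytes]:
          '''Pop every complete frame off the receive buffer; a
             trailing partial frame stays in buf for the next read.'''
          frames = []
          while len(buf) >= 2:
              # assumed: big-endian total frame length at offset 0
              frame_len = int.from_bytes(buf[:2], 'big')
              if frame_len < 2 or len(buf) < frame_len:
                  break   # incomplete (or bogus) frame, wait for more
              frames.append(bytes(buf[:frame_len]))
              del buf[:frame_len]
          return frames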

* GEN3: optimize timeout handling

- longer timeout in states init and received
- go to state pending only from state up

* update changelog

* cleanup

* print coloured logs

* Create sonarcloud.yml (#143)

* Update sonarcloud.yml

* Update sonarcloud.yml

* Update sonarcloud.yml

* Update sonarcloud.yml

* Update sonarcloud.yml

* build multi-arch images with SBOMs (#146)

* don't send MODBUS request when state is not up (#147)

* adapt timings

* don't send MODBUS request when state is not up

* adapt unit test

* make test code cleaner (#148)

* Make test code cleaner (#149)

* cleanup

* Code coverage for SonarCloud (#150)

* cleanup code and unit tests

* add test coverage for SonarCloud

* configure SonarCloud

* update changelog

* Do not build on *.yml changes

* prepare release 0.10.0

* disable MODBUS_POLLING for GEN3PLUS in example config

* bump aiohttp to version 3.10.2

* code cleanup

* Fetch all history for all tags and branches
2024-08-09 23:16:47 +02:00

209 lines
5.8 KiB
Python

import logging
import asyncio
import signal
import os
from asyncio import StreamReader, StreamWriter
from aiohttp import web
from logging import config  # noqa F401
from messages import Message
from inverter import Inverter
from gen3.inverter_g3 import InverterG3
from gen3plus.inverter_g3p import InverterG3P
from scheduler import Schedule
from config import Config
from modbus_tcp import ModbusTcp

routes = web.RouteTableDef()
proxy_is_up = False


@routes.get('/')
async def hello(request):
    return web.Response(text="Hello, world")


@routes.get('/-/ready')
async def ready(request):
    if proxy_is_up:
        status = 200
        text = 'Is ready'
    else:
        status = 503
        text = 'Not ready'
    return web.Response(status=status, text=text)


@routes.get('/-/healthy')
async def healthy(request):
    if proxy_is_up:
        # logging.info('web request healthy()')
        for stream in Message:
            try:
                res = stream.healthy()
                if not res:
                    return web.Response(status=503, text="I have a problem")
            except Exception as err:
                logging.info(f'Exception:{err}')
    return web.Response(status=200, text="I'm fine")


async def webserver(addr, port):
    '''coro running our webserver'''
    app = web.Application()
    app.add_routes(routes)
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, addr, port)
    await site.start()
    logging.info(f'HTTP server listen on port: {port}')
    try:
        # Normal interaction with aiohttp
        while True:
            await asyncio.sleep(3600)   # sleep forever
    except asyncio.CancelledError:
        logging.info('HTTP server cancelled')
    await runner.cleanup()
    logging.debug('HTTP cleanup done')


async def handle_client(reader: StreamReader, writer: StreamWriter):
    '''Handles a new incoming connection and starts an async loop'''
    addr = writer.get_extra_info('peername')
    await InverterG3(reader, writer, addr).server_loop(addr)


async def handle_client_v2(reader: StreamReader, writer: StreamWriter):
    '''Handles a new incoming connection and starts an async loop'''
    addr = writer.get_extra_info('peername')
    await InverterG3P(reader, writer, addr).server_loop(addr)


async def handle_shutdown(web_task):
    '''Close all TCP connections and stop the event loop'''
    logging.info('Shutdown due to SIGTERM')
    global proxy_is_up
    proxy_is_up = False
    #
    # first, disconnect all open TCP connections gracefully
    #
    for stream in Message:
        stream.shutdown_started = True
        try:
            await asyncio.wait_for(stream.disc(), 2)
        except Exception:
            pass
    logging.info('Proxy disconnecting done')
    #
    # second, close all open TCP connections
    #
    for stream in Message:
        stream.close()
    await asyncio.sleep(0.1)    # give time for closing
    logging.info('Proxy closing done')
    #
    # third, cancel the web server
    #
    web_task.cancel()
    await web_task
    #
    # now cancel all remaining (pending) tasks
    #
    pending = asyncio.all_tasks()
    for task in pending:
        task.cancel()
    #
    # at last, stop the event loop
    #
    logging.debug("Stop event loop")
    loop.stop()


def get_log_level() -> int:
    '''checks if LOG_LVL is set in the environment and returns the
    corresponding logging.LOG_LEVEL'''
    log_level = os.getenv('LOG_LVL', 'INFO')
    if log_level == 'DEBUG':
        log_level = logging.DEBUG
    elif log_level == 'WARN':
        log_level = logging.WARNING
    else:
        log_level = logging.INFO
    return log_level


if __name__ == "__main__":
    #
    # Setup our daily, rotating logger
    #
    serv_name = os.getenv('SERVICE_NAME', 'proxy')
    version = os.getenv('VERSION', 'unknown')
    logging.config.fileConfig('logging.ini')
    logging.info(f'Server "{serv_name} - {version}" will be started')

    # set lowest severity for the 'root', 'msg', 'conn' and 'data' loggers
    log_level = get_log_level()
    logging.getLogger().setLevel(log_level)
    logging.getLogger('msg').setLevel(log_level)
    logging.getLogger('conn').setLevel(log_level)
    logging.getLogger('data').setLevel(log_level)
    logging.getLogger('tracer').setLevel(log_level)
    logging.getLogger('asyncio').setLevel(log_level)
    # logging.getLogger('mqtt').setLevel(log_level)

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    # read config file
    ConfigErr = Config.class_init()
    if ConfigErr is not None:
        logging.info(f'ConfigErr: {ConfigErr}')

    Inverter.class_init()
    Schedule.start()
    mb_tcp = ModbusTcp(loop)

    #
    # Create tasks for our listening servers. These must be tasks! If we
    # call start_server directly out of our main task, the event loop will
    # be blocked and we can't receive and handle the UNIX signals!
    #
    loop.create_task(asyncio.start_server(handle_client, '0.0.0.0', 5005))
    loop.create_task(asyncio.start_server(handle_client_v2, '0.0.0.0', 10000))
    web_task = loop.create_task(webserver('0.0.0.0', 8127))

    #
    # Register UNIX signal handlers for a graceful server shutdown
    # on Docker restart and stop
    #
    for signame in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(getattr(signal, signame),
                                lambda loop=loop: asyncio.create_task(
                                    handle_shutdown(web_task)))

    loop.set_debug(log_level == logging.DEBUG)
    try:
        if ConfigErr is None:
            proxy_is_up = True
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        logging.info("Event loop is stopped")
        Inverter.class_close(loop)
        logging.debug('Close event loop')
        loop.close()
        logging.info(f'Finally, exit Server "{serv_name}"')
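
A quick, illustrative way to exercise the readiness endpoint from Python
(host and port 8127 taken from the listener above; everything else in
this snippet is an assumption, not part of the project):

    import asyncio
    import aiohttp

    async def probe():
        async with aiohttp.ClientSession() as session:
            async with session.get('http://localhost:8127/-/ready') as r:
                # 200 once proxy_is_up is True, 503 otherwise
                print(r.status, await r.text())

    asyncio.run(probe())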