Compare commits

...

5 Commits

Author SHA1 Message Date
Stefan Allius
2632698008 Merge branch 'main' of https://github.com/s-allius/tsun-gen3-proxy into ssl-connection 2024-07-02 00:41:14 +02:00
Stefan Allius
a42ba8a8c6 Dev 0.9 (#115)
* make timestamp handling stateless

* adapt tests for stateless timestamp handling

* initial version

* add more type annotations

* add more type annotations

* fix Generator annotation for ha_proxy_confs

* fix names of issue branches

* add more type annotations

* don't use deprecated vars anymore

* don't mark all tests as async

* fix imports

* fix solarman unit tests

- fake Mqtt class

* print image build time during proxy start

* update changelog

* fix pytest collect warning

* cleanup msg_get_time handler

* adapt unit test

* label debug images with debug

* dump dropped packages

* fix warnings

* add systemtest with invalid start byte

* update changelog

* update changelog

* add exposed ports and healthcheck

* add wget for healthcheck

* add aiohttp

* use config validation for healthcheck

* add http server for healthcheck

* calculate msg processing time

* add healthy check methods

* fix typo

* log ConfigErr with DEBUG level

* Update async_stream.py

- check if processing time is < 5 sec

* add a close handler to release internal resources

* call modbus close handler on a close call

* add exception handling for forward handler

* update changelog

* isolate Modbus fix

* cleanup

* update changelog

* add healthy handler

* log unreleased references

* add healthcheck

* complete exposed port list

* add wget for healthcheck

* add aiohttp

* use Enum class for State

* calc processing time for healthcheck

* add HTTP server for healthcheck

* cleanup

* Update CHANGELOG.md

* update changelog

* add docstrings to state enum

* set new state State.received

* add healthy method

* log healthcheck info with DEBUG level

* update changelog

* S allius/issue100 (#101)

* detect dead connections

- disconnect connection on Msg receive timeout
- improve connection trace (add connection id)

* update changelog

* fix merge conflict

* fix unittests

* S allius/issue108 (#109)

* add more data types

* adapt unittests

* improve test coverage

* fix linter warning

* update changelog

* S allius/issue102 (#110)

* hotfix: don't send two MODBUS commands together

* fix unit tests

* remove read loop

* optional sleep between msg read and sending rsp

* wait after read 0.5s before sending a response

* add pending state

* fix state definitions

* determine the connection timeout by the conn state

* avoid sending MODBUS cmds in the inverter's reporting phase

* update changelog

* S allius/issue111 (#112)

Synchronize regular MODBUS commands with the status of the inverter to prevent the inverter from crashing due to unexpected packets (a sketch of this idea follows the commit list).

* initial check-in

* remove crontab entry for regular MODBUS cmds

* add timer for regular MODBUS polling

* fix Stop method call for already stopped timer

* optimize MB_START_TIMEOUT value

* cleanup

* update changelog

* fix buildx warnings

* fix timer cleanup

* fix Config.class_init()

- return error string or None
- release Schema structure after building the config

* add quit flag to docker push

* fix timeout calculation

* rename python to debugpy

* add asyncio log

* cleanup shutdown
- stop webserver on shutdown
- enable asyncio debug mode for debug versions

* update changelog

* update changelog

* fix exception in MODBUS timeout callback

* update changelog
2024-07-01 23:41:56 +02:00
Stefan Allius
210c02f0b9 add experimental SSL support 2024-06-29 17:28:34 +02:00
Stefan Allius
a51ac03021 add asyncio logging 2024-06-29 17:27:55 +02:00
Stefan Allius
e6b726912a add cert directory 2024-06-29 17:25:46 +02:00
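
The issue #101/#110/#111/#112 commits above all circle around one idea: regular MODBUS polling is gated on the connection state, so the inverter never receives a command while it is busy with its reporting phase. A minimal sketch of that idea, assuming a State enum and connection class whose names are purely illustrative and not the proxy's actual API:

```python
# Hypothetical sketch of state-gated MODBUS polling; names are illustrative.
import asyncio
from enum import Enum


class State(Enum):
    """Connection states; the proxy uses a similar Enum (see commits above)."""
    init = 0
    received = 1   # inverter is sending its regular report
    pending = 2    # report finished, short guard interval before polling
    up = 3         # idle, safe to send MODBUS commands


class Connection:
    """Illustrative only, not the proxy's actual class."""

    def __init__(self) -> None:
        self.state = State.init

    async def modbus_poll_loop(self, interval: float = 60.0) -> None:
        """Send regular MODBUS commands only while the connection is idle,
        so no unexpected packet reaches the inverter during its reporting
        phase."""
        while True:
            await asyncio.sleep(interval)
            if self.state is not State.up:
                continue            # skip this cycle instead of interrupting
            await self.send_modbus_cmd()

    async def send_modbus_cmd(self) -> None:
        print('send regular MODBUS read')   # placeholder
```

The real change additionally replaces the crontab entry with a dedicated timer for regular MODBUS polling, as the #112 commit messages note.
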
5 changed files with 97 additions and 27 deletions

View File

@@ -7,6 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [unreleased]
+
+## [0.9.0] - 2024-07-01
+- fix exception in MODBUS timeout callback
+
+## [0.9.0-RC1] - 2024-06-29
+- add asyncio log and debug mode
+- stop the HTTP server on shutdown gracefully
+
 - Synchronize regular MODBUS commands with the status of the inverter to prevent the inverter from crashing due to
   unexpected packets. [#111](https://github.com/s-allius/tsun-gen3-proxy/issues/111)
 - GEN3: avoid sending MODBUS commands to the inverter during the inverter's reporting phase

View File

@@ -45,7 +45,7 @@ ENV HOME=/home/$SERVICE_NAME
 # set the working directory in the container
 WORKDIR /home/$SERVICE_NAME
-VOLUME ["/home/$SERVICE_NAME/log", "/home/$SERVICE_NAME/config"]
+VOLUME ["/home/$SERVICE_NAME/log", "/home/$SERVICE_NAME/config", "/home/$SERVICE_NAME/cert"]

 # install the requirements from the wheels packages from the builder stage
 # and unistall python packages and alpine package manger to reduce attack surface

@@ -64,7 +64,7 @@ COPY --chmod=0700 entrypoint.sh /root/entrypoint.sh
 COPY config .
 COPY src .
 RUN date > /build-date.txt
-EXPOSE 5005 8127 10000
+EXPOSE 5005 8127 10000 10443

 # command to run on container start
 ENTRYPOINT ["/root/entrypoint.sh"]
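
The newly exposed port 8127 belongs to the healthcheck added by the commits above ("add http server for healthcheck", "add wget for healthcheck", "check if processing time is < 5 sec"). A hedged sketch of such an aiohttp endpoint; the route path, the global variable names, and the wget call are assumptions, only the 200 response text is taken from the diff further below:

```python
# Hypothetical health endpoint; route path and globals are assumptions.
from aiohttp import web

routes = web.RouteTableDef()
proxy_is_up = True          # cleared when the config validation fails
last_proc_time = 0.0        # seconds spent processing the last message


@routes.get('/healthy')
async def healthy(request: web.Request) -> web.Response:
    """Report healthy while the config is valid and message processing
    stays below 5 seconds; otherwise the container is marked unhealthy."""
    if proxy_is_up and last_proc_time < 5.0:
        return web.Response(status=200, text="I'm fine")
    return web.Response(status=503, text='unhealthy')


def make_app() -> web.Application:
    app = web.Application()
    app.add_routes(routes)
    return app

# inside the container, a healthcheck could then probe it, for example:
#   wget -q -O /dev/null http://localhost:8127/healthy || exit 1
```
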

View File

@@ -1,5 +1,5 @@
 [loggers]
-keys=root,tracer,mesg,conn,data,mqtt
+keys=root,tracer,mesg,conn,data,mqtt,asyncio

 [handlers]
 keys=console_handler,file_handler_name1,file_handler_name2

@@ -24,6 +24,12 @@ handlers=console_handler,file_handler_name1
 propagate=0
 qualname=mqtt

+[logger_asyncio]
+level=INFO
+handlers=console_handler,file_handler_name1
+propagate=0
+qualname=asyncio
+
 [logger_data]
 level=DEBUG
 handlers=file_handler_name1

View File

@@ -108,6 +108,7 @@ class Modbus():
     def close(self):
         """free the queue and erase the callback handlers"""
         logging.debug('Modbus close:')
+        self.__stop_timer()
         self.rsp_handler = None
         self.snd_handler = None
         while not self.que.empty:

@@ -252,6 +253,7 @@ class Modbus():
         # logging.debug(f'Modbus stop timer {self}')
         if self.tim:
             self.tim.cancel()
+            self.tim = None

     def __timeout_cb(self) -> None:
         '''Rsponse timeout handler retransmit pdu or send next pdu'''
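
The two hunks above make the MODBUS timer cleanup idempotent: close() now stops the timer, and a cancelled handle is cleared so a second stop call is a no-op and no late timeout callback can fire ("fix exception in MODBUS timeout callback"). An illustrative sketch of that pattern with asyncio's call_later, not the proxy's actual class:

```python
# Illustrative timer-cleanup pattern; not the project's Modbus class.
import asyncio


class TimerOwner:
    def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
        self.loop = loop
        self.tim = None                      # asyncio.TimerHandle or None

    def start_timer(self, timeout: float) -> None:
        self.tim = self.loop.call_later(timeout, self.on_timeout)

    def stop_timer(self) -> None:
        # guard and clear the handle, so stop/close can be called twice
        # without touching an already cancelled timer
        if self.tim:
            self.tim.cancel()
            self.tim = None

    def close(self) -> None:
        # release the timer first, so a late timeout callback cannot fire
        # after the response/send handlers have been erased
        self.stop_timer()

    def on_timeout(self) -> None:
        print('response timeout')            # placeholder
```
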

View File

@@ -1,5 +1,6 @@
 import logging
 import asyncio
+import ssl
 import signal
 import os
 from asyncio import StreamReader, StreamWriter

@@ -48,14 +49,25 @@ async def healthy(request):
     return web.Response(status=200, text="I'm fine")


-async def webserver(runner, addr, port):
+async def webserver(addr, port):
+    '''coro running our webserver'''
+    app = web.Application()
+    app.add_routes(routes)
+    runner = web.AppRunner(app)
     await runner.setup()
     site = web.TCPSite(runner, addr, port)
     await site.start()
     logging.info(f'HTTP server listen on port: {port}')
-    while True:
-        await asyncio.sleep(3600)  # sleep forever
+    try:
+        # Normal interaction with aiohttp
+        while True:
+            await asyncio.sleep(3600)  # sleep forever
+    except asyncio.CancelledError:
+        logging.info('HTTP server cancelled')
+        await runner.cleanup()
+        logging.debug('HTTP cleanup done')


 async def handle_client(reader: StreamReader, writer: StreamWriter):

@@ -72,12 +84,17 @@ async def handle_client_v2(reader: StreamReader, writer: StreamWriter):
     await InverterG3P(reader, writer, addr).server_loop(addr)


+async def handle_client_v3(reader: StreamReader, writer: StreamWriter):
+    '''Handles a new incoming connection and starts an async loop'''
+    logging.info('Accept on port 10443')
+    addr = writer.get_extra_info('peername')
+    await InverterG3P(reader, writer, addr).server_loop(addr)
+
+
 async def handle_shutdown(loop, runner):
     '''Close all TCP connections and stop the event loop'''
     logging.info('Shutdown due to SIGTERM')
-    await runner.cleanup()
-    logging.info('HTTP server stopped')

     #
     # first, disc all open TCP connections gracefully

@@ -87,7 +104,7 @@ async def handle_shutdown(loop, runner):
             await asyncio.wait_for(stream.disc(), 2)
         except Exception:
             pass
-    logging.info('Disconnecting done')
+    logging.info('Proxy disconnecting done')

     #
     # second, close all open TCP connections

@@ -95,13 +112,21 @@
     for stream in Message:
         stream.close()
+    await asyncio.sleep(0.1)    # give time for closing
+    logging.info('Proxy closing done')
+
+    #
+    # third, cancel the web server
+    #
+    web_task.cancel()
+    await web_task

     #
     # at last, we stop the loop
     #
+    logging.debug("Stop event loop")
     loop.stop()
+    logging.info('Shutdown complete')


 def get_log_level() -> int:
     '''checks if LOG_LVL is set in the environment and returns the

@@ -133,6 +158,7 @@ if __name__ == "__main__":
     logging.getLogger('conn').setLevel(log_level)
     logging.getLogger('data').setLevel(log_level)
     logging.getLogger('tracer').setLevel(log_level)
+    logging.getLogger('asyncio').setLevel(log_level)
     # logging.getLogger('mqtt').setLevel(log_level)

     loop = asyncio.new_event_loop()

@@ -146,11 +172,47 @@
     Schedule.start()

     #
-    # Setup webserver application and runner
+    # Create tasks for our listening servers. These must be tasks! If we call
+    # start_server directly out of our main task, the eventloop will be blocked
+    # and we can't receive and handle the UNIX signals!
     #
-    app = web.Application()
-    app.add_routes(routes)
-    runner = web.AppRunner(app)
+    loop.create_task(asyncio.start_server(handle_client, '0.0.0.0', 5005))
+    loop.create_task(asyncio.start_server(handle_client_v2, '0.0.0.0', 10000))
+
+    # https://crypto.stackexchange.com/questions/26591/tls-encryption-with-a-self-signed-pki-and-python-s-asyncio-module
+    '''
+    openssl genrsa -out -des3 ca.key.pem 2048
+    openssl genrsa -out server.key.pem 2048
+    openssl genrsa -out client.key.pem 2048
+
+    openssl req -x509 -new -nodes -key ca.key.pem -sha256 -days 365
+        -out ca.cert.pem -subj /C=US/ST=CA/L=Somewhere/O=Someone/CN=FoobarCA
+
+    openssl req -new -sha256 -key server.key.pem
+        -subj /C=US/ST=CA/L=Somewhere/O=Someone/CN=Foobar -out server.csr
+    openssl x509 -req -in server.csr -CA ca.cert.pem -CAkey ca.key.pem
+        -CAcreateserial -out server.cert.pem -days 365 -sha256
+
+    openssl req -new -sha256 -key client.key.pem
+        -subj /C=US/ST=CA/L=Somewhere/O=Someone/CN=Foobar -out client.csr
+    openssl x509 -req -in client.csr -CA ca.cert.pem -CAkey ca.key.pem
+        -CAcreateserial -out client.cert.pem -days 365 -sha256
+    '''
+    server_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
+    server_ctx.minimum_version = ssl.TLSVersion.TLSv1_2
+    server_ctx.maximum_version = ssl.TLSVersion.TLSv1_3
+    server_ctx.verify_mode = ssl.CERT_REQUIRED
+    server_ctx.options |= ssl.OP_SINGLE_ECDH_USE
+    server_ctx.options |= ssl.OP_NO_COMPRESSION
+    server_ctx.load_cert_chain(certfile='cert/server.pem',
+                               keyfile='cert/server.key')
+    server_ctx.load_verify_locations(cafile='cert/ca.pem')
+    server_ctx.set_ciphers('ECDH+AESGCM')
+
+    loop.create_task(asyncio.start_server(handle_client_v3, '0.0.0.0', 10443,
+                                          ssl=server_ctx))
+    web_task = loop.create_task(webserver('0.0.0.0', 8127))

     #
     # Register some UNIX Signal handler for a gracefully server shutdown

@@ -159,17 +221,9 @@
     for signame in ('SIGINT', 'SIGTERM'):
         loop.add_signal_handler(getattr(signal, signame),
                                 lambda loop=loop: asyncio.create_task(
-                                    handle_shutdown(loop, runner)))
-
-    #
-    # Create tasks for our listening servers. These must be tasks! If we call
-    # start_server directly out of our main task, the eventloop will be blocked
-    # and we can't receive and handle the UNIX signals!
-    #
-    loop.create_task(asyncio.start_server(handle_client, '0.0.0.0', 5005))
-    loop.create_task(asyncio.start_server(handle_client_v2, '0.0.0.0', 10000))
-    loop.create_task(webserver(runner, '0.0.0.0', 8127))
-    loop.set_debug(True)
+                                    handle_shutdown(web_task)))

     try:
         if ConfigErr is None:
             proxy_is_up = True

@@ -177,8 +231,8 @@
     except KeyboardInterrupt:
         pass
     finally:
-        proxy_is_up = False
+        logging.info("Event loop is stopped")
         Inverter.class_close(loop)
-        logging.info('Close event loop')
+        logging.debug('Close event loop')
         loop.close()
         logging.info(f'Finally, exit Server "{serv_name}"')
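
Because the experimental TLS listener above requires client certificates (verify_mode = CERT_REQUIRED) and a private CA, a quick connectivity test needs a client context built from the same CA. A minimal sketch of such a test client; the certificate file names follow the openssl recipe quoted in the diff and are assumptions, not files shipped with the proxy:

```python
# Hypothetical TLS test client for the experimental port 10443.
# Certificate file names are assumptions based on the openssl recipe above.
import asyncio
import ssl


async def probe(host: str = '127.0.0.1', port: int = 10443) -> None:
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    ctx.load_verify_locations(cafile='cert/ca.cert.pem')
    ctx.load_cert_chain(certfile='cert/client.cert.pem',
                        keyfile='cert/client.key.pem')
    # the recipe issues the server cert for a fixed CN, not the host name
    ctx.check_hostname = False

    reader, writer = await asyncio.open_connection(host, port, ssl=ctx)
    print('TLS handshake done:', writer.get_extra_info('cipher'))
    writer.close()
    await writer.wait_closed()


if __name__ == '__main__':
    asyncio.run(probe())
```
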