Gen 3 plus support (#38)

* add tsun_v2 default configuration

* Add port 10000 for gen 3 plus inverters

* add monitor_sn for solarman support

* listen on port 10000 for solarman inverters

* initial version for gen 3 plus support

* refactoring: split gen3 and gen3plus

* refactoring

* refactoring classes

* refactor proxy statistic counter

* - fix logging levels
- use super() in close() and __del__()

* add config for gen 3 plus

* Add solarman config support

* refactor Message… classes

* rename class MessageG3 to Talent

* refactor close() handler

* refactor disc() handler

* move loop() into the base class AsyncStream

* move async_read, _write and _forward into base class

* Cleanup

* move server_loop and client_loop into base class

* add msg forwarding for solarman V5 protocol

* move server_loop() and client_loop to class AsyncStream

* rename AsyncStreamxx to Connectionxx

* fix unit tests

* make more attributes private

* load .env file

* wait after last test

* ignore .env

* add response handler

* Update README.md

* update unreleased changes

* add more Home Assistant diagnostic values

* fix typo

* Update README.md

Definition of the inverter generations added to the compatibility table

* add HA counter for 'Internal SW Exceptions'

* Update README.md

Fixes incorrect markup in the configuration file listing

* Update README.md

Planning documented for MS-2000 support

* S allius/issue33 (#34)

* - fix issue 33

  The TSUN Cloud now responds to contact_info and get_time messages with
  an empty display message and not with a response message as before.
  We tried to parse data from the empty message, which led to an
  exception

* Add test with empty conn_ind from inverter

* version 0.5.5

Stefan Allius, 2024-03-27 01:40:29 +01:00 (committed by GitHub)
parent 542f422e1e, commit ef1fd4f913
23 changed files with 1595 additions and 554 deletions


@@ -1,27 +1,57 @@
import logging
import traceback
# from config import Config
# import gc
from messages import Message, hex_dump_memory
from messages import hex_dump_memory
logger = logging.getLogger('conn')
class AsyncStream(Message):
class AsyncStream():
def __init__(self, reader, writer, addr, remote_stream, server_side: bool,
id_str=b'') -> None:
super().__init__(server_side, id_str)
def __init__(self, reader, writer, addr) -> None:
logger.debug('AsyncStream.__init__')
self.reader = reader
self.writer = writer
self.remoteStream = remote_stream
self.addr = addr
self.r_addr = ''
self.l_addr = ''
'''
Our public methods
'''
async def server_loop(self, addr):
'''Loop for receiving messages from the inverter (server-side)'''
logging.info(f'Accept connection from {addr} to {self.l_addr}')
self.inc_counter('Inverter_Cnt')
await self.loop()
self.dec_counter('Inverter_Cnt')
logging.info(f'Server loop stopped for r{self.r_addr}')
# if the server connection closes, we also have to disconnect
# the connection to the TSUN cloud
if self.remoteStream:
logging.debug("disconnect client connection")
self.remoteStream.disc()
try:
await self._async_publ_mqtt_proxy_stat('proxy')
except Exception:
pass
async def client_loop(self, addr):
'''Loop for receiving messages from the TSUN cloud (client-side)'''
clientStream = await self.remoteStream.loop()
logging.info(f'Client loop stopped for l{clientStream.l_addr}')
# if the client connection closes, we don't touch the server
# connection. Instead we erase the client connection stream,
# thus on the next received packet from the inverter, we can
# establish a new connection to the TSUN cloud
# erase backlink to inverter
clientStream.remoteStream = None
if self.remoteStream == clientStream:
# logging.debug(f'Client l{clientStream.l_addr} refs:'
# f' {gc.get_referrers(clientStream)}')
# then erase client connection
self.remoteStream = None
async def loop(self):
self.r_addr = self.writer.get_extra_info('peername')
self.l_addr = self.writer.get_extra_info('sockname')
@@ -52,15 +82,12 @@ class AsyncStream(Message):
return self
def disc(self) -> None:
logger.debug(f'in AsyncStream.disc() l{self.l_addr} | r{self.r_addr}')
logger.debug(f'AsyncStream.disc() l{self.l_addr} | r{self.r_addr}')
self.writer.close()
def close(self):
logger.debug(f'in AsyncStream.close() l{self.l_addr} | r{self.r_addr}')
logger.debug(f'AsyncStream.close() l{self.l_addr} | r{self.r_addr}')
self.writer.close()
super().close() # call close handler in the parent class
# logger.info(f'AsyncStream refs: {gc.get_referrers(self)}')
'''
Our private methods
@@ -86,9 +113,8 @@ class AsyncStream(Message):
if not self.remoteStream:
await self.async_create_remote()
if self.remoteStream:
self.remoteStream._init_new_client_conn(self.contact_name,
self.contact_mail)
await self.remoteStream.__async_write()
if self.remoteStream._init_new_client_conn():
await self.remoteStream.__async_write()
if self.remoteStream:
hex_dump_memory(logging.INFO,
@@ -99,11 +125,6 @@ class AsyncStream(Message):
await self.remoteStream.writer.drain()
self._forward_buffer = bytearray(0)
async def async_create_remote(self) -> None:
pass
async def async_publ_mqtt(self) -> None:
pass
def __del__(self):
logging.debug(f"AsyncStream.__del__ l{self.l_addr} | r{self.r_addr}")
logger.debug(
f"AsyncStream.__del__ l{self.l_addr} | r{self.r_addr}")

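The comments in server_loop() and client_loop() describe an asymmetric tear-down of the two proxied connections; as a sketch (illustrative, not part of the commit):

# inverter ──TCP──▶ AsyncStream (server side)
#                      │ self.remoteStream
#                      ▼
#                   AsyncStream (client side) ──TCP──▶ TSUN/Solarman cloud
#
# server side closes -> disc() is also called on the client side
# client side closes -> only the remoteStream links are erased; the next
#                       packet from the inverter triggers a reconnect via
#                       async_create_remote()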

@@ -19,6 +19,11 @@ class Config():
'host': Use(str),
'port': And(Use(int), lambda n: 1024 <= n <= 65535)
},
'solarman': {
'enabled': Use(bool),
'host': Use(str),
'port': And(Use(int), lambda n: 1024 <= n <= 65535)
},
'mqtt': {
'host': Use(str),
'port': And(Use(int), lambda n: 1024 <= n <= 65535),
@@ -34,6 +39,7 @@ class Config():
},
'inverters': {
'allow_all': Use(bool), And(Use(str), lambda s: len(s) == 16): {
Optional('monitor_sn', default=0): Use(int),
Optional('node_id', default=""): And(Use(str),
Use(lambda s: s + '/'
if len(s) > 0 and
@@ -67,6 +73,8 @@ class Config():
usr_config = tomllib.load(f)
config['tsun'] = def_config['tsun'] | usr_config['tsun']
config['solarman'] = def_config['solarman'] | \
usr_config['solarman']
config['mqtt'] = def_config['mqtt'] | usr_config['mqtt']
config['ha'] = def_config['ha'] | usr_config['ha']
config['inverters'] = def_config['inverters'] | \

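Since config.py loads the user file with tomllib, the new schema can be exercised with a TOML fragment like the one below; all host names, serials and IDs are illustrative, not shipped defaults:

import tomllib  # Python 3.11+

cfg = tomllib.loads('''
[solarman]
enabled = true
host = "solarman.example.com"
port = 10000

[inverters]
allow_all = false

[inverters."R170000000000001"]   # table key: the 16-character inverter serial
monitor_sn = 4100000000          # numeric serial of the GEN3+ logger stick
node_id = "inv_1"
suggested_area = "roof"
''')
print(cfg['solarman']['port'])                             # 10000
print(cfg['inverters']['R170000000000001']['monitor_sn'])  # 4100000000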

@@ -0,0 +1,36 @@
import logging
# import gc
from async_stream import AsyncStream
from gen3.talent import Talent
logger = logging.getLogger('conn')
class ConnectionG3(AsyncStream, Talent):
def __init__(self, reader, writer, addr, remote_stream, server_side: bool,
id_str=b'') -> None:
AsyncStream.__init__(self, reader, writer, addr)
Talent.__init__(self, server_side, id_str)
self.remoteStream = remote_stream
'''
Our public methods
'''
def close(self):
AsyncStream.close(self)
Talent.close(self)
# logger.info(f'AsyncStream refs: {gc.get_referrers(self)}')
async def async_create_remote(self) -> None:
pass
async def async_publ_mqtt(self) -> None:
pass
'''
Our private methods
'''
def __del__(self):
super().__del__()

app/src/gen3/inverter_g3.py (new file, 126 lines)

@@ -0,0 +1,126 @@
import asyncio
import logging
import traceback
import json
from config import Config
from inverter import Inverter
from gen3.connection_g3 import ConnectionG3
from aiomqtt import MqttCodeError
from infos import Infos
# import gc
# logger = logging.getLogger('conn')
logger_mqtt = logging.getLogger('mqtt')
class InverterG3(Inverter, ConnectionG3):
'''class InverterG3 is a derivation of Inverter and ConnectionG3
The class has some class methods for managing common resources, like the
connection to the MQTT broker or the proxy error counters, which are
common to all inverter connections
Instances of the class are connections to an inverter and can have an
optional link to a remote connection to the TSUN cloud. A remote
connection dies with the inverter connection.
class methods:
class_init(): initialize the common resources of the proxy (MQTT
broker, Proxy DB, etc). Must be called before the
first inverter instance can be created
class_close(): release the common resources of the proxy. Should not
be called until all instances of the class have been
destroyed
methods:
server_loop(addr): Async loop method for receiving messages from the
inverter (server-side)
client_loop(addr): Async loop method for receiving messages from the
TSUN cloud (client-side)
async_create_remote(): Establish a client connection to the TSUN cloud
async_publ_mqtt(): Publish data to MQTT broker
close(): Release method which must be called before an instance can be
destroyed
'''
def __init__(self, reader, writer, addr):
super().__init__(reader, writer, addr, None, True)
self.__ha_restarts = -1
async def async_create_remote(self) -> None:
'''Establish a client connection to the TSUN cloud'''
tsun = Config.get('tsun')
host = tsun['host']
port = tsun['port']
addr = (host, port)
try:
logging.info(f'Connected to {addr}')
connect = asyncio.open_connection(host, port)
reader, writer = await connect
self.remoteStream = ConnectionG3(reader, writer, addr, self,
False, self.id_str)
asyncio.create_task(self.client_loop(addr))
except (ConnectionRefusedError, TimeoutError) as error:
logging.info(f'{error}')
except Exception:
self.inc_counter('SW_Exception')
logging.error(
f"Inverter: Exception for {addr}:\n"
f"{traceback.format_exc()}")
async def async_publ_mqtt(self) -> None:
'''publish data to MQTT broker'''
# check if new inverter or collector infos are available or when the
# home assistant has changed the status back to online
try:
if (('inverter' in self.new_data and self.new_data['inverter'])
or ('collector' in self.new_data and
self.new_data['collector'])
or self.mqtt.ha_restarts != self.__ha_restarts):
await self._register_proxy_stat_home_assistant()
await self.__register_home_assistant()
self.__ha_restarts = self.mqtt.ha_restarts
for key in self.new_data:
await self.__async_publ_mqtt_packet(key)
for key in Infos.new_stat_data:
await self._async_publ_mqtt_proxy_stat(key)
except MqttCodeError as error:
logging.error(f'Mqtt except: {error}')
except Exception:
self.inc_counter('SW_Exception')
logging.error(
f"Inverter: Exception:\n"
f"{traceback.format_exc()}")
async def __async_publ_mqtt_packet(self, key):
db = self.db.db
if key in db and self.new_data[key]:
data_json = json.dumps(db[key])
node_id = self.node_id
logger_mqtt.debug(f'{key}: {data_json}')
await self.mqtt.publish(f'{self.entity_prfx}{node_id}{key}', data_json) # noqa: E501
self.new_data[key] = False
async def __register_home_assistant(self) -> None:
'''register all our topics at home assistant'''
for data_json, component, node_id, id in self.db.ha_confs(
self.entity_prfx, self.node_id, self.unique_id,
False, self.sug_area):
logger_mqtt.debug(f"MQTT Register: cmp:'{component}'"
f" node_id:'{node_id}' {data_json}")
await self.mqtt.publish(f"{self.discovery_prfx}{component}"
f"/{node_id}{id}/config", data_json)
def close(self) -> None:
logging.debug(f'InverterG3.close() l{self.l_addr} | r{self.r_addr}')
super().close() # call close handler in the parent class
# logger.debug (f'Inverter refs: {gc.get_referrers(self)}')
def __del__(self):
logging.debug("InverterG3.__del__")
super().__del__()

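The publish calls in __async_publ_mqtt_packet() and __register_home_assistant() assemble their topics from configured prefixes; a sketch with illustrative values (none of these are shipped defaults):

entity_prfx = 'tsun/'                   # illustrative
discovery_prfx = 'homeassistant/'       # illustrative
node_id = 'inv_1/'
key = 'inverter'                        # a self.new_data key
component, uid = 'sensor', 'out_power'  # illustrative discovery entity
print(f'{entity_prfx}{node_id}{key}')   # -> tsun/inv_1/inverter
print(f'{discovery_prfx}{component}/{node_id}{uid}/config')
# -> homeassistant/sensor/inv_1/out_power/config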
app/src/gen3/talent.py (new file, 350 lines)

@@ -0,0 +1,350 @@
import struct
import logging
import time
from datetime import datetime
if __name__ == "app.src.gen3.talent":
from app.src.messages import hex_dump_memory, Message
from app.src.config import Config
else: # pragma: no cover
from messages import hex_dump_memory, Message
from config import Config
logger = logging.getLogger('msg')
class Control:
def __init__(self, ctrl: int):
self.ctrl = ctrl
def __int__(self) -> int:
return self.ctrl
def is_ind(self) -> bool:
return (self.ctrl == 0x91)
def is_req(self) -> bool:
return (self.ctrl == 0x70)
def is_resp(self) -> bool:
return (self.ctrl == 0x99)
class Talent(Message):
def __init__(self, server_side: bool, id_str=b''):
super().__init__(server_side)
self.await_conn_resp_cnt = 0
self.id_str = id_str
self.contact_name = b''
self.contact_mail = b''
self.switch = {
0x00: self.msg_contact_info,
0x13: self.msg_ota_update,
0x22: self.msg_get_time,
0x71: self.msg_collector_data,
0x04: self.msg_inverter_data,
}
'''
Our public methods
'''
def close(self) -> None:
logging.debug('Talent.close()')
# we have references to methods of this class in self.switch
# so we have to erase self.switch, otherwise this instance can't be
# deallocated by the garbage collector ==> we get a memory leak
self.switch.clear()
def set_serial_no(self, serial_no: str):
if self.unique_id == serial_no:
logger.debug(f'SerialNo: {serial_no}')
else:
inverters = Config.get('inverters')
# logger.debug(f'Inverters: {inverters}')
if serial_no in inverters:
inv = inverters[serial_no]
self.node_id = inv['node_id']
self.sug_area = inv['suggested_area']
logger.debug(f'SerialNo {serial_no} allowed! area:{self.sug_area}') # noqa: E501
else:
self.node_id = ''
self.sug_area = ''
if 'allow_all' not in inverters or not inverters['allow_all']:
self.inc_counter('Unknown_SNR')
self.unique_id = None
logger.warning(f'ignore message from unknown inverter! (SerialNo: {serial_no})') # noqa: E501
return
logger.debug(f'SerialNo {serial_no} not known but accepted!')
self.unique_id = serial_no
def read(self) -> None:
self._read()
if not self.header_valid:
self.__parse_header(self._recv_buffer, len(self._recv_buffer))
if self.header_valid and len(self._recv_buffer) >= (self.header_len +
self.data_len):
hex_dump_memory(logging.INFO, f'Received from {self.addr}:',
self._recv_buffer, self.header_len+self.data_len)
self.set_serial_no(self.id_str.decode("utf-8"))
self.__dispatch_msg()
self.__flush_recv_msg()
return
def forward(self, buffer, buflen) -> None:
tsun = Config.get('tsun')
if tsun['enabled']:
self._forward_buffer = buffer[:buflen]
hex_dump_memory(logging.DEBUG, 'Store for forwarding:',
buffer, buflen)
self.__parse_header(self._forward_buffer,
len(self._forward_buffer))
fnc = self.switch.get(self.msg_id, self.msg_unknown)
logger.info(self.__flow_str(self.server_side, 'forwrd') +
f' Ctl: {int(self.ctrl):#02x} Msg: {fnc.__name__!r}')
return
def _init_new_client_conn(self) -> bool:
contact_name = self.contact_name
contact_mail = self.contact_mail
logger.info(f'name: {contact_name} mail: {contact_mail}')
self.msg_id = 0
self.await_conn_resp_cnt += 1
self.__build_header(0x91)
self._send_buffer += struct.pack(f'!{len(contact_name)+1}p'
f'{len(contact_mail)+1}p',
contact_name, contact_mail)
self.__finish_send_msg()
return True
'''
Our private methods
'''
def __flow_str(self, server_side: bool, type: str): # noqa: F821
switch = {
'rx': ' <',
'tx': ' >',
'forwrd': '<< ',
'drop': ' xx',
'rxS': '> ',
'txS': '< ',
'forwrdS': ' >>',
'dropS': 'xx ',
}
if server_side:
type += 'S'
return switch.get(type, '???')
def _timestamp(self): # pragma: no cover
if False:
# UTC as epoch
ts = time.time()
else:
# convert local time to epoch
ts = (datetime.now() - datetime(1970, 1, 1)).total_seconds()
return round(ts*1000)
# check if there is a complete header in the buffer, parse it
# and set
# self.header_len
# self.data_len
# self.id_str
# self.ctrl
# self.msg_id
#
# if the header is incomplete, then self.header_len is still 0
#
def __parse_header(self, buf: bytes, buf_len: int) -> None:
if (buf_len < 5): # enough bytes to read len and id_len?
return
result = struct.unpack_from('!lB', buf, 0)
len = result[0] # len of complete message
id_len = result[1] # len of variable id string
hdr_len = 5+id_len+2
if (buf_len < hdr_len): # enough bytes for complete header?
return
result = struct.unpack_from(f'!{id_len+1}pBB', buf, 4)
# store parsed header values in the class
self.id_str = result[0]
self.ctrl = Control(result[1])
self.msg_id = result[2]
self.data_len = len-id_len-3
self.header_len = hdr_len
self.header_valid = True
return
def __build_header(self, ctrl) -> None:
self.send_msg_ofs = len(self._send_buffer)
self._send_buffer += struct.pack(f'!l{len(self.id_str)+1}pBB',
0, self.id_str, ctrl, self.msg_id)
fnc = self.switch.get(self.msg_id, self.msg_unknown)
logger.info(self.__flow_str(self.server_side, 'tx') +
f' Ctl: {int(ctrl):#02x} Msg: {fnc.__name__!r}')
def __finish_send_msg(self) -> None:
_len = len(self._send_buffer) - self.send_msg_ofs
struct.pack_into('!l', self._send_buffer, self.send_msg_ofs, _len-4)
def __dispatch_msg(self) -> None:
fnc = self.switch.get(self.msg_id, self.msg_unknown)
if self.unique_id:
logger.info(self.__flow_str(self.server_side, 'rx') +
f' Ctl: {int(self.ctrl):#02x} Msg: {fnc.__name__!r}')
fnc()
else:
logger.info(self.__flow_str(self.server_side, 'drop') +
f' Ctl: {int(self.ctrl):#02x} Msg: {fnc.__name__!r}')
def __flush_recv_msg(self) -> None:
self._recv_buffer = self._recv_buffer[(self.header_len+self.data_len):]
self.header_valid = False
'''
Message handler methods
'''
def msg_contact_info(self):
if self.ctrl.is_ind():
if self.server_side and self.__process_contact_info():
self.__build_header(0x91)
self._send_buffer += b'\x01'
self.__finish_send_msg()
# don't forward this contact info here, we will build one
# when the remote connection is established
elif self.await_conn_resp_cnt > 0:
self.await_conn_resp_cnt -= 1
else:
self.forward(self._recv_buffer, self.header_len+self.data_len)
return
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def __process_contact_info(self) -> bool:
result = struct.unpack_from('!B', self._recv_buffer, self.header_len)
name_len = result[0]
if self.data_len < name_len+2:
return False
result = struct.unpack_from(f'!{name_len+1}pB', self._recv_buffer,
self.header_len)
self.contact_name = result[0]
mail_len = result[1]
logger.info(f'name: {self.contact_name}')
result = struct.unpack_from(f'!{mail_len+1}p', self._recv_buffer,
self.header_len+name_len+1)
self.contact_mail = result[0]
logger.info(f'mail: {self.contact_mail}')
return True
def msg_get_time(self):
tsun = Config.get('tsun')
if tsun['enabled']:
if self.ctrl.is_ind():
if self.data_len >= 8:
ts = self._timestamp()
result = struct.unpack_from('!q', self._recv_buffer,
self.header_len)
logger.debug(f'tsun-time: {result[0]:08x}'
f' proxy-time: {ts:08x}')
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
else:
if self.ctrl.is_ind():
if self.data_len == 0:
ts = self._timestamp()
logger.debug(f'time: {ts:08x}')
self.__build_header(0x91)
self._send_buffer += struct.pack('!q', ts)
self.__finish_send_msg()
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
def parse_msg_header(self):
result = struct.unpack_from('!lB', self._recv_buffer, self.header_len)
data_id = result[0] # len of complete message
id_len = result[1] # len of variable id string
logger.debug(f'Data_ID: {data_id} id_len: {id_len}')
msg_hdr_len = 5+id_len+9
result = struct.unpack_from(f'!{id_len+1}pBq', self._recv_buffer,
self.header_len + 4)
logger.debug(f'ID: {result[0]} B: {result[1]}')
logger.debug(f'time: {result[2]:08x}')
# logger.info(f'time: {datetime.utcfromtimestamp(result[2]).strftime(
# "%Y-%m-%d %H:%M:%S")}')
return msg_hdr_len
def msg_collector_data(self):
if self.ctrl.is_ind():
self.__build_header(0x99)
self._send_buffer += b'\x01'
self.__finish_send_msg()
self.__process_data()
elif self.ctrl.is_resp():
return # ignore received response
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def msg_inverter_data(self):
if self.ctrl.is_ind():
self.__build_header(0x99)
self._send_buffer += b'\x01'
self.__finish_send_msg()
self.__process_data()
elif self.ctrl.is_resp():
return # ignore received response
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def __process_data(self):
msg_hdr_len = self.parse_msg_header()
for key, update in self.db.parse(self._recv_buffer, self.header_len
+ msg_hdr_len):
if update:
self.new_data[key] = True
def msg_ota_update(self):
if self.ctrl.is_req():
self.inc_counter('OTA_Start_Msg')
elif self.ctrl.is_ind():
pass
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def msg_unknown(self):
logger.warning(f"Unknown Msg: ID:{self.msg_id}")
self.inc_counter('Unknown_Msg')
self.forward(self._recv_buffer, self.header_len+self.data_len)

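The framing that Talent.__parse_header() and __build_header() implement is compact enough to restate; the sketch below builds and re-parses one frame (serial number, ctrl/msg IDs and timestamp are illustrative, not part of the commit):

import struct

# layout: len ('!l', counts every byte after itself) | id_str (pascal string)
#         | ctrl (1 byte) | msg_id (1 byte) | payload

def build_frame(id_str: bytes, ctrl: int, msg_id: int, payload: bytes) -> bytes:
    body = struct.pack(f'!{len(id_str)+1}pBB', id_str, ctrl, msg_id) + payload
    return struct.pack('!l', len(body)) + body

def parse_frame(buf: bytes):
    total_len, id_len = struct.unpack_from('!lB', buf, 0)
    id_str, ctrl, msg_id = struct.unpack_from(f'!{id_len+1}pBB', buf, 4)
    hdr_len = 5 + id_len + 2           # same math as __parse_header()
    data_len = total_len - id_len - 3
    return id_str, ctrl, msg_id, buf[hdr_len:hdr_len + data_len]

frame = build_frame(b'R170000000000001', 0x91, 0x22, struct.pack('!q', 1711497600000))
print(parse_frame(frame))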

@@ -0,0 +1,36 @@
import logging
# import gc
from async_stream import AsyncStream
from gen3plus.solarman_v5 import SolarmanV5
logger = logging.getLogger('conn')
class ConnectionG3P(AsyncStream, SolarmanV5):
def __init__(self, reader, writer, addr, remote_stream,
server_side: bool) -> None:
AsyncStream.__init__(self, reader, writer, addr)
SolarmanV5.__init__(self, server_side)
self.remoteStream = remote_stream
'''
Our public methods
'''
def close(self):
AsyncStream.close(self)
SolarmanV5.close(self)
# logger.info(f'AsyncStream refs: {gc.get_referrers(self)}')
async def async_create_remote(self) -> None:
pass
async def async_publ_mqtt(self) -> None:
pass
'''
Our private methods
'''
def __del__(self):
super().__del__()


@@ -0,0 +1,126 @@
import asyncio
import logging
import traceback
import json
from config import Config
from inverter import Inverter
from gen3plus.connection_g3p import ConnectionG3P
from aiomqtt import MqttCodeError
from infos import Infos
# import gc
# logger = logging.getLogger('conn')
logger_mqtt = logging.getLogger('mqtt')
class InverterG3P(Inverter, ConnectionG3P):
'''class InverterG3P is a derivation of Inverter and ConnectionG3P
The class has some class methods for managing common resources, like the
connection to the MQTT broker or the proxy error counters, which are
common to all inverter connections
Instances of the class are connections to an inverter and can have an
optional link to a remote connection to the TSUN cloud. A remote
connection dies with the inverter connection.
class methods:
class_init(): initialize the common resources of the proxy (MQTT
broker, Proxy DB, etc). Must be called before the
first inverter instance can be created
class_close(): release the common resources of the proxy. Should not
be called until all instances of the class have been
destroyed
methods:
server_loop(addr): Async loop method for receiving messages from the
inverter (server-side)
client_loop(addr): Async loop method for receiving messages from the
TSUN cloud (client-side)
async_create_remote(): Establish a client connection to the TSUN cloud
async_publ_mqtt(): Publish data to MQTT broker
close(): Release method which must be called before an instance can be
destroyed
'''
def __init__(self, reader, writer, addr):
super().__init__(reader, writer, addr, None, True)
self.__ha_restarts = -1
async def async_create_remote(self) -> None:
'''Establish a client connection to the TSUN cloud'''
tsun = Config.get('solarman')
host = tsun['host']
port = tsun['port']
addr = (host, port)
try:
logging.info(f'Connected to {addr}')
connect = asyncio.open_connection(host, port)
reader, writer = await connect
self.remoteStream = ConnectionG3P(reader, writer, addr, self,
False)
asyncio.create_task(self.client_loop(addr))
except (ConnectionRefusedError, TimeoutError) as error:
logging.info(f'{error}')
except Exception:
self.inc_counter('SW_Exception')
logging.error(
f"Inverter: Exception for {addr}:\n"
f"{traceback.format_exc()}")
async def async_publ_mqtt(self) -> None:
'''publish data to MQTT broker'''
# check if new inverter or collector infos are available or when the
# home assistant has changed the status back to online
try:
if (('inverter' in self.new_data and self.new_data['inverter'])
or ('collector' in self.new_data and
self.new_data['collector'])
or self.mqtt.ha_restarts != self.__ha_restarts):
await self._register_proxy_stat_home_assistant()
await self.__register_home_assistant()
self.__ha_restarts = self.mqtt.ha_restarts
for key in self.new_data:
await self.__async_publ_mqtt_packet(key)
for key in Infos.new_stat_data:
await self._async_publ_mqtt_proxy_stat(key)
except MqttCodeError as error:
logging.error(f'Mqtt except: {error}')
except Exception:
self.inc_counter('SW_Exception')
logging.error(
f"Inverter: Exception:\n"
f"{traceback.format_exc()}")
async def __async_publ_mqtt_packet(self, key):
db = self.db.db
if key in db and self.new_data[key]:
data_json = json.dumps(db[key])
node_id = self.node_id
logger_mqtt.debug(f'{key}: {data_json}')
await self.mqtt.publish(f'{self.entity_prfx}{node_id}{key}', data_json) # noqa: E501
self.new_data[key] = False
async def __register_home_assistant(self) -> None:
'''register all our topics at home assistant'''
for data_json, component, node_id, id in self.db.ha_confs(
self.entity_prfx, self.node_id, self.unique_id,
False, self.sug_area):
logger_mqtt.debug(f"MQTT Register: cmp:'{component}'"
f" node_id:'{node_id}' {data_json}")
await self.mqtt.publish(f"{self.discovery_prfx}{component}"
f"/{node_id}{id}/config", data_json)
def close(self) -> None:
logging.debug(f'InverterG3P.close() l{self.l_addr} | r{self.r_addr}')
super().close() # call close handler in the parent class
# logger.debug (f'Inverter refs: {gc.get_referrers(self)}')
def __del__(self):
logging.debug("InverterG3P.__del__")
super().__del__()


@@ -0,0 +1,279 @@
import struct
import logging
# import time
from datetime import datetime
if __name__ == "app.src.gen3plus.solarman_v5":
from app.src.messages import hex_dump_memory, Message
from app.src.config import Config
else: # pragma: no cover
from messages import hex_dump_memory, Message
from config import Config
# import traceback
logger = logging.getLogger('msg')
class SolarmanV5(Message):
def __init__(self, server_side: bool):
super().__init__(server_side)
self.header_len = 11 # overwrites value set in the Message constructor
self.control = 0
self.serial = 0
self.snr = 0
# self.await_conn_resp_cnt = 0
# self.id_str = id_str
self.switch = {
0x4110: self.msg_dev_ind, # hello
0x1110: self.msg_dev_rsp,
0x4210: self.msg_unknown, # data
0x1210: self.msg_data_rsp,
0x4310: self.msg_unknown,
0x4710: self.msg_unknown, # heartbeat
0x1710: self.msg_hbeat_rsp,
0x4810: self.msg_unknown, # hello end
}
'''
Our public methods
'''
def close(self) -> None:
logging.debug('Solarman.close()')
# we have references to methods of this class in self.switch
# so we have to erase self.switch, otherwise this instance can't be
# deallocated by the garbage collector ==> we get a memory leak
self.switch.clear()
def set_serial_no(self, snr: int):
serial_no = str(snr)
if self.unique_id == serial_no:
logger.debug(f'SerialNo: {serial_no}')
else:
found = False
inverters = Config.get('inverters')
# logger.debug(f'Inverters: {inverters}')
for key, inv in inverters.items():
# logger.debug(f'key: {key} -> {inv}')
if (type(inv) is dict and 'monitor_sn' in inv
and inv['monitor_sn'] == snr):
found = True
self.node_id = inv['node_id']
self.sug_area = inv['suggested_area']
logger.debug(f'SerialNo {serial_no} allowed! area:{self.sug_area}') # noqa: E501
if not found:
self.node_id = ''
self.sug_area = ''
if 'allow_all' not in inverters or not inverters['allow_all']:
self.inc_counter('Unknown_SNR')
self.unique_id = None
logger.warning(f'ignore message from unknown inverter! (SerialNo: {serial_no})') # noqa: E501
return
logger.debug(f'SerialNo {serial_no} not known but accepted!')
self.unique_id = serial_no
def read(self) -> None:
self._read()
if not self.header_valid:
self.__parse_header(self._recv_buffer, len(self._recv_buffer))
if self.header_valid and len(self._recv_buffer) >= (self.header_len +
self.data_len+2):
hex_dump_memory(logging.INFO, f'Received from {self.addr}:',
self._recv_buffer, self.header_len+self.data_len+2)
if self.__trailer_is_ok(self._recv_buffer, self.header_len
+ self.data_len + 2):
self.set_serial_no(self.snr)
self.__dispatch_msg()
self.__flush_recv_msg()
return
def forward(self, buffer, buflen) -> None:
tsun = Config.get('solarman')
if tsun['enabled']:
self._forward_buffer = buffer[:buflen]
hex_dump_memory(logging.DEBUG, 'Store for forwarding:',
buffer, buflen)
self.__parse_header(self._forward_buffer,
len(self._forward_buffer))
fnc = self.switch.get(self.control, self.msg_unknown)
logger.info(self.__flow_str(self.server_side, 'forwrd') +
f' Ctl: {int(self.control):#04x}'
f' Msg: {fnc.__name__!r}')
return
def _init_new_client_conn(self) -> bool:
# self.__build_header(0x91)
# self._send_buffer += struct.pack(f'!{len(contact_name)+1}p'
# f'{len(contact_mail)+1}p',
# contact_name, contact_mail)
# self.__finish_send_msg()
return False
'''
Our private methods
'''
def __flow_str(self, server_side: bool, type: str): # noqa: F821
switch = {
'rx': ' <',
'tx': ' >',
'forwrd': '<< ',
'drop': ' xx',
'rxS': '> ',
'txS': '< ',
'forwrdS': ' >>',
'dropS': 'xx ',
}
if server_side:
type += 'S'
return switch.get(type, '???')
def __parse_header(self, buf: bytes, buf_len: int) -> None:
if (buf_len < self.header_len): # enough bytes for complete header?
return
result = struct.unpack_from('<BHHHL', buf, 0)
# store parsed header values in the class
start = result[0] # start byte, must be 0xA5
self.data_len = result[1] # length of the payload
self.control = result[2]
self.serial = result[3]
self.snr = result[4]
if start != 0xA5:
return
self.header_valid = True
return
def __trailer_is_ok(self, buf: bytes, buf_len: int) -> bool:
crc = buf[self.data_len+11]
stop = buf[self.data_len+12]
if stop != 0x15:
return False
check = sum(buf[1:buf_len-2]) & 0xff
if check != crc:
logger.debug(f'CRC {int(crc):#02x} {int(check):#08x}'
f' Stop:{int(stop):#02x}')
return False
return True
def __dispatch_msg(self) -> None:
fnc = self.switch.get(self.control, self.msg_unknown)
if self.unique_id:
logger.info(self.__flow_str(self.server_side, 'rx') +
f' Ctl: {int(self.control):#04x}' +
f' Msg: {fnc.__name__!r}')
fnc()
else:
logger.info(self.__flow_str(self.server_side, 'drop') +
f' Ctl: {int(self.control):#04x}' +
f' Msg: {fnc.__name__!r}')
def __flush_recv_msg(self) -> None:
self._recv_buffer = self._recv_buffer[(self.header_len +
self.data_len+2):]
self.header_valid = False
'''
def modbus(self, data):
POLY = 0xA001
crc = 0xFFFF
for byte in data:
crc ^= byte
for _ in range(8):
crc = ((crc >> 1) ^ POLY
if (crc & 0x0001)
else crc >> 1)
return crc
def validate_modbus_crc(self, frame):
# Calculate crc with all but the last 2 bytes of
# the frame (they contain the crc)
calc_crc = 0xFFFF
for pos in frame[:-2]:
calc_crc ^= pos
for i in range(8):
if (calc_crc & 1) != 0:
calc_crc >>= 1
calc_crc ^= 0xA001 # bitwise 'or' with modbus magic
# number (0xa001 == bitwise
# reverse of 0x8005)
else:
calc_crc >>= 1
# Compare calculated crc with the one supplied in the frame....
frame_crc, = struct.unpack('<H', frame[-2:])
if calc_crc == frame_crc:
return 1
else:
return 0
'''
'''
Message handler methods
'''
def msg_unknown(self):
logger.warning(f"Unknown Msg: ID:{int(self.control):#04x}")
self.inc_counter('Unknown_Msg')
self.forward(self._recv_buffer, self.header_len+self.data_len+2)
def msg_dev_ind(self):
data = self._recv_buffer[self.header_len:]
result = struct.unpack_from('<BLLL', data, 0)
code = result[0] # always 2
total = result[1]
tim = result[2]
res = result[3] # always zero
logger.info(f'code:{code} total:{total}s'
f' timer:{tim:08x}s null:{res}')
dt = datetime.fromtimestamp(total)
logger.info(f'ts: {dt.strftime("%Y-%m-%d %H:%M:%S")}')
if (code == 2):
result = struct.unpack_from('<BBBBBB40s', data, 13)
upload_period = result[0]
data_acq_period = result[1]
heart_beat = result[2]
res = result[3]
wifi = result[4]
ver = result[6]
# res2 = result[5]
logger.info(f'upload:{upload_period}min '
f'data collect:{data_acq_period}s '
f'heartbeat:{heart_beat}s '
f'wifi:{wifi}%')
logger.info(f'ver:{ver}')
self.forward(self._recv_buffer, self.header_len+self.data_len+2)
def msg_dev_rsp(self):
self.msg_response()
def msg_data_rsp(self):
self.msg_response()
def msg_hbeat_rsp(self):
self.msg_response()
def msg_response(self):
data = self._recv_buffer[self.header_len:]
result = struct.unpack_from('<BBLL', data, 0)
code = result[0] # always 2
valid = result[1] == 1 # status
ts = result[2]
repeat = result[3] # always 60
logger.info(f'code:{code} accepted:{valid}'
f' ts:{ts:08x} repeat:{repeat}s')
dt = datetime.fromtimestamp(ts)
logger.info(f'ts: {dt.strftime("%Y-%m-%d %H:%M:%S")}')
self.forward(self._recv_buffer, self.header_len+self.data_len+2)

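For comparison, a sketch (not part of the commit) of the Solarman V5 framing that __parse_header() and __trailer_is_ok() check: a 0xA5 start byte, a little-endian '<BHHHL' header, the payload, an additive 8-bit checksum over everything after the start byte, and a 0x15 stop byte. Control 0x4710 is the heartbeat ID from the switch table above; serials and payload are made up:

import struct

def build_v5_frame(control: int, serial: int, snr: int, payload: bytes) -> bytes:
    frame = struct.pack('<BHHHL', 0xA5, len(payload), control, serial, snr) + payload
    checksum = sum(frame[1:]) & 0xff         # start byte excluded from the sum
    return frame + bytes([checksum, 0x15])   # trailer: checksum + stop byte

def trailer_is_ok(frame: bytes) -> bool:
    return frame[-1] == 0x15 and frame[-2] == (sum(frame[1:-2]) & 0xff)

hb = build_v5_frame(0x4710, 0x0000, 4100000000, b'\x00')
assert trailer_is_ok(hb)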

@@ -9,6 +9,8 @@ class Infos:
app_name = os.getenv('SERVICE_NAME', 'proxy')
version = os.getenv('VERSION', 'unknown')
new_stat_data = {}
@classmethod
def static_init(cls):
logging.info('Initialize proxy statistics')
@@ -318,7 +320,7 @@ class Infos:
return d['name'], d['level'], d['unit'], must_incr, new_val
def parse(self, buf, ind=0) -> None:
def parse(self, buf, ind=0): # -> None | tuple[str,bool]:
'''parse a data sequence received from the inverter and
stores the values in Infos.db


@@ -1,48 +1,15 @@
import asyncio
import logging
import traceback
import json
from config import Config
from async_stream import AsyncStream
from mqtt import Mqtt
from aiomqtt import MqttCodeError
from infos import Infos
# import gc
# logger = logging.getLogger('conn')
logger_mqtt = logging.getLogger('mqtt')
class Inverter(AsyncStream):
'''class Inverter is a derivation of an AsyncStream
The class has some class methods for managing common resources, like the
connection to the MQTT broker or the proxy error counters, which are
common to all inverter connections
Instances of the class are connections to an inverter and can have an
optional link to a remote connection to the TSUN cloud. A remote
connection dies with the inverter connection.
class methods:
class_init(): initialize the common resources of the proxy (MQTT
broker, Proxy DB, etc). Must be called before the
first inverter instance can be created
class_close(): release the common resources of the proxy. Should not
be called until all instances of the class have been
destroyed
methods:
server_loop(addr): Async loop method for receiving messages from the
inverter (server-side)
client_loop(addr): Async loop method for receiving messages from the
TSUN cloud (client-side)
async_create_remote(): Establish a client connection to the TSUN cloud
async_publ_mqtt(): Publish data to MQTT broker
close(): Release method which must be called before an instance can be
destroyed
'''
class Inverter():
@classmethod
def class_init(cls) -> None:
logging.debug('Inverter.class_init')
@@ -57,21 +24,21 @@ class Inverter(AsyncStream):
cls.proxy_unique_id = ha['proxy_unique_id']
# call Mqtt singleton to establish the connection to the mqtt broker
cls.mqtt = Mqtt(cls.__cb_mqtt_is_up)
cls.mqtt = Mqtt(cls._cb_mqtt_is_up)
@classmethod
async def __cb_mqtt_is_up(cls) -> None:
async def _cb_mqtt_is_up(cls) -> None:
logging.info('Initialize proxy device on home assistant')
# register proxy status counters at home assistant
await cls.__register_proxy_stat_home_assistant()
await cls._register_proxy_stat_home_assistant()
# send values of the proxy status counters
await asyncio.sleep(0.5) # wait a bit, before sending data
cls.new_stat_data['proxy'] = True # force sending data to sync ha
await cls.__async_publ_mqtt_proxy_stat('proxy')
Infos.new_stat_data['proxy'] = True # force sending data to sync ha
await cls._async_publ_mqtt_proxy_stat('proxy')
@classmethod
async def __register_proxy_stat_home_assistant(cls) -> None:
async def _register_proxy_stat_home_assistant(cls) -> None:
'''register all our topics at home assistant'''
for data_json, component, node_id, id in cls.db_stat.ha_confs(
cls.entity_prfx, cls.proxy_node_id,
@@ -80,15 +47,15 @@ class Inverter(AsyncStream):
await cls.mqtt.publish(f'{cls.discovery_prfx}{component}/{node_id}{id}/config', data_json) # noqa: E501
@classmethod
async def __async_publ_mqtt_proxy_stat(cls, key) -> None:
async def _async_publ_mqtt_proxy_stat(cls, key) -> None:
stat = Infos.stat
if key in stat and cls.new_stat_data[key]:
if key in stat and Infos.new_stat_data[key]:
data_json = json.dumps(stat[key])
node_id = cls.proxy_node_id
logger_mqtt.debug(f'{key}: {data_json}')
await cls.mqtt.publish(f"{cls.entity_prfx}{node_id}{key}",
data_json)
cls.new_stat_data[key] = False
Infos.new_stat_data[key] = False
@classmethod
def class_close(cls, loop) -> None:
@@ -96,121 +63,3 @@ class Inverter(AsyncStream):
logging.info('Close MQTT Task')
loop.run_until_complete(cls.mqtt.close())
cls.mqtt = None
def __init__(self, reader, writer, addr):
super().__init__(reader, writer, addr, None, True)
self.ha_restarts = -1
async def server_loop(self, addr):
'''Loop for receiving messages from the inverter (server-side)'''
logging.info(f'Accept connection from {addr}')
self.inc_counter('Inverter_Cnt')
await self.loop()
self.dec_counter('Inverter_Cnt')
logging.info(f'Server loop stopped for r{self.r_addr}')
# if the server connection closes, we also have to disconnect
# the connection to the TSUN cloud
if self.remoteStream:
logging.debug("disconnect client connection")
self.remoteStream.disc()
try:
await self.__async_publ_mqtt_proxy_stat('proxy')
except Exception:
pass
async def client_loop(self, addr):
'''Loop for receiving messages from the TSUN cloud (client-side)'''
clientStream = await self.remoteStream.loop()
logging.info(f'Client loop stopped for l{clientStream.l_addr}')
# if the client connection closes, we don't touch the server
# connection. Instead we erase the client connection stream,
# thus on the next received packet from the inverter, we can
# establish a new connection to the TSUN cloud
# erase backlink to inverter
clientStream.remoteStream = None
if self.remoteStream == clientStream:
# logging.debug(f'Client l{clientStream.l_addr} refs:'
# f' {gc.get_referrers(clientStream)}')
# then erase client connection
self.remoteStream = None
async def async_create_remote(self) -> None:
'''Establish a client connection to the TSUN cloud'''
tsun = Config.get('tsun')
host = tsun['host']
port = tsun['port']
addr = (host, port)
try:
logging.info(f'Connected to {addr}')
connect = asyncio.open_connection(host, port)
reader, writer = await connect
self.remoteStream = AsyncStream(reader, writer, addr, self,
False, self.id_str)
asyncio.create_task(self.client_loop(addr))
except ConnectionRefusedError as error:
logging.info(f'{error}')
except Exception:
self.inc_counter('SW_Exception')
logging.error(
f"Inverter: Exception for {addr}:\n"
f"{traceback.format_exc()}")
async def async_publ_mqtt(self) -> None:
'''publish data to MQTT broker'''
# check if new inverter or collector infos are available or when the
# home assistant has changed the status back to online
try:
if (('inverter' in self.new_data and self.new_data['inverter'])
or ('collector' in self.new_data and
self.new_data['collector'])
or self.mqtt.ha_restarts != self.ha_restarts):
await self.__register_proxy_stat_home_assistant()
await self.__register_home_assistant()
self.ha_restarts = self.mqtt.ha_restarts
for key in self.new_data:
await self.__async_publ_mqtt_packet(key)
for key in self.new_stat_data:
await self.__async_publ_mqtt_proxy_stat(key)
except MqttCodeError as error:
logging.error(f'Mqtt except: {error}')
except Exception:
self.inc_counter('SW_Exception')
logging.error(
f"Inverter: Exception:\n"
f"{traceback.format_exc()}")
async def __async_publ_mqtt_packet(self, key):
db = self.db.db
if key in db and self.new_data[key]:
data_json = json.dumps(db[key])
node_id = self.node_id
logger_mqtt.debug(f'{key}: {data_json}')
await self.mqtt.publish(f'{self.entity_prfx}{node_id}{key}', data_json) # noqa: E501
self.new_data[key] = False
async def __register_home_assistant(self) -> None:
'''register all our topics at home assistant'''
for data_json, component, node_id, id in self.db.ha_confs(
self.entity_prfx, self.node_id, self.unique_id,
False, self.sug_area):
logger_mqtt.debug(f"MQTT Register: cmp:'{component}'"
f" node_id:'{node_id}' {data_json}")
await self.mqtt.publish(f"{self.discovery_prfx}{component}"
f"/{node_id}{id}/config", data_json)
def close(self) -> None:
logging.debug(f'Inverter.close() l{self.l_addr} | r{self.r_addr}')
super().close() # call close handler in the parent class
# logger.debug (f'Inverter refs: {gc.get_referrers(self)}')
def __del__(self):
logging.debug("Inverter.__del__")
super().__del__()


@@ -1,15 +1,10 @@
import struct
import logging
import time
from datetime import datetime
import weakref
if __name__ == "app.src.messages":
from app.src.infos import Infos
from app.src.config import Config
else: # pragma: no cover
from infos import Infos
from config import Config
logger = logging.getLogger('msg')
@@ -45,23 +40,6 @@ def hex_dump_memory(level, info, data, num):
tracer.log(level, '\n'.join(lines))
class Control:
def __init__(self, ctrl: int):
self.ctrl = ctrl
def __int__(self) -> int:
return self.ctrl
def is_ind(self) -> bool:
return (self.ctrl == 0x91)
def is_req(self) -> bool:
return (self.ctrl == 0x70)
def is_resp(self) -> bool:
return (self.ctrl == 0x99)
class IterRegistry(type):
def __iter__(cls):
for ref in cls._registry:
@@ -72,10 +50,10 @@ class IterRegistry(type):
class Message(metaclass=IterRegistry):
_registry = []
new_stat_data = {}
def __init__(self, server_side: bool, id_str=b''):
def __init__(self, server_side: bool):
self._registry.append(weakref.ref(self))
self.server_side = server_side
self.header_valid = False
self.header_len = 0
@@ -83,22 +61,11 @@ class Message(metaclass=IterRegistry):
self.unique_id = 0
self.node_id = ''
self.sug_area = ''
self.await_conn_resp_cnt = 0
self.id_str = id_str
self.contact_name = b''
self.contact_mail = b''
self._recv_buffer = bytearray(0)
self._send_buffer = bytearray(0)
self._forward_buffer = bytearray(0)
self.db = Infos()
self.new_data = {}
self.switch = {
0x00: self.msg_contact_info,
0x13: self.msg_ota_update,
0x22: self.msg_get_time,
0x71: self.msg_collector_data,
0x04: self.msg_inverter_data,
}
'''
Empty methods, that have to be implemented in any child class which
@@ -112,306 +79,12 @@ class Message(metaclass=IterRegistry):
Our public methods
'''
def close(self) -> None:
# we have references to methods of this class in self.switch
# so we have to erase self.switch, otherwise this instance can't be
# deallocated by the garbage collector ==> we get a memory leak
self.switch.clear()
pass # pragma: no cover
def inc_counter(self, counter: str) -> None:
self.db.inc_counter(counter)
self.new_stat_data['proxy'] = True
Infos.new_stat_data['proxy'] = True
def dec_counter(self, counter: str) -> None:
self.db.dec_counter(counter)
self.new_stat_data['proxy'] = True
def set_serial_no(self, serial_no: str):
if self.unique_id == serial_no:
logger.debug(f'SerialNo: {serial_no}')
else:
inverters = Config.get('inverters')
# logger.debug(f'Inverters: {inverters}')
if serial_no in inverters:
inv = inverters[serial_no]
self.node_id = inv['node_id']
self.sug_area = inv['suggested_area']
logger.debug(f'SerialNo {serial_no} allowed! area:{self.sug_area}') # noqa: E501
else:
self.node_id = ''
self.sug_area = ''
if 'allow_all' not in inverters or not inverters['allow_all']:
self.inc_counter('Unknown_SNR')
self.unique_id = None
logger.warning(f'ignore message from unknown inverter! (SerialNo: {serial_no})') # noqa: E501
return
logger.debug(f'SerialNo {serial_no} not known but accepted!')
self.unique_id = serial_no
def read(self) -> None:
self._read()
if not self.header_valid:
self.__parse_header(self._recv_buffer, len(self._recv_buffer))
if self.header_valid and len(self._recv_buffer) >= (self.header_len +
self.data_len):
hex_dump_memory(logging.INFO, f'Received from {self.addr}:',
self._recv_buffer, self.header_len+self.data_len)
self.set_serial_no(self.id_str.decode("utf-8"))
self.__dispatch_msg()
self.__flush_recv_msg()
return
def forward(self, buffer, buflen) -> None:
tsun = Config.get('tsun')
if tsun['enabled']:
self._forward_buffer = buffer[:buflen]
hex_dump_memory(logging.DEBUG, 'Store for forwarding:',
buffer, buflen)
self.__parse_header(self._forward_buffer,
len(self._forward_buffer))
fnc = self.switch.get(self.msg_id, self.msg_unknown)
logger.info(self.__flow_str(self.server_side, 'forwrd') +
f' Ctl: {int(self.ctrl):#02x} Msg: {fnc.__name__!r}')
return
def _init_new_client_conn(self, contact_name, contact_mail) -> None:
logger.info(f'name: {contact_name} mail: {contact_mail}')
self.msg_id = 0
self.await_conn_resp_cnt += 1
self.__build_header(0x91)
self._send_buffer += struct.pack(f'!{len(contact_name)+1}p'
f'{len(contact_mail)+1}p',
contact_name, contact_mail)
self.__finish_send_msg()
'''
Our private methods
'''
def __flow_str(self, server_side: bool, type:
('rx', 'tx', 'forwrd', 'drop')): # noqa: F821
switch = {
'rx': ' <',
'tx': ' >',
'forwrd': '<< ',
'drop': ' xx',
'rxS': '> ',
'txS': '< ',
'forwrdS': ' >>',
'dropS': 'xx ',
}
if server_side:
type += 'S'
return switch.get(type, '???')
def _timestamp(self): # pragma: no cover
if False:
# UTC as epoch
ts = time.time()
else:
# convert local time to epoch
ts = (datetime.now() - datetime(1970, 1, 1)).total_seconds()
return round(ts*1000)
# check if there is a complete header in the buffer, parse it
# and set
# self.header_len
# self.data_len
# self.id_str
# self.ctrl
# self.msg_id
#
# if the header is incomplete, then self.header_len is still 0
#
def __parse_header(self, buf: bytes, buf_len: int) -> None:
if (buf_len < 5): # enough bytes to read len and id_len?
return
result = struct.unpack_from('!lB', buf, 0)
len = result[0] # len of complete message
id_len = result[1] # len of variable id string
hdr_len = 5+id_len+2
if (buf_len < hdr_len): # enough bytes for complete header?
return
result = struct.unpack_from(f'!{id_len+1}pBB', buf, 4)
# store parsed header values in the class
self.id_str = result[0]
self.ctrl = Control(result[1])
self.msg_id = result[2]
self.data_len = len-id_len-3
self.header_len = hdr_len
self.header_valid = True
return
def __build_header(self, ctrl) -> None:
self.send_msg_ofs = len(self._send_buffer)
self._send_buffer += struct.pack(f'!l{len(self.id_str)+1}pBB',
0, self.id_str, ctrl, self.msg_id)
fnc = self.switch.get(self.msg_id, self.msg_unknown)
logger.info(self.__flow_str(self.server_side, 'tx') +
f' Ctl: {int(ctrl):#02x} Msg: {fnc.__name__!r}')
def __finish_send_msg(self) -> None:
_len = len(self._send_buffer) - self.send_msg_ofs
struct.pack_into('!l', self._send_buffer, self.send_msg_ofs, _len-4)
def __dispatch_msg(self) -> None:
fnc = self.switch.get(self.msg_id, self.msg_unknown)
if self.unique_id:
logger.info(self.__flow_str(self.server_side, 'rx') +
f' Ctl: {int(self.ctrl):#02x} Msg: {fnc.__name__!r}')
fnc()
else:
logger.info(self.__flow_str(self.server_side, 'drop') +
f' Ctl: {int(self.ctrl):#02x} Msg: {fnc.__name__!r}')
def __flush_recv_msg(self) -> None:
self._recv_buffer = self._recv_buffer[(self.header_len+self.data_len):]
self.header_valid = False
'''
Message handler methods
'''
def msg_contact_info(self):
if self.ctrl.is_ind():
if self.server_side and self.__process_contact_info():
self.__build_header(0x91)
self._send_buffer += b'\x01'
self.__finish_send_msg()
# don't forward this contact info here, we will build one
# when the remote connection is established
elif self.await_conn_resp_cnt > 0:
self.await_conn_resp_cnt -= 1
else:
self.forward(self._recv_buffer, self.header_len+self.data_len)
return
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def __process_contact_info(self) -> bool:
result = struct.unpack_from('!B', self._recv_buffer, self.header_len)
name_len = result[0]
if self.data_len < name_len+2:
return False
result = struct.unpack_from(f'!{name_len+1}pB', self._recv_buffer,
self.header_len)
self.contact_name = result[0]
mail_len = result[1]
logger.info(f'name: {self.contact_name}')
result = struct.unpack_from(f'!{mail_len+1}p', self._recv_buffer,
self.header_len+name_len+1)
self.contact_mail = result[0]
logger.info(f'mail: {self.contact_mail}')
return True
def msg_get_time(self):
tsun = Config.get('tsun')
if tsun['enabled']:
if self.ctrl.is_ind():
if self.data_len >= 8:
ts = self._timestamp()
result = struct.unpack_from('!q', self._recv_buffer,
self.header_len)
logger.debug(f'tsun-time: {result[0]:08x}'
f' proxy-time: {ts:08x}')
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
else:
if self.ctrl.is_ind():
if self.data_len == 0:
ts = self._timestamp()
logger.debug(f'time: {ts:08x}')
self.__build_header(0x91)
self._send_buffer += struct.pack('!q', ts)
self.__finish_send_msg()
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
def parse_msg_header(self):
result = struct.unpack_from('!lB', self._recv_buffer, self.header_len)
data_id = result[0] # len of complete message
id_len = result[1] # len of variable id string
logger.debug(f'Data_ID: {data_id} id_len: {id_len}')
msg_hdr_len = 5+id_len+9
result = struct.unpack_from(f'!{id_len+1}pBq', self._recv_buffer,
self.header_len + 4)
logger.debug(f'ID: {result[0]} B: {result[1]}')
logger.debug(f'time: {result[2]:08x}')
# logger.info(f'time: {datetime.utcfromtimestamp(result[2]).strftime(
# "%Y-%m-%d %H:%M:%S")}')
return msg_hdr_len
def msg_collector_data(self):
if self.ctrl.is_ind():
self.__build_header(0x99)
self._send_buffer += b'\x01'
self.__finish_send_msg()
self.__process_data()
elif self.ctrl.is_resp():
return # ignore received response
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def msg_inverter_data(self):
if self.ctrl.is_ind():
self.__build_header(0x99)
self._send_buffer += b'\x01'
self.__finish_send_msg()
self.__process_data()
elif self.ctrl.is_resp():
return # ignore received response
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def __process_data(self):
msg_hdr_len = self.parse_msg_header()
for key, update in self.db.parse(self._recv_buffer, self.header_len
+ msg_hdr_len):
if update:
self.new_data[key] = True
def msg_ota_update(self):
if self.ctrl.is_req():
self.inc_counter('OTA_Start_Msg')
elif self.ctrl.is_ind():
pass
else:
logger.warning('Unknown Ctrl')
self.inc_counter('Unknown_Ctrl')
self.forward(self._recv_buffer, self.header_len+self.data_len)
def msg_unknown(self):
logger.warning(f"Unknown Msg: ID:{self.msg_id}")
self.inc_counter('Unknown_Msg')
self.forward(self._recv_buffer, self.header_len+self.data_len)
Infos.new_stat_data['proxy'] = True


@@ -18,8 +18,8 @@ class Singleton(type):
class Mqtt(metaclass=Singleton):
client = None
cb_MqttIsUp = None
__client = None
__cb_MqttIsUp = None
def __init__(self, cb_MqttIsUp):
logger_mqtt.debug('MQTT: __init__')
@@ -50,8 +50,8 @@ class Mqtt(metaclass=Singleton):
async def publish(self, topic: str, payload: str | bytes | bytearray
| int | float | None = None) -> None:
if self.client:
await self.client.publish(topic, payload)
if self.__client:
await self.__client.publish(topic, payload)
async def __loop(self) -> None:
mqtt = Config.get('mqtt')
@@ -59,22 +59,24 @@ class Mqtt(metaclass=Singleton):
logger_mqtt.info(f'start MQTT: host:{mqtt["host"]} port:'
f'{mqtt["port"]} '
f'user:{mqtt["user"]}')
self.client = aiomqtt.Client(hostname=mqtt['host'], port=mqtt['port'],
username=mqtt['user'],
password=mqtt['passwd'])
self.__client = aiomqtt.Client(hostname=mqtt['host'],
port=mqtt['port'],
username=mqtt['user'],
password=mqtt['passwd'])
interval = 5 # Seconds
while True:
try:
async with self.client:
async with self.__client:
logger_mqtt.info('MQTT broker connection established')
if self.cb_MqttIsUp:
await self.cb_MqttIsUp()
async with self.client.messages() as messages:
await self.client.subscribe(f"{ha['auto_conf_prefix']}"
"/status")
async with self.__client.messages() as messages:
await self.__client.subscribe(
f"{ha['auto_conf_prefix']}"
"/status")
async for message in messages:
status = message.payload.decode("UTF-8")
logger_mqtt.info('Home-Assistant Status:'
@@ -89,5 +91,5 @@ class Mqtt(metaclass=Singleton):
await asyncio.sleep(interval)
except asyncio.CancelledError:
logger_mqtt.debug("MQTT task cancelled")
self.client = None
self.__client = None
return


@@ -4,8 +4,10 @@ import signal
import functools
import os
from logging import config # noqa F401
from async_stream import AsyncStream
from messages import Message
from inverter import Inverter
from gen3.inverter_g3 import InverterG3
from gen3plus.inverter_g3p import InverterG3P
from config import Config
@@ -13,7 +15,14 @@ async def handle_client(reader, writer):
'''Handles a new incoming connection and starts an async loop'''
addr = writer.get_extra_info('peername')
await Inverter(reader, writer, addr).server_loop(addr)
await InverterG3(reader, writer, addr).server_loop(addr)
async def handle_client_v2(reader, writer):
'''Handles a new incoming connection and starts an async loop'''
addr = writer.get_extra_info('peername')
await InverterG3P(reader, writer, addr).server_loop(addr)
def handle_SIGTERM(loop):
@@ -24,7 +33,7 @@ def handle_SIGTERM(loop):
#
# first, close all open TCP connections
#
for stream in AsyncStream:
for stream in Message:
stream.close()
#
@@ -81,11 +90,12 @@ if __name__ == "__main__":
functools.partial(handle_SIGTERM, loop))
#
# Create a task for our listening server. This must be a task! If we call
# Create tasks for our listening servers. These must be tasks! If we call
# start_server directly out of our main task, the eventloop will be blocked
# and we can't receive and handle the UNIX signals!
#
loop.create_task(asyncio.start_server(handle_client, '0.0.0.0', 5005))
loop.create_task(asyncio.start_server(handle_client_v2, '0.0.0.0', 10000))
try:
loop.run_forever()