diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/__init__.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/config.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/config.py new file mode 100644 index 000000000..f87236d24 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/config.py @@ -0,0 +1,54 @@ +import serial +import logging +from data import read_file_one_line + +# dbus configuration + +CONNECTION = 'Modbus RTU' +PRODUCT_NAME = 'FIAMM 48TL Series Battery' +PRODUCT_ID = 0xB012 # assigned by victron +DEVICE_INSTANCE = 1 +SERVICE_NAME_PREFIX = 'com.victronenergy.battery.' + + +# driver configuration + +SOFTWARE_VERSION = '3.0.0' +UPDATE_INTERVAL = 2000 # milliseconds +LOG_LEVEL = logging.INFO +#LOG_LEVEL = logging.DEBUG + + +# battery config + +V_MAX = 54.2 +V_MIN = 42 +R_STRING_MIN = 0.125 +R_STRING_MAX = 0.250 +I_MAX_PER_STRING = 15 +AH_PER_STRING = 40 + +# modbus configuration + +BASE_ADDRESS = 999 +NO_OF_REGISTERS = 56 +MAX_SLAVE_ADDRESS = 25 + + +# RS 485 configuration + +PARITY = serial.PARITY_ODD +TIMEOUT = 0.1 # seconds +BAUD_RATE = 115200 +BYTE_SIZE = 8 +STOP_BITS = 1 +MODE = 'rtu' + +# InnovEnergy IOT configuration + +INSTALLATION_NAME = read_file_one_line('/data/innovenergy/openvpn/installation-name') +INNOVENERGY_SERVER_IP = '10.2.0.1' +INNOVENERGY_SERVER_PORT = 8134 +INNOVENERGY_PROTOCOL_VERSION = '48TL200V3' + + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/config.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/config.pyc new file mode 100644 index 000000000..0381e9f5d Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/config.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/convert.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/convert.py new file mode 100644 index 000000000..dbc3af66b --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/convert.py @@ -0,0 +1,160 @@ +import struct + +import config as cfg +from data import LedState, BatteryStatus + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Iterable, Union, AnyStr, Any + + +def read_bool(base_register, bit): + # type: (int, int) -> Callable[[BatteryStatus], bool] + + # TODO: explain base register offset + register = base_register + int(bit/16) + bit = bit % 16 + + def get_value(status): + # type: (BatteryStatus) -> bool + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value & (1 << bit) > 0 + + return get_value + + +def read_float(register, scale_factor=1.0, offset=0.0): + # type: (int, float, float) -> Callable[[BatteryStatus], float] + + def get_value(status): + # type: (BatteryStatus) -> float + value = status.modbus_data[register - cfg.BASE_ADDRESS] + + if value >= 0x8000: # convert to signed int16 + value -= 0x10000 # fiamm stores their integers signed AND with sign-offset @#%^&! 
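+            # e.g. a raw register value of 0xFFFF becomes 0xFFFF - 0x10000 = -1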
+ + return (value + offset) * scale_factor + + return get_value + + +def read_registers(register, count): + # type: (int, int) -> Callable[[BatteryStatus], List[int]] + + start = register - cfg.BASE_ADDRESS + end = start + count + + def get_value(status): + # type: (BatteryStatus) -> List[int] + return [x for x in status.modbus_data[start:end]] + + return get_value + + +def comma_separated(values): + # type: (Iterable[str]) -> str + return ", ".join(set(values)) + + +def count_bits(base_register, nb_of_registers, nb_of_bits, first_bit=0): + # type: (int, int, int, int) -> Callable[[BatteryStatus], int] + + get_registers = read_registers(base_register, nb_of_registers) + end_bit = first_bit + nb_of_bits + + def get_value(status): + # type: (BatteryStatus) -> int + + registers = get_registers(status) + bin_registers = [bin(x)[-1:1:-1] for x in registers] # reverse the bits in each register so that bit0 is to the left + str_registers = [str(x).ljust(16, "0") for x in bin_registers] # add leading zeroes, so all registers are 16 chars long + bit_string = ''.join(str_registers) # join them, one long string of 0s and 1s + filtered_bits = bit_string[first_bit:end_bit] # take the first nb_of_bits bits starting at first_bit + + return filtered_bits.count('1') # count 1s + + return get_value + + +def read_led_state(register, led): + # type: (int, int) -> Callable[[BatteryStatus], int] + + read_lo = read_bool(register, led * 2) + read_hi = read_bool(register, led * 2 + 1) + + def get_value(status): + # type: (BatteryStatus) -> int + + lo = read_lo(status) + hi = read_hi(status) + + if hi: + if lo: + return LedState.blinking_fast + else: + return LedState.blinking_slow + else: + if lo: + return LedState.on + else: + return LedState.off + + return get_value + + +# noinspection PyShadowingNames +def unit(unit): + # type: (unicode) -> Callable[[unicode], unicode] + + def get_text(v): + # type: (unicode) -> unicode + return "{0}{1}".format(str(v), unit) + + return get_text + + +def const(constant): + # type: (any) -> Callable[[any], any] + def get(*args): + return constant + return get + + +def mean(numbers): + # type: (List[Union[float,int]]) -> float + return float(sum(numbers)) / len(numbers) + + +def first(ts, default=None): + return next((t for t in ts), default) + + +def bitfields_to_str(lists): + # type: (List[List[int]]) -> str + + def or_lists(): + # type: () -> Iterable[int] + + length = len(first(lists)) + n_lists = len(lists) + + for i in range(0, length): + e = 0 + for l in range(0, n_lists): + e = e | lists[l][i] + yield e + + hexed = [ + '{0:0>4X}'.format(x) + for x in or_lists() + ] + + return ' '.join(hexed) + + +def pack_string(string): + # type: (AnyStr) -> Any + data = string.encode('UTF-8') + return struct.pack('B', len(data)) + data + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/convert.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/convert.pyc new file mode 100644 index 000000000..0baf61947 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/convert.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/data.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/data.py new file mode 100644 index 000000000..b860d860c --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/data.py @@ -0,0 +1,125 @@ +import config as cfg + + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Optional, AnyStr, Union, Any + + +class 
LedState(object): + """ + from page 6 of the '48TLxxx ModBus Protocol doc' + """ + off = 0 + on = 1 + blinking_slow = 2 + blinking_fast = 3 + + +class LedColor(object): + green = 0 + amber = 1 + blue = 2 + red = 3 + + +class ServiceSignal(object): + + def __init__(self, dbus_path, get_value_or_const, unit=''): + # type: (str, Union[Callable[[],Any],Any], Optional[AnyStr] )->None + + self.get_value_or_const = get_value_or_const + self.dbus_path = dbus_path + self.unit = unit + + @property + def value(self): + try: + return self.get_value_or_const() # callable + except: + return self.get_value_or_const # value + + +class BatterySignal(object): + + def __init__(self, dbus_path, aggregate, get_value, unit=''): + # type: (str, Callable[[List[any]],any], Callable[[BatteryStatus],any], Optional[AnyStr] )->None + """ + A Signal holds all information necessary for the handling of a + certain datum (e.g. voltage) published by the battery. + + :param dbus_path: str + object_path on DBus where the datum needs to be published + + :param aggregate: Iterable[any] -> any + function that combines the values of multiple batteries into one. + e.g. sum for currents, or mean for voltages + + :param get_value: (BatteryStatus) -> any + function to extract the datum from the modbus record, + """ + + self.dbus_path = dbus_path + self.aggregate = aggregate + self.get_value = get_value + self.unit = unit + + +class Battery(object): + + """ Data record to hold hardware and firmware specs of the battery """ + + def __init__(self, slave_address, hardware_version, firmware_version, bms_version, ampere_hours): + # type: (int, str, str, str, int) -> None + self.slave_address = slave_address + self.hardware_version = hardware_version + self.firmware_version = firmware_version + self.bms_version = bms_version + self.ampere_hours = ampere_hours + self.n_strings = int(ampere_hours/cfg.AH_PER_STRING) + self.i_max = self.n_strings * cfg.I_MAX_PER_STRING + self.v_min = cfg.V_MIN + self.v_max = cfg.V_MAX + self.r_int_min = cfg.R_STRING_MIN / self.n_strings + self.r_int_max = cfg.R_STRING_MAX / self.n_strings + + def __str__(self): + return 'slave address = {0}\nhardware version = {1}\nfirmware version = {2}\nbms version = {3}\nampere hours = {4}'.format( + self.slave_address, self.hardware_version, self.firmware_version, self.bms_version, str(self.ampere_hours)) + + +class BatteryStatus(object): + """ + record holding the current status of a battery + """ + def __init__(self, battery, modbus_data): + # type: (Battery, List[int]) -> None + + self.battery = battery + self.modbus_data = modbus_data + + def serialize(self): + # type: () -> str + + b = self.battery + + s = cfg.INNOVENERGY_PROTOCOL_VERSION + '\n' + s += cfg.INSTALLATION_NAME + '\n' + s += str(b.slave_address) + '\n' + s += b.hardware_version + '\n' + s += b.firmware_version + '\n' + s += b.bms_version + '\n' + s += str(b.ampere_hours) + '\n' + + for d in self.modbus_data: + s += str(d) + '\n' + + return s + + +def read_file_one_line(file_name): + + with open(file_name, 'r') as file: + return file.read().replace('\n', '').replace('\r', '').strip() + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/data.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/data.pyc new file mode 100644 index 000000000..138d71341 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/data.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/dbus-fzsonick-48tl.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/dbus-fzsonick-48tl.py new file mode 100755 
index 000000000..11266c5d0
--- /dev/null
+++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/dbus-fzsonick-48tl.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python2 -u
+# coding=utf-8
+
+import logging
+import re
+import socket
+import sys
+import gobject
+import signals
+import config as cfg
+
+from dbus.mainloop.glib import DBusGMainLoop
+from pymodbus.client.sync import ModbusSerialClient as Modbus
+from pymodbus.exceptions import ModbusException, ModbusIOException
+from pymodbus.other_message import ReportSlaveIdRequest
+from pymodbus.pdu import ExceptionResponse
+from pymodbus.register_read_message import ReadInputRegistersResponse
+from data import BatteryStatus, BatterySignal, Battery, ServiceSignal
+from python_libs.ie_dbus.dbus_service import DBusService
+
+# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime
+# noinspection PyUnreachableCode
+if False:
+    from typing import Callable, List, Iterable, NoReturn
+
+
+RESET_REGISTER = 0x2087
+
+
+def init_modbus(tty):
+    # type: (str) -> Modbus
+
+    logging.debug('initializing Modbus')
+
+    return Modbus(
+        port='/dev/' + tty,
+        method=cfg.MODE,
+        baudrate=cfg.BAUD_RATE,
+        stopbits=cfg.STOP_BITS,
+        bytesize=cfg.BYTE_SIZE,
+        timeout=cfg.TIMEOUT,
+        parity=cfg.PARITY)
+
+
+def init_udp_socket():
+    # type: () -> socket.socket
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    s.setblocking(False)
+
+    return s
+
+
+def report_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> str
+
+    slave = str(slave_address)
+
+    logging.debug('requesting slave id from node ' + slave)
+
+    with modbus:
+
+        request = ReportSlaveIdRequest(unit=slave_address)
+        response = modbus.execute(request)
+
+        # isinstance, not 'is': execute returns an instance, never the class itself
+        if isinstance(response, ExceptionResponse) or issubclass(type(response), ModbusException):
+            raise Exception('failed to get slave id from ' + slave + ' : ' + str(response))
+
+        return response.identifier
+
+
+def identify_battery(modbus, slave_address):
+    # type: (Modbus, int) -> Battery
+
+    logging.info('identifying battery...')
+
+    hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address)
+    firmware_version = read_firmware_version(modbus, slave_address)
+
+    specs = Battery(
+        slave_address=slave_address,
+        hardware_version=hardware_version,
+        firmware_version=firmware_version,
+        bms_version=bms_version,
+        ampere_hours=ampere_hours)
+
+    logging.info('battery identified:\n{0}'.format(str(specs)))
+
+    return specs
+
+
+def identify_batteries(modbus):
+    # type: (Modbus) -> List[Battery]
+
+    def _identify_batteries():
+        slave_address = 0
+        n_missing = -255
+
+        while n_missing < 3:
+            slave_address += 1
+            try:
+                yield identify_battery(modbus, slave_address)
+                n_missing = 0
+            except Exception as e:
+                logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e)))
+                n_missing += 1
+
+        logging.info('giving up searching for further batteries')
+
+    batteries = list(_identify_batteries())  # don't be lazy!
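+    # (the list() call above forces the generator to run to completion here,
+    #  so the whole bus scan and its logging happen before we count the results)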
+
+    n = len(batteries)
+    logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries'))
+
+    return batteries
+
+
+def parse_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> (str, str, int)
+
+    slave_id = report_slave_id(modbus, slave_address)
+
+    sid = re.sub(r'[^\x20-\x7E]', '', slave_id)  # remove weird special chars
+
+    match = re.match('(?P<hw>48TL(?P<ah>[0-9]+)) *(?P<bms>.*)', sid)
+
+    if match is None:
+        raise Exception('no known battery found')
+
+    return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip())
+
+
+def read_firmware_version(modbus, slave_address):
+    # type: (Modbus, int) -> str
+
+    logging.debug('reading firmware version')
+
+    with modbus:
+
+        response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
+        register = response.registers[0]
+
+        return '{0:0>4X}'.format(register)
+
+
+def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS):
+    # type: (Modbus, int, int, int) -> ReadInputRegistersResponse
+
+    logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))
+
+    return modbus.read_input_registers(
+        address=base_address,
+        count=count,
+        unit=slave_address)
+
+
+def read_battery_status(modbus, battery):
+    # type: (Modbus, Battery) -> BatteryStatus
+    """
+    Read the modbus registers containing the battery's status info.
+    """
+
+    logging.debug('reading battery status')
+
+    with modbus:
+        data = read_modbus_registers(modbus, battery.slave_address)
+        return BatteryStatus(battery, data.registers)
+
+
+def publish_values_on_dbus(service, battery_signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    publish_individuals(service, battery_signals, battery_statuses)
+    publish_aggregates(service, battery_signals, battery_statuses)
+
+
+def publish_aggregates(service, signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    for s in signals:
+        if s.aggregate is None:
+            continue
+        values = [s.get_value(battery_status) for battery_status in battery_statuses]
+        value = s.aggregate(values)
+        service.own_properties.set(s.dbus_path, value, s.unit)
+
+
+def publish_individuals(service, signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    for signal in signals:
+        for battery_status in battery_statuses:
+            address = battery_status.battery.slave_address
+            dbus_path = '/_Battery/' + str(address) + signal.dbus_path
+            value = signal.get_value(battery_status)
+            service.own_properties.set(dbus_path, value, signal.unit)
+
+
+def publish_service_signals(service, signals):
+    # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn
+
+    for signal in signals:
+        service.own_properties.set(signal.dbus_path, signal.value, signal.unit)
+
+
+def upload_status_to_innovenergy(sock, statuses):
+    # type: (socket.socket, Iterable[BatteryStatus]) -> bool
+
+    logging.debug('upload status')
+
+    try:
+        for s in statuses:
+            sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT))
+    except:
+        logging.debug('FAILED')
+        return False
+    else:
+        return True
+
+
+def print_usage():
+    print ('Usage: ' + __file__ + ' <tty device>')
+    print ('Example: ' + __file__ + ' ttyUSB0')
+
+
+def parse_cmdline_args(argv):
+    # type: (List[str]) -> str
+
+    if len(argv) == 0:
+        logging.info('missing command line argument for tty device')
+        print_usage()
+        sys.exit(1)
+
+    return argv[0]
+
+
+def 
reset_batteries(modbus, batteries): + # type: (Modbus, Iterable[Battery]) -> NoReturn + + logging.info('Resetting batteries...') + + for battery in batteries: + + result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address) + + # expecting a ModbusIOException (timeout) + # BMS can no longer reply because it is already reset + success = isinstance(result, ModbusIOException) + + outcome = 'successfully' if success else 'FAILED to' + logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome)) + + logging.info('Shutting down fz-sonick driver') + exit(0) + + +alive = True # global alive flag, watchdog_task clears it, update_task sets it + + +def create_update_task(modbus, service, batteries): + # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool] + """ + Creates an update task which runs the main update function + and resets the alive flag + """ + _socket = init_udp_socket() + _signals = signals.init_battery_signals() + + def update_task(): + # type: () -> bool + + global alive + + logging.debug('starting update cycle') + + if service.own_properties.get('/ResetBatteries').value == 1: + reset_batteries(modbus, batteries) + + statuses = [read_battery_status(modbus, battery) for battery in batteries] + + publish_values_on_dbus(service, _signals, statuses) + upload_status_to_innovenergy(_socket, statuses) + + logging.debug('finished update cycle\n') + + alive = True + + return True + + return update_task + + +def create_watchdog_task(main_loop): + # type: (DBusGMainLoop) -> Callable[[],bool] + """ + Creates a Watchdog task that monitors the alive flag. + The watchdog kills the main loop if the alive flag is not periodically reset by the update task. + Who watches the watchdog? + """ + def watchdog_task(): + # type: () -> bool + + global alive + + if alive: + logging.debug('watchdog_task: update_task is alive') + alive = False + return True + else: + logging.info('watchdog_task: killing main loop because update_task is no longer alive') + main_loop.quit() + return False + + return watchdog_task + + +def main(argv): + # type: (List[str]) -> () + + logging.basicConfig(level=cfg.LOG_LEVEL) + logging.info('starting ' + __file__) + + tty = parse_cmdline_args(argv) + modbus = init_modbus(tty) + + batteries = identify_batteries(modbus) + + if len(batteries) <= 0: + sys.exit(2) + + service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty) + + service.own_properties.set('/ResetBatteries', value=False, writable=True) # initial value = False + + main_loop = gobject.MainLoop() + + service_signals = signals.init_service_signals(batteries) + publish_service_signals(service, service_signals) + + update_task = create_update_task(modbus, service, batteries) + update_task() # run it right away, so that all props are initialized before anyone can ask + watchdog_task = create_watchdog_task(main_loop) + + gobject.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority = gobject.PRIORITY_LOW) # add watchdog first + gobject.timeout_add(cfg.UPDATE_INTERVAL, update_task, priority = gobject.PRIORITY_LOW) # call update once every update_interval + + logging.info('starting gobject.MainLoop') + main_loop.run() + logging.info('gobject.MainLoop was shut down') + + sys.exit(0xFF) # reaches this only on error + + +main(sys.argv[1:]) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/dbus-fzsonick-48tl.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/dbus-fzsonick-48tl.pyc new file mode 100644 index 000000000..095ea01bc Binary files /dev/null and 
b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/dbus-fzsonick-48tl.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/ve_utils.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/ve_utils.py new file mode 100644 index 000000000..459584bab --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/ve_utils.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from traceback import print_exc +from os import _exit as os_exit +from os import statvfs +import logging +from functools import update_wrapper +import dbus +logger = logging.getLogger(__name__) + +VEDBUS_INVALID = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) + +# Use this function to make sure the code quits on an unexpected exception. Make sure to use it +# when using gobject.idle_add and also gobject.timeout_add. +# Without this, the code will just keep running, since gobject does not stop the mainloop on an +# exception. +# Example: gobject.idle_add(exit_on_error, myfunc, arg1, arg2) +def exit_on_error(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except: + try: + print 'exit_on_error: there was an exception. Printing stacktrace will be tryed and then exit' + print_exc() + except: + pass + + # sys.exit() is not used, since that throws an exception, which does not lead to a program + # halt when used in a dbus callback, see connection.py in the Python/Dbus libraries, line 230. + os_exit(1) + + +__vrm_portal_id = None +def get_vrm_portal_id(): + # For the CCGX, the definition of the VRM Portal ID is that it is the mac address of the onboard- + # ethernet port (eth0), stripped from its colons (:) and lower case. + + # nice coincidence is that this also works fine when running on your (linux) development computer. + + global __vrm_portal_id + + if __vrm_portal_id: + return __vrm_portal_id + + # Assume we are on linux + import fcntl, socket, struct + + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', 'eth0'[:15])) + __vrm_portal_id = ''.join(['%02x' % ord(char) for char in info[18:24]]) + + return __vrm_portal_id + + +# See VE.Can registers - public.docx for definition of this conversion +def convert_vreg_version_to_readable(version): + def str_to_arr(x, length): + a = [] + for i in range(0, len(x), length): + a.append(x[i:i+length]) + return a + + x = "%x" % version + x = x.upper() + + if len(x) == 5 or len(x) == 3 or len(x) == 1: + x = '0' + x + + a = str_to_arr(x, 2); + + # remove the first 00 if there are three bytes and it is 00 + if len(a) == 3 and a[0] == '00': + a.remove(0); + + # if we have two or three bytes now, and the first character is a 0, remove it + if len(a) >= 2 and a[0][0:1] == '0': + a[0] = a[0][1]; + + result = '' + for item in a: + result += ('.' if result != '' else '') + item + + + result = 'v' + result + + return result + + +def get_free_space(path): + result = -1 + + try: + s = statvfs(path) + result = s.f_frsize * s.f_bavail # Number of free bytes that ordinary users + except Exception, ex: + logger.info("Error while retrieving free space for path %s: %s" % (path, ex)) + + return result + + +def get_load_averages(): + c = read_file('/proc/loadavg') + return c.split(' ')[:3] + + +# Returns False if it cannot find a machine name. 
Otherwise returns the string +# containing the name +def get_machine_name(): + c = read_file('/proc/device-tree/model') + + if c != False: + return c.strip('\x00') + + return read_file('/etc/venus/machine') + + +# Returns False if it cannot open the file. Otherwise returns its rstripped contents +def read_file(path): + content = False + + try: + with open(path, 'r') as f: + content = f.read().rstrip() + except Exception, ex: + logger.debug("Error while reading %s: %s" % (path, ex)) + + return content + + +def wrap_dbus_value(value): + if value is None: + return VEDBUS_INVALID + if isinstance(value, float): + return dbus.Double(value, variant_level=1) + if isinstance(value, bool): + return dbus.Boolean(value, variant_level=1) + if isinstance(value, int): + return dbus.Int32(value, variant_level=1) + if isinstance(value, str): + return dbus.String(value, variant_level=1) + if isinstance(value, unicode): + return dbus.String(value, variant_level=1) + if isinstance(value, list): + if len(value) == 0: + # If the list is empty we cannot infer the type of the contents. So assume unsigned integer. + # A (signed) integer is dangerous, because an empty list of signed integers is used to encode + # an invalid value. + return dbus.Array([], signature=dbus.Signature('u'), variant_level=1) + return dbus.Array([wrap_dbus_value(x) for x in value], variant_level=1) + if isinstance(value, long): + return dbus.Int64(value, variant_level=1) + if isinstance(value, dict): + # Wrapping the keys of the dictionary causes D-Bus errors like: + # 'arguments to dbus_message_iter_open_container() were incorrect, + # assertion "(type == DBUS_TYPE_ARRAY && contained_signature && + # *contained_signature == DBUS_DICT_ENTRY_BEGIN_CHAR) || (contained_signature == NULL || + # _dbus_check_is_valid_signature (contained_signature))" failed in file ...' + return dbus.Dictionary({(k, wrap_dbus_value(v)) for k, v in value.items()}, variant_level=1) + return value + + +dbus_int_types = (dbus.Int32, dbus.UInt32, dbus.Byte, dbus.Int16, dbus.UInt16, dbus.UInt32, dbus.Int64, dbus.UInt64) + + +def unwrap_dbus_value(val): + """Converts D-Bus values back to the original type. For example if val is of type DBus.Double, + a float will be returned.""" + if isinstance(val, dbus_int_types): + return int(val) + if isinstance(val, dbus.Double): + return float(val) + if isinstance(val, dbus.Array): + v = [unwrap_dbus_value(x) for x in val] + return None if len(v) == 0 else v + if isinstance(val, (dbus.Signature, dbus.String)): + return unicode(val) + # Python has no byte type, so we convert to an integer. + if isinstance(val, dbus.Byte): + return int(val) + if isinstance(val, dbus.ByteArray): + return "".join([str(x) for x in val]) + if isinstance(val, (list, tuple)): + return [unwrap_dbus_value(x) for x in val] + if isinstance(val, (dbus.Dictionary, dict)): + # Do not unwrap the keys, see comment in wrap_dbus_value + return dict([(x, unwrap_dbus_value(y)) for x, y in val.items()]) + if isinstance(val, dbus.Boolean): + return bool(val) + return val + +class reify(object): + """ Decorator to replace a property of an object with the calculated value, + to make it concrete. 
""" + def __init__(self, wrapped): + self.wrapped = wrapped + update_wrapper(self, wrapped) + def __get__(self, inst, objtype=None): + if inst is None: + return self + v = self.wrapped(inst) + setattr(inst, self.wrapped.__name__, v) + return v diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/ve_utils.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/ve_utils.pyc new file mode 100644 index 000000000..da3ba351b Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/ve_utils.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/vedbus.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/vedbus.py new file mode 100644 index 000000000..2dbed13e2 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/vedbus.py @@ -0,0 +1,496 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import dbus.service +import logging +import traceback +import os +import weakref +from ve_utils import wrap_dbus_value, unwrap_dbus_value + +# vedbus contains three classes: +# VeDbusItemImport -> use this to read data from the dbus, ie import +# VeDbusItemExport -> use this to export data to the dbus (one value) +# VeDbusService -> use that to create a service and export several values to the dbus + +# Code for VeDbusItemImport is copied from busitem.py and thereafter modified. +# All projects that used busitem.py need to migrate to this package. And some +# projects used to define there own equivalent of VeDbusItemExport. Better to +# use VeDbusItemExport, or even better the VeDbusService class that does it all for you. + +# TODOS +# 1 check for datatypes, it works now, but not sure if all is compliant with +# com.victronenergy.BusItem interface definition. See also the files in +# tests_and_examples. And see 'if type(v) == dbus.Byte:' on line 102. Perhaps +# something similar should also be done in VeDbusBusItemExport? +# 2 Shouldn't VeDbusBusItemExport inherit dbus.service.Object? +# 7 Make hard rules for services exporting data to the D-Bus, in order to make tracking +# changes possible. Does everybody first invalidate its data before leaving the bus? +# And what about before taking one object away from the bus, instead of taking the +# whole service offline? +# They should! And after taking one value away, do we need to know that someone left +# the bus? Or we just keep that value in invalidated for ever? Result is that we can't +# see the difference anymore between an invalidated value and a value that was first on +# the bus and later not anymore. See comments above VeDbusItemImport as well. +# 9 there are probably more todos in the code below. + +# Some thoughts with regards to the data types: +# +# Text from: http://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html#data-types +# --- +# Variants are represented by setting the variant_level keyword argument in the +# constructor of any D-Bus data type to a value greater than 0 (variant_level 1 +# means a variant containing some other data type, variant_level 2 means a variant +# containing a variant containing some other data type, and so on). If a non-variant +# is passed as an argument but introspection indicates that a variant is expected, +# it'll automatically be wrapped in a variant. +# --- +# +# Also the different dbus datatypes, such as dbus.Int32, and dbus.UInt32 are a subclass +# of Python int. 
dbus.String is a subclass of Python standard class unicode, etcetera +# +# So all together that explains why we don't need to explicitly convert back and forth +# between the dbus datatypes and the standard python datatypes. Note that all datatypes +# in python are objects. Even an int is an object. + +# The signature of a variant is 'v'. + +# Export ourselves as a D-Bus service. +class VeDbusService(object): + def __init__(self, servicename, bus=None): + # dict containing the VeDbusItemExport objects, with their path as the key. + self._dbusobjects = {} + self._dbusnodes = {} + + # dict containing the onchange callbacks, for each object. Object path is the key + self._onchangecallbacks = {} + + # Connect to session bus whenever present, else use the system bus + self._dbusconn = bus or (dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus()) + + # make the dbus connection available to outside, could make this a true property instead, but ach.. + self.dbusconn = self._dbusconn + + # Register ourselves on the dbus, trigger an error if already in use (do_not_queue) + self._dbusname = dbus.service.BusName(servicename, self._dbusconn, do_not_queue=True) + + # Add the root item that will return all items as a tree + self._dbusnodes['/'] = self._create_tree_export(self._dbusconn, '/', self._get_tree_dict) + + logging.info("registered ourselves on D-Bus as %s" % servicename) + + def _get_tree_dict(self, path, get_text=False): + logging.debug("_get_tree_dict called for %s" % path) + r = {} + px = path + if not px.endswith('/'): + px += '/' + for p, item in self._dbusobjects.items(): + if p.startswith(px): + v = item.GetText() if get_text else wrap_dbus_value(item.local_get_value()) + r[p[len(px):]] = v + logging.debug(r) + return r + + # To force immediate deregistering of this dbus service and all its object paths, explicitly + # call __del__(). + def __del__(self): + for node in self._dbusnodes.values(): + node.__del__() + self._dbusnodes.clear() + for item in self._dbusobjects.values(): + item.__del__() + self._dbusobjects.clear() + if self._dbusname: + self._dbusname.__del__() # Forces call to self._bus.release_name(self._name), see source code + self._dbusname = None + + # @param callbackonchange function that will be called when this value is changed. First parameter will + # be the path of the object, second the new value. This callback should return + # True to accept the change, False to reject it. + def add_path(self, path, value, description="", writeable=False, + onchangecallback=None, gettextcallback=None): + + if onchangecallback is not None: + self._onchangecallbacks[path] = onchangecallback + + item = VeDbusItemExport( + self._dbusconn, path, value, description, writeable, + self._value_changed, gettextcallback, deletecallback=self._item_deleted) + + spl = path.split('/') + for i in range(2, len(spl)): + subPath = '/'.join(spl[:i]) + if subPath not in self._dbusnodes and subPath not in self._dbusobjects: + self._dbusnodes[subPath] = self._create_tree_export(self._dbusconn, subPath, self._get_tree_dict) + self._dbusobjects[path] = item + logging.debug('added %s with start value %s. 
Writeable is %s' % (path, value, writeable)) + + # Add the mandatory paths, as per victron dbus api doc + def add_mandatory_paths(self, processname, processversion, connection, + deviceinstance, productid, productname, firmwareversion, hardwareversion, connected): + self.add_path('/Mgmt/ProcessName', processname) + self.add_path('/Mgmt/ProcessVersion', processversion) + self.add_path('/Mgmt/Connection', connection) + + # Create rest of the mandatory objects + self.add_path('/DeviceInstance', deviceinstance) + self.add_path('/ProductId', productid) + self.add_path('/ProductName', productname) + self.add_path('/FirmwareVersion', firmwareversion) + self.add_path('/HardwareVersion', hardwareversion) + self.add_path('/Connected', connected) + + def _create_tree_export(self, bus, objectPath, get_value_handler): + return VeDbusTreeExport(bus, objectPath, get_value_handler) + + # Callback function that is called from the VeDbusItemExport objects when a value changes. This function + # maps the change-request to the onchangecallback given to us for this specific path. + def _value_changed(self, path, newvalue): + if path not in self._onchangecallbacks: + return True + + return self._onchangecallbacks[path](path, newvalue) + + def _item_deleted(self, path): + self._dbusobjects.pop(path) + for np in self._dbusnodes.keys(): + if np != '/': + for ip in self._dbusobjects: + if ip.startswith(np + '/'): + break + else: + self._dbusnodes[np].__del__() + self._dbusnodes.pop(np) + + def __getitem__(self, path): + return self._dbusobjects[path].local_get_value() + + def __setitem__(self, path, newvalue): + self._dbusobjects[path].local_set_value(newvalue) + + def __delitem__(self, path): + self._dbusobjects[path].__del__() # Invalidates and then removes the object path + assert path not in self._dbusobjects + + def __contains__(self, path): + return path in self._dbusobjects + +""" +Importing basics: + - If when we power up, the D-Bus service does not exist, or it does exist and the path does not + yet exist, still subscribe to a signal: as soon as it comes online it will send a signal with its + initial value, which VeDbusItemImport will receive and use to update local cache. And, when set, + call the eventCallback. + - If when we power up, save it + - When using get_value, know that there is no difference between services (or object paths) that don't + exist and paths that are invalid (= empty array, see above). Both will return None. In case you do + really want to know ifa path exists or not, use the exists property. + - When a D-Bus service leaves the D-Bus, it will first invalidate all its values, and send signals + with that update, and only then leave the D-Bus. (or do we need to subscribe to the NameOwnerChanged- + signal!?!) To be discussed and make sure. Not really urgent, since all existing code that uses this + class already subscribes to the NameOwnerChanged signal, and subsequently removes instances of this + class. + +Read when using this class: +Note that when a service leaves that D-Bus without invalidating all its exported objects first, for +example because it is killed, VeDbusItemImport doesn't have a clue. So when using VeDbusItemImport, +make sure to also subscribe to the NamerOwnerChanged signal on bus-level. Or just use dbusmonitor, +because that takes care of all of that for you. +""" +class VeDbusItemImport(object): + ## Constructor + # @param bus the bus-object (SESSION or SYSTEM). 
+ # @param serviceName the dbus-service-name (string), for example 'com.victronenergy.battery.ttyO1' + # @param path the object-path, for example '/Dc/V' + # @param eventCallback function that you want to be called on a value change + # @param createSignal only set this to False if you use this function to one time read a value. When + # leaving it to True, make sure to also subscribe to the NameOwnerChanged signal + # elsewhere. See also note some 15 lines up. + def __init__(self, bus, serviceName, path, eventCallback=None, createsignal=True): + # TODO: is it necessary to store _serviceName and _path? Isn't it + # stored in the bus_getobjectsomewhere? + self._serviceName = serviceName + self._path = path + self._match = None + # TODO: _proxy is being used in settingsdevice.py, make a getter for that + self._proxy = bus.get_object(serviceName, path, introspect=False) + self.eventCallback = eventCallback + + assert eventCallback is None or createsignal == True + if createsignal: + self._match = self._proxy.connect_to_signal( + "PropertiesChanged", weak_functor(self._properties_changed_handler)) + + # store the current value in _cachedvalue. When it doesn't exists set _cachedvalue to + # None, same as when a value is invalid + self._cachedvalue = None + try: + v = self._proxy.GetValue() + except dbus.exceptions.DBusException: + pass + else: + self._cachedvalue = unwrap_dbus_value(v) + + def __del__(self): + if self._match != None: + self._match.remove() + self._match = None + self._proxy = None + + def _refreshcachedvalue(self): + self._cachedvalue = unwrap_dbus_value(self._proxy.GetValue()) + + ## Returns the path as a string, for example '/AC/L1/V' + @property + def path(self): + return self._path + + ## Returns the dbus service name as a string, for example com.victronenergy.vebus.ttyO1 + @property + def serviceName(self): + return self._serviceName + + ## Returns the value of the dbus-item. + # the type will be a dbus variant, for example dbus.Int32(0, variant_level=1) + # this is not a property to keep the name consistant with the com.victronenergy.busitem interface + # returns None when the property is invalid + def get_value(self): + return self._cachedvalue + + ## Writes a new value to the dbus-item + def set_value(self, newvalue): + r = self._proxy.SetValue(wrap_dbus_value(newvalue)) + + # instead of just saving the value, go to the dbus and get it. So we have the right type etc. + if r == 0: + self._refreshcachedvalue() + + return r + + ## Returns the text representation of the value. + # For example when the value is an enum/int GetText might return the string + # belonging to that enum value. Another example, for a voltage, GetValue + # would return a float, 12.0Volt, and GetText could return 12 VDC. + # + # Note that this depends on how the dbus-producer has implemented this. + def get_text(self): + return self._proxy.GetText() + + ## Returns true of object path exists, and false if it doesn't + @property + def exists(self): + # TODO: do some real check instead of this crazy thing. + r = False + try: + r = self._proxy.GetValue() + r = True + except dbus.exceptions.DBusException: + pass + + return r + + ## callback for the trigger-event. + # @param eventCallback the event-callback-function. + @property + def eventCallback(self): + return self._eventCallback + + @eventCallback.setter + def eventCallback(self, eventCallback): + self._eventCallback = eventCallback + + ## Is called when the value of the imported bus-item changes. 
+ # Stores the new value in our local cache, and calls the eventCallback, if set. + def _properties_changed_handler(self, changes): + if "Value" in changes: + changes['Value'] = unwrap_dbus_value(changes['Value']) + self._cachedvalue = changes['Value'] + if self._eventCallback: + # The reason behind this try/except is to prevent errors silently ending up the an error + # handler in the dbus code. + try: + self._eventCallback(self._serviceName, self._path, changes) + except: + traceback.print_exc() + os._exit(1) # sys.exit() is not used, since that also throws an exception + + +class VeDbusTreeExport(dbus.service.Object): + def __init__(self, bus, objectPath, get_value_handler): + dbus.service.Object.__init__(self, bus, objectPath) + self._get_value_handler = get_value_handler + logging.debug("VeDbusTreeExport %s has been created" % objectPath) + + def __del__(self): + # self._get_path() will raise an exception when retrieved after the call to .remove_from_connection, + # so we need a copy. + path = self._get_path() + if path is None: + return + self.remove_from_connection() + logging.debug("VeDbusTreeExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetValue(self): + value = self._get_value_handler(self._get_path()) + return dbus.Dictionary(value, signature=dbus.Signature('sv'), variant_level=1) + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetText(self): + return self._get_value_handler(self._get_path(), True) + + def local_get_value(self): + return self._get_value_handler(self.path) + + +class VeDbusItemExport(dbus.service.Object): + ## Constructor of VeDbusItemExport + # + # Use this object to export (publish), values on the dbus + # Creates the dbus-object under the given dbus-service-name. + # @param bus The dbus object. + # @param objectPath The dbus-object-path. + # @param value Value to initialize ourselves with, defaults to None which means Invalid + # @param description String containing a description. Can be called over the dbus with GetDescription() + # @param writeable what would this do!? :). + # @param callback Function that will be called when someone else changes the value of this VeBusItem + # over the dbus. First parameter passed to callback will be our path, second the new + # value. This callback should return True to accept the change, False to reject it. + def __init__(self, bus, objectPath, value=None, description=None, writeable=False, + onchangecallback=None, gettextcallback=None, deletecallback=None): + dbus.service.Object.__init__(self, bus, objectPath) + self._onchangecallback = onchangecallback + self._gettextcallback = gettextcallback + self._value = value + self._description = description + self._writeable = writeable + self._deletecallback = deletecallback + + # To force immediate deregistering of this dbus object, explicitly call __del__(). + def __del__(self): + # self._get_path() will raise an exception when retrieved after the + # call to .remove_from_connection, so we need a copy. + path = self._get_path() + if path == None: + return + if self._deletecallback is not None: + self._deletecallback(path) + self.local_set_value(None) + self.remove_from_connection() + logging.debug("VeDbusItemExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + ## Sets the value. 
And in case the value is different from what it was, a signal + # will be emitted to the dbus. This function is to be used in the python code that + # is using this class to export values to the dbus. + # set value to None to indicate that it is Invalid + def local_set_value(self, newvalue): + if self._value == newvalue: + return + + self._value = newvalue + + changes = {} + changes['Value'] = wrap_dbus_value(newvalue) + changes['Text'] = self.GetText() + self.PropertiesChanged(changes) + + def local_get_value(self): + return self._value + + # ==== ALL FUNCTIONS BELOW THIS LINE WILL BE CALLED BY OTHER PROCESSES OVER THE DBUS ==== + + ## Dbus exported method SetValue + # Function is called over the D-Bus by other process. It will first check (via callback) if new + # value is accepted. And it is, stores it and emits a changed-signal. + # @param value The new value. + # @return completion-code When successful a 0 is return, and when not a -1 is returned. + @dbus.service.method('com.victronenergy.BusItem', in_signature='v', out_signature='i') + def SetValue(self, newvalue): + if not self._writeable: + return 1 # NOT OK + + newvalue = unwrap_dbus_value(newvalue) + + if newvalue == self._value: + return 0 # OK + + # call the callback given to us, and check if new value is OK. + if (self._onchangecallback is None or + (self._onchangecallback is not None and self._onchangecallback(self.__dbus_object_path__, newvalue))): + + self.local_set_value(newvalue) + return 0 # OK + + return 2 # NOT OK + + ## Dbus exported method GetDescription + # + # Returns the a description. + # @param language A language code (e.g. ISO 639-1 en-US). + # @param length Lenght of the language string. + # @return description + @dbus.service.method('com.victronenergy.BusItem', in_signature='si', out_signature='s') + def GetDescription(self, language, length): + return self._description if self._description is not None else 'No description given' + + ## Dbus exported method GetValue + # Returns the value. + # @return the value when valid, and otherwise an empty array + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetValue(self): + return wrap_dbus_value(self._value) + + ## Dbus exported method GetText + # Returns the value as string of the dbus-object-path. + # @return text A text-value. '---' when local value is invalid + @dbus.service.method('com.victronenergy.BusItem', out_signature='s') + def GetText(self): + if self._value is None: + return '---' + + # Default conversion from dbus.Byte will get you a character (so 'T' instead of '84'), so we + # have to convert to int first. Note that if a dbus.Byte turns up here, it must have come from + # the application itself, as all data from the D-Bus should have been unwrapped by now. + if self._gettextcallback is None and type(self._value) == dbus.Byte: + return str(int(self._value)) + + if self._gettextcallback is None and self.__dbus_object_path__ == '/ProductId': + return "0x%X" % self._value + + if self._gettextcallback is None: + return str(self._value) + + return self._gettextcallback(self.__dbus_object_path__, self._value) + + ## The signal that indicates that the value has changed. + # Other processes connected to this BusItem object will have subscribed to the + # event when they want to track our state. + @dbus.service.signal('com.victronenergy.BusItem', signature='a{sv}') + def PropertiesChanged(self, changes): + pass + +## This class behaves like a regular reference to a class method (eg. 
self.foo), but keeps a weak reference +## to the object which method is to be called. +## Use this object to break circular references. +class weak_functor: + def __init__(self, f): + self._r = weakref.ref(f.__self__) + self._f = weakref.ref(f.__func__) + + def __call__(self, *args, **kargs): + r = self._r() + f = self._f() + if r == None or f == None: + return + f(r, *args, **kargs) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/vedbus.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/vedbus.pyc new file mode 100644 index 000000000..499c6c4c8 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/ext/velib_python/vedbus.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/__init__.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/__init__.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/__init__.pyc new file mode 100644 index 000000000..db1868bde Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/__init__.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/__init__.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/__init__.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/__init__.pyc new file mode 100644 index 000000000..5d474fa27 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/__init__.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/dbus_service.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/dbus_service.py new file mode 100644 index 000000000..ebd1b92e8 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/dbus_service.py @@ -0,0 +1,54 @@ +from logging import getLogger +from python_libs.ie_utils.mixins import Disposable, RequiresMainLoop, Record +from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon +from python_libs.ie_dbus.private.own_properties import OwnProperties +from python_libs.ie_dbus.private.remote_properties import RemoteProperties +from python_libs.ie_dbus.private.ve_constants import SERVICE_PREFIX +from python_libs.ie_dbus.private.settings import Settings + +_log = getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import Union, AnyStr, NoReturn, List + + +def _enforce_ve_prefix(service_name_filter): + if not service_name_filter.startswith(SERVICE_PREFIX): + raise ValueError('service_name_filter must start with ' + SERVICE_PREFIX) + + +SESSION_BUS = 0 +SYSTEM_BUS = 1 + + +class DBusService(Record, Disposable, RequiresMainLoop): + + def __init__(self, service_name=None, device_instance=1, connection_type_or_address=SYSTEM_BUS): + # type: (str, int, Union[int, AnyStr]) -> NoReturn + + service_name = service_name if service_name.startswith(SERVICE_PREFIX) else SERVICE_PREFIX + service_name + + self._daemon = DBusDaemon(connection_type_or_address) + self.remote_properties = RemoteProperties(self._daemon) + self.own_properties = OwnProperties(self._daemon) + self.own_properties.set('/DeviceInstance', device_instance) # must be set before request_name, sigh + + self.settings = Settings(self._daemon, self.remote_properties) + 
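+        # note: a service_name of None (the signature's default) would already have
+        # raised in the startswith() call above, so the None check below is effectively redundant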
self.name = service_name + + if service_name is not None: + self._bus_name = self._daemon.request_name(service_name) + _log.info('service name is ' + service_name) + + _log.info('id is ' + self.bus_id) + + @property + def available_services(self): + # type: () -> List[unicode] + return [s.name for s in self._daemon.services] + + @property + def bus_id(self): + # type: () -> unicode + return self._daemon.bus_id diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/dbus_service.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/dbus_service.pyc new file mode 100644 index 000000000..5770ab811 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/dbus_service.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/__init__.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/__init__.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/__init__.pyc new file mode 100644 index 000000000..97d047316 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/__init__.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/datatypes.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/datatypes.py new file mode 100644 index 000000000..282956033 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/datatypes.py @@ -0,0 +1,22 @@ +from logging import getLogger + +from python_libs.ie_utils.mixins import Record + +_log = getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import AnyStr + + +class ServiceInfo(Record): + + # noinspection PyShadowingBuiltins + def __init__(self, name, id, pid, proc_name, cmd): + # type: (AnyStr, AnyStr, int, str, str) -> ServiceInfo + + self.proc_name = proc_name + self.name = name + self.id = id + self.cmd = cmd + self.pid = pid diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/datatypes.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/datatypes.pyc new file mode 100644 index 000000000..a379d9652 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/datatypes.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_connection.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_connection.py new file mode 100644 index 000000000..54372460a --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_connection.py @@ -0,0 +1,185 @@ +from logging import getLogger + +from _dbus_bindings import Connection, MethodCallMessage, SignalMessage, BUS_DAEMON_NAME, \ + BUS_DAEMON_PATH, BUS_DAEMON_IFACE, NAME_FLAG_DO_NOT_QUEUE, Message, HANDLER_RESULT_HANDLED + +from python_libs.ie_dbus.private.dbus_types import dbus_string, dbus_uint32 +from python_libs.ie_dbus.private.message_types import DBusException +from python_libs.ie_utils.mixins import Disposable + +_log = getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import List, Optional, Iterable, Callable, Union, NoReturn, AnyStr, Any + from python_libs.ie_dbus.private.dbus_types import DbusType + + +class 
DbusConnection(Disposable): + """ + A collection of stateless functions operating on a Connection object + """ + + def __init__(self, connection_type_or_address): + # type: (Union[int, AnyStr]) -> NoReturn + + self._address = connection_type_or_address + # noinspection PyProtectedMember + self._connection = Connection._new_for_bus(connection_type_or_address) # it's not disposable + self.chain_disposable(self._connection.close, 'connection ' + self._connection.get_unique_name()) + + @property + def bus_id(self): + return self._connection.get_unique_name() + + def fork(self): + return DbusConnection(self._address) + + def get_ids_and_service_names(self): + # type: () -> Iterable[unicode] + + # noinspection PyTypeChecker + return map(unicode, self.call_daemon_method('ListNames')[0]) + + def get_service_names(self): + # type: () -> Iterable[AnyStr] + + return ( + unicode(name) + for name + in self.get_ids_and_service_names() + if not name.startswith(':') + ) + + def get_service_ids(self): + # type: () -> Iterable[AnyStr] + + return ( + name + for name in self.get_ids_and_service_names() if name.startswith(':')) + + # noinspection PyBroadException + def get_pid_of_service(self, service_name): + # type: (AnyStr) -> Optional[int] + try: + reply = self.call_daemon_method('GetConnectionUnixProcessID', dbus_string(service_name)) + return int(reply[0]) + except: + return None + + def get_id_of_service(self, service_name): + # type: (AnyStr) -> AnyStr + reply = self.call_daemon_method('GetNameOwner', dbus_string(service_name)) + return unicode(reply[0]) + + def call_method(self, service_name, object_path, interface, member, *args): + # type: (AnyStr, AnyStr, Optional[str], str, List[Any]) -> List[Any] + + msg = MethodCallMessage(service_name, object_path, interface, member) + + for arg in args: + msg.append(arg) + + reply = self._connection.send_message_with_reply_and_block(msg) # with py3 we could use asyncio here + DBusException.raise_if_error_reply(reply) + + return reply.get_args_list() # TODO: utf8_strings=True ? 
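+
+    # unlike call_method above, send_message below is fire-and-forget:
+    # the message is queued on the connection and no reply is awaited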
+ + def send_message(self, msg): + # type: (Message) -> NoReturn + + self._connection.send_message(msg) + + def call_daemon_method(self, method_name, *args): + # type: (AnyStr, Iterable[DbusType])-> List[any] + + return self.call_method(BUS_DAEMON_NAME, BUS_DAEMON_PATH, BUS_DAEMON_IFACE, method_name, *args) + + def request_name(self, service_name): + # type: (AnyStr) -> Disposable + + _log.debug('requesting bus name ' + service_name) + + self.call_daemon_method('RequestName', dbus_string(service_name), dbus_uint32(NAME_FLAG_DO_NOT_QUEUE)) + + def dispose(): + self.call_daemon_method('ReleaseName', dbus_string(service_name)) + + return self.create_dependent_disposable(dispose, 'bus name ' + service_name) + + def broadcast_signal(self, object_path, interface, member, *args): + # type: (AnyStr, AnyStr, AnyStr, List[Any]) -> NoReturn + + msg = SignalMessage(object_path, interface, member) + for arg in args: + msg.append(arg) + + self._connection.send_message(msg) + + def add_message_callback(self, callback, filter_rule, fork=True): + # type: (Callable[[Message], NoReturn], AnyStr, Optional[bool]) -> Disposable + if fork: + return self._add_message_callback_fork(callback, filter_rule) + else: + return self._add_message_callback_no_fork(callback, filter_rule) + + def _add_message_callback_no_fork(self, callback, filter_rule): # TODO: forking for incoming method calls + # type: (Callable[[Message], NoReturn], AnyStr) -> Disposable + + def dispatch(_, msg): + # type: (Connection, Message) -> int + + #_log.info(' ####### got message type=' + str(msg.get_type()) + ' ' + msg.get_path() + '/' + msg.get_member()) + callback(msg) + #_log.debug('DONE') + return HANDLER_RESULT_HANDLED + + msg_filter = self._add_message_filter(dispatch) + match = self._add_match(filter_rule) + + def dispose(): + match.dispose() + msg_filter.dispose() + + return self.create_dependent_disposable(dispose) + + def _add_message_callback_fork(self, callback, filter_rule): + # type: (Callable[[Message], NoReturn], AnyStr) -> Disposable + + forked = self.fork() + _log.debug('forked connection ' + forked.bus_id) + + def dispatch(_, msg): + # type: (Connection, Message) -> int + + # _log.debug('got message type=' + str(msg.get_type()) + ' ' + msg.get_path() + '/' + msg.get_member()) + callback(msg) + return HANDLER_RESULT_HANDLED + + forked._add_message_filter(dispatch) + forked._add_match(filter_rule) + + return self.create_dependent_disposable(forked) + + def _add_message_filter(self, callback): + # type: (Callable[[Connection, Message], int]) -> Disposable + + _log.debug('added filter on ' + self.bus_id) + self._connection.add_message_filter(callback) + + def dispose(): + self._connection.remove_message_filter(callback) + + return self.create_dependent_disposable(dispose, 'message filter on ' + self.bus_id) + + def _add_match(self, filter_rule): + # type: (AnyStr) -> Disposable + + self.call_daemon_method('AddMatch', dbus_string(filter_rule)) + + _log.debug('added match_rule: ' + filter_rule) + + def dispose(): + self.call_daemon_method('RemoveMatch', dbus_string(filter_rule)) + + return self.create_dependent_disposable(dispose, 'Match ' + filter_rule) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_connection.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_connection.pyc new file mode 100644 index 000000000..5f69a5a15 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_connection.pyc differ diff 
--git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_daemon.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_daemon.py new file mode 100644 index 000000000..825028454 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_daemon.py @@ -0,0 +1,273 @@ +from logging import getLogger + +from _dbus_bindings import Message, ErrorMessage, BUS_DAEMON_NAME, BUS_DAEMON_PATH, BUS_DAEMON_IFACE +from python_libs.ie_dbus.private.datatypes import ServiceInfo +from python_libs.ie_dbus.private.dbus_connection import DbusConnection +from python_libs.ie_dbus.private.message_types import MatchedMessage, MessageFilter, ResolvedMessage +from python_libs.ie_utils.mixins import Disposable, RequiresMainLoop + +_log = getLogger(__name__) + +NONE = '' + +# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Optional, Iterable, Union, AnyStr, NoReturn, Any, Dict + from python_libs.ie_dbus.private.dbus_types import DbusType + + +class DBusDaemon(Disposable, RequiresMainLoop): + + _services = None # type: Dict[str, ServiceInfo] + + def __init__(self, connection_type_or_address): + # type: (Union[int, AnyStr]) -> NoReturn + + self._dbus = DbusConnection(connection_type_or_address) + # self._dbus.add_message_callback(lambda _: None, 'type=method_call', fork=False) # sink method calls, TODO + + self._name_changed = self.subscribe_to_signal_message( + self._on_name_owner_changed, + sender_id=BUS_DAEMON_NAME, + object_path=BUS_DAEMON_PATH, + interface=BUS_DAEMON_IFACE, + member='NameOwnerChanged') + + self._services = self._init_services() + + @property + def bus_id(self): + # type: () -> AnyStr + return self._dbus.bus_id + + @property + def services(self): + # type: () -> Iterable[ServiceInfo] + return self._services.itervalues() + + def subscribe_to_signal_message( + self, + callback, + sender_id='*', + sender_name='*', + object_path='*', + interface='*', + member='*', + signature='*'): + # type: (Callable[[MatchedMessage], None], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr]) -> Disposable + + message_filter = MessageFilter( + message_type='signal', + sender_id=sender_id, + sender_name=sender_name, + object_path=object_path, + interface=interface, + member=member, + signature=signature) + + def dispatch(msg): + # type: (Message) -> NoReturn + + resolved_msg = self._resolve_message(msg) + matched = message_filter.match_message(resolved_msg) + + if matched is not None: + callback(matched) + + return self._dbus.add_message_callback(dispatch, message_filter.filter_rule) + + def subscribe_to_method_call_message( + self, + callback, + sender_id='*', + sender_name='*', + object_path='*', + interface='*', + member='*', + signature='*', + destination_id='*', + destination_name='*'): + # type: (Callable[[MatchedMessage], Any], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[bool]) -> Disposable + + message_filter = MessageFilter( + message_type='method_call', + sender_id=sender_id, + sender_name=sender_name, + object_path=object_path, + interface=interface, + member=member, + signature=signature, + destination_id=destination_id, + destination_name=destination_name) # TODO: eavesdrop logic + + def dispatch(msg): + # type: (Message) -> NoReturn + + if msg.get_type() != 1: + return + + resolved_msg = self._resolve_message(msg) + matched = 
message_filter.match_message(resolved_msg) + + if matched is None: + reply = ErrorMessage(msg, 'com.victronenergy.method_call_refused', 'refused') + else: + try: + result = callback(matched) + except Exception as e: + # _log.debug('method_call threw an exception ' + str(e)) + # traceback.print_exc() + reply = matched.create_error_reply(e) + else: + reply = matched.create_method_reply(result) + + self._dbus.send_message(reply) + + return self._dbus.add_message_callback(dispatch, message_filter.filter_rule, fork=False) + + def request_name(self, service_name): + # type: (AnyStr) -> Disposable + + return self._dbus.request_name(service_name) + + def call_method(self, service_name, object_path, interface, member, *args): + # type: (AnyStr, AnyStr, AnyStr, AnyStr, Iterable[DbusType]) -> List[Any] + + return self._dbus.call_method(service_name, object_path, interface, member, *args) + + def broadcast_signal(self, object_path, interface, member, *args): + # type: (AnyStr, AnyStr, AnyStr, List[DbusType]) -> NoReturn + + self._dbus.broadcast_signal(object_path, interface, member, *args) + + def get_service_names_of_id(self, service_id): + # type: (str) -> List[AnyStr] + + if service_id is None: + return [] + + return [ + s.name + for s in self.services + if s.id == service_id + ] + + def get_id_for_service_name(self, service_name): + # type: (AnyStr) -> Optional[AnyStr] + + return next((s.id for s in self.services if s.name == service_name), None) + + def exists_service_with_name(self, service_name): + # type: (AnyStr) -> bool + + return self.get_id_for_service_name(service_name) is not None + + def _resolve_message(self, msg): + # type: (Message) -> ResolvedMessage + + sender_id, sender_names = self._resolve_name(msg.get_sender()) + destination_id, destination_names = self._resolve_name(msg.get_destination()) + + return ResolvedMessage(msg, sender_id, sender_names, destination_id, destination_names) + + # noinspection PyShadowingBuiltins + def _resolve_name(self, name): + # type: (str) -> (str, List[str]) + + if name is None: + id = NONE + names = [] + elif name.startswith(':'): + id = name + names = self.get_service_names_of_id(name) + else: + id = self.get_id_for_service_name(name) + names = [name] + + return id, names + + def _on_name_owner_changed(self, msg): + # type: (MatchedMessage) -> NoReturn + + (name, old_id, new_id) = msg.arguments + + old_id = old_id.strip() + new_id = new_id.strip() + name = name.strip() + + if name.startswith(':'): + name = None + + added = old_id == '' and new_id != '' + changed = old_id != '' and new_id != '' + removed = old_id != '' and new_id == '' + + # 'changed' is dispatched as 'removed' followed by 'added' + + if removed or changed: + self._services.pop(old_id, None) + + if added or changed: + service = self._create_service(name, new_id) + self._services[new_id] = service + + # noinspection PyShadowingBuiltins + def _init_services(self): + # type: () -> Dict[str, ServiceInfo] + + services = dict() + + names_and_ids = self._dbus.get_ids_and_service_names() + + ids = set([i for i in names_and_ids if i.startswith(':')]) + names = [n for n in names_and_ids if not n.startswith(':')] + + for service_name in names: + service = self._create_service(service_name) + services[service.id] = service + ids.discard(service.id) + + self._services = services # UGLY, because _create_service below references it. 
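The assignment above publishes the half-built dict early on purpose: `_create_service` (below) can fall back to `_search_service_name_by_pid`, which iterates `self.services`. A sketch of an alternative that makes the dependency explicit instead (hypothetical refactoring, not the original code):

```python
def _search_service_name_by_pid(self, pid, services):
    # type: (int, Dict[str, ServiceInfo]) -> AnyStr
    # Same lookup as the method below, but over an explicitly passed dict,
    # so _init_services would not need to publish self._services early.
    return next(
        (s.name for s in services.itervalues()
         if s.pid == pid and s.name != NONE),
        NONE)
```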
+ + for id in ids: + services[id] = self._create_service(id=id) + + return services + + def _search_service_name_by_pid(self, pid): + # type: (int) -> Optional[AnyStr] + return next((s.name for s in self.services if s.pid == pid and s.name != NONE), NONE) + + # noinspection PyShadowingBuiltins + def _create_service(self, name=None, id=None): + # type: (Optional[AnyStr], Optional[AnyStr]) -> ServiceInfo + + id = id or self._dbus.get_id_of_service(name) + pid = self._dbus.get_pid_of_service(id) + proc = self._get_process_name_of_pid(pid) + cmd = self._get_commandline_of_pid(pid) + name = name or self._search_service_name_by_pid(pid) + + return ServiceInfo(name, id, pid, proc, cmd) + + # noinspection PyBroadException + @staticmethod + def _get_process_name_of_pid(service_pid): + # type: (int) -> str + + try: + with open('/proc/{0}/comm'.format(service_pid)) as proc: + return proc.read().replace('\0', ' ').rstrip() + except Exception as _: + return '' + + # noinspection PyBroadException + @staticmethod + def _get_commandline_of_pid(service_pid): + # type: (int) -> str + + try: + with open('/proc/{0}/cmdline'.format(service_pid)) as proc: + return proc.read().replace('\0', ' ').rstrip() + except Exception as _: + return '' diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_daemon.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_daemon.pyc new file mode 100644 index 000000000..20f55842c Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_daemon.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_types.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_types.py new file mode 100644 index 000000000..522473aa5 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_types.py @@ -0,0 +1,139 @@ +from logging import getLogger + +import dbus + + +_log = getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import Any, Union, Dict + DbusString = Union[dbus.String, dbus.UTF8String, dbus.ObjectPath, dbus.Signature] + DbusInt = Union[dbus.Int16, dbus.Int32, dbus.Int64] + DbusDouble = dbus.Double + DbusBool = dbus.Boolean + + DbusStringVariant = DbusString # TODO: variant_level constraint ? 
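Regarding the `variant_level` TODO just above: in dbus-python, `variant_level=n` records that a value is wrapped in n layers of D-Bus variant containers, which plain type aliases cannot express. A small factual illustration (not from the original file):

```python
import dbus

plain = dbus.Double(1.5)                    # marshals with signature 'd'
boxed = dbus.Double(1.5, variant_level=1)   # marshals as a variant 'v' holding 'd'

assert plain == boxed               # equal as Python floats...
assert boxed.variant_level == 1     # ...but boxed travels as a variant on the wire
```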
+ DbusIntVariant = DbusInt + DbusDoubleVariant = DbusDouble + DbusBoolVariant = DbusBool + + DbusValue = Union[DbusString, DbusInt, DbusDouble, DbusBool, DBUS_NONE] + DbusVariant = Union[DbusStringVariant, DbusIntVariant, DbusDoubleVariant, DbusBoolVariant, DBUS_NONE] + + DbusTextDict = dbus.Dictionary + DbusVariantDict = dbus.Dictionary + + DbusType = Union[DbusValue, DbusVariant, DbusVariantDict, DbusTextDict] + +DBUS_NONE = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) # DEFINED by victron + +MAX_INT16 = 2 ** 15 - 1 +MAX_INT32 = 2 ** 31 - 1 + + +def dbus_uint32(value): + # type: (int) -> dbus.UInt32 + if value < 0: + raise Exception('cannot convert negative value to UInt32') + + return dbus.UInt32(value) + + +def dbus_int(value): + # type: (Union[int, long]) -> Union[dbus.Int16, dbus.Int32, dbus.Int64] + abs_value = abs(value) + if abs_value < MAX_INT16: + return dbus.Int16(value) + elif abs_value < MAX_INT32: + return dbus.Int32(value) + else: + return dbus.Int64(value) + + +def dbus_string(value): + # type: (Union[str, unicode]) -> DbusString + if isinstance(value, unicode): + return dbus.UTF8String(value) + else: + return dbus.String(value) + + +def dbus_double(value): + # type: (float) -> DbusDouble + return dbus.Double(value) + + +def dbus_bool(value): + # type: (bool) -> DbusBool + return dbus.Boolean(value) + + +# VARIANTS + +def dbus_int_variant(value): + # type: (Union[int, long]) -> DbusIntVariant + abs_value = abs(value) + if abs_value < MAX_INT16: + return dbus.Int16(value, variant_level=1) + elif abs_value < MAX_INT32: + return dbus.Int32(value, variant_level=1) + else: + return dbus.Int64(value, variant_level=1) + + +def dbus_string_variant(value): + # type: (Union[str, unicode]) -> DbusStringVariant + if isinstance(value, unicode): + return dbus.UTF8String(value, variant_level=1) + else: + return dbus.String(value, variant_level=1) + + +def dbus_double_variant(value): + # type: (float) -> DbusDoubleVariant + return dbus.Double(value, variant_level=1) + + +def dbus_bool_variant(value): + # type: (bool) -> DbusBoolVariant + return dbus.Boolean(value, variant_level=1) + + +def dbus_variant(value): + # type: (Any) -> DbusVariant + + if value is None: + return DBUS_NONE + if isinstance(value, float): + return dbus_double_variant(value) + if isinstance(value, bool): + return dbus_bool_variant(value) + if isinstance(value, (int, long)): + return dbus_int_variant(value) + if isinstance(value, (str, unicode)): + return dbus_string_variant(value) + # TODO: container types + + raise TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value)) + + +def dbus_value(value): + # type: (Any) -> DbusVariant + + if value is None: + return DBUS_NONE + if isinstance(value, float): + return dbus_double(value) + if isinstance(value, bool): + return dbus_bool(value) + if isinstance(value, (int, long)): + return dbus_int(value) + if isinstance(value, (str, unicode)): + return dbus_string_variant(value) + # TODO: container types + + raise TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value)) + + + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_types.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_types.pyc new file mode 100644 index 000000000..000289bbc Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/dbus_types.pyc differ diff --git 
a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/message_types.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/message_types.py new file mode 100644 index 000000000..7a8cf615f --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/message_types.py @@ -0,0 +1,259 @@ +from fnmatch import fnmatch as glob +from logging import getLogger + +from _dbus_bindings import ErrorMessage, Message, MethodReturnMessage +from python_libs.ie_utils.mixins import Record + +_log = getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import List, Optional, Iterable, AnyStr, NoReturn, Any + + +class MessageType(object): + + invalid = 0 + method_call = 1 + method_return = 2 + error = 3 + signal = 4 + + @staticmethod + def parse(message_type): + # type: (int) -> str + + if message_type == 1: + return 'method_call' + if message_type == 2: + return 'method_return' + if message_type == 3: + return 'error' + if message_type == 4: + return 'signal' + + return 'invalid' + + +class DBusMessage(Record): + + def __init__(self, msg, sender_id, destination_id): + # type: (Message, str, str) -> NoReturn + + self.sender_id = sender_id + self.destination_id = destination_id + self._msg = msg + + @property + def expects_reply(self): + # type: () -> bool + return not self._msg.get_no_reply() + + @property + def message_type(self): + # type: () -> int + return int(self._msg.get_type()) + + @property + def reply_serial(self): + # type: () -> int + return int(self._msg.get_reply_serial()) + + @property + def object_path(self): + # type: () -> str + return str(self._msg.get_path()) + + @property + def interface(self): + # type: () -> str + return str(self._msg.get_interface()) + + @property + def arguments(self): + # type: () -> List[Any] + return self._msg.get_args_list(utf8_strings=True) + + @property + def signature(self): + # type: () -> str + return str(self._msg.get_signature()) + + @property + def serial(self): + # type: () -> int + return int(self._msg.get_serial()) + + @property + def member(self): + # type: () -> str + return str(self._msg.get_member()) + + def create_method_reply(self, *args): + # type: (List[any]) -> MethodReturnMessage + + if self.message_type != MessageType.method_call: + raise Exception('cannot create a reply for a message that is not a method call') + + reply = MethodReturnMessage(self._msg) + + for arg in args: + reply.append(arg) + + return reply + + def create_error_reply(self, exception): + # type: (Exception) -> ErrorMessage + + if self.message_type != MessageType.method_call: + raise Exception('cannot create an error reply for a message that is not a method call') + + return ErrorMessage(self._msg, 'com.victronenergy.' 
+ exception.__class__.__name__, exception.message) # TODO prefix + + +class ResolvedMessage(DBusMessage): + + def __init__(self, msg, sender_id, sender_names, destination_id, destination_names): + # type: (Message, str, List[str], str, List[str]) -> NoReturn + + super(ResolvedMessage, self).__init__(msg, sender_id, destination_id) + + self.sender_names = sender_names + self.destination_names = destination_names + + +class MatchedMessage(DBusMessage): + + def __init__(self, resolved_msg, sender_name, destination_name): + # type: (ResolvedMessage, str, str) -> NoReturn + + super(MatchedMessage, self).__init__(resolved_msg._msg, resolved_msg.sender_id, resolved_msg.destination_id) + + self.sender_name = sender_name + self.destination_name = destination_name + + +class MessageFilter(Record): + + def __init__( + self, + message_type='*', + sender_id='*', + sender_name='*', + object_path='*', + interface='*', + member='*', + signature='*', + destination_id='*', + destination_name='*', + eavesdrop=False): + + # type: (Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[bool]) -> NoReturn + + self.signature = signature + self.message_type = message_type + + self.member = member + self.interface = interface + self.object_path = object_path + + self.sender_id = sender_id + self.sender_name = sender_name + self.destination_id = destination_id + self.destination_name = destination_name + + self.eavesdrop = eavesdrop + + @staticmethod + def create_filter_rule( + message_type='*', + sender_id='*', + sender_name='*', + object_path='*', + interface='*', + member='*', + destination_id='*', + eavesdrop=False): + # type: (Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],bool) -> AnyStr + + rules = [] + + def rule(key, value): + if '*' not in value and '?' 
not in value: + rules.append("%s='%s'" % (key, value)) + + rule('type', message_type) + rule('sender', sender_id if sender_name == '*' and sender_id != '*' else sender_name) + rule('destination', destination_id) + rule('eavesdrop', 'true' if eavesdrop else 'false') + rule('path', object_path) # TODO: endswith *, object namespace + rule('interface', interface) + rule('member', member) + + return ','.join(rules) + + @property + def filter_rule(self): + # type: () -> AnyStr + + return self.create_filter_rule( + message_type=self.message_type, + sender_id=self.sender_id, + sender_name=self.sender_name, + object_path=self.object_path, + interface=self.interface, + member=self.member, + destination_id=self.destination_id, + eavesdrop=self.eavesdrop) + + @staticmethod + def _get_matching_name(names, name_filter): + # type: (Iterable[AnyStr], AnyStr) -> Optional[AnyStr] + + matching_names = ( + name + for name + in names + if glob(name, name_filter) + ) + + return next(matching_names, None) + + def match_message(self, msg): + # type: (ResolvedMessage) -> Optional[MatchedMessage] + + match = \ + glob(msg.object_path, self.object_path) and \ + glob(msg.interface or '', self.interface) and \ + glob(msg.member, self.member) and \ + glob(msg.signature, self.signature) and \ + glob(msg.sender_id, self.sender_id) and \ + glob(msg.destination_id or '', self.destination_id) + + if not match: + return None + + sender_name = self._get_matching_name(msg.sender_names, self.sender_name) + if sender_name is None and self.sender_name != '*': # sender might not have a well known name + return None + + destination_name = self._get_matching_name(msg.destination_names, self.destination_name) + if destination_name is None and self.destination_name != '*': + return None + + return MatchedMessage(msg, sender_name, destination_name) + + +class DBusException(Exception): + + def __init__(self, message): + super(Exception, self).__init__(message) + + @classmethod + def raise_if_error_reply(cls, reply): + # type: (Message) -> Message + + if isinstance(reply, ErrorMessage): + raise DBusException(reply.get_error_name()) + else: + return reply diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/message_types.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/message_types.pyc new file mode 100644 index 000000000..fae86efb1 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/message_types.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/own_properties.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/own_properties.py new file mode 100644 index 000000000..76404b25c --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/own_properties.py @@ -0,0 +1,177 @@ + +from logging import getLogger + +import dbus + +from python_libs.ie_dbus.private.dbus_types import dbus_variant, dbus_string +from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon +from python_libs.ie_dbus.private.message_types import MatchedMessage +from python_libs.ie_dbus.private.ve_constants import GET_TEXT, INTERFACE_BUS_ITEM, PROPERTIES_CHANGED, GET_VALUE, SET_VALUE +from python_libs.ie_utils.mixins import Disposable, Record + +_log = getLogger(__name__) + + +# noinspection PyUnreachableCode +if False: + from typing import Optional, AnyStr, NoReturn, Dict, Any + from python_libs.ie_dbus.private.dbus_types import DbusVariant, DbusString, 
DbusVariantDict, DbusType + + +class OwnProperty(Record): + + def __init__(self, value, unit='', writable=False): + + str_value = round(value, 2) if isinstance(value, float) else value + + self.text = unicode(str_value) + unit + self.value = value + self.unit = unit + self.writable = writable + + @property + def dbus_dict(self): + # type: () -> dbus.Dictionary + d = { + dbus.String('Text'): dbus_variant(self.text), + dbus.String('Value'): dbus_variant(self.value) + } + return dbus.Dictionary(d, signature='sv') + + @property + def dbus_value(self): + # type: () -> DbusVariant + return dbus_variant(self.value) + + @property + def dbus_text(self): + # type: () -> DbusString + return dbus_string(self.text) + + def update_value(self, value): + # type: (any) -> OwnProperty + return OwnProperty(value, self.unit, self.writable) + + def __iter__(self): + yield self.value + yield self.text + + +class OwnProperties(Disposable): + + _own_properties = None # type: Dict[AnyStr, OwnProperty] + + # noinspection PyProtectedMember + def __init__(self, daemon): + # type: (DBusDaemon) -> NoReturn + + self._daemon = daemon + self._own_properties = dict() + self._method_call_subs = self._daemon.subscribe_to_method_call_message(self._on_method_called) # no filter whatsoever + + def get(self, object_path): + # type: (AnyStr) -> OwnProperty + return self._own_properties[object_path] + + def set(self, object_path, value, unit='', writable=False): + # type: (AnyStr, any, Optional[AnyStr], Optional[bool]) -> bool + + prop = OwnProperty(value, unit, writable) + + if object_path in self._own_properties: + if self._own_properties[object_path] == prop: + return False + + self._own_properties[object_path] = prop + # object_path, interface, member, *args): + self._daemon.broadcast_signal( + object_path, + INTERFACE_BUS_ITEM, + PROPERTIES_CHANGED, + prop.dbus_dict) + + return True + + def _on_method_called(self, message): + # type: (MatchedMessage) -> Any + + # _log.info(str(message.sender_name) + '(' + str(message.sender_id) + ') asked ' + message.member + ' ' + message.object_path) + + if message.member == GET_VALUE: + return self._on_get_value_called(message) + elif message.member == GET_TEXT: + return self._on_get_text_called(message) + elif message.member == SET_VALUE: + return self._on_set_value_called(message) + + def _on_set_value_called(self, message): + # type: (MatchedMessage) -> bool + + path = message.object_path + + if path not in self._own_properties: + raise Exception('property ' + path + ' does not exist') + + prop = self._own_properties[path] + if not prop.writable: + raise Exception('property ' + path + ' is read-only') + + value = message.arguments[0] + + if prop.value == value: + return False + + prop = prop.update_value(value) + self._own_properties[path] = prop + + # object_path, interface, member, *args): + self._daemon.broadcast_signal( + path, + INTERFACE_BUS_ITEM, + PROPERTIES_CHANGED, + prop.dbus_dict) + + return True + + def _on_get_value_called(self, message): + # type: (MatchedMessage) -> DbusType + + path = message.object_path + + if path in self._own_properties: + return self._own_properties[path].dbus_value + + if path.endswith('/'): # "Tree Export" + values = { + dbus.String(k.lstrip('/')): dbus_variant(p.value) + for (k, p) + in self._own_properties.iteritems() + if k.startswith(path) + } + + return dbus.Dictionary(values, signature='sv', variant_level=1) # variant for tree export !! 
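To illustrate the "Tree Export" branch above with a hypothetical property set (dbus variant wrapping omitted, values written as plain Python for readability):

```python
# Hypothetical properties, as they would be registered via OwnProperties.set():
props = {'/Dc/0/Voltage': 52.3, '/Dc/0/Current': -4.1, '/Soc': 99}

# What the tree-export branch computes for a GetValue on the root path '/':
# one dict keyed by the object paths with their leading '/' stripped.
tree = {k.lstrip('/'): v for k, v in props.items() if k.startswith('/')}
print(tree)  # {'Dc/0/Voltage': 52.3, 'Dc/0/Current': -4.1, 'Soc': 99}
```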
+ + raise Exception('property ' + path + ' does not exist') + + def _on_get_text_called(self, message): + # type: (MatchedMessage) -> DbusType + + path = message.object_path + + if path in self._own_properties: + return self._own_properties[message.object_path].dbus_text + + if path.endswith('/'): # "Tree Export" + values = { + dbus.String(k.lstrip('/')): dbus.String(p.text) + for (k, p) + in self._own_properties.iteritems() + if k.startswith(path) + } + return dbus.Dictionary(values, signature='ss', variant_level=1) # variant for tree export !! + + raise Exception('property ' + path + ' does not exist') + + def __contains__(self, object_path): + # type: (AnyStr) -> bool + return object_path in self._own_properties diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/own_properties.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/own_properties.pyc new file mode 100644 index 000000000..e62bf1b7d Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/own_properties.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/remote_properties.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/remote_properties.py new file mode 100644 index 000000000..10a26f2cb --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/remote_properties.py @@ -0,0 +1,166 @@ +from logging import getLogger + +from python_libs.ie_dbus.private.dbus_types import dbus_variant +from python_libs.ie_utils.mixins import Disposable, Record +from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon +from python_libs.ie_dbus.private.message_types import MatchedMessage +from python_libs.ie_dbus.private.ve_constants import GET_TEXT, INTERFACE_BUS_ITEM, PROPERTIES_CHANGED, GET_VALUE, SERVICE_PREFIX, SET_VALUE + +_log = getLogger(__name__) + +_UNKNOWN_TEXT = '' + +# noinspection PyUnreachableCode +if False: + from typing import List, AnyStr, NoReturn, Dict, Any + + +class RemoteProperty(Record): + + def __init__(self, value, text): + + self.text = text + self.value = value + + @staticmethod + def from_dbus_dict(dbus_dict): + value = dbus_dict['Value'] + text = dbus_dict['Text'] + return RemoteProperty(value, text) + + +class RemoteProperties(Disposable): + + _remote_properties = None # type: Dict[AnyStr, RemoteProperty] + + def __init__(self, daemon): + # type: (DBusDaemon) -> NoReturn + + self._daemon = daemon + self._remote_properties = dict() + + # noinspection PyBroadException + def available_properties(self, service_name): + # type: (unicode) -> List[unicode] + + if not self._daemon.exists_service_with_name(service_name): + return [] + + try: + paths = self._call_remote(service_name=service_name, object_path='/', member=GET_TEXT)[0].keys() + except Exception as _: + return [] + else: + return ['/' + str(path) for path in paths] + + def exists(self, combined_path): + # type: (AnyStr) -> bool + + service_name, object_path, combined_path = self._parse_combined_path(combined_path) + return object_path in self.available_properties(service_name) + + def get(self, combined_path): + # type: (AnyStr) -> RemoteProperty + + service_name, object_path, combined_path = self._parse_combined_path(combined_path) + + if combined_path in self._remote_properties: + cached = self._remote_properties[combined_path] + + # a cached prop might have an unknown text, because its value has been written before, + # but it has never read or updated 
via property-changed + + if cached.text != _UNKNOWN_TEXT: + return cached + + text = self._get_text(service_name, object_path) + self._remote_properties[combined_path] = RemoteProperty(cached.value, text) + + return self._remote_properties[combined_path] + + prop = self._get_property(service_name, object_path) + self._remote_properties[combined_path] = prop + self._subscribe_to_property_changed(service_name, object_path) + + return prop + + def set(self, combined_path, value): + # type: (AnyStr, any) -> bool + + service_name, object_path, combined_path = self._parse_combined_path(combined_path) + + if combined_path in self._remote_properties: + if self._remote_properties[combined_path].value == value: + return False # property already has the requested value => nothing to do + else: + self._subscribe_to_property_changed(service_name, object_path) + + result = self._call_remote(service_name, object_path, SET_VALUE, dbus_variant(value))[0] + + if result != 0: + raise Exception(service_name + ' refused to set value of ' + object_path + ' to ' + str(value)) + + self._remote_properties[combined_path] = RemoteProperty(value, _UNKNOWN_TEXT) + + return True + + def _subscribe_to_property_changed(self, service_name, object_path): + # type: (unicode, unicode) -> NoReturn + + def callback(msg): + # type: (MatchedMessage) -> NoReturn + prop = RemoteProperty.from_dbus_dict(msg.arguments[0]) + key = msg.sender_name+msg.object_path + self._remote_properties[key] = prop + + signal = self._daemon.subscribe_to_signal_message( + callback=callback, + sender_name=service_name, + object_path=object_path, + interface=INTERFACE_BUS_ITEM, # TODO: <- this could be removed to make it more robust, in theory + member=PROPERTIES_CHANGED) # TODO: OTOH, don't fix if it is not broken + + self.chain_disposable(signal, 'signal subscription on ' + self._daemon.bus_id + ' ' + service_name + object_path) + + def _get_value(self, service_name, object_path): + # type: (unicode, unicode) -> any + + return self._call_remote(service_name, object_path, GET_VALUE)[0] + + def _get_text(self, service_name, object_path): + # type: (unicode, unicode) -> unicode + + result = self._call_remote(service_name, object_path, GET_TEXT)[0] + return unicode(result) + + def _get_property(self, service_name, object_path): + # type: (unicode, unicode) -> RemoteProperty + + value = self._get_value(service_name, object_path) + text = self._get_text(service_name, object_path) + + return RemoteProperty(value, text) + + def _call_remote(self, service_name, object_path, member, *args): + # type: (unicode, unicode, unicode, List[Any]) -> List[Any] + + return self._daemon.call_method(service_name, object_path, INTERFACE_BUS_ITEM, member, *args) + + def _parse_combined_path(self, combined_path): + # type: (str) -> (unicode,unicode,unicode) + + service_name, object_path = combined_path.lstrip('/').split('/', 1) + + if service_name == '': + raise Exception('Failed to parse service name. \ncombined_path must be of the form "service_name/path/to/property"') + if object_path == '': + raise Exception('Failed to parse object path. 
\ncombined_path must be of the form "service_name/path/to/property"') + + service_name = service_name if service_name.startswith(SERVICE_PREFIX) else SERVICE_PREFIX + service_name + + if not self._daemon.exists_service_with_name(service_name): + raise Exception('there is no service with the name "' + service_name + '" on the bus') + + object_path = '/' + object_path + + return unicode(service_name), unicode(object_path), unicode(service_name + object_path) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/remote_properties.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/remote_properties.pyc new file mode 100644 index 000000000..f75fc3631 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/remote_properties.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/settings.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/settings.py new file mode 100644 index 000000000..09a8ae859 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/settings.py @@ -0,0 +1,89 @@ +from logging import getLogger + +from python_libs.ie_dbus.private.dbus_types import dbus_string, dbus_int_variant, dbus_string_variant, dbus_double_variant, dbus_variant +from python_libs.ie_utils.mixins import Record +from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon +from python_libs.ie_dbus.private.remote_properties import RemoteProperties +from python_libs.ie_dbus.private.ve_constants import SETTINGS_SERVICE, SETTINGS_INTERFACE, SETTINGS_PREFIX + +_log = getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import Union, NoReturn, Optional, AnyStr + + +def prepend_settings_prefix(path): + # type: (AnyStr) -> any + + path = '/' + path.lstrip('/') + path = path if path.startswith(SETTINGS_PREFIX) else SETTINGS_PREFIX + path + return path + + +class Settings(Record): + + # noinspection PyProtectedMember + def __init__(self, daemon, remote_properties): + # type: (DBusDaemon, RemoteProperties) -> NoReturn + + self._daemon = daemon + self._remote_properties = remote_properties + + # noinspection PyShadowingBuiltins + + def add_setting(self, path, default_value, min=None, max=None, silent=False): + # type: (AnyStr, Union[unicode, int, float], Union[int, float, None], Union[int, float, None], Optional[bool]) -> NoReturn + + path = prepend_settings_prefix(path) + + if isinstance(default_value, int): + item_type = 'i' + elif isinstance(default_value, float): + item_type = 'f' + elif isinstance(default_value, (str, unicode)): + item_type = 's' + else: + raise Exception('Unsupported Settings Type') + + reply = self._daemon.call_method( + SETTINGS_SERVICE, # service_name + '/', # object_path + SETTINGS_INTERFACE, # interface + 'AddSilentSetting' if silent else 'AddSetting', # member, + dbus_string(''), # "group", not used + dbus_string(path), + dbus_variant(default_value), + dbus_string(item_type), + dbus_int_variant(min or 0), + dbus_int_variant(max or 0)) + + if reply[0] != 0: + raise Exception('failed to add setting ' + path) + + def exists(self, path): + # type: (unicode) -> bool + + path = prepend_settings_prefix(path) + return path in self.available_settings + + def get(self, path): + # type: (unicode) -> Union[unicode, int, float] + + path = prepend_settings_prefix(path) + return self._remote_properties.get(SETTINGS_SERVICE + path).value + + def set(self, path, value): + # type: 
(unicode, Union[unicode, int, float]) -> NoReturn + + path = prepend_settings_prefix(path) + self._remote_properties.set(SETTINGS_SERVICE + path, value) + + @property + def available_settings(self): + # type: () -> [unicode] + return self._remote_properties.available_properties(SETTINGS_SERVICE) + + def __contains__(self, path): + # type: (unicode) -> bool + return self.exists(path) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/settings.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/settings.pyc new file mode 100644 index 000000000..8429dd625 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/settings.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/ve_constants.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/ve_constants.py new file mode 100644 index 000000000..b9e55df8e --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/ve_constants.py @@ -0,0 +1,11 @@ + +SERVICE_PREFIX = 'com.victronenergy.' +VE_SERVICE_FILTER = SERVICE_PREFIX + '*' +INTERFACE_BUS_ITEM = SERVICE_PREFIX + 'BusItem' +PROPERTIES_CHANGED = 'PropertiesChanged' +GET_VALUE = 'GetValue' +SET_VALUE = 'SetValue' +GET_TEXT = 'GetText' +SETTINGS_SERVICE = 'com.victronenergy.settings' +SETTINGS_INTERFACE = 'com.victronenergy.Settings' +SETTINGS_PREFIX = '/Settings' diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/ve_constants.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/ve_constants.pyc new file mode 100644 index 000000000..9ef77bf14 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_dbus/private/ve_constants.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/__init__.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/__init__.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/__init__.pyc new file mode 100644 index 000000000..884cf64fe Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/__init__.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/filters.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/filters.py new file mode 100644 index 000000000..32dc5e027 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/filters.py @@ -0,0 +1,73 @@ +from logging import getLogger + +# noinspection PyUnreachableCode +if False: + from typing import NoReturn, Optional + +_log = getLogger(__name__) + + +class MovingAverageFilter(object): + + def __init__(self, length=30, initial_value=0): + # type: (int, float) -> NoReturn + + self.value = initial_value + self.length = length + + def update(self, value, length=None): + # type: (float, int) -> float + + if length is not None: + self.length = length + + self.value = (self.value * self.length + value) / (self.length + 1) + + _log.debug('real value: ' + str(value) + ', filtered value: ' + str(self.value)) + + return self.value + + +class DebounceFilter(object): + + def __init__(self, initial_state=None, max_inertia=10): + # type: (Optional[bool], Optional[int]) -> NoReturn + + self._max_inertia = max_inertia + 
self._inertia = max_inertia + self._state = initial_state + + def reset(self, state=None, max_inertia=None): + # type: (Optional[bool], Optional[int]) -> bool + + self._max_inertia = max_inertia or self._max_inertia + self._inertia = self._max_inertia + self._state = state or self._state + + _log.debug('debounce filter reset: state={0}, inertia={1}'.format(self._state, self._inertia)) + + return self._state + + def flip(self): + # type: () -> bool + self._state = not self._state + self._inertia = self._max_inertia + return self._state + + def update(self, new_state, max_inertia=None): + # type: (bool, int) -> bool + + if max_inertia is not None and max_inertia != self._max_inertia: + return self.reset(new_state, max_inertia) + + if new_state != self._state: + if self._inertia > 0: + self._inertia = self._inertia - 1 + else: + self.flip() + else: + self._inertia = min(self._inertia + 1, self._max_inertia) + + _log.debug('debounce filter update: state={0}, inertia={1}'.format(self._state, self._inertia)) + + return self._state diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/filters.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/filters.pyc new file mode 100644 index 000000000..1378e921c Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/filters.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/main_loop.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/main_loop.py new file mode 100644 index 000000000..ae240be27 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/main_loop.py @@ -0,0 +1,30 @@ +from logging import getLogger +import traceback +import gobject + + +# noinspection PyUnreachableCode +if False: + from typing import Callable, NoReturn + +_log = getLogger(__name__) + + +def run_on_main_loop(update_action, update_period): + # type: (Callable[[],NoReturn], int) -> NoReturn + + main_loop = gobject.MainLoop() + + def update(*args, **kwargs): + try: + update_action() + return True + + except Exception as e: + _log.error(e.message) + traceback.print_exc() + main_loop.quit() + return False + + gobject.timeout_add(update_period, update) + main_loop.run() \ No newline at end of file diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/main_loop.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/main_loop.pyc new file mode 100644 index 000000000..419e953b1 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/main_loop.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/mixins.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/mixins.py new file mode 100644 index 000000000..5e8bf3986 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/mixins.py @@ -0,0 +1,115 @@ +from logging import getLogger +from _dbus_glib_bindings import DBusGMainLoop + +# noinspection PyUnreachableCode +if False: + from typing import Callable, NoReturn, List, AnyStr, Optional, Union + +_log = getLogger(__name__) + + +def nop(*_args): + pass + + +def memoize(fn): + + attr_name = '_memoized_' + fn.__name__ + + def _memoized(self): + if not hasattr(self, attr_name): + setattr(self, attr_name, fn(self)) + return getattr(self, attr_name) + + return _memoized + + +# noinspection PyAttributeOutsideInit +class Disposable(object): + + _dispose_actions = None # type: 
List[Callable[[],NoReturn]] + + def __enter__(self): + return self + + def __exit__(self, typ, value, tb): + self.dispose() + + def dispose(self): + # type: () -> NoReturn + + while self._dispose_actions: + dispose = self._dispose_actions.pop() + dispose() + + for k, v in self.__dict__.iteritems(): + if isinstance(v, Disposable) and v._dispose_actions: + _log.debug('disposing ' + type(self).__name__ + '.' + k) + v.dispose() + + def chain_disposable(self, dispose, message=None): + # type: (Union[Callable[[],None],Disposable], Optional[AnyStr]) -> NoReturn + + if self._dispose_actions is None: + self._dispose_actions = [] + + if isinstance(dispose, Disposable): + dispose = dispose.dispose + + if message is None: + self._dispose_actions.append(dispose) + return + + def dispose_with_log_msg(): + _log.debug('disposing ' + message) + dispose() + + # _log.debug('new disposable ' + message) + self._dispose_actions.append(dispose_with_log_msg) + + @classmethod + def create(cls, dispose_action, message=None): + # type: (Union[Callable[[],None],Disposable], Optional[AnyStr]) -> Disposable + + disposable = Disposable() + disposable.chain_disposable(dispose_action, message) + return disposable + + def create_dependent_disposable(self, dispose_action, message=None): + # type: (Union[Callable[[],None],Disposable], Optional[AnyStr]) -> Disposable + + disposable = Disposable.create(dispose_action, message) + self.chain_disposable(disposable) + return disposable + + +class Record(object): + + @memoize + def __str__(self): + return self.__class__.__name__ + ' ' + unicode(vars(self)) + + def __repr__(self): + return self.__str__() + + @memoize + def __hash__(self): + return self.__str__().__hash__() + + def __eq__(self, other): + # TODO: improve, iterable vars are not correctly handled + return str(other) == str(self) + + # make readonly + def __setattr__(self, key, value): + # type: (str, any) -> NoReturn + + if not key.startswith('_') and hasattr(self, key): # disallow redefining + raise ValueError(key + ' is read-only' + str(dir())) + + super(Record, self).__setattr__(key, value) + + +class RequiresMainLoop(object): + + main_loop = DBusGMainLoop(set_as_default=True) # initialized only once for all subclasses that need it diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/mixins.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/mixins.pyc new file mode 100644 index 000000000..8ff772726 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/mixins.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/utils.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/utils.py new file mode 100644 index 000000000..87177f9ef --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/utils.py @@ -0,0 +1,44 @@ +from logging import getLogger +import re + +# noinspection PyUnreachableCode +if False: + from typing import Dict + +_log = getLogger(__name__) + + +def make2way(dic): + # type: (Dict) -> Dict + for k, v in dic.items(): + dic[v] = k + + return dic + + +def invert_dict(src_dic): + # type: (Dict) -> Dict + dic = dict() + + for k, v in src_dic.items(): + dic[v] = k + + return dic + + +def enum_file_name_of(path): + # type: (str) -> Dict[int,str] + + """ + This is kinda hacky, but it works :) + The enum file must contain a single enum however! 
+ """ + + path = path[0:-1] if path.endswith('.pyc') else path + pattern = re.compile(r"^\s*(\w+)\s*=\s*(\d+)", re.M) + with open(path, "r") as f: + return { + int(m[1]): m[0] + for m + in pattern.findall(f.read()) + } diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/utils.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/utils.pyc new file mode 100644 index 000000000..f5768cf65 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/ie_utils/utils.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/__init__.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/__init__.py new file mode 100644 index 000000000..a9315e02d --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2019 Ram Rachum and collaborators. +# This program is distributed under the MIT license. +''' +PySnooper - Never use print for debugging again + +Usage: + + import pysnooper + + @pysnooper.snoop() + def your_function(x): + ... + +A log will be written to stderr showing the lines executed and variables +changed in the decorated function. + +For more information, see https://github.com/cool-RR/PySnooper +''' + +from .tracer import Tracer as snoop +from .variables import Attrs, Exploding, Indices, Keys +import collections + +__VersionInfo = collections.namedtuple('VersionInfo', + ('major', 'minor', 'micro')) + +__version__ = '0.4.0' +__version_info__ = __VersionInfo(*(map(int, __version__.split('.')))) + +del collections, __VersionInfo # Avoid polluting the namespace diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/pycompat.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/pycompat.py new file mode 100644 index 000000000..247dbde57 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/pycompat.py @@ -0,0 +1,95 @@ +# Copyright 2019 Ram Rachum and collaborators. +# This program is distributed under the MIT license. +'''Python 2/3 compatibility''' + +import abc +import os +import inspect +import sys +import datetime as datetime_module + +PY3 = (sys.version_info[0] == 3) +PY2 = not PY3 + +if hasattr(abc, 'ABC'): + ABC = abc.ABC +else: + class ABC(object): + """Helper class that provides a standard way to create an ABC using + inheritance. 
+ """ + __metaclass__ = abc.ABCMeta + __slots__ = () + + +if hasattr(os, 'PathLike'): + PathLike = os.PathLike +else: + class PathLike(ABC): + """Abstract base class for implementing the file system path protocol.""" + + @abc.abstractmethod + def __fspath__(self): + """Return the file system path representation of the object.""" + raise NotImplementedError + + @classmethod + def __subclasshook__(cls, subclass): + return ( + hasattr(subclass, '__fspath__') or + # Make a concession for older `pathlib` versions:g + (hasattr(subclass, 'open') and + 'path' in subclass.__name__.lower()) + ) + + +try: + iscoroutinefunction = inspect.iscoroutinefunction +except AttributeError: + iscoroutinefunction = lambda whatever: False # Lolz + +try: + isasyncgenfunction = inspect.isasyncgenfunction +except AttributeError: + isasyncgenfunction = lambda whatever: False # Lolz + + +if PY3: + string_types = (str,) + text_type = str +else: + string_types = (basestring,) + text_type = unicode + +try: + from collections import abc as collections_abc +except ImportError: # Python 2.7 + import collections as collections_abc + +if sys.version_info[:2] >= (3, 6): + time_isoformat = datetime_module.time.isoformat +else: + def time_isoformat(time, timespec='microseconds'): + assert isinstance(time, datetime_module.time) + if timespec != 'microseconds': + raise NotImplementedError + result = '{:02d}:{:02d}:{:02d}.{:06d}'.format( + time.hour, time.minute, time.second, time.microsecond + ) + assert len(result) == 15 + return result + + +def timedelta_format(timedelta): + time = (datetime_module.datetime.min + timedelta).time() + return time_isoformat(time, timespec='microseconds') + +def timedelta_parse(s): + hours, minutes, seconds, microseconds = map( + int, + s.replace('.', ':').split(':') + ) + return datetime_module.timedelta(hours=hours, minutes=minutes, + seconds=seconds, + microseconds=microseconds) + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/tracer.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/tracer.py new file mode 100644 index 000000000..1316f18d2 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/tracer.py @@ -0,0 +1,498 @@ +# Copyright 2019 Ram Rachum and collaborators. +# This program is distributed under the MIT license. + +import functools +import inspect +import opcode +import os +import sys +import re +import collections +import datetime as datetime_module +import itertools +import threading +import traceback + +from .variables import CommonVariable, Exploding, BaseVariable +from . 
import utils, pycompat +if pycompat.PY2: + from io import open + + +ipython_filename_pattern = re.compile('^<ipython-input-([0-9]+)-.*>$') + + +def get_local_reprs(frame, watch=(), custom_repr=(), max_length=None, normalize=False): + code = frame.f_code + vars_order = (code.co_varnames + code.co_cellvars + code.co_freevars + + tuple(frame.f_locals.keys())) + + result_items = [(key, utils.get_shortish_repr(value, custom_repr, + max_length, normalize)) + for key, value in frame.f_locals.items()] + result_items.sort(key=lambda key_value: vars_order.index(key_value[0])) + result = collections.OrderedDict(result_items) + + for variable in watch: + result.update(sorted(variable.items(frame, normalize))) + return result + + +class UnavailableSource(object): + def __getitem__(self, i): + return u'SOURCE IS UNAVAILABLE' + + +source_and_path_cache = {} + + +def get_path_and_source_from_frame(frame): + globs = frame.f_globals or {} + module_name = globs.get('__name__') + file_name = frame.f_code.co_filename + cache_key = (module_name, file_name) + try: + return source_and_path_cache[cache_key] + except KeyError: + pass + loader = globs.get('__loader__') + + source = None + if hasattr(loader, 'get_source'): + try: + source = loader.get_source(module_name) + except ImportError: + pass + if source is not None: + source = source.splitlines() + if source is None: + ipython_filename_match = ipython_filename_pattern.match(file_name) + if ipython_filename_match: + entry_number = int(ipython_filename_match.group(1)) + try: + import IPython + ipython_shell = IPython.get_ipython() + ((_, _, source_chunk),) = ipython_shell.history_manager. \ + get_range(0, entry_number, entry_number + 1) + source = source_chunk.splitlines() + except Exception: + pass + else: + try: + with open(file_name, 'rb') as fp: + source = fp.read().splitlines() + except utils.file_reading_errors: + pass + if not source: + # We used to check `if source is None` but I found a rare bug where it + # was empty, but not `None`, so now we check `if not source`. + source = UnavailableSource() + + # If we just read the source from a file, or if the loader did not + # apply tokenize.detect_encoding to decode the source into a + # string, then we should do that ourselves. + if isinstance(source[0], bytes): + encoding = 'utf-8' + for line in source[:2]: + # File coding may be specified.
Match pattern from PEP-263 + # (https://www.python.org/dev/peps/pep-0263/) + match = re.search(br'coding[:=]\s*([-\w.]+)', line) + if match: + encoding = match.group(1).decode('ascii') + break + source = [pycompat.text_type(sline, encoding, 'replace') for sline in + source] + + result = (file_name, source) + source_and_path_cache[cache_key] = result + return result + + +def get_write_function(output, overwrite): + is_path = isinstance(output, (pycompat.PathLike, str)) + if overwrite and not is_path: + raise Exception('`overwrite=True` can only be used when writing ' + 'content to file.') + if output is None: + def write(s): + stderr = sys.stderr + try: + stderr.write(s) + except UnicodeEncodeError: + # God damn Python 2 + stderr.write(utils.shitcode(s)) + elif is_path: + return FileWriter(output, overwrite).write + elif callable(output): + write = output + else: + assert isinstance(output, utils.WritableStream) + + def write(s): + output.write(s) + return write + + +class FileWriter(object): + def __init__(self, path, overwrite): + self.path = pycompat.text_type(path) + self.overwrite = overwrite + + def write(self, s): + with open(self.path, 'w' if self.overwrite else 'a', + encoding='utf-8') as output_file: + output_file.write(s) + self.overwrite = False + + +thread_global = threading.local() +DISABLED = bool(os.getenv('PYSNOOPER_DISABLED', '')) + +class Tracer: + ''' + Snoop on the function, writing everything it's doing to stderr. + + This is useful for debugging. + + When you decorate a function with `@pysnooper.snoop()` + or wrap a block of code in `with pysnooper.snoop():`, you'll get a log of + every line that ran in the function and a play-by-play of every local + variable that changed. + + If stderr is not easily accessible for you, you can redirect the output to + a file:: + + @pysnooper.snoop('/my/log/file.log') + + See values of some expressions that aren't local variables:: + + @pysnooper.snoop(watch=('foo.bar', 'self.x["whatever"]')) + + Expand values to see all their attributes or items of lists/dictionaries: + + @pysnooper.snoop(watch_explode=('foo', 'self')) + + (see Advanced Usage in the README for more control) + + Show snoop lines for functions that your function calls:: + + @pysnooper.snoop(depth=2) + + Start all snoop lines with a prefix, to grep for them easily:: + + @pysnooper.snoop(prefix='ZZZ ') + + On multi-threaded apps identify which thread are snooped in output:: + + @pysnooper.snoop(thread_info=True) + + Customize how values are represented as strings:: + + @pysnooper.snoop(custom_repr=((type1, custom_repr_func1), + (condition2, custom_repr_func2), ...)) + + Variables and exceptions get truncated to 100 characters by default. You + can customize that: + + @pysnooper.snoop(max_variable_length=200) + + You can also use `max_variable_length=None` to never truncate them. 
+ + Show timestamps relative to start time rather than wall time:: + + @pysnooper.snoop(relative_time=True) + + ''' + def __init__(self, output=None, watch=(), watch_explode=(), depth=1, + prefix='', overwrite=False, thread_info=False, custom_repr=(), + max_variable_length=100, normalize=False, relative_time=False): + self._write = get_write_function(output, overwrite) + + self.watch = [ + v if isinstance(v, BaseVariable) else CommonVariable(v) + for v in utils.ensure_tuple(watch) + ] + [ + v if isinstance(v, BaseVariable) else Exploding(v) + for v in utils.ensure_tuple(watch_explode) + ] + self.frame_to_local_reprs = {} + self.start_times = {} + self.depth = depth + self.prefix = prefix + self.thread_info = thread_info + self.thread_info_padding = 0 + assert self.depth >= 1 + self.target_codes = set() + self.target_frames = set() + self.thread_local = threading.local() + if len(custom_repr) == 2 and not all(isinstance(x, + pycompat.collections_abc.Iterable) for x in custom_repr): + custom_repr = (custom_repr,) + self.custom_repr = custom_repr + self.last_source_path = None + self.max_variable_length = max_variable_length + self.normalize = normalize + self.relative_time = relative_time + + def __call__(self, function_or_class): + if DISABLED: + return function_or_class + + if inspect.isclass(function_or_class): + return self._wrap_class(function_or_class) + else: + return self._wrap_function(function_or_class) + + def _wrap_class(self, cls): + for attr_name, attr in cls.__dict__.items(): + # Coroutines are functions, but snooping them is not supported + # at the moment + if pycompat.iscoroutinefunction(attr): + continue + + if inspect.isfunction(attr): + setattr(cls, attr_name, self._wrap_function(attr)) + return cls + + def _wrap_function(self, function): + self.target_codes.add(function.__code__) + + @functools.wraps(function) + def simple_wrapper(*args, **kwargs): + with self: + return function(*args, **kwargs) + + @functools.wraps(function) + def generator_wrapper(*args, **kwargs): + gen = function(*args, **kwargs) + method, incoming = gen.send, None + while True: + with self: + try: + outgoing = method(incoming) + except StopIteration: + return + try: + method, incoming = gen.send, (yield outgoing) + except Exception as e: + method, incoming = gen.throw, e + + if pycompat.iscoroutinefunction(function): + raise NotImplementedError + if pycompat.isasyncgenfunction(function): + raise NotImplementedError + elif inspect.isgeneratorfunction(function): + return generator_wrapper + else: + return simple_wrapper + + def write(self, s): + s = u'{self.prefix}{s}\n'.format(**locals()) + self._write(s) + + def __enter__(self): + if DISABLED: + return + calling_frame = inspect.currentframe().f_back + if not self._is_internal_frame(calling_frame): + calling_frame.f_trace = self.trace + self.target_frames.add(calling_frame) + + stack = self.thread_local.__dict__.setdefault( + 'original_trace_functions', [] + ) + stack.append(sys.gettrace()) + self.start_times[calling_frame] = datetime_module.datetime.now() + sys.settrace(self.trace) + + def __exit__(self, exc_type, exc_value, exc_traceback): + if DISABLED: + return + stack = self.thread_local.original_trace_functions + sys.settrace(stack.pop()) + calling_frame = inspect.currentframe().f_back + self.target_frames.discard(calling_frame) + self.frame_to_local_reprs.pop(calling_frame, None) + + ### Writing elapsed time: ############################################# + # # + start_time = self.start_times.pop(calling_frame) + duration = 
datetime_module.datetime.now() - start_time + elapsed_time_string = pycompat.timedelta_format(duration) + indent = ' ' * 4 * (thread_global.depth + 1) + self.write( + '{indent}Elapsed time: {elapsed_time_string}'.format(**locals()) + ) + # # + ### Finished writing elapsed time. #################################### + + def _is_internal_frame(self, frame): + return frame.f_code.co_filename == Tracer.__enter__.__code__.co_filename + + def set_thread_info_padding(self, thread_info): + current_thread_len = len(thread_info) + self.thread_info_padding = max(self.thread_info_padding, + current_thread_len) + return thread_info.ljust(self.thread_info_padding) + + def trace(self, frame, event, arg): + + ### Checking whether we should trace this line: ####################### + # # + # We should trace this line either if it's in the decorated function, + # or the user asked to go a few levels deeper and we're within that + # number of levels deeper. + + if not (frame.f_code in self.target_codes or frame in self.target_frames): + if self.depth == 1: + # We did the most common and quickest check above, because the + # trace function runs so incredibly often, therefore it's + # crucial to hyper-optimize it for the common case. + return None + elif self._is_internal_frame(frame): + return None + else: + _frame_candidate = frame + for i in range(1, self.depth): + _frame_candidate = _frame_candidate.f_back + if _frame_candidate is None: + return None + elif _frame_candidate.f_code in self.target_codes or _frame_candidate in self.target_frames: + break + else: + return None + + thread_global.__dict__.setdefault('depth', -1) + if event == 'call': + thread_global.depth += 1 + indent = ' ' * 4 * thread_global.depth + + # # + ### Finished checking whether we should trace this line. ############## + + ### Making timestamp: ################################################# + # # + if self.normalize: + timestamp = ' ' * 15 + elif self.relative_time: + try: + start_time = self.start_times[frame] + except KeyError: + start_time = self.start_times[frame] = \ + datetime_module.datetime.now() + duration = datetime_module.datetime.now() - start_time + timestamp = pycompat.timedelta_format(duration) + else: + timestamp = pycompat.time_isoformat( + datetime_module.datetime.now().time(), + timespec='microseconds' + ) + # # + ### Finished making timestamp. ######################################## + + line_no = frame.f_lineno + source_path, source = get_path_and_source_from_frame(frame) + source_path = source_path if not self.normalize else os.path.basename(source_path) + if self.last_source_path != source_path: + self.write(u'{indent}Source path:... {source_path}'. + format(**locals())) + self.last_source_path = source_path + source_line = source[line_no - 1] + thread_info = "" + if self.thread_info: + if self.normalize: + raise NotImplementedError("normalize is not supported with " + "thread_info") + current_thread = threading.current_thread() + thread_info = "{ident}-{name} ".format( + ident=current_thread.ident, name=current_thread.getName()) + thread_info = self.set_thread_info_padding(thread_info) + + ### Reporting newish and modified variables: ########################## + # # + old_local_reprs = self.frame_to_local_reprs.get(frame, {}) + self.frame_to_local_reprs[frame] = local_reprs = \ + get_local_reprs(frame, + watch=self.watch, custom_repr=self.custom_repr, + max_length=self.max_variable_length, + normalize=self.normalize, + ) + + newish_string = ('Starting var:.. ' if event == 'call' else + 'New var:....... 
') + + for name, value_repr in local_reprs.items(): + if name not in old_local_reprs: + self.write('{indent}{newish_string}{name} = {value_repr}'.format( + **locals())) + elif old_local_reprs[name] != value_repr: + self.write('{indent}Modified var:.. {name} = {value_repr}'.format( + **locals())) + + # # + ### Finished newish and modified variables. ########################### + + + ### Dealing with misplaced function definition: ####################### + # # + if event == 'call' and source_line.lstrip().startswith('@'): + # If a function decorator is found, skip lines until an actual + # function definition is found. + for candidate_line_no in itertools.count(line_no): + try: + candidate_source_line = source[candidate_line_no - 1] + except IndexError: + # End of source file reached without finding a function + # definition. Fall back to original source line. + break + + if candidate_source_line.lstrip().startswith('def'): + # Found the def line! + line_no = candidate_line_no + source_line = candidate_source_line + break + # # + ### Finished dealing with misplaced function definition. ############## + + # If a call ends due to an exception, we still get a 'return' event + # with arg = None. This seems to be the only way to tell the difference + # https://stackoverflow.com/a/12800909/2482744 + code_byte = frame.f_code.co_code[frame.f_lasti] + if not isinstance(code_byte, int): + code_byte = ord(code_byte) + ended_by_exception = ( + event == 'return' + and arg is None + and (opcode.opname[code_byte] + not in ('RETURN_VALUE', 'YIELD_VALUE')) + ) + + if ended_by_exception: + self.write('{indent}Call ended by exception'. + format(**locals())) + else: + self.write(u'{indent}{timestamp} {thread_info}{event:9} ' + u'{line_no:4} {source_line}'.format(**locals())) + + if event == 'return': + self.frame_to_local_reprs.pop(frame, None) + self.start_times.pop(frame, None) + thread_global.depth -= 1 + + if not ended_by_exception: + return_value_repr = utils.get_shortish_repr(arg, + custom_repr=self.custom_repr, + max_length=self.max_variable_length, + normalize=self.normalize, + ) + self.write('{indent}Return value:.. {return_value_repr}'. + format(**locals())) + + if event == 'exception': + exception = '\n'.join(traceback.format_exception_only(*arg[:2])).strip() + if self.max_variable_length: + exception = utils.truncate(exception, self.max_variable_length) + self.write('{indent}{exception}'. + format(**locals())) + + return self.trace diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/utils.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/utils.py new file mode 100644 index 000000000..ff9b9e855 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/utils.py @@ -0,0 +1,98 @@ +# Copyright 2019 Ram Rachum and collaborators. +# This program is distributed under the MIT license. + +import abc +import re + +import sys +from .pycompat import ABC, string_types, collections_abc + +def _check_methods(C, *methods): + mro = C.__mro__ + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + + +class WritableStream(ABC): + @abc.abstractmethod + def write(self, s): + pass + + @classmethod + def __subclasshook__(cls, C): + if cls is WritableStream: + return _check_methods(C, 'write') + return NotImplemented + + + +file_reading_errors = ( + IOError, + OSError, + ValueError # IronPython weirdness. 
+) + + + +def shitcode(s): + return ''.join( + (c if (0 < ord(c) < 256) else '?') for c in s + ) + + +def get_repr_function(item, custom_repr): + for condition, action in custom_repr: + if isinstance(condition, type): + condition = lambda x, y=condition: isinstance(x, y) + if condition(item): + return action + return repr + + +DEFAULT_REPR_RE = re.compile(r' at 0x[a-f0-9A-F]{4,}') + + +def normalize_repr(item_repr): + """Remove memory address (0x...) from a default python repr""" + return DEFAULT_REPR_RE.sub('', item_repr) + + +def get_shortish_repr(item, custom_repr=(), max_length=None, normalize=False): + repr_function = get_repr_function(item, custom_repr) + try: + r = repr_function(item) + except Exception: + r = 'REPR FAILED' + r = r.replace('\r', '').replace('\n', '') + if normalize: + r = normalize_repr(r) + if max_length: + r = truncate(r, max_length) + return r + + +def truncate(string, max_length): + if (max_length is None) or (len(string) <= max_length): + return string + else: + left = (max_length - 3) // 2 + right = max_length - 3 - left + return u'{}...{}'.format(string[:left], string[-right:]) + + +def ensure_tuple(x): + if isinstance(x, collections_abc.Iterable) and \ + not isinstance(x, string_types): + return tuple(x) + else: + return (x,) + + + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/variables.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/variables.py new file mode 100644 index 000000000..2229c38aa --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/python_libs/pysnooper/variables.py @@ -0,0 +1,133 @@ +import itertools +import abc +try: + from collections.abc import Mapping, Sequence +except ImportError: + from collections import Mapping, Sequence +from copy import deepcopy + +from . import utils +from . 
import pycompat + + +def needs_parentheses(source): + def code(s): + return compile(s, '', 'eval').co_code + + return code('{}.x'.format(source)) != code('({}).x'.format(source)) + + +class BaseVariable(pycompat.ABC): + def __init__(self, source, exclude=()): + self.source = source + self.exclude = utils.ensure_tuple(exclude) + self.code = compile(source, '', 'eval') + if needs_parentheses(source): + self.unambiguous_source = '({})'.format(source) + else: + self.unambiguous_source = source + + def items(self, frame, normalize=False): + try: + main_value = eval(self.code, frame.f_globals or {}, frame.f_locals) + except Exception: + return () + return self._items(main_value, normalize) + + @abc.abstractmethod + def _items(self, key, normalize=False): + raise NotImplementedError + + @property + def _fingerprint(self): + return (type(self), self.source, self.exclude) + + def __hash__(self): + return hash(self._fingerprint) + + def __eq__(self, other): + return (isinstance(other, BaseVariable) and + self._fingerprint == other._fingerprint) + + +class CommonVariable(BaseVariable): + def _items(self, main_value, normalize=False): + result = [(self.source, utils.get_shortish_repr(main_value, normalize=normalize))] + for key in self._safe_keys(main_value): + try: + if key in self.exclude: + continue + value = self._get_value(main_value, key) + except Exception: + continue + result.append(( + '{}{}'.format(self.unambiguous_source, self._format_key(key)), + utils.get_shortish_repr(value) + )) + return result + + def _safe_keys(self, main_value): + try: + for key in self._keys(main_value): + yield key + except Exception: + pass + + def _keys(self, main_value): + return () + + def _format_key(self, key): + raise NotImplementedError + + def _get_value(self, main_value, key): + raise NotImplementedError + + +class Attrs(CommonVariable): + def _keys(self, main_value): + return itertools.chain( + getattr(main_value, '__dict__', ()), + getattr(main_value, '__slots__', ()) + ) + + def _format_key(self, key): + return '.' 
+ key + + def _get_value(self, main_value, key): + return getattr(main_value, key) + + +class Keys(CommonVariable): + def _keys(self, main_value): + return main_value.keys() + + def _format_key(self, key): + return '[{}]'.format(utils.get_shortish_repr(key)) + + def _get_value(self, main_value, key): + return main_value[key] + + +class Indices(Keys): + _slice = slice(None) + + def _keys(self, main_value): + return range(len(main_value))[self._slice] + + def __getitem__(self, item): + assert isinstance(item, slice) + result = deepcopy(self) + result._slice = item + return result + + +class Exploding(BaseVariable): + def _items(self, main_value, normalize=False): + if isinstance(main_value, Mapping): + cls = Keys + elif isinstance(main_value, Sequence): + cls = Indices + else: + cls = Attrs + + return cls(self.source, self.exclude)._items(main_value, normalize) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/down b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/down new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/log/down b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/log/down new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/log/run b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/log/run new file mode 100755 index 000000000..74e759d9b --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/log/run @@ -0,0 +1,3 @@ +#!/bin/sh +exec 2>&1 +exec multilog t s25000 n4 /var/log/dbus-fzsonick-48tl.TTY diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/run b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/run new file mode 100755 index 000000000..7f5301435 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/service/run @@ -0,0 +1,4 @@ +#!/bin/sh +exec 2>&1 + +exec softlimit -d 100000000 -s 1000000 -a 100000000 /opt/innovenergy/dbus-fzsonick-48tl/start.sh TTY diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/signals.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/signals.py new file mode 100644 index 000000000..163a83a65 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/signals.py @@ -0,0 +1,214 @@ +# coding=utf-8 + +import config as cfg +from convert import mean, read_float, read_led_state, read_bool, count_bits, comma_separated +from data import BatterySignal, Battery, LedColor, ServiceSignal, BatteryStatus, LedState + +# noinspection PyUnreachableCode +if False: + from typing import List, Iterable + + +def init_service_signals(batteries): + # type: (List[Battery]) -> Iterable[ServiceSignal] + + n_batteries = len(batteries) + product_name = cfg.PRODUCT_NAME + ' x' + str(n_batteries) + + return [ + ServiceSignal('/NbOfBatteries', n_batteries), # TODO: nb of operational batteries + ServiceSignal('/Mgmt/ProcessName', __file__), + ServiceSignal('/Mgmt/ProcessVersion', cfg.SOFTWARE_VERSION), + ServiceSignal('/Mgmt/Connection', cfg.CONNECTION), + ServiceSignal('/DeviceInstance', cfg.DEVICE_INSTANCE), + ServiceSignal('/ProductName', product_name), + ServiceSignal('/ProductId', cfg.PRODUCT_ID), + ServiceSignal('/Connected', 1) + ] + + +def init_battery_signals(): + # type: () -> Iterable[BatterySignal] + + read_voltage = read_float(register=999, scale_factor=0.01, offset=0) + read_current = read_float(register=1000, scale_factor=0.01, offset=-10000) + + read_led_amber = read_led_state(register=1004, led=LedColor.amber) + read_led_green = read_led_state(register=1004, 
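+        # each LED occupies two bits of register 1004 (see read_led_state in
+        # convert.py): the lo/hi bit pair encodes off / on / blinking_slow / blinking_fast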
led=LedColor.green) + read_led_blue = read_led_state(register=1004, led=LedColor.blue) + read_led_red = read_led_state(register=1004, led=LedColor.red) + + def read_power(status): + # type: (BatteryStatus) -> int + return int(read_current(status) * read_voltage(status)) + + def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int): + # type: (float, float, float, float) -> float + + dv = v_limit - v + di = dv / r_int + p_limit = v_limit * (i + di) + + return p_limit + + def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int): + # type: (float, float, float, float) -> float + + di = i_limit - i + dv = di * r_int + p_limit = i_limit * (v + dv) + + return p_limit + + def calc_max_charge_power(bs): + # type: (BatteryStatus) -> int + + b = bs.battery + v = read_voltage(bs) + i = read_current(bs) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, b.v_max, b.r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, b.v_max, b.r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, b.i_max, b.r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, b.i_max, b.r_int_max), + ] + + p_limit = min(p_limits) # p_limit is normally positive here (signed) + p_limit = max(p_limit, 0) # charge power must not become negative + + return int(p_limit) + + def calc_max_discharge_power(bs): + # type: (BatteryStatus) -> float + + b = bs.battery + v = read_voltage(bs) + i = read_current(bs) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, b.v_min, b.r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, b.v_min, b.r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, -b.i_max, b.r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, -b.i_max, b.r_int_max), + ] + + p_limit = max(p_limits) # p_limit is normally negative here (signed) + p_limit = min(p_limit, 0) # discharge power must not become positive + + return int(-p_limit) # make unsigned! 
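+
+    # Worked example for the two limit formulas above, with hypothetical
+    # readings (not from a real battery): v = 52 V, i = 10 A, v_limit = 54.2 V,
+    # r_int = 0.025 Ohm  =>  dv = 2.2 V, di = 2.2 / 0.025 = 88 A, and
+    # p_limit = 54.2 * (10 + 88) = 5311.6 W.
+    # Each limit is evaluated at both r_int_min and r_int_max because the
+    # internal resistance is only known as a range; the min()/max() in the two
+    # functions above then keep the conservative estimate.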
+ + def read_battery_cold(status): + return \ + read_led_green(status) >= LedState.blinking_slow and \ + read_led_blue(status) >= LedState.blinking_slow + + def read_soc(status): + soc = read_float(register=1053, scale_factor=0.1, offset=0)(status) + + # if the SOC is 100 but EOC is not yet reached, report 99.9 instead of 100 + if soc > 99.9 and not read_eoc_reached(status): + return 99.9 + if soc >= 99.9 and read_eoc_reached(status): + return 100 + + return soc + + def read_eoc_reached(status): + return \ + read_led_green(status) == LedState.on and \ + read_led_amber(status) == LedState.off and \ + read_led_blue(status) == LedState.off + + return [ + BatterySignal('/Dc/0/Voltage', mean, get_value=read_voltage, unit='V'), + BatterySignal('/Dc/0/Current', sum, get_value=read_current, unit='A'), + BatterySignal('/Dc/0/Power', sum, get_value=read_power, unit='W'), + + BatterySignal('/BussVoltage', mean, read_float(register=1001, scale_factor=0.01, offset=0), unit='V'), + BatterySignal('/Soc', mean, read_soc, unit='%'), + BatterySignal('/Dc/0/Temperature', mean, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'), + + BatterySignal('/NumberOfWarningFlags', sum, count_bits(base_register=1005, nb_of_registers=3, nb_of_bits=47)), + BatterySignal('/WarningFlags/TaM1', any, read_bool(base_register=1005, bit=1)), + BatterySignal('/WarningFlags/TbM1', any, read_bool(base_register=1005, bit=4)), + BatterySignal('/WarningFlags/VBm1', any, read_bool(base_register=1005, bit=6)), + BatterySignal('/WarningFlags/VBM1', any, read_bool(base_register=1005, bit=8)), + BatterySignal('/WarningFlags/IDM1', any, read_bool(base_register=1005, bit=10)), + BatterySignal('/WarningFlags/vsM1', any, read_bool(base_register=1005, bit=24)), + BatterySignal('/WarningFlags/iCM1', any, read_bool(base_register=1005, bit=26)), + BatterySignal('/WarningFlags/iDM1', any, read_bool(base_register=1005, bit=28)), + BatterySignal('/WarningFlags/MID1', any, read_bool(base_register=1005, bit=30)), + BatterySignal('/WarningFlags/BLPW', any, read_bool(base_register=1005, bit=32)), + BatterySignal('/WarningFlags/Ah_W', any, read_bool(base_register=1005, bit=35)), + BatterySignal('/WarningFlags/MPMM', any, read_bool(base_register=1005, bit=38)), + BatterySignal('/WarningFlags/TCMM', any, read_bool(base_register=1005, bit=39)), + BatterySignal('/WarningFlags/TCdi', any, read_bool(base_register=1005, bit=40)), + BatterySignal('/WarningFlags/WMTO', any, read_bool(base_register=1005, bit=41)), + BatterySignal('/WarningFlags/bit44', any, read_bool(base_register=1005, bit=44)), + BatterySignal('/WarningFlags/CELL1', any, read_bool(base_register=1005, bit=46)), + BatterySignal('/WarningFlags/bit47WarningDummy', any, read_bool(base_register=1005, bit=47)), + + BatterySignal('/NumberOfAlarmFlags', sum, count_bits(base_register=1009, nb_of_registers=3, nb_of_bits=47)), + BatterySignal('/AlarmFlags/Tam', any, read_bool(base_register=1009, bit=0)), + BatterySignal('/AlarmFlags/TaM2', any, read_bool(base_register=1009, bit=2)), + BatterySignal('/AlarmFlags/Tbm', any, read_bool(base_register=1009, bit=3)), + BatterySignal('/AlarmFlags/TbM2', any, read_bool(base_register=1009, bit=5)), + BatterySignal('/AlarmFlags/VBm2', any, read_bool(base_register=1009, bit=7)), + BatterySignal('/AlarmFlags/IDM2', any, read_bool(base_register=1009, bit=11)), + BatterySignal('/AlarmFlags/ISOB', any, read_bool(base_register=1009, bit=12)), + BatterySignal('/AlarmFlags/MSWE', any, read_bool(base_register=1009, bit=13)), + BatterySignal('/AlarmFlags/FUSE', 
any, read_bool(base_register=1009, bit=14)), + BatterySignal('/AlarmFlags/HTRE', any, read_bool(base_register=1009, bit=15)), + BatterySignal('/AlarmFlags/TCPE', any, read_bool(base_register=1009, bit=16)), + BatterySignal('/AlarmFlags/STRE', any, read_bool(base_register=1009, bit=17)), + BatterySignal('/AlarmFlags/CME', any, read_bool(base_register=1009, bit=18)), + BatterySignal('/AlarmFlags/HWFL', any, read_bool(base_register=1009, bit=19)), + BatterySignal('/AlarmFlags/HWEM', any, read_bool(base_register=1009, bit=20)), + BatterySignal('/AlarmFlags/ThM', any, read_bool(base_register=1009, bit=21)), + BatterySignal('/AlarmFlags/vsm1', any, read_bool(base_register=1009, bit=22)), + BatterySignal('/AlarmFlags/vsm2', any, read_bool(base_register=1009, bit=23)), + BatterySignal('/AlarmFlags/vsM2', any, read_bool(base_register=1009, bit=25)), + BatterySignal('/AlarmFlags/iCM2', any, read_bool(base_register=1009, bit=27)), + BatterySignal('/AlarmFlags/iDM2', any, read_bool(base_register=1009, bit=29)), + BatterySignal('/AlarmFlags/MID2', any, read_bool(base_register=1009, bit=31)), + BatterySignal('/AlarmFlags/CCBF', any, read_bool(base_register=1009, bit=33)), + BatterySignal('/AlarmFlags/AhFL', any, read_bool(base_register=1009, bit=34)), + BatterySignal('/AlarmFlags/TbCM', any, read_bool(base_register=1009, bit=36)), + BatterySignal('/AlarmFlags/BRNF', any, read_bool(base_register=1009, bit=37)), + BatterySignal('/AlarmFlags/HTFS', any, read_bool(base_register=1009, bit=42)), + BatterySignal('/AlarmFlags/DATA', any, read_bool(base_register=1009, bit=43)), + BatterySignal('/AlarmFlags/CELL2', any, read_bool(base_register=1009, bit=45)), + BatterySignal('/AlarmFlags/bit47AlarmDummy', any, read_bool(base_register=1009, bit=47)), + + BatterySignal('/LedStatus/Red', max, read_led_red), + BatterySignal('/LedStatus/Blue', max, read_led_blue), + BatterySignal('/LedStatus/Green', max, read_led_green), + BatterySignal('/LedStatus/Amber', max, read_led_amber), + + BatterySignal('/IoStatus/MainSwitchClosed', any, read_bool(base_register=1013, bit=0)), + BatterySignal('/IoStatus/AlarmOutActive', any, read_bool(base_register=1013, bit=1)), + BatterySignal('/IoStatus/InternalFanActive', any, read_bool(base_register=1013, bit=2)), + BatterySignal('/IoStatus/VoltMeasurementAllowed', any, read_bool(base_register=1013, bit=3)), + BatterySignal('/IoStatus/AuxRelay', any, read_bool(base_register=1013, bit=4)), + BatterySignal('/IoStatus/RemoteState', any, read_bool(base_register=1013, bit=5)), + BatterySignal('/IoStatus/HeaterOn', any, read_bool(base_register=1013, bit=6)), + BatterySignal('/IoStatus/EocReached', min, read_eoc_reached), + BatterySignal('/IoStatus/BatteryCold', any, read_battery_cold), + + # see protocol doc page 7 + BatterySignal('/Info/MaxDischargeCurrent', sum, lambda bs: bs.battery.i_max, unit='A'), + BatterySignal('/Info/MaxChargeCurrent', sum, lambda bs: bs.battery.i_max, unit='A'), + BatterySignal('/Info/MaxChargeVoltage', min, lambda bs: bs.battery.v_max, unit='V'), + BatterySignal('/Info/MinDischargeVoltage', max, lambda bs: bs.battery.v_min, unit='V'), + BatterySignal('/Info/BatteryLowVoltage' , max, lambda bs: bs.battery.v_min-2, unit='V'), + BatterySignal('/Info/NumberOfStrings', sum, lambda bs: bs.battery.n_strings), + + BatterySignal('/Info/MaxChargePower', sum, calc_max_charge_power), + BatterySignal('/Info/MaxDischargePower', sum, calc_max_discharge_power), + + BatterySignal('/FirmwareVersion', comma_separated, lambda bs: bs.battery.firmware_version), + 
BatterySignal('/HardwareVersion', comma_separated, lambda bs: bs.battery.hardware_version), + BatterySignal('/BmsVersion', comma_separated, lambda bs: bs.battery.bms_version) + + ] diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/signals.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/signals.pyc new file mode 100644 index 000000000..4260f83fb Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/signals.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/start.sh b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/start.sh new file mode 100755 index 000000000..83860d3e4 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl-Ivo/start.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +. /opt/victronenergy/serial-starter/run-service.sh + +app="/opt/innovenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py" +args="$tty" +start $args diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/config.cpython-38.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/config.cpython-38.pyc new file mode 100644 index 000000000..54c386653 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/config.cpython-38.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/convert.cpython-38.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/convert.cpython-38.pyc new file mode 100644 index 000000000..758cd0699 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/convert.cpython-38.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/data.cpython-38.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/data.cpython-38.pyc new file mode 100644 index 000000000..586475eb3 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/data.cpython-38.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/dbus-fzsonick-48tl.cpython-38.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/dbus-fzsonick-48tl.cpython-38.pyc new file mode 100644 index 000000000..437a1d771 Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/__pycache__/dbus-fzsonick-48tl.cpython-38.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/config.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/config.py new file mode 100644 index 000000000..b327a25ad --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/config.py @@ -0,0 +1,55 @@ +import serial +import logging + +# dbus configuration + +FIRMWARE_VERSION = 1 # value returned by getValue (getText returns string value reported by battery) +HARDWARE_VERSION = 1 # value returned by getValue (getText returns string value reported by battery) + +CONNECTION = 'Modbus RTU' +PRODUCT_NAME = 'FZS 48TL200' +PRODUCT_ID = 0xB012 # assigned by victron +DEVICE_INSTANCE = 1 +SERVICE_NAME_PREFIX = 'com.victronenergy.battery.' 
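+# the serial device name is appended at runtime by init_dbus, e.g. ttyUSB0
+# gives the D-Bus service name 'com.victronenergy.battery.ttyUSB0'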
+ +#s3 configuration +S3BUCKET = "13-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" +S3KEY = "EXOcca50b894afa583d8d380dd1" +S3SECRET = "7fmdIN1WL8WL9k-20YjLZC5liH2qCwYrGP31Y4dityk" + +# driver configuration + +SOFTWARE_VERSION = '3.0.3' +UPDATE_INTERVAL = 2000 # milliseconds +#LOG_LEVEL = logging.INFO +LOG_LEVEL = logging.DEBUG + +# modbus configuration + +BASE_ADDRESS = 999 +#NO_OF_REGISTERS = 63 +NO_OF_REGISTERS = 64 +MAX_SLAVE_ADDRESS = 10 + + +# RS 485 configuration + +PARITY = serial.PARITY_ODD +TIMEOUT = 0.1 # seconds +BAUD_RATE = 115200 +BYTE_SIZE = 8 +STOP_BITS = 1 +MODE = 'rtu' + + +# battery configuration + +MAX_CHARGE_VOLTAGE = 58 +I_MAX_PER_STRING = 15 +NUM_OF_STRING_PER_BATTERY = 5 +AH_PER_STRING = 40 +V_MAX = 54.2 +R_STRING_MIN = 0.125 +R_STRING_MAX = 0.250 + + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/convert.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/convert.py new file mode 100644 index 000000000..2696f1664 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/convert.py @@ -0,0 +1,119 @@ +from collections import Iterable +from decimal import * + +import config as cfg +from data import LedState, BatteryStatus + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable + + +def read_bool(register, bit): + # type: (int, int) -> Callable[[BatteryStatus], bool] + + def get_value(status): + # type: (BatteryStatus) -> bool + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value & (1 << bit) > 0 + + return get_value + + +def read_float(register, scale_factor=1.0, offset=0.0, places=2): + # type: (int, float, float) -> Callable[[BatteryStatus], float] + + def get_value(status): + # type: (BatteryStatus) -> float + value = status.modbus_data[register - cfg.BASE_ADDRESS] + + if value >= 0x8000: # convert to signed int16 + value -= 0x10000 # fiamm stores their integers signed AND with sign-offset @#%^&! + + result = (value+offset)*scale_factor + return round(result,places) + + return get_value + + +def read_hex_string(register, count): + # type: (int, int) -> Callable[[BatteryStatus], str] + """ + reads count consecutive modbus registers from start_address, + and returns a hex representation of it: + e.g. for count=4: DEAD BEEF DEAD BEEF. 
+ """ + start = register - cfg.BASE_ADDRESS + end = start + count + + def get_value(status): + # type: (BatteryStatus) -> str + return ' '.join(['{0:0>4X}'.format(x) for x in status.modbus_data[start:end]]) + + return get_value + + +def read_led_state(register, led): + # type: (int, int) -> Callable[[BatteryStatus], int] + + read_lo = read_bool(register, led * 2) + read_hi = read_bool(register, led * 2 + 1) + + def get_value(status): + # type: (BatteryStatus) -> int + + lo = read_lo(status) + hi = read_hi(status) + + if hi: + if lo: + return LedState.blinking_fast + else: + return LedState.blinking_slow + else: + if lo: + return LedState.on + else: + return LedState.off + + return get_value + + +def read_bitmap(register): + # type: (int) -> Callable[[BatteryStatus], bitmap] + + def get_value(status): + # type: (BatteryStatus) -> bitmap + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value + + return get_value + + +def append_unit(unit): + # type: (unicode) -> Callable[[unicode], unicode] + + def get_text(v): + # type: (unicode) -> unicode + return "{0}{1}".format(str(v), unit) + + return get_text + + +def mean(numbers): + # type: (Iterable[float] | Iterable[int]) -> float + return float("{:.2f}".format(float(sum(numbers)) / len(numbers))) + +def ssum(numbers): + # type: (Iterable[float] | Iterable[int]) -> float + return float("{:.2f}".format(float(sum(numbers)))) + + +def first(ts): + return next(t for t in ts) + +def return_in_list(ts): + return ts + + diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/data.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/data.py new file mode 100644 index 000000000..05cdd1aa7 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/data.py @@ -0,0 +1,97 @@ +import config as cfg +from collections import Iterable + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable + + +class LedState(object): + """ + from page 6 of the '48TLxxx ModBus Protocol doc' + """ + off = 0 + on = 1 + blinking_slow = 2 + blinking_fast = 3 + + +class LedColor(object): + green = 0 + amber = 1 + blue = 2 + red = 3 + + + +class CsvSignal(object): + def __init__(self, name, get_value, get_text = None): + self.name = name + self.get_value = get_value if callable(get_value) else lambda _: get_value + self.get_text = get_text + + if get_text is None: + self.get_text = "" + +class Signal(object): + + def __init__(self, dbus_path, aggregate, get_value, get_text=None): + # type: (str, Callable[[Iterable[object]],object], Callable[[BatteryStatus],object] | object, Callable[[object],unicode] | object)->None + """ + A Signal holds all information necessary for the handling of a + certain datum (e.g. voltage) published by the battery. + + :param dbus_path: str + object_path on DBus where the datum needs to be published + + :param aggregate: Iterable[object] -> object + function that combines the values of multiple batteries into one. + e.g. 
sum for currents, or mean for voltages
+
+        :param get_value: (BatteryStatus) -> object
+            function to extract the datum from the modbus record,
+            alternatively: a constant
+
+        :param get_text: (object) -> unicode [optional]
+            function to render datum to text, needed by DBus
+            alternatively: a constant
+        """
+
+        self.dbus_path = dbus_path
+        self.aggregate = aggregate
+        self.get_value = get_value if callable(get_value) else lambda _: get_value
+        self.get_text = get_text if callable(get_text) else lambda _: str(get_text)
+
+        # if no 'get_text' provided, fall back to str()
+        if get_text is None:
+            self.get_text = str
+
+
+class Battery(object):
+
+    """ Data record to hold hardware and firmware specs of the battery """
+
+    def __init__(self, slave_address, hardware_version, firmware_version, bms_version, ampere_hours):
+        # type: (int, str, str, str, int) -> None
+        self.slave_address = slave_address
+        self.hardware_version = hardware_version
+        self.firmware_version = firmware_version
+        self.bms_version = bms_version
+        self.ampere_hours = ampere_hours
+
+    def __str__(self):
+        return 'slave address = {0}\nhardware version = {1}\nfirmware version = {2}\nbms version = {3}\nampere hours = {4}'.format(
+            self.slave_address, self.hardware_version, self.firmware_version, self.bms_version, str(self.ampere_hours))
+
+
+class BatteryStatus(object):
+    """
+    record holding the current status of a battery
+    """
+    def __init__(self, battery, modbus_data):
+        # type: (Battery, list[int]) -> None
+
+        self.battery = battery
+        self.modbus_data = modbus_data
diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py
new file mode 100755
index 000000000..aa61b3532
--- /dev/null
+++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py
@@ -0,0 +1,1084 @@
+#!/usr/bin/python3 -u
+# coding=utf-8
+
+import re
+import sys
+import logging
+from gi.repository import GLib
+
+import config as cfg
+import convert as c
+
+from pymodbus.register_read_message import ReadInputRegistersResponse
+from pymodbus.client.sync import ModbusSerialClient as Modbus
+from pymodbus.other_message import ReportSlaveIdRequest
+from pymodbus.exceptions import ModbusException
+from pymodbus.pdu import ExceptionResponse
+
+from dbus.mainloop.glib import DBusGMainLoop
+from data import BatteryStatus, Signal, Battery, LedColor, CsvSignal, LedState
+
+from collections.abc import Iterable
+from os import path
+
+app_dir = path.dirname(path.realpath(__file__))
+sys.path.insert(1, path.join(app_dir, 'ext', 'velib_python'))
+
+from vedbus import VeDbusService as DBus
+
+import time
+import os
+import csv
+
+import requests
+import hmac
+import hashlib
+import base64
+from datetime import datetime
+import io
+import json
+import pika
+
+# zip-comp additions
+import zipfile
+
+def compress_csv_data(csv_data, file_name="data.csv"):
+
+    memory_stream = io.BytesIO()
+
+    # Create a zip archive in the memory buffer
+    with zipfile.ZipFile(memory_stream, 'w', zipfile.ZIP_DEFLATED) as archive:
+        # Add CSV data to the ZIP archive under the given entry name
+        with archive.open(file_name, 'w') as entry_stream:
+            entry_stream.write(csv_data.encode('utf-8'))
+
+    # Get the compressed byte array from the memory buffer
+    compressed_bytes = memory_stream.getvalue()
+
+    # Encode the compressed byte array as a Base64 string
+    base64_string = base64.b64encode(compressed_bytes).decode('utf-8')
+
+    return base64_string
+
+class S3config:
+    def __init__(self):
+        self.bucket = cfg.S3BUCKET
+        self.region = "sos-ch-dk-2"
+        self.provider = "exo.io"
+        self.key = cfg.S3KEY
+        self.secret = cfg.S3SECRET
+        self.content_type = "application/base64; charset=utf-8"
+
+    @property
+    def host(self):
+        return f"{self.bucket}.{self.region}.{self.provider}"
+
+    @property
+    def url(self):
+        return f"https://{self.host}"
+
+    def create_put_request(self, s3_path, data):
+        headers = self._create_request("PUT", s3_path)
+        url = f"{self.url}/{s3_path}"
+        response = requests.put(url, headers=headers, data=data)
+        return response
+
+    def _create_request(self, method, s3_path):
+        date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
+        auth = self._create_authorization(method, self.bucket, s3_path, date, self.key, self.secret, self.content_type)
+        headers = {
+            "Host": self.host,
+            "Date": date,
+            "Authorization": auth,
+            "Content-Type": self.content_type
+        }
+        return headers
+
+    # AWS-v2-style authorization: HMAC-SHA1 over the canonical request string,
+    # base64-encoded and sent as 'AWS <key>:<signature>'
+    @staticmethod
+    def _create_authorization(method, bucket, s3_path, date, s3_key, s3_secret, content_type="", md5_hash=""):
+        payload = f"{method}\n{md5_hash}\n{content_type}\n{date}\n/{bucket.strip('/')}/{s3_path.strip('/')}"
+        signature = base64.b64encode(
+            hmac.new(s3_secret.encode(), payload.encode(), hashlib.sha1).digest()
+        ).decode()
+        return f"AWS {s3_key}:{signature}"
+
+def read_csv_as_string(file_path):
+    """
+    Reads a CSV file from the given path and returns its content as a single string.
+    """
+    try:
+        with open(file_path, 'r', encoding='utf-8') as file:
+            return file.read()
+    except FileNotFoundError:
+        print(f"Error: The file {file_path} does not exist.")
+        return None
+    except IOError as e:
+        print(f"IO error occurred: {str(e)}")
+        return None
+
+CSV_DIR = "/data/csv_files/"
+#CSV_DIR = "csv_files/"
+
+# Define the path to the file containing the installation name
+INSTALLATION_NAME_FILE = '/data/innovenergy/openvpn/installation-name'
+
+
+# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime
+# noinspection PyUnreachableCode
+if False:
+    from typing import Callable
+
+def interpret_limb_bitmap(bitmap_value):
+    # The bit for string 1 also monitors all 5 strings:
+    # 0000 0000 means all 5 strings activated, 0000 0001 means string 1 disabled.
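+    # e.g. bitmap_value == 0b01001 would mark strings 1 and 4 as disabled: the
+    # function returns 2, leaving 5 - 2 = 3 of the strings active.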
+ string1_disabled = int((bitmap_value & 0b00001) != 0) + string2_disabled = int((bitmap_value & 0b00010) != 0) + string3_disabled = int((bitmap_value & 0b00100) != 0) + string4_disabled = int((bitmap_value & 0b01000) != 0) + string5_disabled = int((bitmap_value & 0b10000) != 0) + n_limb_strings = string1_disabled+string2_disabled+string3_disabled+string4_disabled+string5_disabled + return n_limb_strings + +def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int): + # type: (float, float, float, float) -> float + dv = v_limit - v + di = dv / r_int + p_limit = v_limit * (i + di) + return p_limit + +def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int): + # type: (float, float, float, float) -> float + di = i_limit - i + dv = di * r_int + p_limit = i_limit * (v + dv) + return p_limit + +def read_switch_closed(status): + value = c.read_bool(register=1013, bit=0)(status) + if value: + return False + return True + +def read_alarm_out_active(status): + value = c.read_bool(register=1013, bit=1)(status) + if value: + return False + return True + +def read_aux_relay(status): + value = c.read_bool(register=1013, bit=4)(status) + if value: + return False + return True + +def hex_string_to_ascii(hex_string): + # Ensure the hex_string is correctly formatted without spaces + hex_string = hex_string.replace(" ", "") + # Convert every two characters (a byte) in the hex string to ASCII + ascii_string = ''.join([chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2)]) + return ascii_string + +battery_status_reader = c.read_hex_string(1060,2) + +def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + +def return_led_state(status, color): + led_state = c.read_led_state(register=1004, led=color)(status) + if led_state == LedState.blinking_fast or led_state == LedState.blinking_slow: + return "Blinking" + elif led_state == LedState.on: + return "On" + elif led_state == LedState.off: + return "Off" + return "Unknown" + +def return_led_state_blue(status): + return return_led_state(status, LedColor.blue) + +def return_led_state_red(status): + return return_led_state(status, LedColor.red) + +def return_led_state_green(status): + return return_led_state(status, LedColor.green) + +def return_led_state_amber(status): + return return_led_state(status, LedColor.amber) + +def read_serial_number(status): + serial_regs = [1055, 1056, 1057, 1058] + serial_parts = [] + for reg in serial_regs: + # reading each register as a single hex value + hex_value_fun = c.read_hex_string(reg, 1) + hex_value = hex_value_fun(status) + # append without spaces and leading zeros stripped if any + serial_parts.append(hex_value.replace(' ', '')) + # concatenate all parts to form the full serial number + serial_number = ''.join(serial_parts).rstrip('0') + return serial_number + +def time_since_toc_in_time_format(status): + time_in_minutes = c.read_float(register=1052)(status) + # Convert minutes to total seconds + total_seconds = int(time_in_minutes * 60) + # Calculate days, hours, minutes, and seconds + days = total_seconds // (24 * 3600) + total_seconds = total_seconds % (24 * 3600) + hours = total_seconds // 3600 + total_seconds %= 3600 + minutes = total_seconds // 60 + seconds = total_seconds % 60 + # Format the string to show days.hours:minutes:seconds + return f"{days}.{hours:02}:{minutes:02}:{seconds:02}" + +def create_csv_signals(firmware_version): + read_voltage = c.read_float(register=999, 
scale_factor=0.01, offset=0, places=2) + read_current = c.read_float(register=1000, scale_factor=0.01, offset=-10000, places=2) + read_limb_bitmap = c.read_bitmap(1059) + + def read_power(status): + return int(read_current(status) * read_voltage(status)) + + def string1_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00001) != 0) + + def string2_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00010) != 0) + + def string3_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00100) != 0) + + def string4_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b01000) != 0) + + def string5_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b10000) != 0) + + def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap(status)) + + def calc_max_charge_power(status): + # type: (BatteryStatus) -> int + n_strings = cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + v = read_voltage(status) + i = read_current(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) # p_limit is normally positive here (signed) + p_limit = max(p_limit, 0) # charge power must not become negative + + return int(p_limit) + + def calc_max_discharge_power(status): + n_strings = cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status) + max_discharge_current = n_strings*cfg.I_MAX_PER_STRING + return int(max_discharge_current*read_voltage(status)) + + total_current = c.read_float(register=1062, scale_factor=0.01, offset=-10000, places=1) + + def read_total_current(status): + return total_current(status) + + def read_heating_current(status): + return total_current(status) - read_current(status) + + def read_heating_power(status): + return read_voltage(status) * read_heating_current(status) + + soc_ah = c.read_float(register=1002, scale_factor=0.1, offset=-10000, places=1) + + def read_soc_ah(status): + return soc_ah(status) + + return [ + CsvSignal('/Battery/Devices/FwVersion', firmware_version), + CsvSignal('/Battery/Devices/Dc/Power', read_power, 'W'), + CsvSignal('/Battery/Devices/Dc/Voltage', read_voltage, 'V'), + CsvSignal('/Battery/Devices/Soc', c.read_float(register=1053, scale_factor=0.1, offset=0, places=1), '%'), + CsvSignal('/Battery/Devices/Temperatures/Cells/Average', c.read_float(register=1003, scale_factor=0.1, offset=-400, places=1), 'C'), + CsvSignal('/Battery/Devices/Dc/Current', read_current, 'A'), + CsvSignal('/Battery/Devices/BusCurrent', read_total_current, 'A'), + CsvSignal('/Battery/Devices/CellsCurrent', read_current, 'A'), + CsvSignal('/Battery/Devices/HeatingCurrent', read_heating_current, 'A'), + CsvSignal('/Battery/Devices/HeatingPower', read_heating_power, 'W'), + CsvSignal('/Battery/Devices/SOCAh', read_soc_ah), + CsvSignal('/Battery/Devices/Leds/Blue', return_led_state_blue), + CsvSignal('/Battery/Devices/Leds/Red', return_led_state_red), + CsvSignal('/Battery/Devices/Leds/Green', return_led_state_green), + CsvSignal('/Battery/Devices/Leds/Amber', 
return_led_state_amber), + CsvSignal('/Battery/Devices/BatteryStrings/String1Active', string1_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String2Active', string2_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String3Active', string3_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String4Active', string4_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String5Active', string5_disabled), + CsvSignal('/Battery/Devices/IoStatus/ConnectedToDcBus', read_switch_closed), + CsvSignal('/Battery/Devices/IoStatus/AlarmOutActive', read_alarm_out_active), + CsvSignal('/Battery/Devices/IoStatus/InternalFanActive', c.read_bool(register=1013, bit=2)), + CsvSignal('/Battery/Devices/IoStatus/VoltMeasurementAllowed', c.read_bool(register=1013, bit=3)), + CsvSignal('/Battery/Devices/IoStatus/AuxRelayBus', read_aux_relay), + CsvSignal('/Battery/Devices/IoStatus/RemoteStateActive', c.read_bool(register=1013, bit=5)), + CsvSignal('/Battery/Devices/IoStatus/RiscActive', c.read_bool(register=1013, bit=6)), + CsvSignal('/Battery/Devices/Eoc', read_eoc_reached), + CsvSignal('/Battery/Devices/SerialNumber', read_serial_number), + CsvSignal('/Battery/Devices/TimeSinceTOC', time_since_toc_in_time_format), + CsvSignal('/Battery/Devices/MaxChargePower', calc_max_charge_power), + CsvSignal('/Battery/Devices/MaxDischargePower', calc_max_discharge_power), + ] + +def init_signals(hardware_version, firmware_version, n_batteries): + # type: (str,str,int) -> Iterable[Signal] + """ + A Signal holds all information necessary for the handling of a + certain datum (e.g. voltage) published by the battery. + + Signal(dbus_path, aggregate, get_value, get_text = str) + + dbus_path: str + object_path on DBus where the datum needs to be published + + aggregate: Iterable[object] -> object + function that combines the values of multiple batteries into one. + e.g. 
sum for currents, or mean for voltages + + get_value: (BatteryStatus) -> object [optional] + function to extract the datum from the modbus record, + alternatively: a constant + + get_text: (object) -> unicode [optional] + function to render datum to text, needed by DBus + alternatively: a constant + + The conversion functions use the same parameters (e.g scale_factor, offset) + as described in the document 'T48TLxxx ModBus Protocol Rev.7.1' which can + be found in the /doc folder + """ + + product_id_hex = '0x{0:04x}'.format(cfg.PRODUCT_ID) + + read_voltage = c.read_float(register=999, scale_factor=0.01, offset=0, places=2) + read_current = c.read_float(register=1000, scale_factor=0.01, offset=-10000, places=2) + read_limb_bitmap = c.read_bitmap(1059) + + def read_power(status): + return int(read_current(status) * read_voltage(status)) + + def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap(status)) + + def max_discharge_current(status): + return (cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status))*cfg.I_MAX_PER_STRING + + def max_charge_current(status): + return status.battery.ampere_hours/2 + + def calc_max_charge_power(status): + # type: (BatteryStatus) -> int + n_strings = cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + v = read_voltage(status) + i = read_current(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) # p_limit is normally positive here (signed) + p_limit = max(p_limit, 0) # charge power must not become negative + + return int(p_limit) + + product_name = cfg.PRODUCT_NAME + if n_batteries > 1: + product_name = cfg.PRODUCT_NAME + ' x' + str(n_batteries) + + return [ + # Node Red related dbus paths + Signal('/TimeToTOCRequest', max, c.read_float(register=1052)), + Signal('/EOCReached', c.return_in_list, read_eoc_reached), + Signal('/NumOfLimbStrings', c.return_in_list, get_value=limp_strings_value), + Signal('/NumOfBatteries', max, get_value=n_batteries), + Signal('/Dc/0/Voltage', c.mean, get_value=read_voltage, get_text=c.append_unit('V')), + Signal('/Dc/0/Current', c.ssum, get_value=read_current, get_text=c.append_unit('A')), + Signal('/Dc/0/Power', c.ssum, get_value=read_power, get_text=c.append_unit('W')), + Signal('/BussVoltage', c.mean, c.read_float(register=1001, scale_factor=0.01, offset=0, places=2), c.append_unit('V')), + Signal('/Soc', min, c.read_float(register=1053, scale_factor=0.1, offset=0, places=1), c.append_unit('%')), + Signal('/LowestSoc', min, c.read_float(register=1053, scale_factor=0.1, offset=0, places=1), c.append_unit('%')), + Signal('/Dc/0/Temperature', c.mean, c.read_float(register=1003, scale_factor=0.1, offset=-400, places=1), c.append_unit(u'°C')), + Signal('/Dc/0/LowestTemperature', min, c.read_float(register=1003, scale_factor=0.1, offset=-400, places=1), c.append_unit(u'°C')), + # Charge/Discharge current, voltage and power + Signal('/Info/MaxDischargeCurrent', c.ssum, max_discharge_current,c.append_unit('A')), + Signal('/Info/MaxChargeCurrent', c.ssum, max_charge_current, c.append_unit('A')), + Signal('/Info/MaxChargeVoltage', min, cfg.MAX_CHARGE_VOLTAGE, 
c.append_unit('V')), + Signal('/Info/MaxChargePower', c.ssum, calc_max_charge_power), + # Victron mandatory dbus paths + Signal('/Mgmt/ProcessName', c.first, __file__), + Signal('/Mgmt/ProcessVersion', c.first, cfg.SOFTWARE_VERSION), + Signal('/Mgmt/Connection', c.first, cfg.CONNECTION), + Signal('/DeviceInstance', c.first, cfg.DEVICE_INSTANCE), + Signal('/ProductName', c.first, product_name), + Signal('/ProductId', c.first, cfg.PRODUCT_ID, product_id_hex), + Signal('/Connected', c.first, 1), + Signal('/FirmwareVersion', c.return_in_list, firmware_version), + Signal('/HardwareVersion', c.first, cfg.HARDWARE_VERSION, hardware_version), + # Diagnostics + Signal('/Diagnostics/BmsVersion', c.first, lambda s: s.battery.bms_version), + # Warnings + Signal('/WarningFlags/TaM1', c.return_in_list, c.read_bool(register=1005, bit=1)), + Signal('/WarningFlags/TbM1', c.return_in_list, c.read_bool(register=1005, bit=4)), + Signal('/WarningFlags/VBm1', c.return_in_list, c.read_bool(register=1005, bit=6)), + Signal('/WarningFlags/VBM1', c.return_in_list, c.read_bool(register=1005, bit=8)), + Signal('/WarningFlags/IDM1', c.return_in_list, c.read_bool(register=1005, bit=10)), + Signal('/WarningFlags/vsm1', c.return_in_list, c.read_bool(register=1005, bit=22)), + Signal('/WarningFlags/vsM1', c.return_in_list, c.read_bool(register=1005, bit=24)), + Signal('/WarningFlags/iCM1', c.return_in_list, c.read_bool(register=1005, bit=26)), + Signal('/WarningFlags/iDM1', c.return_in_list, c.read_bool(register=1005, bit=28)), + Signal('/WarningFlags/MID1', c.return_in_list, c.read_bool(register=1005, bit=30)), + Signal('/WarningFlags/BLPW', c.return_in_list, c.read_bool(register=1005, bit=32)), + Signal('/WarningFlags/CCBF', c.return_in_list, c.read_bool(register=1005, bit=33)), + Signal('/WarningFlags/Ah_W', c.return_in_list, c.read_bool(register=1005, bit=35)), + Signal('/WarningFlags/MPMM', c.return_in_list, c.read_bool(register=1005, bit=38)), + Signal('/WarningFlags/TCdi', c.return_in_list, c.read_bool(register=1005, bit=40)), + Signal('/WarningFlags/LMPW', c.return_in_list, c.read_bool(register=1005, bit=44)), + Signal('/WarningFlags/TOCW', c.return_in_list, c.read_bool(register=1005, bit=47)), + Signal('/WarningFlags/BUSL', c.return_in_list, c.read_bool(register=1005, bit=49)), + # Alarms + Signal('/AlarmFlags/Tam', c.return_in_list, c.read_bool(register=1005, bit=0)), + Signal('/AlarmFlags/TaM2', c.return_in_list, c.read_bool(register=1005, bit=2)), + Signal('/AlarmFlags/Tbm', c.return_in_list, c.read_bool(register=1005, bit=3)), + Signal('/AlarmFlags/TbM2', c.return_in_list, c.read_bool(register=1005, bit=5)), + Signal('/AlarmFlags/VBm2', c.return_in_list, c.read_bool(register=1005, bit=7)), + Signal('/AlarmFlags/VBM2', c.return_in_list, c.read_bool(register=1005, bit=9)), + Signal('/AlarmFlags/IDM2', c.return_in_list, c.read_bool(register=1005, bit=11)), + Signal('/AlarmFlags/ISOB', c.return_in_list, c.read_bool(register=1005, bit=12)), + Signal('/AlarmFlags/MSWE', c.return_in_list, c.read_bool(register=1005, bit=13)), + Signal('/AlarmFlags/FUSE', c.return_in_list, c.read_bool(register=1005, bit=14)), + Signal('/AlarmFlags/HTRE', c.return_in_list, c.read_bool(register=1005, bit=15)), + Signal('/AlarmFlags/TCPE', c.return_in_list, c.read_bool(register=1005, bit=16)), + Signal('/AlarmFlags/STRE', c.return_in_list, c.read_bool(register=1005, bit=17)), + Signal('/AlarmFlags/CME', c.return_in_list, c.read_bool(register=1005, bit=18)), + Signal('/AlarmFlags/HWFL', c.return_in_list, c.read_bool(register=1005, 
bit=19)), + Signal('/AlarmFlags/HWEM', c.return_in_list, c.read_bool(register=1005, bit=20)), + Signal('/AlarmFlags/ThM', c.return_in_list, c.read_bool(register=1005, bit=21)), + Signal('/AlarmFlags/vsm2', c.return_in_list, c.read_bool(register=1005, bit=23)), + Signal('/AlarmFlags/vsM2', c.return_in_list, c.read_bool(register=1005, bit=25)), + Signal('/AlarmFlags/iCM2', c.return_in_list, c.read_bool(register=1005, bit=27)), + Signal('/AlarmFlags/iDM2', c.return_in_list, c.read_bool(register=1005, bit=29)), + Signal('/AlarmFlags/MID2', c.return_in_list, c.read_bool(register=1005, bit=31)), + Signal('/AlarmFlags/HTFS', c.return_in_list, c.read_bool(register=1005, bit=42)), + Signal('/AlarmFlags/DATA', c.return_in_list, c.read_bool(register=1005, bit=43)), + Signal('/AlarmFlags/LMPA', c.return_in_list, c.read_bool(register=1005, bit=45)), + Signal('/AlarmFlags/HEBT', c.return_in_list, c.read_bool(register=1005, bit=46)), + Signal('/AlarmFlags/CURM', c.return_in_list, c.read_bool(register=1005, bit=48)), + # LedStatus + Signal('/Diagnostics/LedStatus/Red', c.first, c.read_led_state(register=1004, led=LedColor.red)), + Signal('/Diagnostics/LedStatus/Blue', c.first, c.read_led_state(register=1004, led=LedColor.blue)), + Signal('/Diagnostics/LedStatus/Green', c.first, c.read_led_state(register=1004, led=LedColor.green)), + Signal('/Diagnostics/LedStatus/Amber', c.first, c.read_led_state(register=1004, led=LedColor.amber)), + # IO Status + Signal('/Diagnostics/IoStatus/MainSwitchClosed', c.return_in_list, read_switch_closed), + Signal('/Diagnostics/IoStatus/AlarmOutActive', c.return_in_list, read_alarm_out_active), + Signal('/Diagnostics/IoStatus/InternalFanActive', c.return_in_list, c.read_bool(register=1013, bit=2)), + Signal('/Diagnostics/IoStatus/VoltMeasurementAllowed', c.return_in_list, c.read_bool(register=1013, bit=3)), + Signal('/Diagnostics/IoStatus/AuxRelay', c.return_in_list, read_aux_relay), + Signal('/Diagnostics/IoStatus/RemoteState', c.return_in_list, c.read_bool(register=1013, bit=5)), + Signal('/Diagnostics/IoStatus/RiscOn', c.return_in_list, c.read_bool(register=1013, bit=6)), + ] + +def init_modbus(tty): + # type: (str) -> Modbus + logging.debug('initializing Modbus') + return Modbus( + port='/dev/' + tty, + method=cfg.MODE, + baudrate=cfg.BAUD_RATE, + stopbits=cfg.STOP_BITS, + bytesize=cfg.BYTE_SIZE, + timeout=cfg.TIMEOUT, + parity=cfg.PARITY) + +def init_dbus(tty, signals): + # type: (str, Iterable[Signal]) -> DBus + logging.debug('initializing DBus service') + dbus = DBus(servicename=cfg.SERVICE_NAME_PREFIX + tty) + logging.debug('initializing DBus paths') + for signal in signals: + init_dbus_path(dbus, signal) + return dbus + +# noinspection PyBroadException +def try_get_value(sig): + # type: (Signal) -> object + try: + return sig.get_value(None) + except: + return None + +def init_dbus_path(dbus, sig): + # type: (DBus, Signal) -> () + dbus.add_path( + sig.dbus_path, + try_get_value(sig), + gettextcallback=lambda _, v: sig.get_text(v)) + +def init_main_loop(): + # type: () -> DBusGMainLoop + logging.debug('initializing DBusGMainLoop Loop') + DBusGMainLoop(set_as_default=True) + return GLib.MainLoop() + +def report_slave_id(modbus, slave_address): + # type: (Modbus, int) -> str + slave = str(slave_address) + logging.debug('requesting slave id from node ' + slave) + try: + modbus.connect() + request = ReportSlaveIdRequest(unit=slave_address) + response = modbus.execute(request) + if response is ExceptionResponse or issubclass(type(response), ModbusException): + raise 
+def report_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> str
+    slave = str(slave_address)
+    logging.debug('requesting slave id from node ' + slave)
+    try:
+        modbus.connect()
+        request = ReportSlaveIdRequest(unit=slave_address)
+        response = modbus.execute(request)
+        if isinstance(response, ExceptionResponse) or issubclass(type(response), ModbusException):
+            raise Exception('failed to get slave id from ' + slave + ' : ' + str(response))
+        return response.identifier
+    finally:
+        modbus.close()
+
+def identify_battery(modbus, slave_address):
+    # type: (Modbus, int) -> Battery
+    logging.info('identifying battery...')
+    hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address)
+    firmware_version = read_firmware_version(modbus, slave_address)
+    specs = Battery(
+        slave_address=slave_address,
+        hardware_version=hardware_version,
+        firmware_version=firmware_version,
+        bms_version=bms_version,
+        ampere_hours=ampere_hours)
+    logging.info('battery identified:\n{0}'.format(str(specs)))
+    return specs
+
+def identify_batteries(modbus):
+    # type: (Modbus) -> list[Battery]
+    def _identify_batteries():
+        address_range = range(1, cfg.MAX_SLAVE_ADDRESS + 1)
+        for slave_address in address_range:
+            try:
+                yield identify_battery(modbus, slave_address)
+            except Exception as e:
+                logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e)))
+    return list(_identify_batteries())  # force that lazy iterable!
+
+def parse_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> (str, str, int)
+    slave_id = report_slave_id(modbus, slave_address)
+    sid = re.sub(b'[^\x20-\x7E]', b'', slave_id)  # remove weird special chars
+    match = re.match(r'(?P<hw>48TL(?P<ah>\d+)) *(?P<bms>.*)', sid.decode('ascii'))
+    if match is None:
+        raise Exception('no known battery found')
+    return match.group('hw'), match.group('bms'), int(match.group('ah'))
+
+def read_firmware_version(modbus, slave_address):
+    # type: (Modbus, int) -> str
+    logging.debug('reading firmware version')
+    try:
+        modbus.connect()
+        response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
+        register = response.registers[0]
+        return '{0:0>4X}'.format(register)
+    finally:
+        modbus.close()  # close in any case
+
+def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS):
+    # type: (Modbus, int, int, int) -> ReadInputRegistersResponse
+    logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))
+    return modbus.read_input_registers(
+        address=base_address,
+        count=count,
+        unit=slave_address)
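+# --- illustrative sketch (editor addition, not called by the driver) ----------
+# How parse_slave_id() above decomposes a slave id string. The sample id is
+# hypothetical; only the '48TL<Ah>' prefix is prescribed by the regex, the
+# trailing BMS version string is free-form.
+def _example_parse_slave_id_string():
+    sample = '48TL200 1.5'  # hypothetical: hardware '48TL200', bms version '1.5'
+    match = re.match(r'(?P<hw>48TL(?P<ah>\d+)) *(?P<bms>.*)', sample)
+    assert match.group('hw') == '48TL200'
+    assert match.group('bms') == '1.5'
+    assert int(match.group('ah')) == 200
+# ------------------------------------------------------------------------------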
+def read_battery_status(modbus, battery):
+    # type: (Modbus, Battery) -> BatteryStatus
+    """
+    Read the modbus registers containing the battery's status info.
+    """
+    logging.debug('reading battery status')
+    try:
+        modbus.connect()
+        data = read_modbus_registers(modbus, battery.slave_address)
+        return BatteryStatus(battery, data.registers)
+    finally:
+        modbus.close()  # close in any case
+
+def publish_values(dbus, signals, statuses):
+    # type: (DBus, Iterable[Signal], Iterable[BatteryStatus]) -> ()
+    for s in signals:
+        values = [s.get_value(status) for status in statuses]
+        with dbus as srv:
+            srv[s.dbus_path] = s.aggregate(values)
+
+previous_warnings = {}
+previous_alarms = {}
+
+class MessageType:
+    ALARM_OR_WARNING = "AlarmOrWarning"
+    HEARTBEAT = "Heartbeat"
+
+class AlarmOrWarning:
+    def __init__(self, description, created_by):
+        self.date = datetime.now().strftime('%Y-%m-%d')
+        self.time = datetime.now().strftime('%H:%M:%S')
+        self.description = description
+        self.created_by = created_by
+
+    def to_dict(self):
+        return {
+            "Date": self.date,
+            "Time": self.time,
+            "Description": self.description,
+            "CreatedBy": self.created_by
+        }
+
+def SubscribeToQueue():
+    channel = None  # stays None if the connection attempt fails
+    try:
+        connection = pika.BlockingConnection(pika.ConnectionParameters(
+            host="10.2.0.11",
+            port=5672,
+            virtual_host="/",
+            credentials=pika.PlainCredentials("producer", "b187ceaddb54d5485063ddc1d41af66f")))
+        channel = connection.channel()
+        channel.queue_declare(queue="statusQueue", durable=True)
+        print("Subscribed to queue")
+    except Exception as ex:
+        print("An error occurred while connecting to the RabbitMQ queue:", ex)
+    return channel
+
+is_first_update = True
+first_subscribe = False
+prev_status = 0
+subscribed_to_queue_first_time = False
+channel = SubscribeToQueue()
+heartbeat_interval = 0
+# Create an S3config instance
+s3_config = S3config()
+INSTALLATION_ID = int(s3_config.bucket.split('-')[0])
+PRODUCT_ID = 1
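+# --- illustrative sketch (editor addition, not called by the driver) ----------
+# The overall Status field published to RabbitMQ encodes the worst condition
+# across all nodes: 2 if any alarm is active, 1 if only warnings are active,
+# 0 otherwise. update_state_from_dictionaries() below implements this rule;
+# the helper here just restates it in isolation.
+def _example_overall_status(current_warnings, current_alarms):
+    # type: (dict, dict) -> int
+    if any(current_alarms.values()):
+        return 2
+    if any(current_warnings.values()):
+        return 1
+    return 0
+# ------------------------------------------------------------------------------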
+def update_state_from_dictionaries(current_warnings, current_alarms, node_numbers):
+    global previous_warnings, previous_alarms, INSTALLATION_ID, PRODUCT_ID, is_first_update, first_subscribe, channel, prev_status, heartbeat_interval, subscribed_to_queue_first_time
+
+    heartbeat_interval += 1
+
+    if is_first_update:
+        changed_warnings = current_warnings
+        changed_alarms = current_alarms
+        is_first_update = False
+    else:
+        changed_alarms = {}
+        changed_warnings = {}
+        # flag every warning and alarm whose value differs from the previous cycle
+        prev_alarm_value_list = list(previous_alarms.values())
+        alarm_keys = list(previous_alarms.keys())
+
+        for i, alarm in enumerate(current_alarms.values()):
+            changed_alarms[alarm_keys[i]] = alarm != prev_alarm_value_list[i]
+
+        prev_warning_value_list = list(previous_warnings.values())
+        warning_keys = list(previous_warnings.keys())
+
+        for i, warning in enumerate(current_warnings.values()):
+            changed_warnings[warning_keys[i]] = warning != prev_warning_value_list[i]
+
+    status_message = {
+        "InstallationId": INSTALLATION_ID,
+        "Product": PRODUCT_ID,
+        "Status": 0,
+        "Type": 1,
+        "Warnings": [],
+        "Alarms": []
+    }
+
+    # count the active alarms and warnings per node;
+    # match the node's own path segment so e.g. node 1 does not pick up flag names ending in '1'
+    alarms_number_list = []
+    for node_number in node_numbers:
+        cnt = 0
+        for alarm_name, alarm_value in current_alarms.items():
+            if '/Devices/{0}/'.format(node_number) in alarm_name and alarm_value:
+                cnt += 1
+        alarms_number_list.append(cnt)
+
+    warnings_number_list = []
+    for node_number in node_numbers:
+        cnt = 0
+        for warning_name, warning_value in current_warnings.items():
+            if '/Devices/{0}/'.format(node_number) in warning_name and warning_value:
+                cnt += 1
+        warnings_number_list.append(cnt)
+
+    # report every alarm or warning that has just become active
+    if any(changed_alarms.values()):
+        for i, changed_alarm in enumerate(changed_alarms.values()):
+            if changed_alarm and list(current_alarms.values())[i]:
+                status_message["Alarms"].append(AlarmOrWarning(list(current_alarms.keys())[i], "System").to_dict())
+
+    if any(changed_warnings.values()):
+        for i, changed_warning in enumerate(changed_warnings.values()):
+            if changed_warning and list(current_warnings.values())[i]:
+                status_message["Warnings"].append(AlarmOrWarning(list(current_warnings.keys())[i], "System").to_dict())
+
+    # overall status: 2 = alarm, 1 = warning, 0 = ok
+    if any(current_alarms.values()):
+        status_message["Status"] = 2
+    elif any(current_warnings.values()):
+        status_message["Status"] = 1
+    else:
+        status_message["Status"] = 0
+
+    if status_message["Status"] != prev_status or len(status_message["Warnings"]) > 0 or len(status_message["Alarms"]) > 0:
+        prev_status = status_message["Status"]
+        status_message["Type"] = 0
+        status_message = json.dumps(status_message)
+        channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
+        print(status_message)
+        print("Message sent successfully")
+    elif heartbeat_interval >= 15 or not subscribed_to_queue_first_time:
+        print("Sending heartbeat message to RabbitMQ")
+        heartbeat_interval = 0
+        subscribed_to_queue_first_time = True
+        status_message = json.dumps(status_message)
+        channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
+
+    previous_warnings = current_warnings.copy()
+    previous_alarms = current_alarms.copy()
+
+    return status_message, alarms_number_list, warnings_number_list
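+# --- illustrative sketch (editor addition, not called by the driver) ----------
+# The change detection above walks the previous/current dicts by index. Since
+# both dicts are rebuilt with the same keys in the same order every cycle (see
+# update() below), it is equivalent to this dict comprehension:
+def _example_changed_flags(previous, current):
+    # type: (dict, dict) -> dict
+    return {key: current[key] != previous[key] for key in current}
+# ------------------------------------------------------------------------------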
+def read_warning_and_alarm_flags():
+    return [
+        # Warnings
+        CsvSignal('/Battery/Devices/WarningFlags/TaM1', c.read_bool(register=1005, bit=1)),
+        CsvSignal('/Battery/Devices/WarningFlags/TbM1', c.read_bool(register=1005, bit=4)),
+        CsvSignal('/Battery/Devices/WarningFlags/VBm1', c.read_bool(register=1005, bit=6)),
+        CsvSignal('/Battery/Devices/WarningFlags/VBM1', c.read_bool(register=1005, bit=8)),
+        CsvSignal('/Battery/Devices/WarningFlags/IDM1', c.read_bool(register=1005, bit=10)),
+        CsvSignal('/Battery/Devices/WarningFlags/vsm1', c.read_bool(register=1005, bit=22)),
+        CsvSignal('/Battery/Devices/WarningFlags/vsM1', c.read_bool(register=1005, bit=24)),
+        CsvSignal('/Battery/Devices/WarningFlags/iCM1', c.read_bool(register=1005, bit=26)),
+        CsvSignal('/Battery/Devices/WarningFlags/iDM1', c.read_bool(register=1005, bit=28)),
+        CsvSignal('/Battery/Devices/WarningFlags/MID1', c.read_bool(register=1005, bit=30)),
+        CsvSignal('/Battery/Devices/WarningFlags/BLPW', c.read_bool(register=1005, bit=32)),
+        CsvSignal('/Battery/Devices/WarningFlags/CCBF', c.read_bool(register=1005, bit=33)),
+        CsvSignal('/Battery/Devices/WarningFlags/Ah_W', c.read_bool(register=1005, bit=35)),
+        CsvSignal('/Battery/Devices/WarningFlags/MPMM', c.read_bool(register=1005, bit=38)),
+        CsvSignal('/Battery/Devices/WarningFlags/TCdi', c.read_bool(register=1005, bit=40)),
+        CsvSignal('/Battery/Devices/WarningFlags/LMPW', c.read_bool(register=1005, bit=44)),
+        CsvSignal('/Battery/Devices/WarningFlags/TOCW', c.read_bool(register=1005, bit=47)),
+        CsvSignal('/Battery/Devices/WarningFlags/BUSL', c.read_bool(register=1005, bit=49)),
+    ], [
+        # Alarms
+        CsvSignal('/Battery/Devices/AlarmFlags/Tam', c.read_bool(register=1005, bit=0)),
+        CsvSignal('/Battery/Devices/AlarmFlags/TaM2', c.read_bool(register=1005, bit=2)),
+        CsvSignal('/Battery/Devices/AlarmFlags/Tbm', c.read_bool(register=1005, bit=3)),
+        CsvSignal('/Battery/Devices/AlarmFlags/TbM2', c.read_bool(register=1005, bit=5)),
+        CsvSignal('/Battery/Devices/AlarmFlags/VBm2', c.read_bool(register=1005, bit=7)),
+        CsvSignal('/Battery/Devices/AlarmFlags/VBM2', c.read_bool(register=1005, bit=9)),
+        CsvSignal('/Battery/Devices/AlarmFlags/IDM2', c.read_bool(register=1005, bit=11)),
+        CsvSignal('/Battery/Devices/AlarmFlags/ISOB', c.read_bool(register=1005, bit=12)),
+        CsvSignal('/Battery/Devices/AlarmFlags/MSWE', c.read_bool(register=1005, bit=13)),
+        CsvSignal('/Battery/Devices/AlarmFlags/FUSE', c.read_bool(register=1005, bit=14)),
+        CsvSignal('/Battery/Devices/AlarmFlags/HTRE', c.read_bool(register=1005, bit=15)),
+        CsvSignal('/Battery/Devices/AlarmFlags/TCPE', c.read_bool(register=1005, bit=16)),
+        CsvSignal('/Battery/Devices/AlarmFlags/STRE', c.read_bool(register=1005, bit=17)),
+        CsvSignal('/Battery/Devices/AlarmFlags/CME', c.read_bool(register=1005, bit=18)),
+        CsvSignal('/Battery/Devices/AlarmFlags/HWFL', c.read_bool(register=1005, bit=19)),
+        CsvSignal('/Battery/Devices/AlarmFlags/HWEM', c.read_bool(register=1005, bit=20)),
+        CsvSignal('/Battery/Devices/AlarmFlags/ThM', c.read_bool(register=1005, bit=21)),
+        CsvSignal('/Battery/Devices/AlarmFlags/vsm2', c.read_bool(register=1005, bit=23)),
+        CsvSignal('/Battery/Devices/AlarmFlags/vsM2', c.read_bool(register=1005, bit=25)),
+        CsvSignal('/Battery/Devices/AlarmFlags/iCM2', c.read_bool(register=1005, bit=27)),
+        CsvSignal('/Battery/Devices/AlarmFlags/iDM2', c.read_bool(register=1005, bit=29)),
+        CsvSignal('/Battery/Devices/AlarmFlags/MID2', c.read_bool(register=1005, bit=31)),
+        CsvSignal('/Battery/Devices/AlarmFlags/HTFS', c.read_bool(register=1005, bit=42)),
+        CsvSignal('/Battery/Devices/AlarmFlags/DATA', c.read_bool(register=1005, bit=43)),
+        CsvSignal('/Battery/Devices/AlarmFlags/LMPA', c.read_bool(register=1005, bit=45)),
+        CsvSignal('/Battery/Devices/AlarmFlags/HEBT', c.read_bool(register=1005, bit=46)),
+        CsvSignal('/Battery/Devices/AlarmFlags/CURM', c.read_bool(register=1005, bit=48)),
+    ]
+
+import random  # only used by the commented-out update_for_testing() below
+
+'''def update_for_testing(modbus, batteries, dbus, signals, csv_signals):
+    global ALLOW
+    logging.debug('starting testing update cycle')
+    warning_signals, alarm_signals = read_warning_and_alarm_flags()
+    current_warnings = {}
+    current_alarms = {}
+    statuses = [read_battery_status(modbus, battery) for battery in batteries]
+    node_numbers = [battery.slave_address for battery in batteries]
+    if ALLOW:
+        any_warning_active = False
+        any_alarm_active = False
+    for i, node in enumerate(node_numbers):
+        for s in warning_signals:
+            signal_name = insert_id(s.name, i+1)
+            value = s.get_value(statuses[i])
+            current_warnings[signal_name] = value
+            if ALLOW and value:
+                any_warning_active = True
+        for s in alarm_signals:
+            signal_name = insert_id(s.name, i+1)
+            value = random.choice([True, False])
+            current_alarms[signal_name] = value
+            if ALLOW and value:
+                any_alarm_active = True
+    print(update_state_from_dictionaries(current_warnings, current_alarms))
+    publish_values(dbus, signals, statuses)
+    create_csv_files(csv_signals, statuses, node_numbers)
+    logging.debug('finished update cycle\n')
+    return True'''
+
+start_time = time.time()
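+# --- illustrative sketch (editor addition, not called by the driver) ----------
+# update() below prefixes every per-node flag with the node's position using
+# insert_id() (defined further down), which splices the id in after the
+# 'Devices' path segment:
+def _example_insert_id():
+    # node 3's TaM1 warning key, as built in update()
+    assert insert_id('/Battery/Devices/WarningFlags/TaM1', 3) == '/Battery/Devices/3/WarningFlags/TaM1'
+# ------------------------------------------------------------------------------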
+def update(modbus, batteries, dbus, signals, csv_signals):
+    # type: (Modbus, Iterable[Battery], DBus, Iterable[Signal], Iterable[CsvSignal]) -> bool
+    """
+    Main update function
+
+    1. requests a status record from each battery via modbus,
+    2. parses the data using Signal.get_value,
+    3. aggregates the data from all batteries into one datum using Signal.aggregate,
+    4. publishes the data on the dbus.
+    """
+    global start_time
+    logging.debug('starting update cycle')
+    warnings_signals, alarm_signals = read_warning_and_alarm_flags()
+    current_warnings = {}
+    current_alarms = {}
+    statuses = [read_battery_status(modbus, battery) for battery in batteries]
+    node_numbers = [battery.slave_address for battery in batteries]
+    # collect the per-node warning and alarm flags
+    for i, node in enumerate(node_numbers):
+        for s in warnings_signals:
+            signal_name = insert_id(s.name, i+1)
+            current_warnings[signal_name] = s.get_value(statuses[i])
+        for s in alarm_signals:
+            signal_name = insert_id(s.name, i+1)
+            current_alarms[signal_name] = s.get_value(statuses[i])
+    status_message, alarms_number_list, warnings_number_list = update_state_from_dictionaries(current_warnings, current_alarms, node_numbers)
+    publish_values(dbus, signals, statuses)
+    elapsed_time = time.time() - start_time
+    if elapsed_time >= 30:
+        create_csv_files(csv_signals, statuses, node_numbers, alarms_number_list, warnings_number_list)
+        start_time = time.time()
+        print(f"Elapsed time: {elapsed_time:.2f} seconds")
+    logging.debug('finished update cycle\n')
+    return True
+
+def print_usage():
+    print('Usage: ' + __file__ + ' <tty>')
+    print('Example: ' + __file__ + ' ttyUSB0')
+
+def parse_cmdline_args(argv):
+    # type: (list[str]) -> str
+    if len(argv) == 0:
+        logging.info('missing command line argument for tty device')
+        print_usage()
+        sys.exit(1)
+    return argv[0]
+
+alive = True  # global alive flag, watchdog_task clears it, update_task sets it
+ALLOW = False
+
+def create_update_task(modbus, dbus, batteries, signals, csv_signals, main_loop):
+    # type: (Modbus, DBus, Iterable[Battery], Iterable[Signal], Iterable[CsvSignal], DBusGMainLoop) -> Callable[[], bool]
+    """
+    Creates an update task which runs the main update function
+    and resets the alive flag
+    """
+    def update_task():
+        # type: () -> bool
+        global alive, ALLOW
+        ALLOW = not ALLOW  # toggle the test flag on every cycle
+        alive = update(modbus, batteries, dbus, signals, csv_signals)
+        #alive = update_for_testing(modbus, batteries, dbus, signals, csv_signals)
+        if not alive:
+            logging.info('update_task: quitting main loop because of error')
+            main_loop.quit()
+        return alive
+    return update_task
+
+def create_watchdog_task(main_loop):
+    # type: (DBusGMainLoop) -> Callable[[], bool]
+    """
+    Creates a watchdog task that monitors the alive flag.
+    The watchdog kills the main loop if the alive flag is not periodically reset by the update task.
+    Who watches the watchdog?
+ """ + def watchdog_task(): + # type: () -> bool + global alive + if alive: + logging.debug('watchdog_task: update_task is alive') + alive = False + return True + else: + logging.info('watchdog_task: killing main loop because update_task is no longer alive') + main_loop.quit() + return False + return watchdog_task + +def get_installation_name(file_path): + with open(file_path, 'r') as file: + return file.read().strip() + +def manage_csv_files(directory_path, max_files=20): + csv_files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))] + csv_files.sort(key=lambda x: os.path.getctime(os.path.join(directory_path, x))) + # Remove oldest files if exceeds maximum + while len(csv_files) > max_files: + file_to_delete = os.path.join(directory_path, csv_files.pop(0)) + os.remove(file_to_delete) + +def serialize_for_csv(value): + if isinstance(value, (dict, list, tuple)): + return json.dumps(value, ensure_ascii=False) + return str(value) + +def insert_id(path, id_number): + parts = path.split("/") + insert_position = parts.index("Devices") + 1 + parts.insert(insert_position, str(id_number)) + return "/".join(parts) + +def create_csv_files(signals, statuses, node_numbers, alarms_number_list, warnings_number_list): + global s3_config + timestamp = int(time.time()) + if timestamp % 2 != 0: + timestamp -= 1 + # Create CSV directory if it doesn't exist + if not os.path.exists(CSV_DIR): + os.makedirs(CSV_DIR) + csv_filename = f"{timestamp}.csv" + csv_path = os.path.join(CSV_DIR, csv_filename) + # Append values to the CSV file + with open(csv_path, 'a', newline='') as csvfile: + csv_writer = csv.writer(csvfile, delimiter=';') + # Add a special row for the nodes configuration + nodes_config_path = "/Config/Devices/BatteryNodes" + nodes_list = ",".join(str(node) for node in node_numbers) + config_row = [nodes_config_path, nodes_list, ""] + csv_writer.writerow(config_row) + # Iterate over each node and signal to create rows in the new format + for i, node in enumerate(node_numbers): + csv_writer.writerow([f"/Battery/Devices/{str(i+1)}/Alarms", alarms_number_list[i], ""]) + csv_writer.writerow([f"/Battery/Devices/{str(i+1)}/Warnings", warnings_number_list[i], ""]) + for s in signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + row_values = [signal_name, value, s.get_text] + csv_writer.writerow(row_values) + # Manage CSV files, keep a limited number of files + # Create the CSV as a string + csv_data = read_csv_as_string(csv_path) + + if csv_data is None: + print(" error while reading csv as string") + return + + # zip-comp additions + compressed_csv = compress_csv_data(csv_data) + compressed_filename = f"{timestamp}.csv" + + + + response = s3_config.create_put_request(compressed_filename, compressed_csv) + if response.status_code == 200: + #os.remove(csv_path) + print("Success") + else: + failed_dir = os.path.join(CSV_DIR, "failed") + if not os.path.exists(failed_dir): + os.makedirs(failed_dir) + failed_path = os.path.join(failed_dir, csv_filename) + os.rename(csv_path, failed_path) + print("Uploading failed") + manage_csv_files(failed_dir, 10) + manage_csv_files(CSV_DIR) + +def main(argv): + # type: (list[str]) -> () + logging.basicConfig(level=cfg.LOG_LEVEL) + logging.info('starting ' + __file__) + tty = parse_cmdline_args(argv) + modbus = init_modbus(tty) + batteries = identify_batteries(modbus) + n = len(batteries) + logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries')) + if n <= 0: + sys.exit(2) + bat = 
c.first(batteries) # report hw and fw version of first battery found + signals = init_signals(bat.hardware_version, bat.firmware_version, n) + csv_signals = create_csv_signals(bat.firmware_version) + main_loop = init_main_loop() # must run before init_dbus because gobject does some global magic + dbus = init_dbus(tty, signals) + update_task = create_update_task(modbus, dbus, batteries, signals, csv_signals, main_loop) + watchdog_task = create_watchdog_task(main_loop) + GLib.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task) # add watchdog first + GLib.timeout_add(cfg.UPDATE_INTERVAL, update_task) # call update once every update_interval + logging.info('starting GLib.MainLoop') + main_loop.run() + logging.info('GLib.MainLoop was shut down') + sys.exit(0xFF) # reaches this only on error + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/__pycache__/ve_utils.cpython-38.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/__pycache__/ve_utils.cpython-38.pyc new file mode 100644 index 000000000..ed1c69aff Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/__pycache__/ve_utils.cpython-38.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/__pycache__/vedbus.cpython-38.pyc b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/__pycache__/vedbus.cpython-38.pyc new file mode 100644 index 000000000..d359ef5bf Binary files /dev/null and b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/__pycache__/vedbus.cpython-38.pyc differ diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/ve_utils.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/ve_utils.py new file mode 100644 index 000000000..f5a2f85a0 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/ve_utils.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +import sys +from traceback import print_exc +from os import _exit as os_exit +from os import statvfs +from subprocess import check_output, CalledProcessError +import logging +import dbus +logger = logging.getLogger(__name__) + +VEDBUS_INVALID = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) + +class NoVrmPortalIdError(Exception): + pass + +# Use this function to make sure the code quits on an unexpected exception. Make sure to use it +# when using GLib.idle_add and also GLib.timeout_add. +# Without this, the code will just keep running, since GLib does not stop the mainloop on an +# exception. +# Example: GLib.idle_add(exit_on_error, myfunc, arg1, arg2) +def exit_on_error(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except: + try: + print ('exit_on_error: there was an exception. Printing stacktrace will be tried and then exit') + print_exc() + except: + pass + + # sys.exit() is not used, since that throws an exception, which does not lead to a program + # halt when used in a dbus callback, see connection.py in the Python/Dbus libraries, line 230. + os_exit(1) + + +__vrm_portal_id = None +def get_vrm_portal_id(): + # The original definition of the VRM Portal ID is that it is the mac + # address of the onboard- ethernet port (eth0), stripped from its colons + # (:) and lower case. This may however differ between platforms. On Venus + # the task is therefore deferred to /sbin/get-unique-id so that a + # platform specific method can be easily defined. + # + # If /sbin/get-unique-id does not exist, then use the ethernet address + # of eth0. 
This also handles the case where velib_python is used as a + # package install on a Raspberry Pi. + # + # On a Linux host where the network interface may not be eth0, you can set + # the VRM_IFACE environment variable to the correct name. + + global __vrm_portal_id + + if __vrm_portal_id: + return __vrm_portal_id + + portal_id = None + + # First try the method that works if we don't have a data partition. This + # will fail when the current user is not root. + try: + portal_id = check_output("/sbin/get-unique-id").decode("utf-8", "ignore").strip() + if not portal_id: + raise NoVrmPortalIdError("get-unique-id returned blank") + __vrm_portal_id = portal_id + return portal_id + except CalledProcessError: + # get-unique-id returned non-zero + raise NoVrmPortalIdError("get-unique-id returned non-zero") + except OSError: + # File doesn't exist, use fallback + pass + + # Fall back to getting our id using a syscall. Assume we are on linux. + # Allow the user to override what interface is used using an environment + # variable. + import fcntl, socket, struct, os + + iface = os.environ.get('VRM_IFACE', 'eth0').encode('ascii') + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', iface[:15])) + except IOError: + raise NoVrmPortalIdError("ioctl failed for eth0") + + __vrm_portal_id = info[18:24].hex() + return __vrm_portal_id + + +# See VE.Can registers - public.docx for definition of this conversion +def convert_vreg_version_to_readable(version): + def str_to_arr(x, length): + a = [] + for i in range(0, len(x), length): + a.append(x[i:i+length]) + return a + + x = "%x" % version + x = x.upper() + + if len(x) == 5 or len(x) == 3 or len(x) == 1: + x = '0' + x + + a = str_to_arr(x, 2); + + # remove the first 00 if there are three bytes and it is 00 + if len(a) == 3 and a[0] == '00': + a.remove(0); + + # if we have two or three bytes now, and the first character is a 0, remove it + if len(a) >= 2 and a[0][0:1] == '0': + a[0] = a[0][1]; + + result = '' + for item in a: + result += ('.' if result != '' else '') + item + + + result = 'v' + result + + return result + + +def get_free_space(path): + result = -1 + + try: + s = statvfs(path) + result = s.f_frsize * s.f_bavail # Number of free bytes that ordinary users + except Exception as ex: + logger.info("Error while retrieving free space for path %s: %s" % (path, ex)) + + return result + + +def _get_sysfs_machine_name(): + try: + with open('/sys/firmware/devicetree/base/model', 'r') as f: + return f.read().rstrip('\x00') + except IOError: + pass + + return None + +# Returns None if it cannot find a machine name. Otherwise returns the string +# containing the name +def get_machine_name(): + # First try calling the venus utility script + try: + return check_output("/usr/bin/product-name").strip().decode('UTF-8') + except (CalledProcessError, OSError): + pass + + # Fall back to sysfs + name = _get_sysfs_machine_name() + if name is not None: + return name + + # Fall back to venus build machine name + try: + with open('/etc/venus/machine', 'r', encoding='UTF-8') as f: + return f.read().strip() + except IOError: + pass + + return None + + +def get_product_id(): + """ Find the machine ID and return it. 
""" + + # First try calling the venus utility script + try: + return check_output("/usr/bin/product-id").strip().decode('UTF-8') + except (CalledProcessError, OSError): + pass + + # Fall back machine name mechanism + name = _get_sysfs_machine_name() + return { + 'Color Control GX': 'C001', + 'Venus GX': 'C002', + 'Octo GX': 'C006', + 'EasySolar-II': 'C007', + 'MultiPlus-II': 'C008', + 'Maxi GX': 'C009', + 'Cerbo GX': 'C00A' + }.get(name, 'C003') # C003 is Generic + + +# Returns False if it cannot open the file. Otherwise returns its rstripped contents +def read_file(path): + content = False + + try: + with open(path, 'r') as f: + content = f.read().rstrip() + except Exception as ex: + logger.debug("Error while reading %s: %s" % (path, ex)) + + return content + + +def wrap_dbus_value(value): + if value is None: + return VEDBUS_INVALID + if isinstance(value, float): + return dbus.Double(value, variant_level=1) + if isinstance(value, bool): + return dbus.Boolean(value, variant_level=1) + if isinstance(value, int): + try: + return dbus.Int32(value, variant_level=1) + except OverflowError: + return dbus.Int64(value, variant_level=1) + if isinstance(value, str): + return dbus.String(value, variant_level=1) + if isinstance(value, list): + if len(value) == 0: + # If the list is empty we cannot infer the type of the contents. So assume unsigned integer. + # A (signed) integer is dangerous, because an empty list of signed integers is used to encode + # an invalid value. + return dbus.Array([], signature=dbus.Signature('u'), variant_level=1) + return dbus.Array([wrap_dbus_value(x) for x in value], variant_level=1) + if isinstance(value, dict): + # Wrapping the keys of the dictionary causes D-Bus errors like: + # 'arguments to dbus_message_iter_open_container() were incorrect, + # assertion "(type == DBUS_TYPE_ARRAY && contained_signature && + # *contained_signature == DBUS_DICT_ENTRY_BEGIN_CHAR) || (contained_signature == NULL || + # _dbus_check_is_valid_signature (contained_signature))" failed in file ...' + return dbus.Dictionary({(k, wrap_dbus_value(v)) for k, v in value.items()}, variant_level=1) + return value + + +dbus_int_types = (dbus.Int32, dbus.UInt32, dbus.Byte, dbus.Int16, dbus.UInt16, dbus.UInt32, dbus.Int64, dbus.UInt64) + + +def unwrap_dbus_value(val): + """Converts D-Bus values back to the original type. For example if val is of type DBus.Double, + a float will be returned.""" + if isinstance(val, dbus_int_types): + return int(val) + if isinstance(val, dbus.Double): + return float(val) + if isinstance(val, dbus.Array): + v = [unwrap_dbus_value(x) for x in val] + return None if len(v) == 0 else v + if isinstance(val, (dbus.Signature, dbus.String)): + return str(val) + # Python has no byte type, so we convert to an integer. + if isinstance(val, dbus.Byte): + return int(val) + if isinstance(val, dbus.ByteArray): + return "".join([bytes(x) for x in val]) + if isinstance(val, (list, tuple)): + return [unwrap_dbus_value(x) for x in val] + if isinstance(val, (dbus.Dictionary, dict)): + # Do not unwrap the keys, see comment in wrap_dbus_value + return dict([(x, unwrap_dbus_value(y)) for x, y in val.items()]) + if isinstance(val, dbus.Boolean): + return bool(val) + return val + +# When supported, only name owner changes for the the given namespace are reported. This +# prevents spending cpu time at irrelevant changes, like scripts accessing the bus temporarily. 
+def add_name_owner_changed_receiver(dbus, name_owner_changed, namespace="com.victronenergy"): + # support for arg0namespace is submitted upstream, but not included at the time of + # writing, Venus OS does support it, so try if it works. + if namespace is None: + dbus.add_signal_receiver(name_owner_changed, signal_name='NameOwnerChanged') + else: + try: + dbus.add_signal_receiver(name_owner_changed, + signal_name='NameOwnerChanged', arg0namespace=namespace) + except TypeError: + dbus.add_signal_receiver(name_owner_changed, signal_name='NameOwnerChanged') diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/vedbus.py b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/vedbus.py new file mode 100644 index 000000000..6171a2101 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/ext/velib_python/vedbus.py @@ -0,0 +1,614 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import dbus.service +import logging +import traceback +import os +import weakref +from collections import defaultdict +from ve_utils import wrap_dbus_value, unwrap_dbus_value + +# vedbus contains three classes: +# VeDbusItemImport -> use this to read data from the dbus, ie import +# VeDbusItemExport -> use this to export data to the dbus (one value) +# VeDbusService -> use that to create a service and export several values to the dbus + +# Code for VeDbusItemImport is copied from busitem.py and thereafter modified. +# All projects that used busitem.py need to migrate to this package. And some +# projects used to define there own equivalent of VeDbusItemExport. Better to +# use VeDbusItemExport, or even better the VeDbusService class that does it all for you. + +# TODOS +# 1 check for datatypes, it works now, but not sure if all is compliant with +# com.victronenergy.BusItem interface definition. See also the files in +# tests_and_examples. And see 'if type(v) == dbus.Byte:' on line 102. Perhaps +# something similar should also be done in VeDbusBusItemExport? +# 2 Shouldn't VeDbusBusItemExport inherit dbus.service.Object? +# 7 Make hard rules for services exporting data to the D-Bus, in order to make tracking +# changes possible. Does everybody first invalidate its data before leaving the bus? +# And what about before taking one object away from the bus, instead of taking the +# whole service offline? +# They should! And after taking one value away, do we need to know that someone left +# the bus? Or we just keep that value in invalidated for ever? Result is that we can't +# see the difference anymore between an invalidated value and a value that was first on +# the bus and later not anymore. See comments above VeDbusItemImport as well. +# 9 there are probably more todos in the code below. + +# Some thoughts with regards to the data types: +# +# Text from: http://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html#data-types +# --- +# Variants are represented by setting the variant_level keyword argument in the +# constructor of any D-Bus data type to a value greater than 0 (variant_level 1 +# means a variant containing some other data type, variant_level 2 means a variant +# containing a variant containing some other data type, and so on). If a non-variant +# is passed as an argument but introspection indicates that a variant is expected, +# it'll automatically be wrapped in a variant. +# --- +# +# Also the different dbus datatypes, such as dbus.Int32, and dbus.UInt32 are a subclass +# of Python int. 
dbus.String is a subclass of Python standard class unicode, etcetera +# +# So all together that explains why we don't need to explicitly convert back and forth +# between the dbus datatypes and the standard python datatypes. Note that all datatypes +# in python are objects. Even an int is an object. + +# The signature of a variant is 'v'. + +# Export ourselves as a D-Bus service. +class VeDbusService(object): + def __init__(self, servicename, bus=None): + # dict containing the VeDbusItemExport objects, with their path as the key. + self._dbusobjects = {} + self._dbusnodes = {} + self._ratelimiters = [] + self._dbusname = None + + # dict containing the onchange callbacks, for each object. Object path is the key + self._onchangecallbacks = {} + + # Connect to session bus whenever present, else use the system bus + self._dbusconn = bus or (dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus()) + + # make the dbus connection available to outside, could make this a true property instead, but ach.. + self.dbusconn = self._dbusconn + + # Register ourselves on the dbus, trigger an error if already in use (do_not_queue) + self._dbusname = dbus.service.BusName(servicename, self._dbusconn, do_not_queue=True) + + # Add the root item that will return all items as a tree + self._dbusnodes['/'] = VeDbusRootExport(self._dbusconn, '/', self) + + logging.info("registered ourselves on D-Bus as %s" % servicename) + + # To force immediate deregistering of this dbus service and all its object paths, explicitly + # call __del__(). + def __del__(self): + for node in list(self._dbusnodes.values()): + node.__del__() + self._dbusnodes.clear() + for item in list(self._dbusobjects.values()): + item.__del__() + self._dbusobjects.clear() + if self._dbusname: + self._dbusname.__del__() # Forces call to self._bus.release_name(self._name), see source code + self._dbusname = None + + def get_name(self): + return self._dbusname.get_name() + + # @param callbackonchange function that will be called when this value is changed. First parameter will + # be the path of the object, second the new value. This callback should return + # True to accept the change, False to reject it. + def add_path(self, path, value, description="", writeable=False, + onchangecallback=None, gettextcallback=None, valuetype=None, itemtype=None): + + if onchangecallback is not None: + self._onchangecallbacks[path] = onchangecallback + + itemtype = itemtype or VeDbusItemExport + item = itemtype(self._dbusconn, path, value, description, writeable, + self._value_changed, gettextcallback, deletecallback=self._item_deleted, valuetype=valuetype) + + spl = path.split('/') + for i in range(2, len(spl)): + subPath = '/'.join(spl[:i]) + if subPath not in self._dbusnodes and subPath not in self._dbusobjects: + self._dbusnodes[subPath] = VeDbusTreeExport(self._dbusconn, subPath, self) + self._dbusobjects[path] = item + logging.debug('added %s with start value %s. 
Writeable is %s' % (path, value, writeable)) + + # Add the mandatory paths, as per victron dbus api doc + def add_mandatory_paths(self, processname, processversion, connection, + deviceinstance, productid, productname, firmwareversion, hardwareversion, connected): + self.add_path('/Mgmt/ProcessName', processname) + self.add_path('/Mgmt/ProcessVersion', processversion) + self.add_path('/Mgmt/Connection', connection) + + # Create rest of the mandatory objects + self.add_path('/DeviceInstance', deviceinstance) + self.add_path('/ProductId', productid) + self.add_path('/ProductName', productname) + self.add_path('/FirmwareVersion', firmwareversion) + self.add_path('/HardwareVersion', hardwareversion) + self.add_path('/Connected', connected) + + # Callback function that is called from the VeDbusItemExport objects when a value changes. This function + # maps the change-request to the onchangecallback given to us for this specific path. + def _value_changed(self, path, newvalue): + if path not in self._onchangecallbacks: + return True + + return self._onchangecallbacks[path](path, newvalue) + + def _item_deleted(self, path): + self._dbusobjects.pop(path) + for np in list(self._dbusnodes.keys()): + if np != '/': + for ip in self._dbusobjects: + if ip.startswith(np + '/'): + break + else: + self._dbusnodes[np].__del__() + self._dbusnodes.pop(np) + + def __getitem__(self, path): + return self._dbusobjects[path].local_get_value() + + def __setitem__(self, path, newvalue): + self._dbusobjects[path].local_set_value(newvalue) + + def __delitem__(self, path): + self._dbusobjects[path].__del__() # Invalidates and then removes the object path + assert path not in self._dbusobjects + + def __contains__(self, path): + return path in self._dbusobjects + + def __enter__(self): + l = ServiceContext(self) + self._ratelimiters.append(l) + return l + + def __exit__(self, *exc): + # pop off the top one and flush it. If with statements are nested + # then each exit flushes its own part. + if self._ratelimiters: + self._ratelimiters.pop().flush() + +class ServiceContext(object): + def __init__(self, parent): + self.parent = parent + self.changes = {} + + def __getitem__(self, path): + return self.parent[path] + + def __setitem__(self, path, newvalue): + c = self.parent._dbusobjects[path]._local_set_value(newvalue) + if c is not None: + self.changes[path] = c + + def flush(self): + if self.changes: + self.parent._dbusnodes['/'].ItemsChanged(self.changes) + +class TrackerDict(defaultdict): + """ Same as defaultdict, but passes the key to default_factory. """ + def __missing__(self, key): + self[key] = x = self.default_factory(key) + return x + +class VeDbusRootTracker(object): + """ This tracks the root of a dbus path and listens for PropertiesChanged + signals. When a signal arrives, parse it and unpack the key/value changes + into traditional events, then pass it to the original eventCallback + method. 
""" + def __init__(self, bus, serviceName): + self.importers = defaultdict(weakref.WeakSet) + self.serviceName = serviceName + self._match = bus.get_object(serviceName, '/', introspect=False).connect_to_signal( + "ItemsChanged", weak_functor(self._items_changed_handler)) + + def __del__(self): + self._match.remove() + self._match = None + + def add(self, i): + self.importers[i.path].add(i) + + def _items_changed_handler(self, items): + if not isinstance(items, dict): + return + + for path, changes in items.items(): + try: + v = changes['Value'] + except KeyError: + continue + + try: + t = changes['Text'] + except KeyError: + t = str(unwrap_dbus_value(v)) + + for i in self.importers.get(path, ()): + i._properties_changed_handler({'Value': v, 'Text': t}) + +""" +Importing basics: + - If when we power up, the D-Bus service does not exist, or it does exist and the path does not + yet exist, still subscribe to a signal: as soon as it comes online it will send a signal with its + initial value, which VeDbusItemImport will receive and use to update local cache. And, when set, + call the eventCallback. + - If when we power up, save it + - When using get_value, know that there is no difference between services (or object paths) that don't + exist and paths that are invalid (= empty array, see above). Both will return None. In case you do + really want to know ifa path exists or not, use the exists property. + - When a D-Bus service leaves the D-Bus, it will first invalidate all its values, and send signals + with that update, and only then leave the D-Bus. (or do we need to subscribe to the NameOwnerChanged- + signal!?!) To be discussed and make sure. Not really urgent, since all existing code that uses this + class already subscribes to the NameOwnerChanged signal, and subsequently removes instances of this + class. + +Read when using this class: +Note that when a service leaves that D-Bus without invalidating all its exported objects first, for +example because it is killed, VeDbusItemImport doesn't have a clue. So when using VeDbusItemImport, +make sure to also subscribe to the NamerOwnerChanged signal on bus-level. Or just use dbusmonitor, +because that takes care of all of that for you. +""" +class VeDbusItemImport(object): + def __new__(cls, bus, serviceName, path, eventCallback=None, createsignal=True): + instance = object.__new__(cls) + + # If signal tracking should be done, also add to root tracker + if createsignal: + if "_roots" not in cls.__dict__: + cls._roots = TrackerDict(lambda k: VeDbusRootTracker(bus, k)) + + return instance + + ## Constructor + # @param bus the bus-object (SESSION or SYSTEM). + # @param serviceName the dbus-service-name (string), for example 'com.victronenergy.battery.ttyO1' + # @param path the object-path, for example '/Dc/V' + # @param eventCallback function that you want to be called on a value change + # @param createSignal only set this to False if you use this function to one time read a value. When + # leaving it to True, make sure to also subscribe to the NameOwnerChanged signal + # elsewhere. See also note some 15 lines up. + def __init__(self, bus, serviceName, path, eventCallback=None, createsignal=True): + # TODO: is it necessary to store _serviceName and _path? Isn't it + # stored in the bus_getobjectsomewhere? 
+ self._serviceName = serviceName + self._path = path + self._match = None + # TODO: _proxy is being used in settingsdevice.py, make a getter for that + self._proxy = bus.get_object(serviceName, path, introspect=False) + self.eventCallback = eventCallback + + assert eventCallback is None or createsignal == True + if createsignal: + self._match = self._proxy.connect_to_signal( + "PropertiesChanged", weak_functor(self._properties_changed_handler)) + self._roots[serviceName].add(self) + + # store the current value in _cachedvalue. When it doesn't exists set _cachedvalue to + # None, same as when a value is invalid + self._cachedvalue = None + try: + v = self._proxy.GetValue() + except dbus.exceptions.DBusException: + pass + else: + self._cachedvalue = unwrap_dbus_value(v) + + def __del__(self): + if self._match is not None: + self._match.remove() + self._match = None + self._proxy = None + + def _refreshcachedvalue(self): + self._cachedvalue = unwrap_dbus_value(self._proxy.GetValue()) + + ## Returns the path as a string, for example '/AC/L1/V' + @property + def path(self): + return self._path + + ## Returns the dbus service name as a string, for example com.victronenergy.vebus.ttyO1 + @property + def serviceName(self): + return self._serviceName + + ## Returns the value of the dbus-item. + # the type will be a dbus variant, for example dbus.Int32(0, variant_level=1) + # this is not a property to keep the name consistant with the com.victronenergy.busitem interface + # returns None when the property is invalid + def get_value(self): + return self._cachedvalue + + ## Writes a new value to the dbus-item + def set_value(self, newvalue): + r = self._proxy.SetValue(wrap_dbus_value(newvalue)) + + # instead of just saving the value, go to the dbus and get it. So we have the right type etc. + if r == 0: + self._refreshcachedvalue() + + return r + + ## Resets the item to its default value + def set_default(self): + self._proxy.SetDefault() + self._refreshcachedvalue() + + ## Returns the text representation of the value. + # For example when the value is an enum/int GetText might return the string + # belonging to that enum value. Another example, for a voltage, GetValue + # would return a float, 12.0Volt, and GetText could return 12 VDC. + # + # Note that this depends on how the dbus-producer has implemented this. + def get_text(self): + return self._proxy.GetText() + + ## Returns true of object path exists, and false if it doesn't + @property + def exists(self): + # TODO: do some real check instead of this crazy thing. + r = False + try: + r = self._proxy.GetValue() + r = True + except dbus.exceptions.DBusException: + pass + + return r + + ## callback for the trigger-event. + # @param eventCallback the event-callback-function. + @property + def eventCallback(self): + return self._eventCallback + + @eventCallback.setter + def eventCallback(self, eventCallback): + self._eventCallback = eventCallback + + ## Is called when the value of the imported bus-item changes. + # Stores the new value in our local cache, and calls the eventCallback, if set. + def _properties_changed_handler(self, changes): + if "Value" in changes: + changes['Value'] = unwrap_dbus_value(changes['Value']) + self._cachedvalue = changes['Value'] + if self._eventCallback: + # The reason behind this try/except is to prevent errors silently ending up the an error + # handler in the dbus code. 
+ try: + self._eventCallback(self._serviceName, self._path, changes) + except: + traceback.print_exc() + os._exit(1) # sys.exit() is not used, since that also throws an exception + + +class VeDbusTreeExport(dbus.service.Object): + def __init__(self, bus, objectPath, service): + dbus.service.Object.__init__(self, bus, objectPath) + self._service = service + logging.debug("VeDbusTreeExport %s has been created" % objectPath) + + def __del__(self): + # self._get_path() will raise an exception when retrieved after the call to .remove_from_connection, + # so we need a copy. + path = self._get_path() + if path is None: + return + self.remove_from_connection() + logging.debug("VeDbusTreeExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + def _get_value_handler(self, path, get_text=False): + logging.debug("_get_value_handler called for %s" % path) + r = {} + px = path + if not px.endswith('/'): + px += '/' + for p, item in self._service._dbusobjects.items(): + if p.startswith(px): + v = item.GetText() if get_text else wrap_dbus_value(item.local_get_value()) + r[p[len(px):]] = v + logging.debug(r) + return r + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetValue(self): + value = self._get_value_handler(self._get_path()) + return dbus.Dictionary(value, signature=dbus.Signature('sv'), variant_level=1) + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetText(self): + return self._get_value_handler(self._get_path(), True) + + def local_get_value(self): + return self._get_value_handler(self.path) + +class VeDbusRootExport(VeDbusTreeExport): + @dbus.service.signal('com.victronenergy.BusItem', signature='a{sa{sv}}') + def ItemsChanged(self, changes): + pass + + @dbus.service.method('com.victronenergy.BusItem', out_signature='a{sa{sv}}') + def GetItems(self): + return { + path: { + 'Value': wrap_dbus_value(item.local_get_value()), + 'Text': item.GetText() } + for path, item in self._service._dbusobjects.items() + } + + +class VeDbusItemExport(dbus.service.Object): + ## Constructor of VeDbusItemExport + # + # Use this object to export (publish), values on the dbus + # Creates the dbus-object under the given dbus-service-name. + # @param bus The dbus object. + # @param objectPath The dbus-object-path. + # @param value Value to initialize ourselves with, defaults to None which means Invalid + # @param description String containing a description. Can be called over the dbus with GetDescription() + # @param writeable what would this do!? :). + # @param callback Function that will be called when someone else changes the value of this VeBusItem + # over the dbus. First parameter passed to callback will be our path, second the new + # value. This callback should return True to accept the change, False to reject it. + def __init__(self, bus, objectPath, value=None, description=None, writeable=False, + onchangecallback=None, gettextcallback=None, deletecallback=None, + valuetype=None): + dbus.service.Object.__init__(self, bus, objectPath) + self._onchangecallback = onchangecallback + self._gettextcallback = gettextcallback + self._value = value + self._description = description + self._writeable = writeable + self._deletecallback = deletecallback + self._type = valuetype + + # To force immediate deregistering of this dbus object, explicitly call __del__(). 
+ def __del__(self): + # self._get_path() will raise an exception when retrieved after the + # call to .remove_from_connection, so we need a copy. + path = self._get_path() + if path == None: + return + if self._deletecallback is not None: + self._deletecallback(path) + self.remove_from_connection() + logging.debug("VeDbusItemExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + ## Sets the value. And in case the value is different from what it was, a signal + # will be emitted to the dbus. This function is to be used in the python code that + # is using this class to export values to the dbus. + # set value to None to indicate that it is Invalid + def local_set_value(self, newvalue): + changes = self._local_set_value(newvalue) + if changes is not None: + self.PropertiesChanged(changes) + + def _local_set_value(self, newvalue): + if self._value == newvalue: + return None + + self._value = newvalue + return { + 'Value': wrap_dbus_value(newvalue), + 'Text': self.GetText() + } + + def local_get_value(self): + return self._value + + # ==== ALL FUNCTIONS BELOW THIS LINE WILL BE CALLED BY OTHER PROCESSES OVER THE DBUS ==== + + ## Dbus exported method SetValue + # Function is called over the D-Bus by other process. It will first check (via callback) if new + # value is accepted. And it is, stores it and emits a changed-signal. + # @param value The new value. + # @return completion-code When successful a 0 is return, and when not a -1 is returned. + @dbus.service.method('com.victronenergy.BusItem', in_signature='v', out_signature='i') + def SetValue(self, newvalue): + if not self._writeable: + return 1 # NOT OK + + newvalue = unwrap_dbus_value(newvalue) + + # If value type is enforced, cast it. If the type can be coerced + # python will do it for us. This allows ints to become floats, + # or bools to become ints. Additionally also allow None, so that + # a path may be invalidated. + if self._type is not None and newvalue is not None: + try: + newvalue = self._type(newvalue) + except (ValueError, TypeError): + return 1 # NOT OK + + if newvalue == self._value: + return 0 # OK + + # call the callback given to us, and check if new value is OK. + if (self._onchangecallback is None or + (self._onchangecallback is not None and self._onchangecallback(self.__dbus_object_path__, newvalue))): + + self.local_set_value(newvalue) + return 0 # OK + + return 2 # NOT OK + + ## Dbus exported method GetDescription + # + # Returns the a description. + # @param language A language code (e.g. ISO 639-1 en-US). + # @param length Lenght of the language string. + # @return description + @dbus.service.method('com.victronenergy.BusItem', in_signature='si', out_signature='s') + def GetDescription(self, language, length): + return self._description if self._description is not None else 'No description given' + + ## Dbus exported method GetValue + # Returns the value. + # @return the value when valid, and otherwise an empty array + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetValue(self): + return wrap_dbus_value(self._value) + + ## Dbus exported method GetText + # Returns the value as string of the dbus-object-path. + # @return text A text-value. 
'---' when local value is invalid + @dbus.service.method('com.victronenergy.BusItem', out_signature='s') + def GetText(self): + if self._value is None: + return '---' + + # Default conversion from dbus.Byte will get you a character (so 'T' instead of '84'), so we + # have to convert to int first. Note that if a dbus.Byte turns up here, it must have come from + # the application itself, as all data from the D-Bus should have been unwrapped by now. + if self._gettextcallback is None and type(self._value) == dbus.Byte: + return str(int(self._value)) + + if self._gettextcallback is None and self.__dbus_object_path__ == '/ProductId': + return "0x%X" % self._value + + if self._gettextcallback is None: + return str(self._value) + + return self._gettextcallback(self.__dbus_object_path__, self._value) + + ## The signal that indicates that the value has changed. + # Other processes connected to this BusItem object will have subscribed to the + # event when they want to track our state. + @dbus.service.signal('com.victronenergy.BusItem', signature='a{sv}') + def PropertiesChanged(self, changes): + pass + +## This class behaves like a regular reference to a class method (eg. self.foo), but keeps a weak reference +## to the object which method is to be called. +## Use this object to break circular references. +class weak_functor: + def __init__(self, f): + self._r = weakref.ref(f.__self__) + self._f = weakref.ref(f.__func__) + + def __call__(self, *args, **kargs): + r = self._r() + f = self._f() + if r == None or f == None: + return + f(r, *args, **kargs) diff --git a/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/start.sh b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/start.sh new file mode 100755 index 000000000..d818ffc57 --- /dev/null +++ b/NodeRed/NodeRedFiles/dbus-fzsonick-48tl/start.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +. 
/opt/victronenergy/serial-starter/run-service.sh + +app=/opt/victronenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py +args="$tty" +start $args diff --git a/NodeRed/NodeRedFiles/flows.json b/NodeRed/NodeRedFiles/flows.json new file mode 100644 index 000000000..3fa924b39 --- /dev/null +++ b/NodeRed/NodeRedFiles/flows.json @@ -0,0 +1,5258 @@ +[ + { + "id": "172866a9820f49e3", + "type": "tab", + "label": "controller_calibration_charge", + "disabled": false, + "info": "", + "env": [] + }, + { + "id": "58aeeaac02a3a4c7", + "type": "tab", + "label": "Innovenergy Controller", + "disabled": false, + "info": "", + "env": [] + }, + { + "id": "449f3115316b1767", + "type": "tab", + "label": "parse_warnings_and_alarms", + "disabled": false, + "info": "", + "env": [] + }, + { + "id": "9744d7fd57e81fe3", + "type": "tab", + "label": "Node Red Dashboard", + "disabled": false, + "info": "", + "env": [] + }, + { + "id": "victron-client-id", + "type": "victron-client", + "showValues": true, + "contextStore": true + }, + { + "id": "e177392401620838", + "type": "ui_group", + "name": "Controller and Battery Info", + "tab": "157862d37ae585b5", + "order": 2, + "disp": true, + "width": "13", + "collapse": false, + "className": "" + }, + { + "id": "157862d37ae585b5", + "type": "ui_tab", + "name": "Home", + "icon": "check", + "disabled": false, + "hidden": false + }, + { + "id": "e0e675d533a148b7", + "type": "ui_base", + "theme": { + "name": "theme-light", + "lightTheme": { + "default": "#0094CE", + "baseColor": "#0094CE", + "baseFont": "-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen-Sans,Ubuntu,Cantarell,Helvetica Neue,sans-serif", + "edited": true, + "reset": false + }, + "darkTheme": { + "default": "#097479", + "baseColor": "#097479", + "baseFont": "-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen-Sans,Ubuntu,Cantarell,Helvetica Neue,sans-serif", + "edited": false + }, + "customTheme": { + "name": "Untitled Theme 1", + "default": "#4B7930", + "baseColor": "#4B7930", + "baseFont": "-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen-Sans,Ubuntu,Cantarell,Helvetica Neue,sans-serif", + "reset": false + }, + "themeState": { + "base-color": { + "default": "#0094CE", + "value": "#0094CE", + "edited": false + }, + "page-titlebar-backgroundColor": { + "value": "#0094CE", + "edited": false + }, + "page-backgroundColor": { + "value": "#fafafa", + "edited": false + }, + "page-sidebar-backgroundColor": { + "value": "#ffffff", + "edited": false + }, + "group-textColor": { + "value": "#1bbfff", + "edited": false + }, + "group-borderColor": { + "value": "#ffffff", + "edited": false + }, + "group-backgroundColor": { + "value": "#ffffff", + "edited": false + }, + "widget-textColor": { + "value": "#111111", + "edited": false + }, + "widget-backgroundColor": { + "value": "#0094ce", + "edited": false + }, + "widget-borderColor": { + "value": "#ffffff", + "edited": false + }, + "base-font": { + "value": "-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen-Sans,Ubuntu,Cantarell,Helvetica Neue,sans-serif" + } + }, + "angularTheme": { + "primary": "indigo", + "accents": "blue", + "warn": "red", + "background": "grey", + "palette": "light" + } + }, + "site": { + "name": "Node-RED Dashboard", + "hideToolbar": "false", + "allowSwipe": "false", + "lockMenu": "false", + "allowTempTheme": "true", + "dateFormat": "DD/MM/YYYY", + "sizes": { + "sx": 48, + "sy": 48, + "gx": 6, + "gy": 6, + "cx": 6, + "cy": 6, + "px": 0, + "py": 0 + } + } + }, + { + "id": "3290bd5996bd3175", + "type": "ui_group", + "name": "Easy Input", + 
"tab": "157862d37ae585b5", + "order": 3, + "disp": true, + "width": 13, + "collapse": false, + "className": "" + }, + { + "id": "d610b26df84f336e", + "type": "ui_group", + "name": "Calibration Charge", + "tab": "157862d37ae585b5", + "order": 1, + "disp": true, + "width": "13", + "collapse": false, + "className": "" + }, + { + "id": "0a24f718e758d2a9", + "type": "ui_switch", + "z": "172866a9820f49e3", + "name": "Start Calibration Charge Now", + "label": "Start Calibration Charge Now", + "tooltip": "", + "group": "d610b26df84f336e", + "order": 5, + "width": 0, + "height": 0, + "passthru": true, + "decouple": "false", + "topic": "#:(file)::start_calibration_charge_now_button", + "topicType": "global", + "style": "", + "onvalue": "true", + "onvalueType": "bool", + "onicon": "", + "oncolor": "", + "offvalue": "false", + "offvalueType": "bool", + "officon": "", + "offcolor": "", + "animate": false, + "className": "", + "x": 2440, + "y": 100, + "wires": [ + [ + "51c9f1ddaeb25f25", + "e41f78fc126646c2" + ] + ] + }, + { + "id": "51c9f1ddaeb25f25", + "type": "switch", + "z": "172866a9820f49e3", + "name": "Button is on", + "property": "payload", + "propertyType": "msg", + "rules": [ + { + "t": "true" + } + ], + "checkall": "true", + "repair": false, + "outputs": 1, + "x": 2670, + "y": 100, + "wires": [ + [ + "e78b1ea309a603a6" + ] + ] + }, + { + "id": "f020fbc3bd8d0451", + "type": "switch", + "z": "172866a9820f49e3", + "name": "Need to do calibration charge or not", + "property": "payload", + "propertyType": "msg", + "rules": [ + { + "t": "eq", + "v": "0", + "vt": "num" + }, + { + "t": "eq", + "v": "1", + "vt": "num" + }, + { + "t": "else" + } + ], + "checkall": "true", + "repair": false, + "outputs": 3, + "x": 1620, + "y": 240, + "wires": [ + [ + "68751648fdf25a8c", + "4b29ed333812c24a", + "92c2d1b3e084decd", + "1c9f62ea01d98941" + ], + [ + "9f12c0b1b50b4ce1", + "652a29107ef4c403" + ], + [ + "26b43f1b059c7c77", + "281c467e3b39527d" + ] + ] + }, + { + "id": "f43f9fdbb0ed174c", + "type": "ui_text", + "z": "172866a9820f49e3", + "group": "d610b26df84f336e", + "order": 2, + "width": 0, + "height": 0, + "name": "Time To Calibration Charge", + "label": "Time To Calibration Charge", + "format": "{{msg.payload}}", + "layout": "row-spread", + "className": "", + "style": false, + "font": "", + "fontSize": 16, + "color": "#000000", + "x": 3360, + "y": 420, + "wires": [] + }, + { + "id": "fa6c2442f6246ea7", + "type": "ui_text_input", + "z": "172866a9820f49e3", + "name": "Calibration Charge Start Time (hh:mm)", + "label": "Calibration Charge Start Time (hh:mm:ss.sss)", + "tooltip": "", + "group": "d610b26df84f336e", + "order": 4, + "width": 0, + "height": 0, + "passthru": true, + "mode": "time", + "delay": "0", + "topic": "#:(file)::calibration_charge_start_time", + "sendOnBlur": false, + "className": "", + "topicType": "global", + "x": 510, + "y": 80, + "wires": [ + [ + "3240485c8287ba94" + ] + ] + }, + { + "id": "e41f78fc126646c2", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::start_calibration_charge_now_button", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2780, + "y": 40, + "wires": [ + [ + "dc2cc7ea2fa34e60" + ] + ] + }, + { + "id": "dc2cc7ea2fa34e60", + "type": "debug", + "z": "172866a9820f49e3", + "name": "Debug for calibration button", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + 
"targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 3180, + "y": 40, + "wires": [] + }, + { + "id": "3240485c8287ba94", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::calibration_charge_start_time", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 900, + "y": 80, + "wires": [ + [ + "9777d2795825c36e" + ] + ] + }, + { + "id": "e78b1ea309a603a6", + "type": "change", + "z": "172866a9820f49e3", + "name": "Set \"Calibration charge now\" to Time To CalibrationCharge", + "rules": [ + { + "t": "set", + "p": "payload", + "pt": "msg", + "to": "Calibration charge now", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 3010, + "y": 100, + "wires": [ + [ + "f43f9fdbb0ed174c" + ] + ] + }, + { + "id": "f2947dcdf2aaa2d7", + "type": "victron-input-custom", + "z": "172866a9820f49e3", + "service": "com.victronenergy.settings", + "path": "/Settings/Controller/LastEOC", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/Controller/LastEOC", + "name": "/Settings/Controller/LastEOC", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 550, + "y": 280, + "wires": [ + [ + "ce55ad5eec4e4fcd", + "520ed5df0f889c12" + ] + ] + }, + { + "id": "92c2d1b3e084decd", + "type": "change", + "z": "172866a9820f49e3", + "name": "Get current timestamp to update LastEoc", + "rules": [ + { + "t": "set", + "p": "payload", + "pt": "msg", + "to": "", + "tot": "date" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2140, + "y": 200, + "wires": [ + [ + "e316195db1497dd6" + ] + ] + }, + { + "id": "a7cf225ccec6aceb", + "type": "victron-output-custom", + "z": "172866a9820f49e3", + "service": "com.victronenergy.settings", + "path": "/Settings/Controller/LastEOC", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/Controller/LastEOC", + "name": "/Settings/Controller/LastEOC", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 2790, + "y": 200, + "wires": [] + }, + { + "id": "e316195db1497dd6", + "type": "function", + "z": "172866a9820f49e3", + "name": "Millisecond_to_second", + "func": "current_timestamp_in_second=Math.floor(msg.payload/1000);\nmsg.payload = current_timestamp_in_second;\nreturn msg;", + "outputs": 1, + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 2440, + "y": 200, + "wires": [ + [ + "a7cf225ccec6aceb" + ] + ] + }, + { + "id": "2e11958f0143dec0", + "type": "comment", + "z": "172866a9820f49e3", + "name": "EOC reached ", + "info": "", + "x": 1890, + "y": 220, + "wires": [] + }, + { + "id": "877947e6fd87acc6", + "type": "comment", + "z": "172866a9820f49e3", + "name": "Do calibration charge now", + "info": "", + "x": 2270, + "y": 400, + "wires": [] + }, + { + "id": "a5e4b756a68c1b09", + "type": "comment", + "z": "172866a9820f49e3", + "name": "Still some time left to do calibration charge", + "info": "", + "x": 2280, + "y": 600, + "wires": [] + }, + { + "id": "2f1db37da138e532", + "type": "debug", + "z": "172866a9820f49e3", + "name": "Debug for calibration charge function", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": 
"payload.count", + "statusType": "auto", + "x": 1630, + "y": 140, + "wires": [] + }, + { + "id": "4b29ed333812c24a", + "type": "change", + "z": "172866a9820f49e3", + "name": "EOC reached", + "rules": [ + { + "t": "set", + "p": "payload", + "pt": "msg", + "to": "EOC reached", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2060, + "y": 240, + "wires": [ + [ + "f43f9fdbb0ed174c" + ] + ] + }, + { + "id": "9f12c0b1b50b4ce1", + "type": "change", + "z": "172866a9820f49e3", + "name": "Calibration charge now", + "rules": [ + { + "t": "set", + "p": "payload", + "pt": "msg", + "to": "Calibration charge now", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2520, + "y": 420, + "wires": [ + [ + "f43f9fdbb0ed174c" + ] + ] + }, + { + "id": "329547301dcbd8fd", + "type": "ui_dropdown", + "z": "172866a9820f49e3", + "name": "Calibration Charge Day", + "label": "Calibration Charge Day", + "tooltip": "", + "place": "", + "group": "d610b26df84f336e", + "order": 3, + "width": 0, + "height": 0, + "passthru": true, + "multiple": false, + "options": [ + { + "label": "Sunday", + "value": 0, + "type": "num" + }, + { + "label": "Monday", + "value": 1, + "type": "num" + }, + { + "label": "Tuesday", + "value": 2, + "type": "num" + }, + { + "label": "Wednesday", + "value": 3, + "type": "num" + }, + { + "label": "Thursday", + "value": 4, + "type": "num" + }, + { + "label": "Friday", + "value": 5, + "type": "num" + }, + { + "label": "Saturday", + "value": 6, + "type": "num" + } + ], + "payload": "", + "topic": "#:(file)::calibration_charge_weekday", + "topicType": "global", + "className": "", + "x": 510, + "y": 180, + "wires": [ + [ + "1c86e1cdef14e122" + ] + ] + }, + { + "id": "1c86e1cdef14e122", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::calibration_charge_start_weekday", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 890, + "y": 180, + "wires": [ + [ + "9777d2795825c36e" + ] + ] + }, + { + "id": "ce55ad5eec4e4fcd", + "type": "debug", + "z": "172866a9820f49e3", + "name": "Debug for LastEOC", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": false, + "complete": "payload", + "targetType": "msg", + "statusVal": "", + "statusType": "auto", + "x": 970, + "y": 320, + "wires": [] + }, + { + "id": "b72b6b4568f60ba4", + "type": "inject", + "z": "172866a9820f49e3", + "name": "", + "props": [ + { + "p": "payload" + }, + { + "p": "topic", + "vt": "str" + } + ], + "repeat": "", + "crontab": "", + "once": true, + "onceDelay": 0.1, + "topic": "", + "payload": "#:(file)::calibration_charge_start_time", + "payloadType": "global", + "x": 150, + "y": 80, + "wires": [ + [ + "fa6c2442f6246ea7" + ] + ] + }, + { + "id": "3b641a3d5b50366c", + "type": "inject", + "z": "172866a9820f49e3", + "name": "", + "props": [ + { + "p": "payload" + }, + { + "p": "topic", + "vt": "str" + } + ], + "repeat": "", + "crontab": "", + "once": true, + "onceDelay": 0.1, + "topic": "", + "payload": "#:(file)::calibration_charge_start_weekday", + "payloadType": "global", + "x": 160, + "y": 180, + "wires": [ + [ + "329547301dcbd8fd" + ] + ] + }, + { + "id": "841f883bd864e9eb", + "type": "inject", + "z": "172866a9820f49e3", + "name": "", + "props": [ + { + "p": "payload" + }, + { + "p": "topic", + "vt": "str" + } + ], + "repeat": "5", + "crontab": "", + "once": true, + 
"onceDelay": "0", + "topic": "", + "payload": "#:(file)::start_calibration_charge_now_button", + "payloadType": "global", + "x": 2030, + "y": 80, + "wires": [ + [ + "0a24f718e758d2a9" + ] + ] + }, + { + "id": "9777d2795825c36e", + "type": "function", + "z": "172866a9820f49e3", + "name": "Cal time left to do calibration charge", + "func": "// Get minutes per day\nvar minutes_per_day = 1440;\n\n// Cal maximum minutes without calibration charge\nvar max_days_wihthout_EOC = 7;\nmax_minutes_without_EOC = max_days_wihthout_EOC*minutes_per_day;\n\n// Get LastEOC for the last EOC reached battery\nLastEOC =global.get('LastEOC','file');\n\n// Get EOC reached status for all batteries\nEOCReached_list = global.get('EOCReached','file');\n\nif (EOCReached_list.every(item => item === true)){// all batteries reach EOC\n msg.payload=0; // stop calibration charge\n return msg;\n}\n\n// Get calibration charge time (hh:mm) from user setting via Node Red Dashboard\nif(global.get('calibration_charge_start_time','file')!= null){\n minutes_from_midnight_calibration_charge = Math.floor(global.get('calibration_charge_start_time','file'));\n}else{\n minutes_from_midnight_calibration_charge = 32400000;//default value from 09:00\n}\n\n// Get calibration charge weekday from user setting via Node Red Dashboard\nif(global.get('calibration_charge_start_weekday','file')!=null){\n weekday_calibration_charge = global.get('calibration_charge_start_weekday','file');\n}else{\n weekday_calibration_charge = 0;//default value from Sunday\n}\n\n// Cal next calibration time\nfunction nextScheduleDay(adate, w) {\n var daysToAdd = (w - adate.getDay() + 7) % 7;\n var nextDate = new Date(adate);\n nextDate.setDate(adate.getDate() + daysToAdd);\n nextDate.setHours(0);\n nextDate.setMinutes(0);\n nextDate.setSeconds(0);\n return nextDate;\n}\n\n// Main function\nfunction chargeWindows(currentTime, weekday, starttime, LastEOC) {\n var d1 = nextScheduleDay(currentTime, weekday);\n\n // Convert starttime to a Date object\n var startTime = new Date(starttime);\n\n // Calculate the next ScheduleDay considering if the sum of timeToTOC and timeLeftMinutes is less than 7 days\n var timeLeftMinutes = Math.floor(((d1.getTime() + starttime)/1000-LastEOC)/60);\n\n if ( timeLeftMinutes < 6* minutes_per_day) {\n // If the sum is less than 7 days and larger than 1 day, push next ScheduleDay to next week\n d1.setDate(d1.getDate() + 7);\n }\n\n var startDateTimeD1 = new Date(d1);\n startDateTimeD1.setHours(startTime.getUTCHours(), startTime.getUTCMinutes(), 0, 0);\n\n // Check if current time is within the charge window\n if (currentTime < startDateTimeD1) {\n // Calculate time left until the end of the window\n var timeLeftMillis = startDateTimeD1 - currentTime;\n var daysLeft = Math.floor(timeLeftMillis / (1000 * 60 * 60 * 24));\n var hoursLeft = Math.floor((timeLeftMillis % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60));\n var minutesLeft = Math.ceil((timeLeftMillis % (1000 * 60 * 60)) / (1000 * 60));\n \n days_str = (daysLeft > 0) ? (daysLeft + \"d\") : \"\";\n hours_str = (hoursLeft > 0) ? (hoursLeft + \"h\") : \"\";\n minutes_str = (minutesLeft > 0) ? 
(minutesLeft + \"m\") : \"\";\n \n time_to_calibration_str = days_str+hours_str+minutes_str;\n\n return time_to_calibration_str; // still some time left to do calibration charge\n } else {\n return 1; // it's time to do calibration charge\n }\n}\n\nvar today = new Date(); // Assuming today's date\nvar timeLeft = chargeWindows(today, weekday_calibration_charge, minutes_from_midnight_calibration_charge, LastEOC);\n\nmsg.payload = timeLeft;\nreturn msg;", + "outputs": 1, + "timeout": "", + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 1280, + "y": 200, + "wires": [ + [ + "2f1db37da138e532", + "f020fbc3bd8d0451" + ] + ] + }, + { + "id": "68751648fdf25a8c", + "type": "function", + "z": "172866a9820f49e3", + "name": "Turn off calibration charge now button when EOC", + "func": "if(global.get('start_calibration_charge_now_button','file')==true)\n{\n msg.payload = false;\n}else{\n msg.payload = false;\n}\n\nreturn msg;\n\n", + "outputs": 1, + "timeout": "", + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 2090, + "y": 140, + "wires": [ + [ + "0a24f718e758d2a9" + ] + ] + }, + { + "id": "26b43f1b059c7c77", + "type": "function", + "z": "172866a9820f49e3", + "name": "Check whether the calibration charge now button is on", + "func": "if(global.get('start_calibration_charge_now_button','file')==true)\n{\n text= \"Calibration charge now\";\n}else{\n text = msg.payload;\n}\nmsg.payload = text;\n\nreturn msg;\n", + "outputs": 1, + "timeout": 0, + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 2620, + "y": 660, + "wires": [ + [ + "f43f9fdbb0ed174c" + ] + ] + }, + { + "id": "520ed5df0f889c12", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::LastEOC", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 970, + "y": 260, + "wires": [ + [ + "9777d2795825c36e" + ] + ] + }, + { + "id": "652a29107ef4c403", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::start_calibration_charge_now", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2580, + "y": 460, + "wires": [ + [ + "f5bba4f7a2152d29" + ] + ] + }, + { + "id": "1f497b3cf623d247", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::start_calibration_charge_now", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2980, + "y": 720, + "wires": [ + [ + "edf42d9f5ebd6332" + ] + ] + }, + { + "id": "281c467e3b39527d", + "type": "change", + "z": "172866a9820f49e3", + "name": "set global.start_calibration_charge_now to 0", + "rules": [ + { + "t": "set", + "p": "payload", + "pt": "msg", + "to": "0", + "tot": "num" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2590, + "y": 720, + "wires": [ + [ + "1f497b3cf623d247" + ] + ] + }, + { + "id": "1c9f62ea01d98941", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::start_calibration_charge_now", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 2420, + "y": 240, + "wires": [ + [ + "8a1706f0a4eed2ac" + ] + ] + }, 
+ { + "id": "8a1706f0a4eed2ac", + "type": "debug", + "z": "172866a9820f49e3", + "name": "Debug for calibration", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 2720, + "y": 240, + "wires": [] + }, + { + "id": "f5bba4f7a2152d29", + "type": "debug", + "z": "172866a9820f49e3", + "name": "Debug for calibration", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 2860, + "y": 460, + "wires": [] + }, + { + "id": "edf42d9f5ebd6332", + "type": "debug", + "z": "172866a9820f49e3", + "name": "Debug for calibration", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 3300, + "y": 720, + "wires": [] + }, + { + "id": "8a45f7d38dde66b3", + "type": "victron-input-custom", + "z": "172866a9820f49e3", + "service": "com.victronenergy.battery/1", + "path": "/EOCReached", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/EOCReached", + "name": "/EOCReached", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 420, + "y": 400, + "wires": [ + [ + "45c35e2b5c560d3e" + ] + ] + }, + { + "id": "45c35e2b5c560d3e", + "type": "function", + "z": "172866a9820f49e3", + "name": "Parse EOCReached list", + "func": "EOCReched_list = [];\n\nEOCReached = msg.payload;\n\n//equals to the number of battery\nn=EOCReached.length;\n\nfor (i = 0; i < n; i++) {\n EOCReched_list.push(EOCReached[i][1][0]);\n}\n\nmsg.payload = EOCReched_list;\nreturn msg;", + "outputs": 1, + "timeout": 0, + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 710, + "y": 400, + "wires": [ + [ + "ee9ba7412a0f3ebf" + ] + ] + }, + { + "id": "ee9ba7412a0f3ebf", + "type": "change", + "z": "172866a9820f49e3", + "name": "", + "rules": [ + { + "t": "set", + "p": "#:(file)::EOCReached", + "pt": "global", + "to": "payload", + "tot": "msg" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 950, + "y": 400, + "wires": [ + [ + "9777d2795825c36e" + ] + ] + }, + { + "id": "2e3be5eb090fdd73", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "name": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 200, + "wires": [ + [ + "4f65d582fda98737" + ] + ] + }, + { + "id": "4f65d582fda98737", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "min_soc", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "min_soc", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 680, + "y": 200, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "ef77ee764778ffbe", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.battery/1", + "path": "/Dc/0/Power", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "com.victronenergy.battery 
(1)" + }, + "pathObj": { + "path": "/Dc/0/Power", + "name": "/Dc/0/Power", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 200, + "y": 260, + "wires": [ + [ + "11f86fe39b580847" + ] + ] + }, + { + "id": "11f86fe39b580847", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "battery_power", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "battery_power", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 580, + "y": 260, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "464455af5139ee7f", + "type": "join", + "z": "58aeeaac02a3a4c7", + "name": "", + "mode": "custom", + "build": "object", + "property": "payload", + "propertyType": "msg", + "key": "topic", + "joiner": "\\n", + "joinerType": "str", + "accumulate": true, + "timeout": "", + "count": "19", + "reduceRight": false, + "reduceExp": "", + "reduceInit": "", + "reduceInitType": "", + "reduceFixup": "", + "x": 980, + "y": 420, + "wires": [ + [ + "4f18dc53efd7160c" + ] + ] + }, + { + "id": "709a15b9236b4bb4", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "L1_AcPowerSetpoint", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "L1_AcPowerSetpoint", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 580, + "y": 760, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "50510b712eae0e61", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "L2_AcPowerSetpoint", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "L2_AcPowerSetpoint", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 580, + "y": 840, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "3cef16a03dd3e544", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "L3_AcPowerSetpoint", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "L3_AcPowerSetpoint", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 580, + "y": 920, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "d0ef21f7ad8bb7e8", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.battery/1", + "path": "/Info/MaxChargePower", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "com.victronenergy.battery (1)" + }, + "pathObj": { + "path": "/Info/MaxChargePower", + "name": "/Info/MaxChargePower", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 240, + "y": 320, + "wires": [ + [ + "3d579fc76adc2ef1" + ] + ] + }, + { + "id": "59bc7383091afa19", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/MaxChargePower", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/MaxChargePower", + "name": "/Settings/CGwacs/MaxChargePower", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 270, + "y": 380, + "wires": [ + [ + "85210ce266308ef5" + ] + ] + }, + { + "id": "3d579fc76adc2ef1", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "max_battery_charge_power", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "max_battery_charge_power", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 620, + 
"y": 320, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "85210ce266308ef5", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "max_configured_charge_power", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "max_configured_charge_power", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 650, + "y": 380, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "7630c78072545444", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "lowest_battery_temperature", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "lowest_battery_temperature", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 640, + "y": 440, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "b02c86727cdd38f1", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.battery/1", + "path": "/LowestSoc", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/LowestSoc", + "name": "/LowestSoc", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 170, + "y": 140, + "wires": [ + [ + "0f2babd5674b678d" + ] + ] + }, + { + "id": "0f2babd5674b678d", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "lowest_soc", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "lowest_soc", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 510, + "y": 140, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "077dbfcbe7cdd57c", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.battery/1", + "path": "/NumOfBatteries", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/NumOfBatteries", + "name": "/NumOfBatteries", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 190, + "y": 80, + "wires": [ + [ + "350ade5849cfef37" + ] + ] + }, + { + "id": "350ade5849cfef37", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "num_batteries", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "num_batteries", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 460, + "y": 80, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "2066c1a0f48b4e2c", + "type": "function", + "z": "58aeeaac02a3a4c7", + "name": "Get corrected inverter power setpoint", + "func": "msg.payload = msg.payload.inverter_setpower;\nreturn msg;", + "outputs": 1, + "timeout": "", + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 2690, + "y": 380, + "wires": [ + [ + "cff4cb998f39466b", + "4ad6bb1ca25ee32a", + "44d34af9d1e68b87", + "d4ab83f8c5fbbd6d" + ] + ] + }, + { + "id": "005a521093d8c181", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.battery/1", + "path": "/Dc/0/LowestTemperature", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/Dc/0/LowestTemperature", + "name": "/Dc/0/LowestTemperature", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 220, + "y": 440, + "wires": [ + [ + "7630c78072545444" + ] + ] + }, + { + "id": "49537ad0e4d9df8a", + "type": "victron-output-custom", + "z": "58aeeaac02a3a4c7", + 
"service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/Hub4Mode", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/Hub4Mode", + "name": "/Settings/CGwacs/Hub4Mode", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 3110, + "y": 600, + "wires": [] + }, + { + "id": "b4b77872b62e2859", + "type": "function", + "z": "58aeeaac02a3a4c7", + "name": "Get ESS mode", + "func": "msg.payload = msg.payload.ess_mode;\nreturn msg;", + "outputs": 1, + "timeout": "", + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 2620, + "y": 600, + "wires": [ + [ + "49537ad0e4d9df8a", + "97a1f6a992a29081" + ] + ] + }, + { + "id": "97a1f6a992a29081", + "type": "debug", + "z": "58aeeaac02a3a4c7", + "name": "Debug for ESS mode", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 3000, + "y": 680, + "wires": [] + }, + { + "id": "ab6599f35fcbdbe6", + "type": "change", + "z": "58aeeaac02a3a4c7", + "name": "PVs_Power", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "PVs_Power", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1320, + "wires": [ + [ + "464455af5139ee7f" + ] + ] + }, + { + "id": "addbeff162b8a3af", + "type": "victron-input-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.system/0", + "path": "/Dc/Pv/Power", + "serviceObj": { + "service": "com.victronenergy.system/0", + "name": "com.victronenergy.system (0)" + }, + "pathObj": { + "path": "/Dc/Pv/Power", + "name": "/Dc/Pv/Power", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "roundValues": "0", + "x": 210, + "y": 1320, + "wires": [ + [ + "ab6599f35fcbdbe6" + ] + ] + }, + { + "id": "f3cd46b93ff1e4f8", + "type": "victron-output-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/BatteryLife/State", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/BatteryLife/State", + "name": "/Settings/CGwacs/BatteryLife/State", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 3130, + "y": 740, + "wires": [] + }, + { + "id": "4557b1dedb6f1222", + "type": "comment", + "z": "58aeeaac02a3a4c7", + "name": "Battery Related Info", + "info": "", + "x": 130, + "y": 20, + "wires": [] + }, + { + "id": "7f963f0a2d2c6e74", + "type": "comment", + "z": "58aeeaac02a3a4c7", + "name": "Ac Power Setpoint", + "info": "", + "x": 130, + "y": 720, + "wires": [] + }, + { + "id": "bff640c85ce35e30", + "type": "comment", + "z": "58aeeaac02a3a4c7", + "name": "Ac Out", + "info": "", + "x": 90, + "y": 1000, + "wires": [] + }, + { + "id": "7538736cb44e6df3", + "type": "comment", + "z": "58aeeaac02a3a4c7", + "name": "PV Power", + "info": "", + "x": 100, + "y": 1260, + "wires": [] + }, + { + "id": "2d500c6d04672fb7", + "type": "victron-output-custom", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/MaxDischargePower", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/MaxDischargePower", + "name": "/Settings/CGwacs/MaxDischargePower", + "type": "number" + }, + "name": "", + 
"onlyChanges": false, + "x": 3140, + "y": 200, + "wires": [] + }, + { + "id": "376d4eea80df1146", + "type": "debug", + "z": "58aeeaac02a3a4c7", + "name": "Debug for max inverter discharge power", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 3060, + "y": 260, + "wires": [] + }, + { + "id": "e121ac014fa242c5", + "type": "function", + "z": "58aeeaac02a3a4c7", + "name": "Get max inverter discharge power", + "func": "msg.payload = msg.payload.max_inverter_discharge_power;\nreturn msg;", + "outputs": 1, + "timeout": "", + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 2680, + "y": 260, + "wires": [ + [ + "2d500c6d04672fb7", + "376d4eea80df1146" + ] + ] + }, + { + "id": "d2d01224ca8ace19", + "type": "comment", + "z": "58aeeaac02a3a4c7", + "name": "Max Discharge Power of Inverter(Battery+PV)", + "info": "", + "x": 3070, + "y": 160, + "wires": [] + }, + { + "id": "9b93fb5a4717969a", + "type": "function", + "z": "58aeeaac02a3a4c7", + "name": "Battery Controller", + "func": "// get inverter num of phases\nif(msg.payload.num_phases == null){\n num_phases = 10000000000;// mimic to make power setpoint be 0 when there is no inverter phase there \n}else{\n num_phases = msg.payload.num_phases;\n}\n\n// get max charge power\nif(msg.payload.max_configured_charge_power == null ||msg.payload.max_configured_charge_power<0){\n max_charge_power=msg.payload.max_battery_charge_power;\n}else{\n max_charge_power=Math.min(msg.payload.max_configured_charge_power,msg.payload.max_battery_charge_power);\n}\n\n// get battery number\nif(msg.payload.num_batteries == null){\n n_batteries = 0;\n}else{\n n_batteries = msg.payload.num_batteries;\n}\n\n// get current battery power\nif(msg.payload.battery_power == null){\n battery_power = 0;\n}else{\n battery_power = msg.payload.battery_power;\n}\n\n// get current power setpoint\nif(msg.payload.L1_AcPowerSetpoint == null){\n L1_AcPowerSetpoint = 0;\n}else{\n L1_AcPowerSetpoint=msg.payload.L1_AcPowerSetpoint;\n}\n\nif(msg.payload.L2_AcPowerSetpoint == null){\n L2_AcPowerSetpoint = 0;\n}else{\n L2_AcPowerSetpoint=msg.payload.L2_AcPowerSetpoint;\n}\n\nif(msg.payload.L3_AcPowerSetpoint == null){\n L3_AcPowerSetpoint = 0;\n}else{\n L3_AcPowerSetpoint=msg.payload.L3_AcPowerSetpoint;\n}\n\ninverter_power_setpoint= L1_AcPowerSetpoint+L2_AcPowerSetpoint+L3_AcPowerSetpoint;\n\n// get AC Out whihc is critical loads\nif(msg.payload.L1_AC_Out == null ||msg.payload.L2_AC_Out == null || msg.payload.L3_AC_Out == null){\n AC_out=0;\n}else{\n AC_out = msg.payload.L1_AC_Out + msg.payload.L2_AC_Out+msg.payload.L3_AC_Out;\n}\n\n// get PV production\nif(msg.payload.PVs_Power == null){\n PV_production = 0;\n}else{\n PV_production = msg.payload.PVs_Power;\n}\n\n// cal calculated max inverter power based on limb strings<=1 and DC Bus voltage >=44V when discharging, further details in flow 3\nconfigured_max_inverter_power = num_phases*3000;//3000W for each phase\nmax_discharge_current_batteries = 15*(5*n_batteries-msg.payload.num_limb_string);\nDC_BUS_Voltage = msg.payload.DC_BUS_Voltage;\n\nif(44.1=2 limb strings", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 2120, + "y": 540, + "wires": [] + }, + { + "id": "fcb98057e9e5a076", + "type": "debug", + "z": "58aeeaac02a3a4c7", + "name": "num_limb_string", + 
"active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload.num_limb_string", + "targetType": "msg", + "statusVal": "payload.num_limb_string", + "statusType": "auto", + "x": 1540, + "y": 480, + "wires": [] + }, + { + "id": "933a8eed519beb7a", + "type": "ui_text", + "z": "58aeeaac02a3a4c7", + "group": "e177392401620838", + "order": 3, + "width": 0, + "height": 0, + "name": "", + "label": "Battery Health", + "format": "{{msg.payload.battery_health}}", + "layout": "row-spread", + "className": "", + "style": false, + "font": "Arial,Arial,Helvetica,sans-serif", + "fontSize": "20", + "color": "#000000", + "x": 1540, + "y": 360, + "wires": [] + }, + { + "id": "4f18dc53efd7160c", + "type": "function", + "z": "58aeeaac02a3a4c7", + "name": "get_total_number_of_limb_strings", + "func": "let total_num_limb_string=0;\nlimb_string_list = msg.payload.limb_string_list;\nmain_switch_state_list = msg.payload.main_switch_state;\nbattery_health = [];\nif (limb_string_list == null || main_switch_state_list == null){\n msg.payload.battery_health = \"BMS connection lost!\";\n msg.payload.num_limb_string = -1\n return msg;\n}\n\nfor (let i = 0; i < limb_string_list.length; i++) {\n num_limb_string = limb_string_list[i][1][0];\n main_switch_state=main_switch_state_list[i][1][0];\n \n if(num_limb_string>1){\n total_num_limb_string = -1;//if there are more than 1 limb string in a battery, should give alarm to stop use this battery\n battery_health.push(\"Battery \"+(i+2)+\" has more than 1 limb string!\");\n }\n \n if(main_switch_state == false){\n total_num_limb_string = -1;//no meaning here, just used for stopping calculating max discharge power\n battery_health.push(\"Battery \"+(i+2)+\" has main switch open!\");\n }\n \n if(num_limb_string<=1){\n total_num_limb_string+=num_limb_string;\n battery_health.push(\"Battery \"+(i+2)+\" has \" + num_limb_string+ \" limb strings.\");\n }\n}\nmsg.payload.num_limb_string = total_num_limb_string;\nmsg.payload.battery_health = battery_health;\n\nreturn msg;", + "outputs": 1, + "timeout": 0, + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 1240, + "y": 420, + "wires": [ + [ + "fe54eaf69a7fe10f", + "933a8eed519beb7a", + "fcb98057e9e5a076" + ] + ] + }, + { + "id": "c40f347a74b9ef10", + "type": "ui_text", + "z": "58aeeaac02a3a4c7", + "group": "e177392401620838", + "order": 1, + "width": 0, + "height": 0, + "name": "Controller Info", + "label": "Controller Info", + "format": "{{msg.payload.controller_info}}", + "layout": "row-spread", + "className": "", + "style": false, + "font": "Arial,Arial,Helvetica,sans-serif", + "fontSize": "20", + "color": "#000000", + "x": 2620, + "y": 160, + "wires": [] + }, + { + "id": "fbdb4518063f2fd0", + "type": "victron-input-vebus", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Ac/NumberOfPhases", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Ac/NumberOfPhases", + "type": "float", + "name": "Phase count" + }, + "name": "", + "onlyChanges": false, + "x": 190, + "y": 1560, + "wires": [ + [ + "e8fb7b9a4581ed30" + ] + ] + }, + { + "id": "52f0fa857f1d3a3c", + "type": "debug", + "z": "58aeeaac02a3a4c7", + "name": "Debug for controller info", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload.controller_info", + "targetType": "msg", + "statusVal": "payload.controller_info", + "statusType": "auto", + "x": 2650, + "y": 200, 
+ "wires": [] + }, + { + "id": "92ad8226f257785c", + "type": "victron-input-vebus", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Dc/0/Voltage", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Dc/0/Voltage", + "type": "float", + "name": "Battery voltage (V)" + }, + "name": "", + "onlyChanges": false, + "roundValues": "0", + "x": 210, + "y": 660, + "wires": [ + [ + "5b2a12a31ab376c0" + ] + ] + }, + { + "id": "02257e9416d1a7fa", + "type": "victron-input-ess", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Hub4/L1/AcPowerSetpoint", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Hub4/L1/AcPowerSetpoint", + "type": "integer", + "name": "AC Power L1 setpoint (W)" + }, + "name": "", + "onlyChanges": false, + "x": 230, + "y": 760, + "wires": [ + [ + "709a15b9236b4bb4" + ] + ] + }, + { + "id": "f2117fb0ac8584e0", + "type": "victron-input-ess", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Hub4/L2/AcPowerSetpoint", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Hub4/L2/AcPowerSetpoint", + "type": "integer", + "name": "AC Power L2 setpoint (W)" + }, + "name": "", + "onlyChanges": false, + "x": 230, + "y": 840, + "wires": [ + [ + "50510b712eae0e61" + ] + ] + }, + { + "id": "4c40824190b88353", + "type": "victron-input-ess", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Hub4/L3/AcPowerSetpoint", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Hub4/L3/AcPowerSetpoint", + "type": "integer", + "name": "AC Power L3 setpoint (W)" + }, + "name": "", + "onlyChanges": false, + "x": 230, + "y": 920, + "wires": [ + [ + "3cef16a03dd3e544" + ] + ] + }, + { + "id": "d4ab83f8c5fbbd6d", + "type": "victron-output-ess", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Hub4/L3/AcPowerSetpoint", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Hub4/L3/AcPowerSetpoint", + "type": "integer", + "name": "AC Power L3 setpoint (W)", + "writable": true + }, + "name": "", + "onlyChanges": false, + "x": 3090, + "y": 460, + "wires": [] + }, + { + "id": "44d34af9d1e68b87", + "type": "victron-output-ess", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Hub4/L2/AcPowerSetpoint", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Hub4/L2/AcPowerSetpoint", + "type": "integer", + "name": "AC Power L2 setpoint (W)", + "writable": true + }, + "name": "", + "onlyChanges": false, + "x": 3090, + "y": 400, + "wires": [] + }, + { + "id": "4ad6bb1ca25ee32a", + "type": "victron-output-ess", + "z": "58aeeaac02a3a4c7", + "service": "com.victronenergy.vebus/276", + "path": "/Hub4/L1/AcPowerSetpoint", + "serviceObj": { + "service": "com.victronenergy.vebus/276", + "name": "MultiGrid 48/3000/35-50" + }, + "pathObj": { + "path": "/Hub4/L1/AcPowerSetpoint", + "type": "integer", + "name": "AC Power L1 setpoint (W)", + "writable": true + }, + "name": "", + "onlyChanges": false, + "x": 3090, + "y": 340, + "wires": [] + }, + { + "id": "283f5123601abd28", + "type": 
"debug", + "z": "58aeeaac02a3a4c7", + "name": "power_to_hold_min_soc", + "active": false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload.test", + "targetType": "msg", + "statusVal": "payload.test", + "statusType": "auto", + "x": 2360, + "y": 160, + "wires": [] + }, + { + "id": "bf31818b5561403e", + "type": "comment", + "z": "449f3115316b1767", + "name": "Parse Alarms: 26", + "info": "", + "x": 240, + "y": 80, + "wires": [] + }, + { + "id": "6cfdcab40b355672", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/CME", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/CME", + "name": "/AlarmFlags/CME", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 920, + "wires": [ + [ + "48d69f82af0f511b" + ] + ] + }, + { + "id": "0d3303a79e289d82", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/DATA", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/DATA", + "name": "/AlarmFlags/DATA", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 1480, + "wires": [ + [ + "b0769222c0c81594" + ] + ] + }, + { + "id": "563074cf40579013", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/FUSE", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/FUSE", + "name": "/AlarmFlags/FUSE", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 680, + "wires": [ + [ + "ea496aafa62fca89" + ] + ] + }, + { + "id": "51b00c9cd2fbe1b1", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/HEBT", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/HEBT", + "name": "/AlarmFlags/HEBT", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1580, + "wires": [ + [ + "2687afa4a3fb4fdd" + ] + ] + }, + { + "id": "70e342a4485f1343", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/HTFS", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/HTFS", + "name": "/AlarmFlags/HTFS", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1440, + "wires": [ + [ + "63715023d827fc1e" + ] + ] + }, + { + "id": "23c6de3f75ca40f7", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/HTRE", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/HTRE", + "name": "/AlarmFlags/HTRE", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 740, + "wires": [ + [ + "48a80e06931ecc03" + ] + ] + }, + { + "id": "22e2996f04a6a03b", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/HWEM", + "serviceObj": { + "service": 
"com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/HWEM", + "name": "/AlarmFlags/HWEM", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1040, + "wires": [ + [ + "8b5302ef345a5ade" + ] + ] + }, + { + "id": "9ae9fbd4b39b8fd9", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/HWFL", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/HWFL", + "name": "/AlarmFlags/HWFL", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 980, + "wires": [ + [ + "c6162a8cbc2fa1dc" + ] + ] + }, + { + "id": "cc6841127ddbf006", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/IDM2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/IDM2", + "name": "/AlarmFlags/IDM2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 500, + "wires": [ + [ + "5da7a58078b2d992" + ] + ] + }, + { + "id": "d60a6034a1e15ff7", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/ISOB", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/ISOB", + "name": "/AlarmFlags/ISOB", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 560, + "wires": [ + [ + "145932ec555401dd" + ] + ] + }, + { + "id": "1617808449c51a48", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/LMPA", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/LMPA", + "name": "/AlarmFlags/LMPA", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1540, + "wires": [ + [ + "47401c80b95f2d5f" + ] + ] + }, + { + "id": "211ea82f9826409d", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/MID2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/MID2", + "name": "/AlarmFlags/MID2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 1380, + "wires": [ + [ + "52aa4d1b9994d41f" + ] + ] + }, + { + "id": "324aea697f08ab86", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/MSWE", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/MSWE", + "name": "/AlarmFlags/MSWE", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 620, + "wires": [ + [ + "7020aeefc2b065f7" + ] + ] + }, + { + "id": "2aef1941d39c2783", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/STRE", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/STRE", + "name": "/AlarmFlags/STRE", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 
320, + "y": 860, + "wires": [ + [ + "ea1c84dce431e761" + ] + ] + }, + { + "id": "73fde116548e1d39", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/TCPE", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/TCPE", + "name": "/AlarmFlags/TCPE", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 800, + "wires": [ + [ + "f5dd3b77cf306c42" + ] + ] + }, + { + "id": "c5eaf605d4d54f6d", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/TaM2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/TaM2", + "name": "/AlarmFlags/TaM2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 200, + "wires": [ + [ + "f0bd6424378e6521" + ] + ] + }, + { + "id": "3e3a5c9dae5a45c8", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/Tam", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/Tam", + "name": "/AlarmFlags/Tam", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 140, + "wires": [ + [ + "651cbb5a7a3b7c86" + ] + ] + }, + { + "id": "42e7c69a41b04403", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/TbM2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/TbM2", + "name": "/AlarmFlags/TbM2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 320, + "wires": [ + [ + "940370b80f41928c" + ] + ] + }, + { + "id": "5e766a107066d90e", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/Tbm", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/Tbm", + "name": "/AlarmFlags/Tbm", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 260, + "wires": [ + [ + "fccbcd60339ffc71" + ] + ] + }, + { + "id": "5d32609a7b711e03", + "type": "function", + "z": "449f3115316b1767", + "name": "Parse_alarms_and_warnings", + "func": "//battery health list\nbattery_health = [];\n\n//alarm list initialization\nTam_list = [];\nTaM2_list=[];\nTbm_list=[];\nTbM2_list=[];\nVBm2_list=[];\nVBM2_list=[];\nIDM2_list=[];\nISOB_list=[];\nMSWE_list=[];\nFUSE_list=[];\nHTRE_list=[];\nTCPE_list=[];\nSTRE_list=[];\nCME_list=[];\nHWFL_list=[];\nHWEM_list=[];\nThM_list=[];\nvsm2_list=[];\nvsM2_list=[];\niCM2_list=[];\niDM2_list=[];\nMID2_list=[];\nHTFS_list=[];\nDATA_list=[];\nLMPA_list=[];\nHEBT_list=[];\n\n//warning list initialization\nTaM1_list=[];\nTbM1_list=[];\nVBm1_list=[];\nVBM1_list=[];\nIDM1_list=[];\nvsm1_list=[];\nvsM1_list=[];\niCM1_list=[];\niDM1_list=[];\nMID1_list=[];\nBLPW_list=[];\nCCBF_list=[];\nAh_W_list=[];\nMPMM_list=[];\nTCdi_list=[];\nLMPW_list=[];\nTOCW_list=[];\n\n//read alarm data\nalarm_Tam = msg.payload.Tam;\nalarm_TaM2 = msg.payload.TaM2;\nalarm_Tbm = msg.payload.Tbm;\nalarm_TbM2 = msg.payload.TbM2;\nalarm_VBm2 = msg.payload.VBm2;\nalarm_VBM2 = msg.payload.VBM2;\nalarm_IDM2 = 
msg.payload.IDM2;\nalarm_ISOB = msg.payload.ISOB;\nalarm_MSWE = msg.payload.MSWE;\nalarm_FUSE = msg.payload.FUSE;\nalarm_HTRE = msg.payload.HTRE;\nalarm_TCPE = msg.payload.TCPE;\nalarm_STRE = msg.payload.STRE;\nalarm_CME = msg.payload.CME;\nalarm_HWFL = msg.payload.HWFL;\nalarm_HWEM = msg.payload.HWEM;\nalarm_ThM = msg.payload.ThM;\nalarm_vsm2 = msg.payload.vsm2;\nalarm_vsM2 = msg.payload.vsM2;\nalarm_iCM2 = msg.payload.iCM2;\nalarm_iDM2 = msg.payload.iDM2;\nalarm_MID2 = msg.payload.MID2;\nalarm_HTFS = msg.payload.HTFS;\nalarm_DATA = msg.payload.DATA;\nalarm_LMPA = msg.payload.LMPA;\nalarm_HEBT = msg.payload.HEBT;\n\n//read warning data\nwar_TaM1=msg.payload.TaM1;\nwar_TbM1=msg.payload.TbM1;\nwar_VBm1=msg.payload.VBm1;\nwar_VBM1=msg.payload.VBM1;\nwar_IDM1=msg.payload.IDM1;\nwar_vsm1=msg.payload.vsm1;\nwar_vsM1=msg.payload.vsM1;\nwar_iCM1=msg.payload.iCM1;\nwar_iDM1=msg.payload.iDM1;\nwar_MID1=msg.payload.MID1;\nwar_BLPW=msg.payload.BLPW;\nwar_CCBF=msg.payload.CCBF;\nwar_Ah_W=msg.payload.Ah_W;\nwar_MPMM=msg.payload.MPMM;\nwar_TCdi=msg.payload.TCdi;\nwar_LMPW=msg.payload.LMPW;\nwar_TOCW=msg.payload.TOCW;\n\n// equals the number of batteries\nif(alarm_Tam == null){\n msg.payload = \"Battery lost!\";\n return msg;\n}else{\n n=alarm_Tam.length;\n}\n\nfor (i = 0; i < n; i++) {\n Tam_list.push(alarm_Tam[i][1][0]);\n TaM2_list.push(alarm_TaM2[i][1][0]);\n Tbm_list.push(alarm_Tbm[i][1][0]);\n TbM2_list.push(alarm_TbM2[i][1][0]);\n VBm2_list.push(alarm_VBm2[i][1][0]);\n VBM2_list.push(alarm_VBM2[i][1][0]);\n IDM2_list.push(alarm_IDM2[i][1][0]);\n ISOB_list.push(alarm_ISOB[i][1][0]);\n MSWE_list.push(alarm_MSWE[i][1][0]);\n FUSE_list.push(alarm_FUSE[i][1][0]);\n HTRE_list.push(alarm_HTRE[i][1][0]);\n TCPE_list.push(alarm_TCPE[i][1][0]);\n STRE_list.push(alarm_STRE[i][1][0]);\n CME_list.push(alarm_CME[i][1][0]);\n HWFL_list.push(alarm_HWFL[i][1][0]);\n HWEM_list.push(alarm_HWEM[i][1][0]);\n ThM_list.push(alarm_ThM[i][1][0]);\n vsm2_list.push(alarm_vsm2[i][1][0]);\n vsM2_list.push(alarm_vsM2[i][1][0]);\n iCM2_list.push(alarm_iCM2[i][1][0]);\n iDM2_list.push(alarm_iDM2[i][1][0]);\n MID2_list.push(alarm_MID2[i][1][0]);\n HTFS_list.push(alarm_HTFS[i][1][0]);\n DATA_list.push(alarm_DATA[i][1][0]);\n LMPA_list.push(alarm_LMPA[i][1][0]);\n HEBT_list.push(alarm_HEBT[i][1][0]);\n \n TaM1_list.push(war_TaM1[i][1][0]);\n TbM1_list.push(war_TbM1[i][1][0]);\n VBm1_list.push(war_VBm1[i][1][0]);\n VBM1_list.push(war_VBM1[i][1][0]);\n IDM1_list.push(war_IDM1[i][1][0]);\n vsm1_list.push(war_vsm1[i][1][0]);\n vsM1_list.push(war_vsM1[i][1][0]);\n iCM1_list.push(war_iCM1[i][1][0]);\n iDM1_list.push(war_iDM1[i][1][0]);\n MID1_list.push(war_MID1[i][1][0]);\n BLPW_list.push(war_BLPW[i][1][0]);\n CCBF_list.push(war_CCBF[i][1][0]);\n Ah_W_list.push(war_Ah_W[i][1][0]);\n MPMM_list.push(war_MPMM[i][1][0]);\n TCdi_list.push(war_TCdi[i][1][0]);\n LMPW_list.push(war_LMPW[i][1][0]);\n TOCW_list.push(war_TOCW[i][1][0]);\n}\n\nif(Tam_list.includes(true)){\n battery_health.push(\"Recoverable: BMS temperature too low\");\n}\n\nif(TaM2_list.includes(true)){\n battery_health.push(\"Recoverable: BMS temperature too high\");\n}\n\nif(Tbm_list.includes(true)){\n battery_health.push(\"Recoverable: Battery temperature too low\");\n}\n\nif(TbM2_list.includes(true)){\n battery_health.push(\"Recoverable: Battery temperature too high\");\n}\n\nif(VBm2_list.includes(true)){\n battery_health.push(\"Recoverable: Bus voltage too low\");\n}\n\nif(VBM2_list.includes(true)){\n battery_health.push(\"Recoverable: Bus voltage too 
high\");\n}\n\nif(IDM2_list.includes(true)){\n battery_health.push(\"Recoverable: Discharge current too high\");\n}\n\nif(ISOB_list.includes(true)){\n battery_health.push(\"Unrecoverable: Electrical insulation failure\");\n}\n\nif(MSWE_list.includes(true)){\n battery_health.push(\"Unrecoverable: Main switch failure\");\n}\n\nif(FUSE_list.includes(true)){\n battery_health.push(\"Unrecoverable: Main fuse blown\");\n}\n\nif(HTRE_list.includes(true)){\n battery_health.push(\"Recoverable: Battery failed to warm up\");\n}\n\nif(TCPE_list.includes(true)){\n battery_health.push(\"Unrecoverable: Temperature sensor failure\");\n}\n\nif(STRE_list.includes(true)){\n battery_health.push(\"Recoverable: Voltage measurement circuit fails\");\n}\n\nif(CME_list.includes(true)){\n battery_health.push(\"Recoverable: Current sensor failure\");\n} \n\nif(HWFL_list.includes(true)){\n battery_health.push(\"Recoverable: BMS hardware failure\");\n} \n\nif(HWEM_list.includes(true)){\n battery_health.push(\"Recoverable: Hardware protection tripped\");\n}\n\nif(ThM_list.includes(true)){\n battery_health.push(\"Recoverable: Heatsink temperature too high\");\n}\n\nif(vsm2_list.includes(true)){\n battery_health.push(\"Unrecoverable: Low string voltage failure\");\n}\n\nif(vsM2_list.includes(true)){\n battery_health.push(\"Recoverable: String voltage too high\");\n}\n\nif(iCM2_list.includes(true)){\n battery_health.push(\"Unrecoverable: Charge current too high\");\n}\n\nif(iDM2_list.includes(true)){\n battery_health.push(\"Recoverable: Discharge current too high\");\n} \n\nif(MID2_list.includes(true)){\n battery_health.push(\"Recoverable: String voltage unbalance too high\");\n} \n\nif(HTFS_list.includes(true)){\n battery_health.push(\"Unrecoverable: Heater Fuse Blown\");\n} \n\nif(DATA_list.includes(true)){\n battery_health.push(\"Unrecoverable: Parameters out of range\");\n} \n\nif(LMPA_list.includes(true)){\n battery_health.push(\"Unrecoverable: String voltages unbalance alarm\");\n} \n\nif(HEBT_list.includes(true)){\n battery_health.push(\"Recoverable: Loss of heartbeat\");\n} \n\nif(TaM1_list.includes(true)){\n battery_health.push(\"Warning: BMS temperature high\");\n} \n\nif(TbM1_list.includes(true)){\n battery_health.push(\"Warning: Battery temperature high\");\n} \n\nif(VBm1_list.includes(true)){\n battery_health.push(\"Warning: Bus voltage low\");\n} \n\nif(VBM1_list.includes(true)){\n battery_health.push(\"Warning: Bus voltage high\");\n} \n\nif(IDM1_list.includes(true)){\n battery_health.push(\"Warning: Discharge current high\");\n} \n\nif(vsm1_list.includes(true)){\n battery_health.push(\"Warning: String voltage too low\");\n} \n\nif(vsM1_list.includes(true)){\n battery_health.push(\"Warning: String voltage high\");\n}\n\nif(iCM1_list.includes(true)){\n battery_health.push(\"Warning: Charge current high\");\n} \n\nif(iDM1_list.includes(true)){\n battery_health.push(\"Warning: Discharge current high\");\n} \n\nif(MID1_list.includes(true)){\n battery_health.push(\"Warning: String voltages unbalanced\");\n} \n\nif(BLPW_list.includes(true)){\n battery_health.push(\"Warning: Not enough charging power on bus\");\n} \n\nif(CCBF_list.includes(true)){\n battery_health.push(\"Warning: Internal charger hardware failure\");\n} \n\nif(Ah_W_list.includes(true)){\n battery_health.push(\"Warning: String SOC low\");\n} \n\nif(MPMM_list.includes(true)){\n battery_health.push(\"Warning: Midpoint wiring problem\");\n} \n\nif(TCdi_list.includes(true)){\n battery_health.push(\"Warning: Temperature difference between strings 
high\");\n} \n\nif(LMPW_list.includes(true)){\n battery_health.push(\"Warning: String voltages unbalance warning\");\n} \n\nif(TOCW_list.includes(true)){\n battery_health.push(\"Warning: Top of Charge requested\");\n}\n\nif(battery_health.length === 0){\n battery_health.push(\"No warning and no alarm\");\n}\n\nmsg.payload = battery_health;\nreturn msg;", + "outputs": 1, + "timeout": 0, + "noerr": 0, + "initialize": "", + "finalize": "", + "libs": [], + "x": 1120, + "y": 1700, + "wires": [ + [ + "9950564581b5af4e", + "c8f9fbd1a718e92b" + ] + ] + }, + { + "id": "651cbb5a7a3b7c86", + "type": "change", + "z": "449f3115316b1767", + "name": "Tam", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "Tam", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 140, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "714cafa3bd1315f1", + "type": "join", + "z": "449f3115316b1767", + "name": "", + "mode": "custom", + "build": "object", + "property": "payload", + "propertyType": "msg", + "key": "topic", + "joiner": "\\n", + "joinerType": "str", + "accumulate": true, + "timeout": "", + "count": "43", + "reduceRight": false, + "reduceExp": "", + "reduceInit": "", + "reduceInitType": "", + "reduceFixup": "", + "x": 860, + "y": 1700, + "wires": [ + [ + "5d32609a7b711e03" + ] + ] + }, + { + "id": "f0bd6424378e6521", + "type": "change", + "z": "449f3115316b1767", + "name": "TaM2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "TaM2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 200, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "9950564581b5af4e", + "type": "ui_text", + "z": "449f3115316b1767", + "group": "e177392401620838", + "order": 4, + "width": 0, + "height": 0, + "name": "", + "label": "Warnings and Alarms", + "format": "{{msg.payload}}", + "layout": "row-spread", + "className": "", + "style": false, + "font": "Arial,Arial,Helvetica,sans-serif", + "fontSize": "20", + "color": "#000000", + "x": 1400, + "y": 1700, + "wires": [] + }, + { + "id": "fccbcd60339ffc71", + "type": "change", + "z": "449f3115316b1767", + "name": "Tbm", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "Tbm", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 260, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "b87eb54e5438f25d", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/VBm2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/VBm2", + "name": "/AlarmFlags/VBm2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 380, + "wires": [ + [ + "1935d7ec73a6e600" + ] + ] + }, + { + "id": "c7506cd384e34f9f", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/VBM2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/VBM2", + "name": "/AlarmFlags/VBM2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 440, + "wires": [ + [ + "da45a83ea0455f32" + ] + ] + }, + { + "id": "6892a20f1aa2b6e5", + "type": "victron-input-custom", + "z": 
"449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/ThM", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/ThM", + "name": "/AlarmFlags/ThM", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 1100, + "wires": [ + [ + "0eea322902b7925f" + ] + ] + }, + { + "id": "98888650ea5d2a96", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/vsm2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/vsm2", + "name": "/AlarmFlags/vsm2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 1160, + "wires": [ + [ + "d5c9a8c50fc8d0f4" + ] + ] + }, + { + "id": "c3f7ef97557d7615", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/vsM2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/vsM2", + "name": "/AlarmFlags/vsM2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 1220, + "wires": [ + [ + "69f4c090bfb3b6a7" + ] + ] + }, + { + "id": "8634dd2bb374e7dc", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/iCM2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/iCM2", + "name": "/AlarmFlags/iCM2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 1280, + "wires": [ + [ + "3a8599bc483c79aa" + ] + ] + }, + { + "id": "7fa84f8332c8fb96", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/AlarmFlags/iDM2", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/AlarmFlags/iDM2", + "name": "/AlarmFlags/iDM2", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 310, + "y": 1340, + "wires": [ + [ + "d274893e4a47892d" + ] + ] + }, + { + "id": "940370b80f41928c", + "type": "change", + "z": "449f3115316b1767", + "name": "TbM2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "TbM2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 320, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "1935d7ec73a6e600", + "type": "change", + "z": "449f3115316b1767", + "name": "VBm2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "VBm2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 380, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "da45a83ea0455f32", + "type": "change", + "z": "449f3115316b1767", + "name": "VBM2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "VBM2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 440, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "5da7a58078b2d992", + "type": "change", + "z": "449f3115316b1767", + "name": "IDM2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "IDM2", + "tot": "str" 
+ } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 500, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "145932ec555401dd", + "type": "change", + "z": "449f3115316b1767", + "name": "ISOB", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "ISOB", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 560, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "7020aeefc2b065f7", + "type": "change", + "z": "449f3115316b1767", + "name": "MSWE", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "MSWE", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 560, + "y": 620, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "ea496aafa62fca89", + "type": "change", + "z": "449f3115316b1767", + "name": "FUSE", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "FUSE", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 680, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "48a80e06931ecc03", + "type": "change", + "z": "449f3115316b1767", + "name": "HTRE", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "HTRE", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 740, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "f5dd3b77cf306c42", + "type": "change", + "z": "449f3115316b1767", + "name": "TCPE", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "TCPE", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 800, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "ea1c84dce431e761", + "type": "change", + "z": "449f3115316b1767", + "name": "STRE", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "STRE", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 860, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "48d69f82af0f511b", + "type": "change", + "z": "449f3115316b1767", + "name": "CME", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "CME", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 920, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "c6162a8cbc2fa1dc", + "type": "change", + "z": "449f3115316b1767", + "name": "HWFL", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "HWFL", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 980, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "8b5302ef345a5ade", + "type": "change", + "z": "449f3115316b1767", + "name": "HWEM", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "HWEM", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 560, + "y": 1040, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "0eea322902b7925f", + "type": "change", + "z": "449f3115316b1767", + "name": "ThM", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "ThM", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", 
+ "to": "", + "reg": false, + "x": 550, + "y": 1100, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "d5c9a8c50fc8d0f4", + "type": "change", + "z": "449f3115316b1767", + "name": "vsm2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "vsm2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1160, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "69f4c090bfb3b6a7", + "type": "change", + "z": "449f3115316b1767", + "name": "vsM2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "vsM2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1220, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "3a8599bc483c79aa", + "type": "change", + "z": "449f3115316b1767", + "name": "iCM2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "iCM2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1280, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "d274893e4a47892d", + "type": "change", + "z": "449f3115316b1767", + "name": "iDM2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "iDM2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1340, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "52aa4d1b9994d41f", + "type": "change", + "z": "449f3115316b1767", + "name": "MID2", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "MID2", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1380, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "63715023d827fc1e", + "type": "change", + "z": "449f3115316b1767", + "name": "HTFS", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "HTFS", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1440, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "b0769222c0c81594", + "type": "change", + "z": "449f3115316b1767", + "name": "DATA", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "DATA", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1480, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "47401c80b95f2d5f", + "type": "change", + "z": "449f3115316b1767", + "name": "LMPA", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "LMPA", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1540, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "2687afa4a3fb4fdd", + "type": "change", + "z": "449f3115316b1767", + "name": "HEBT", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "HEBT", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1580, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "50d6cb46307252fa", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/TaM1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": 
"/WarningFlags/TaM1", + "name": "/WarningFlags/TaM1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1700, + "wires": [ + [ + "3f7e9f9d738dbdbd" + ] + ] + }, + { + "id": "70096fd1a8d1e527", + "type": "comment", + "z": "449f3115316b1767", + "name": "Parse Warnings: 17", + "info": "", + "x": 250, + "y": 1660, + "wires": [] + }, + { + "id": "028062370fed913e", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/TbM1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/TbM1", + "name": "/WarningFlags/TbM1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1760, + "wires": [ + [ + "d810e3509532a76d" + ] + ] + }, + { + "id": "1b22f9619f20aef4", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/VBm1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/VBm1", + "name": "/WarningFlags/VBm1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1820, + "wires": [ + [ + "aec3c11c306fc487" + ] + ] + }, + { + "id": "b1b68c7e0f04d624", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/VBM1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/VBM1", + "name": "/WarningFlags/VBM1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1880, + "wires": [ + [ + "1a27a18f8afcc0fd" + ] + ] + }, + { + "id": "fc32c5e6a2a29520", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/IDM1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/IDM1", + "name": "/WarningFlags/IDM1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 1940, + "wires": [ + [ + "933f082d544c7d43" + ] + ] + }, + { + "id": "bd531ea175cf9396", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/vsm1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/vsm1", + "name": "/WarningFlags/vsm1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2000, + "wires": [ + [ + "b68dfa2ca1af5042" + ] + ] + }, + { + "id": "849842cd109d005a", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/vsM1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/vsM1", + "name": "/WarningFlags/vsM1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2060, + "wires": [ + [ + "646620aa94a5a58b" + ] + ] + }, + { + "id": "62e7d8499b663da4", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/iCM1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + 
"path": "/WarningFlags/iCM1", + "name": "/WarningFlags/iCM1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2120, + "wires": [ + [ + "2f126354727a23bf" + ] + ] + }, + { + "id": "9dd7fb6509c7f419", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/iDM1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/iDM1", + "name": "/WarningFlags/iDM1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2180, + "wires": [ + [ + "633fb1e3fb40aa85" + ] + ] + }, + { + "id": "7d7ae3d9bcd23f66", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/MID1", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/MID1", + "name": "/WarningFlags/MID1", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2240, + "wires": [ + [ + "59be305aa99ae241" + ] + ] + }, + { + "id": "091b46b7dbb1bc8c", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/BLPW", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/BLPW", + "name": "/WarningFlags/BLPW", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2300, + "wires": [ + [ + "fb2f75ab6931515f" + ] + ] + }, + { + "id": "083488e978f234a8", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/CCBF", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/CCBF", + "name": "/WarningFlags/CCBF", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2360, + "wires": [ + [ + "6248c986d96c6ad6" + ] + ] + }, + { + "id": "b20ac5ae742b5138", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/Ah_W", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/Ah_W", + "name": "/WarningFlags/Ah_W", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2420, + "wires": [ + [ + "aa8070809a252db6" + ] + ] + }, + { + "id": "71b228354309c56f", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/MPMM", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/MPMM", + "name": "/WarningFlags/MPMM", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 330, + "y": 2480, + "wires": [ + [ + "4faab6568a5d4404" + ] + ] + }, + { + "id": "ad7f9ba97722ce51", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/TCdi", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/TCdi", + "name": "/WarningFlags/TCdi", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 320, + "y": 2540, + "wires": [ + [ 
+ "52365fad40a27f93" + ] + ] + }, + { + "id": "a8d7dd081a0fdf21", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/LMPW", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/LMPW", + "name": "/WarningFlags/LMPW", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 330, + "y": 2600, + "wires": [ + [ + "df72d64cca5dd3cd" + ] + ] + }, + { + "id": "3087d4cf9e009296", + "type": "victron-input-custom", + "z": "449f3115316b1767", + "service": "com.victronenergy.battery/1", + "path": "/WarningFlags/TOCW", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/WarningFlags/TOCW", + "name": "/WarningFlags/TOCW", + "type": "object" + }, + "name": "", + "onlyChanges": false, + "x": 330, + "y": 2660, + "wires": [ + [ + "52110469fa1648e6" + ] + ] + }, + { + "id": "3f7e9f9d738dbdbd", + "type": "change", + "z": "449f3115316b1767", + "name": "TaM1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "TaM1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1700, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "d810e3509532a76d", + "type": "change", + "z": "449f3115316b1767", + "name": "TbM1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "TbM1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1760, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "aec3c11c306fc487", + "type": "change", + "z": "449f3115316b1767", + "name": "VBm1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "VBm1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1820, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "1a27a18f8afcc0fd", + "type": "change", + "z": "449f3115316b1767", + "name": "VBM1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "VBM1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1880, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "933f082d544c7d43", + "type": "change", + "z": "449f3115316b1767", + "name": "IDM1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "IDM1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 1940, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "b68dfa2ca1af5042", + "type": "change", + "z": "449f3115316b1767", + "name": "vsm1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "vsm1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2000, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "646620aa94a5a58b", + "type": "change", + "z": "449f3115316b1767", + "name": "vsM1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "vsM1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2060, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "2f126354727a23bf", + "type": "change", + "z": "449f3115316b1767", + "name": 
"iCM1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "iCM1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2120, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "633fb1e3fb40aa85", + "type": "change", + "z": "449f3115316b1767", + "name": "iDM1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "iDM1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2180, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "59be305aa99ae241", + "type": "change", + "z": "449f3115316b1767", + "name": "MID1", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "MID1", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2240, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "fb2f75ab6931515f", + "type": "change", + "z": "449f3115316b1767", + "name": "BLPW", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "BLPW", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2300, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "6248c986d96c6ad6", + "type": "change", + "z": "449f3115316b1767", + "name": "CCBF", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "CCBF", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2360, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "aa8070809a252db6", + "type": "change", + "z": "449f3115316b1767", + "name": "Ah_W", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "Ah_W", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2420, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "4faab6568a5d4404", + "type": "change", + "z": "449f3115316b1767", + "name": "MPMM", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "MPMM", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 580, + "y": 2480, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "52365fad40a27f93", + "type": "change", + "z": "449f3115316b1767", + "name": "TCdi", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "TCdi", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 550, + "y": 2540, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "df72d64cca5dd3cd", + "type": "change", + "z": "449f3115316b1767", + "name": "LMPW", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "LMPW", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 570, + "y": 2600, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "52110469fa1648e6", + "type": "change", + "z": "449f3115316b1767", + "name": "TOCW", + "rules": [ + { + "t": "set", + "p": "topic", + "pt": "msg", + "to": "TOCW", + "tot": "str" + } + ], + "action": "", + "property": "", + "from": "", + "to": "", + "reg": false, + "x": 570, + "y": 2660, + "wires": [ + [ + "714cafa3bd1315f1" + ] + ] + }, + { + "id": "c8f9fbd1a718e92b", + "type": "debug", + "z": "449f3115316b1767", + "name": "Debug for warnings and alarms", + "active": 
false, + "tosidebar": true, + "console": false, + "tostatus": true, + "complete": "payload", + "targetType": "msg", + "statusVal": "payload", + "statusType": "auto", + "x": 1430, + "y": 1640, + "wires": [] + }, + { + "id": "1d4797e6a377f6bd", + "type": "ui_text_input", + "z": "9744d7fd57e81fe3", + "name": "Grid Setpoint", + "label": "Grid Setpoint", + "tooltip": "", + "group": "3290bd5996bd3175", + "order": 1, + "width": 0, + "height": 0, + "passthru": true, + "mode": "number", + "delay": "0", + "topic": "topic", + "sendOnBlur": false, + "className": "", + "topicType": "msg", + "x": 710, + "y": 320, + "wires": [ + [ + "47479c689064cedc" + ] + ] + }, + { + "id": "47479c689064cedc", + "type": "victron-output-custom", + "z": "9744d7fd57e81fe3", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/AcPowerSetPoint", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/AcPowerSetPoint", + "name": "/Settings/CGwacs/AcPowerSetPoint", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 1070, + "y": 320, + "wires": [] + }, + { + "id": "556390611f6a678e", + "type": "victron-input-custom", + "z": "9744d7fd57e81fe3", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/AcPowerSetPoint", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/AcPowerSetPoint", + "name": "/Settings/CGwacs/AcPowerSetPoint", + "type": "number" + }, + "name": "", + "onlyChanges": true, + "x": 350, + "y": 320, + "wires": [ + [ + "1d4797e6a377f6bd" + ] + ] + }, + { + "id": "b0ce481c0c609c63", + "type": "victron-input-custom", + "z": "9744d7fd57e81fe3", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "name": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "type": "number" + }, + "name": "", + "onlyChanges": true, + "x": 390, + "y": 380, + "wires": [ + [ + "69cd8accae6fa947" + ] + ] + }, + { + "id": "69cd8accae6fa947", + "type": "ui_text_input", + "z": "9744d7fd57e81fe3", + "name": "Min SOC", + "label": "Min SOC", + "tooltip": "", + "group": "3290bd5996bd3175", + "order": 2, + "width": 0, + "height": 0, + "passthru": true, + "mode": "number", + "delay": "0", + "topic": "topic", + "sendOnBlur": false, + "className": "", + "topicType": "msg", + "x": 760, + "y": 380, + "wires": [ + [ + "c13e22a0d82b1d03" + ] + ] + }, + { + "id": "c13e22a0d82b1d03", + "type": "victron-output-custom", + "z": "9744d7fd57e81fe3", + "service": "com.victronenergy.settings", + "path": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "serviceObj": { + "service": "com.victronenergy.settings", + "name": "com.victronenergy.settings" + }, + "pathObj": { + "path": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "name": "/Settings/CGwacs/BatteryLife/MinimumSocLimit", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 1130, + "y": 380, + "wires": [] + }, + { + "id": "0b3ff7262ff95b26", + "type": "ui_template", + "z": "9744d7fd57e81fe3", + "group": "e177392401620838", + "name": "Battery Monitor", + "order": 5, + "width": 0, + "height": 0, + "format": " Battery Monitor \n", + "storeOutMessages": true, + "fwdInMessages": true, + "resendOnRefresh": true, + "templateScope": 
"local", + "className": "", + "x": 200, + "y": 500, + "wires": [ + [] + ], + "icon": "node-red/arrow-in.svg" + }, + { + "id": "d45cda2314ed0452", + "type": "victron-input-custom", + "z": "9744d7fd57e81fe3", + "service": "com.victronenergy.battery/1", + "path": "/Soc", + "serviceObj": { + "service": "com.victronenergy.battery/1", + "name": "FZS 48TL200 x2 (1)" + }, + "pathObj": { + "path": "/Soc", + "name": "/Soc", + "type": "number" + }, + "name": "", + "onlyChanges": false, + "x": 230, + "y": 440, + "wires": [ + [ + "2ba3d56099bc52e7" + ] + ] + }, + { + "id": "2ba3d56099bc52e7", + "type": "ui_text", + "z": "9744d7fd57e81fe3", + "group": "3290bd5996bd3175", + "order": 3, + "width": 0, + "height": 0, + "name": "SOC", + "label": "SOC", + "format": "{{msg.payload}}", + "layout": "row-spread", + "className": "", + "style": false, + "font": "", + "fontSize": 16, + "color": "#000000", + "x": 430, + "y": 440, + "wires": [] + } +] \ No newline at end of file diff --git a/NodeRed/NodeRedFiles/openvpn b/NodeRed/NodeRedFiles/openvpn new file mode 100644 index 000000000..466bd39d8 Binary files /dev/null and b/NodeRed/NodeRedFiles/openvpn differ diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/.checkignore b/NodeRed/NodeRedFiles/pika-0.13.1/.checkignore new file mode 100644 index 000000000..e5a2365ca --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/.checkignore @@ -0,0 +1,5 @@ +**/docs +**/examples +**/test +**/utils +setup.py diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/.codeclimate.yml b/NodeRed/NodeRedFiles/pika-0.13.1/.codeclimate.yml new file mode 100644 index 000000000..ba9e7cc78 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/.codeclimate.yml @@ -0,0 +1,8 @@ +languages: + - python +exclude_paths: + - docs/* + - tests/* + - utils/* + - pika/examples/* + - pika/spec.py diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/.coveragerc b/NodeRed/NodeRedFiles/pika-0.13.1/.coveragerc new file mode 100644 index 000000000..16b0aa24c --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/.coveragerc @@ -0,0 +1,2 @@ +[run] +omit = pika/spec.py diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/.github/ISSUE_TEMPLATE.md b/NodeRed/NodeRedFiles/pika-0.13.1/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..97428b0c5 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,15 @@ +Thank you for using Pika. + +GitHub issues are **strictly** used for actionable work and pull +requests. + +Pika's maintainers do NOT use GitHub issues for questions, root cause +analysis, conversations, code reviews, etc. + +Please direct all non-work issues to either the `pika-python` or +`rabbitmq-users` mailing list: + +* https://groups.google.com/forum/#!forum/pika-python +* https://groups.google.com/forum/#!forum/rabbitmq-users + +Thank you diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/.github/PULL_REQUEST_TEMPLATE.md b/NodeRed/NodeRedFiles/pika-0.13.1/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..13131ca02 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,43 @@ +## Proposed Changes + +Please describe the big picture of your changes here to communicate to +the Pika team why we should accept this pull request. If it fixes a bug +or resolves a feature request, be sure to link to that issue. + +A pull request that doesn't explain **why** the change was made has a +much lower chance of being accepted. 
+ +If English isn't your first language, don't worry about it and try to +communicate the problem you are trying to solve to the best of your +abilities. As long as we can understand the intent, it's all good. + +## Types of Changes + +What types of changes does your code introduce to this project? +_Put an `x` in the boxes that apply_ + +- [ ] Bugfix (non-breaking change which fixes issue #NNNN) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) +- [ ] Documentation (correction or otherwise) +- [ ] Cosmetics (whitespace, appearance) + +## Checklist + +_Put an `x` in the boxes that apply. You can also fill these out after +creating the PR. If you're unsure about any of them, don't hesitate to +ask on the +[`pika-python`](https://groups.google.com/forum/#!forum/pika-python) +mailing list. We're here to help! This is simply a reminder of what we +are going to look for before merging your code._ + +- [ ] I have read the `CONTRIBUTING.md` document +- [ ] All tests pass locally with my changes +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] I have added necessary documentation (if appropriate) + +## Further Comments + +If this is a relatively large or complex change, kick off the discussion +by explaining why you chose the solution you did and what alternatives +you considered, etc. diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/.gitignore b/NodeRed/NodeRedFiles/pika-0.13.1/.gitignore new file mode 100644 index 000000000..8d1682e26 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/.gitignore @@ -0,0 +1,20 @@ +*.pyc +*~ +.idea +.coverage +.tox +.DS_Store +.python-version +pika.iml +codegen +pika.egg-info +debug/ +examples/pika +examples/blocking/pika +atlassian*xml +build +dist +docs/_build +venv*/ +env/ +testdata/*.conf diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/.travis.yml b/NodeRed/NodeRedFiles/pika-0.13.1/.travis.yml new file mode 100644 index 000000000..a359dff13 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/.travis.yml @@ -0,0 +1,103 @@ +language: python + +sudo: false + +addons: + apt: + sources: + - sourceline: deb https://packages.erlang-solutions.com/ubuntu trusty contrib + key_url: https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc + packages: + # apt-cache show erlang-nox=1:20.3-1 | grep Depends | tr ' ' '\n' | grep erlang | grep -v erlang-base-hipe | tr -d ',' | sed 's/$/=1:20.3-1/' + - erlang-nox + +env: + global: + - RABBITMQ_VERSION=3.7.8 + - RABBITMQ_DOWNLOAD_URL="https://github.com/rabbitmq/rabbitmq-server/releases/download/v$RABBITMQ_VERSION/rabbitmq-server-generic-unix-$RABBITMQ_VERSION.tar.xz" + - RABBITMQ_TAR="rabbitmq-$RABBITMQ_VERSION.tar.xz" + - PATH=$HOME/.local/bin:$PATH + - AWS_DEFAULT_REGION=us-east-1 + - secure: "Eghft2UgJmWuCgnqz6O+KV5F9AERzUbKIeXkcw7vsFAVdkB9z01XgqVLhQ6N+n6i8mkiRDkc0Jes6htVtO4Hi6lTTFeDhu661YCXXTFdRdsx+D9v5bgw8Q2bP41xFy0iao7otYqkzFKIo32Q2cUYzMUqXlS661Yai5DXldr3mjM=" + - secure: "LjieH/Yh0ng5gwT6+Pl3rL7RMxxb/wOlogoLG7cS99XKdX6N4WRVFvWbHWwCxoVr0be2AcyQynu4VOn+0jC8iGfQjkJZ7UrJjZCDGWbNjAWrNcY0F9VdretFDy8Vn2sHfBXq8fINqszJkgTnmbQk8dZWUtj0m/RNVnOBeBcsIOU=" + +stages: +- test +- name: coverage + if: repo = pika/pika +- name: deploy + if: tag IS present + +cache: + apt: true + directories: + - $HOME/.cache + +install: + - pip install -r test-requirements.txt + - pip install awscli==1.11.18 + - if [ ! 
-d "$HOME/.cache" ]; then mkdir "$HOME/.cache"; fi + - if [ -s "$HOME/.cache/$RABBITMQ_TAR" ]; then echo "[INFO] found cached $RABBITMQ_TAR file"; else wget -O "$HOME/.cache/$RABBITMQ_TAR" "$RABBITMQ_DOWNLOAD_URL"; fi + - tar -C "$TRAVIS_BUILD_DIR" -xvf "$HOME/.cache/$RABBITMQ_TAR" + - sed -e "s#PIKA_DIR#$TRAVIS_BUILD_DIR#g" "$TRAVIS_BUILD_DIR/testdata/rabbitmq.conf.in" > "$TRAVIS_BUILD_DIR/testdata/rabbitmq.conf" + +before_script: + - pip freeze + - /bin/sh -c "RABBITMQ_PID_FILE=$TRAVIS_BUILD_DIR/rabbitmq.pid RABBITMQ_CONFIG_FILE=$TRAVIS_BUILD_DIR/testdata/rabbitmq $TRAVIS_BUILD_DIR/rabbitmq_server-$RABBITMQ_VERSION/sbin/rabbitmq-server &" + - /bin/sh "$TRAVIS_BUILD_DIR/rabbitmq_server-$RABBITMQ_VERSION/sbin/rabbitmqctl" wait "$TRAVIS_BUILD_DIR/rabbitmq.pid" + - /bin/sh "$TRAVIS_BUILD_DIR/rabbitmq_server-$RABBITMQ_VERSION/sbin/rabbitmqctl" status + +script: + # See https://github.com/travis-ci/travis-ci/issues/1066 and https://github.com/pika/pika/pull/984#issuecomment-370565220 + # as to why 'set -e' and 'set +e' are added here + - set -e + - nosetests + - PIKA_TEST_TLS=true nosetests + - set +e + +after_success: + - aws s3 cp .coverage "s3://com-gavinroy-travis/pika/$TRAVIS_BUILD_NUMBER/.coverage.${TRAVIS_PYTHON_VERSION}" + +jobs: + include: + - python: pypy3 + - python: pypy + - python: 2.7 + - python: 3.4 + - python: 3.5 + - python: 3.6 + - python: 3.7 + dist: xenial # required for Python 3.7 (travis-ci/travis-ci#9069) + - stage: coverage + if: fork = false OR type != pull_request + python: 3.6 + services: [] + install: + - pip install awscli coverage codecov + before_script: [] + script: + - mkdir coverage + - aws s3 cp --recursive s3://com-gavinroy-travis/pika/$TRAVIS_BUILD_NUMBER/ coverage + - cd coverage + - coverage combine + - cd .. + - mv coverage/.coverage . + - coverage report + after_success: codecov + - stage: deploy + if: repo = pika/pika + python: 3.6 + services: [] + install: true + before_script: [] + script: true + after_success: [] + deploy: + distributions: sdist bdist_wheel + provider: pypi + user: crad + on: + tags: true + all_branches: true + password: + secure: "V/JTU/X9C6uUUVGEAWmWWbmKW7NzVVlC/JWYpo05Ha9c0YV0vX4jOfov2EUAphM0WwkD/MRhz4dq3kCU5+cjHxR3aTSb+sbiElsCpaciaPkyrns+0wT5MCMO29Lpnq2qBLc1ePR1ey5aTWC/VibgFJOL7H/3wyvukL6ZaCnktYk=" diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/CHANGELOG.rst b/NodeRed/NodeRedFiles/pika-0.13.1/CHANGELOG.rst new file mode 100644 index 000000000..8a7578d77 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/CHANGELOG.rst @@ -0,0 +1,760 @@ +Version History +=============== + +0.13.1 2019-03-07 +----------------- + +`GitHub milestone `_ + +0.13.0 2019-01-17 +----------------- + +`GitHub milestone `_ + +- `AsyncioConnection`, `TornadoConnection` and `TwistedProtocolConnection` are no longer auto-imported (`PR `_) +- Python `3.7` support (`Issue `_) + +0.12.0 2018-06-19 +----------------- + +`GitHub milestone `_ + +This is an interim release prior to version `1.0.0`. 
It includes the following backported pull requests and commits from the `master` branch: + +- `PR #908 `_ +- `PR #910 `_ +- `PR #918 `_ +- `PR #920 `_ +- `PR #924 `_ +- `PR #937 `_ +- `PR #938 `_ +- `PR #933 `_ +- `PR #940 `_ +- `PR #932 `_ +- `PR #928 `_ +- `PR #934 `_ +- `PR #915 `_ +- `PR #946 `_ +- `PR #947 `_ +- `PR #952 `_ +- `PR #956 `_ +- `PR #966 `_ +- `PR #975 `_ +- `PR #978 `_ +- `PR #981 `_ +- `PR #994 `_ +- `PR #1007 `_ +- `PR #1045 `_ (manually backported) +- `PR #1011 `_ + +Commits: + +Travis CI fail fast - 3f0e739 + +New features: + +`BlockingConnection` now supports the `add_callback_threadsafe` method which allows a function to be executed correctly on the IO loop thread. The main use-case for this is as follows: + +- Application sets up a thread for `BlockingConnection` and calls `basic_consume` on it +- When a message is received, work is done on another thread +- When the work is done, the worker uses `connection.add_callback_threadsafe` to call the `basic_ack` method on the channel instance. + +Please see `examples/basic_consumer_threaded.py` for an example. As always, `SelectConnection` and a fully async consumer/publisher is the preferred method of using Pika. + +Heartbeats are now sent at an interval equal to 1/2 of the negotiated idle connection timeout. RabbitMQ's default timeout value is 60 seconds, so heartbeats will be sent at a 30 second interval. In addition, Pika's check for an idle connection will be done at an interval equal to the timeout value plus 5 seconds to allow for delays. This results in an interval of 65 seconds by default. + +0.11.2 2017-11-30 +----------------- + +`GitHub milestone `_ + +`0.11.2 `_ + +- Remove `+` character from platform releases string (`PR `_) + +0.11.1 2017-11-27 +----------------- + +`GitHub milestone `_ + +`0.11.1 `_ + +- Fix `BlockingConnection` to ensure event loop exits (`PR `_) +- Heartbeat timeouts will use the client value if specified (`PR `_) +- Allow setting some common TCP options (`PR `_) +- Errors when decoding Unicode are ignored (`PR `_) +- Fix large number encoding (`PR `_) + +0.11.0 2017-07-29 +----------------- + +`GitHub milestone `_ + +`0.11.0 `_ + + - Simplify Travis CI configuration for OS X. + - Add `asyncio` connection adapter for Python 3.4 and newer. + - Connection failures that occur after the socket is opened and before the + AMQP connection is ready to go are now reported by calling the connection + error callback. Previously these were not consistently reported. + - In BaseConnection.close, call _handle_ioloop_stop only if the connection is + already closed to allow the asynchronous close operation to complete + gracefully. + - Pass error information from failed socket connection to user callbacks + on_open_error_callback and on_close_callback with result_code=-1. + - ValueError is raised when a completion callback is passed to an asynchronous + (nowait) Channel operation. It's an application error to pass a non-None + completion callback with an asynchronous request, because this callback can + never be serviced in the asynchronous scenario. + - `Channel.basic_reject` fixed to allow `delivery_tag` to be of type `long` + as well as `int`. (by quantum5) + - Implemented support for blocked connection timeouts in + `pika.connection.Connection`. This feature is available to all pika adapters. + See `pika.connection.ConnectionParameters` docstring to learn more about + `blocked_connection_timeout` configuration. 
+ - Deprecated the `heartbeat_interval` arg in `pika.ConnectionParameters` in + favor of the `heartbeat` arg for consistency with the other connection + parameters classes `pika.connection.Parameters` and `pika.URLParameters`. + - When the `port` arg is not set explicitly in `ConnectionParameters` + constructor, but the `ssl` arg is set explicitly, then set the port value + to the default AMQP SSL port if SSL is enabled, otherwise to the default + AMQP plaintext port. + - `URLParameters` will raise ValueError if a non-empty URL scheme other than + {amqp | amqps | http | https} is specified. + - `InvalidMinimumFrameSize` and `InvalidMaximumFrameSize` exceptions are + deprecated. pika.connection.Parameters.frame_max property setter now raises + the standard `ValueError` exception when the value is out of bounds. + - Removed deprecated parameter `type` in `Channel.exchange_declare` and + `BlockingChannel.exchange_declare` in favor of the `exchange_type` arg that + doesn't overshadow the builtin `type` keyword. + - Channel.close() on OPENING channel transitions it to CLOSING instead of + raising ChannelClosed. + - Channel.close() on CLOSING channel raises `ChannelAlreadyClosing`; used to + raise `ChannelClosed`. + - Connection.channel() raises `ConnectionClosed` if connection is not in OPEN + state. + - When performing graceful close on a channel and `Channel.Close` from broker + arrives while waiting for CloseOk, don't release the channel number until + CloseOk arrives to avoid race condition that may lead to a new channel + receiving the CloseOk that was destined for the closing channel. + - The `backpressure_detection` option of `ConnectionParameters` and + `URLParameters` is DEPRECATED in favor of `Connection.Blocked` and + `Connection.Unblocked`. See `Connection.add_on_connection_blocked_callback`. + +0.10.0 2015-09-02 +----------------- + +`0.10.0 `_ + + - a9bf96d - LibevConnection: Fixed dict chgd size during iteration (Michael Laing) + - 388c55d - SelectConnection: Fixed KeyError exceptions in IOLoop timeout executions (Shinji Suzuki) + - 4780de3 - BlockingConnection: Add support to make BlockingConnection a Context Manager (@reddec) + +0.10.0b2 2015-07-15 +------------------- + + - f72b58f - Fixed failure to purge _ConsumerCancellationEvt from BlockingChannel._pending_events during basic_cancel. (Vitaly Kruglikov) + +0.10.0b1 2015-07-10 +------------------- + +High-level summary of notable changes: + +- Change to 3-Clause BSD License +- Python 3.x support +- Over 150 commits from 19 contributors +- Refactoring of SelectConnection ioloop +- This major release contains certain non-backward-compatible API changes as + well as significant performance improvements in the `BlockingConnection` + adapter. +- Non-backward-compatible changes in `Channel.add_on_return_callback` callback's + signature. +- The `AsyncoreConnection` adapter was retired + +**Details** + +Python 3.x: this release introduces Python 3.x support. Tested on Python 3.3 +and 3.4. + +`AsyncoreConnection`: Retired this legacy adapter to reduce maintenance burden; +the recommended replacement is the `SelectConnection` adapter. + +`SelectConnection`: ioloop was refactored for compatibility with other ioloops. + +`Channel.add_on_return_callback`: The callback is now passed the individual +parameters channel, method, properties, and body instead of a tuple of those +values for congruence with other similar callbacks.
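+For instance, a minimal sketch of the new callback signature (assuming
+`channel` is an already-open asynchronous `Channel`; the handler name is
+illustrative, not part of Pika's API)::
+
+    def on_message_returned(channel, method, properties, body):
+        # The callback now receives four individual positional arguments
+        # instead of a single (channel, method, properties, body) tuple.
+        print("Basic.Return for routing key %r: %r"
+              % (method.routing_key, body))
+
+    channel.add_on_return_callback(on_message_returned)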
+ +`BlockingConnection`: This adapter underwent a makeover under the hood and +gained significant performance improvements as well as enhanced timer +resolution. It is now implemented as a client of the `SelectConnection` adapter. + +Below is an overview of the `BlockingConnection` and `BlockingChannel` API +changes: + + - Recursion: the new implementation eliminates callback recursion that + sometimes blew out the stack in the legacy implementation (e.g., + publish -> consumer_callback -> publish -> consumer_callback, etc.). While + `BlockingConnection.process_data_events` and `BlockingConnection.sleep` may + still be called from the scope of the blocking adapter's callbacks in order + to process pending I/O, additional callbacks will be suppressed whenever + `BlockingConnection.process_data_events` and `BlockingConnection.sleep` are + nested in any combination; in that case, the callback information will be + buffered and dispatched once nesting unwinds and control returns to the + level-zero dispatcher. + - `BlockingConnection.connect`: this method was removed in favor of the + constructor as the only way to establish connections; this reduces + maintenance burden, while improving reliability of the adapter. + - `BlockingConnection.process_data_events`: added the optional parameter + `time_limit`. + - `BlockingConnection.add_on_close_callback`: removed; legacy raised + `NotImplementedError`. + - `BlockingConnection.add_on_open_callback`: removed; legacy raised + `NotImplementedError`. + - `BlockingConnection.add_on_open_error_callback`: removed; legacy raised + `NotImplementedError`. + - `BlockingConnection.add_backpressure_callback`: not supported + - `BlockingConnection.set_backpressure_multiplier`: not supported + - `BlockingChannel.add_on_flow_callback`: not supported; per docstring in + channel.py: "Note that newer versions of RabbitMQ will not issue this but + instead use TCP backpressure". + - `BlockingChannel.flow`: not supported + - `BlockingChannel.force_data_events`: removed as it is no longer necessary + following redesign of the adapter. + - Removed the `nowait` parameter from `BlockingChannel` methods, forcing + `nowait=False` (former API default) in the implementation; this is more + suitable for the blocking nature of the adapter and its error-reporting + strategy; this concerns the following methods: `basic_cancel`, + `confirm_delivery`, `exchange_bind`, `exchange_declare`, `exchange_delete`, + `exchange_unbind`, `queue_bind`, `queue_declare`, `queue_delete`, and + `queue_purge`. + - `BlockingChannel.basic_cancel`: returns a sequence instead of None; for a + `no_ack=True` consumer, `basic_cancel` returns a sequence of pending + messages that arrived before broker confirmed the cancellation. + - `BlockingChannel.consume`: added new optional kwargs `arguments` and + `inactivity_timeout`. Also, raises ValueError if the consumer creation + parameters don't match those used to create the existing queue consumer + generator, if any; this happens when you break out of the consume loop, then + call `BlockingChannel.consume` again with different consumer-creation args + without first cancelling the previous queue consumer generator via + `BlockingChannel.cancel`. The legacy implementation would silently resume + consuming from the existing queue consumer generator even if the subsequent + `BlockingChannel.consume` was invoked with a different queue name, etc.
+ - `BlockingChannel.cancel`: returns 0; the legacy implementation tried to + return the number of requeued messages, but this number was not accurate + as it didn't include the messages returned by the Channel class; this count + is not generally useful, so returning 0 is a reasonable replacement. + - `BlockingChannel.open`: removed in favor of having a single mechanism for + creating a channel (`BlockingConnection.channel`); this reduces maintenance + burden, while improving reliability of the adapter. + - `BlockingChannel.confirm_delivery`: raises UnroutableError when unroutable + messages that were sent prior to this call are returned before we receive + Confirm.Select-ok. + - `BlockingChannel.basic_publish`: always returns True when delivery + confirmation is not enabled (publisher-acks = off); the legacy implementation + returned a bool in this case if `mandatory=True` to indicate whether the + message was delivered; however, this was non-deterministic, because + Basic.Return is asynchronous and there is no way to know how long to wait + for it or its absence. The legacy implementation returned None when + publishing with publisher-acks = off and `mandatory=False`. The new + implementation always returns True when publishing while + publisher-acks = off. + - `BlockingChannel.publish`: a new alternate method (vs. `basic_publish`) for + publishing a message with more detailed error reporting via UnroutableError + and NackError exceptions. + - `BlockingChannel.start_consuming`: raises pika.exceptions.RecursionError if + called from the scope of a `BlockingConnection` or `BlockingChannel` + callback. + - `BlockingChannel.get_waiting_message_count`: new method; returns the number + of messages that may be retrieved from the current queue consumer generator + via `BlockingChannel.consume` without blocking. + +**Commits** + + - 5aaa753 - Fixed SSL import and removed no_ack=True in favor of explicit AMQP message handling based on deferreds (skftn) + - 7f222c2 - Add checkignore for codeclimate (Gavin M. Roy) + - 4dec370 - Implemented BlockingChannel.flow; Implemented BlockingConnection.add_on_connection_blocked_callback; Implemented BlockingConnection.add_on_connection_unblocked_callback. (Vitaly Kruglikov) + - 4804200 - Implemented blocking adapter acceptance test for exchange-to-exchange binding. Added rudimentary validation of BasicProperties passthru in blocking adapter publish tests. Updated CHANGELOG. (Vitaly Kruglikov) + - 4ec07fd - Fixed sending of data in TwistedProtocolConnection (Vitaly Kruglikov) + - a747fb3 - Remove my copyright from forward_server.py test utility. (Vitaly Kruglikov) + - 94246d2 - Return True from basic_publish when pubacks is off. Implemented more blocking adapter acceptance tests. (Vitaly Kruglikov) + - 3ce013d - PIKA-609 Wait for broker to dispatch all messages to client before cancelling consumer in TestBasicCancelWithNonAckableConsumer and TestBasicCancelWithAckableConsumer (Vitaly Kruglikov) + - 293f778 - Created CHANGELOG entry for release 0.10.0. Fixed up callback documentation for basic_get, basic_consume, and add_on_return_callback. (Vitaly Kruglikov) + - 16d360a - Removed the legacy AsyncoreConnection adapter in favor of the recommended SelectConnection adapter. (Vitaly Kruglikov) + - 240a82c - Defer creation of poller's event loop interrupt socket pair until start is called, because some SelectConnection users (e.g., BlockingConnection adapter) don't use the event loop, and these sockets would just get reported as resource leaks.
(Vitaly Kruglikov) + - aed5cae - Added EINTR loops in select_connection pollers. Addressed some pylint findings, including an error or two. Wrap socket.send and socket.recv calls in EINTR loops Use the correct exception for socket.error and select.error and get errno depending on python version. (Vitaly Kruglikov) + - 498f1be - Allow passing exchange, queue and routing_key as text, handle short strings as text in python3 (saarni) + - 9f7f243 - Restored basic_consume, basic_cancel, and add_on_cancel_callback (Vitaly Kruglikov) + - 18c9909 - Reintroduced BlockingConnection.process_data_events. (Vitaly Kruglikov) + - 4b25cb6 - Fixed BlockingConnection/BlockingChannel acceptance and unit tests (Vitaly Kruglikov) + - bfa932f - Facilitate proper connection state after BasicConnection._adapter_disconnect (Vitaly Kruglikov) + - 9a09268 - Fixed BlockingConnection test that was failing with ConnectionClosed error. (Vitaly Kruglikov) + - 5a36934 - Copied synchronous_connection.py from pika-synchronous branch Fixed pylint findings Integrated SynchronousConnection with the new ioloop in SelectConnection Defined dedicated message classes PolledMessage and ConsumerMessage and moved from BlockingChannel to module-global scope. Got rid of nowait args from BlockingChannel public API methods Signal unroutable messages via UnroutableError exception. Signal Nack'ed messages via NackError exception. These expose more information about the failure than legacy basic_publich API. Removed set_timeout and backpressure callback methods Restored legacy `is_open`, etc. property names (Vitaly Kruglikov) + - 6226dc0 - Remove deprecated --use-mirrors (Gavin M. Roy) + - 1a7112f - Raise ConnectionClosed when sending a frame with no connection (#439) (Gavin M. Roy) + - 9040a14 - Make delivery_tag non-optional (#498) (Gavin M. Roy) + - 86aabc2 - Bump version (Gavin M. Roy) + - 562075a - Update a few testing things (Gavin M. Roy) + - 4954d38 - use unicode_type in blocking_connection.py (Antti Haapala) + - 133d6bc - Let Travis install ordereddict for Python 2.6, and ttest 3.3, 3.4 too. (Antti Haapala) + - 0d2287d - Pika Python 3 support (Antti Haapala) + - 3125c79 - SSLWantRead is not supported before python 2.7.9 and 3.3 (Will) + - 9a9c46c - Fixed TestDisconnectDuringConnectionStart: it turns out that depending on callback order, it might get either ProbableAuthenticationError or ProbableAccessDeniedError. (Vitaly Kruglikov) + - cd8c9b0 - A fix the write starvation problem that we see with tornado and pika (Will) + - 8654fbc - SelectConnection - make interrupt socketpair non-blocking (Will) + - 4f3666d - Added copyright in forward_server.py and fixed NameError bug (Vitaly Kruglikov) + - f8ebbbc - ignore docs (Gavin M. Roy) + - a344f78 - Updated codeclimate config (Gavin M. Roy) + - 373c970 - Try and fix pathing issues in codeclimate (Gavin M. Roy) + - 228340d - Ignore codegen (Gavin M. Roy) + - 4db0740 - Add a codeclimate config (Gavin M. Roy) + - 7e989f9 - Slight code re-org, usage comment and better naming of test file. (Will) + - 287be36 - Set up _kqueue member of KQueuePoller before calling super constructor to avoid exception due to missing _kqueue member. Call `self._map_event(event)` instead of `self._map_event(event.filter)`, because `KQueuePoller._map_event()` assumes it's getting an event, not an event filter. (Vitaly Kruglikov) + - 62810fb - Fix issue #412: reset BlockingConnection._read_poller in BlockingConnection._adapter_disconnect() to guard against accidental access to old file descriptor. 
(Vitaly Kruglikov) + - 03400ce - Rationalise adapter acceptance tests (Will) + - 9414153 - Fix bug selecting non epoll poller (Will) + - 4f063df - Use user heartbeat setting if server proposes none (Pau Gargallo) + - 9d04d6e - Deactivate heartbeats when heartbeat_interval is 0 (Pau Gargallo) + - a52a608 - Bug fix and review comments. (Will) + - e3ebb6f - Fix incorrect x-expires argument in acceptance tests (Will) + - 294904e - Get BlockingConnection into consistent state upon loss of TCP/IP connection with broker and implement acceptance tests for those cases. (Vitaly Kruglikov) + - 7f91a68 - Make SelectConnection behave like an ioloop (Will) + - dc9db2b - Perhaps 5 seconds is too agressive for travis (Gavin M. Roy) + - c23e532 - Lower the stuck test timeout (Gavin M. Roy) + - 1053ebc - Late night bug (Gavin M. Roy) + - cd6c1bf - More BaseConnection._handle_error cleanup (Gavin M. Roy) + - a0ff21c - Fix the test to work with Python 2.6 (Gavin M. Roy) + - 748e8aa - Remove pypy for now (Gavin M. Roy) + - 1c921c1 - Socket close/shutdown cleanup (Gavin M. Roy) + - 5289125 - Formatting update from PR (Gavin M. Roy) + - d235989 - Be more specific when calling getaddrinfo (Gavin M. Roy) + - b5d1b31 - Reflect the method name change in pika.callback (Gavin M. Roy) + - df7d3b7 - Cleanup BlockingConnection in a few places (Gavin M. Roy) + - cd99e1c - Rename method due to use in BlockingConnection (Gavin M. Roy) + - 7e0d1b3 - Use google style with yapf instead of pep8 (Gavin M. Roy) + - 7dc9bab - Refactor socket writing to not use sendall #481 (Gavin M. Roy) + - 4838789 - Dont log the fd #521 (Gavin M. Roy) + - 765107d - Add Connection.Blocked callback registration methods #476 (Gavin M. Roy) + - c15b5c1 - Fix _blocking typo pointed out in #513 (Gavin M. Roy) + - 759ac2c - yapf of codegen (Gavin M. Roy) + - 9dadd77 - yapf cleanup of codegen and spec (Gavin M. Roy) + - ddba7ce - Do not reject consumers with no_ack=True #486 #530 (Gavin M. Roy) + - 4528a1a - yapf reformatting of tests (Gavin M. Roy) + - e7b6d73 - Remove catching AttributError (#531) (Gavin M. Roy) + - 41ea5ea - Update README badges [skip ci] (Gavin M. Roy) + - 6af987b - Add note on contributing (Gavin M. Roy) + - 161fc0d - yapf formatting cleanup (Gavin M. Roy) + - edcb619 - Add PYPY to travis testing (Gavin M. Roy) + - 2225771 - Change the coverage badge (Gavin M. Roy) + - 8f7d451 - Move to codecov from coveralls (Gavin M. Roy) + - b80407e - Add confirm_delivery to example (Andrew Smith) + - 6637212 - Update base_connection.py (bstemshorn) + - 1583537 - #544 get_waiting_message_count() (markcf) + - 0c9be99 - Fix #535: pass expected reply_code and reply_text from method frame to Connection._on_disconnect from Connection._on_connection_closed (Vitaly Kruglikov) + - d11e73f - Propagate ConnectionClosed exception out of BlockingChannel._send_method() and log ConnectionClosed in BlockingConnection._on_connection_closed() (Vitaly Kruglikov) + - 63d2951 - Fix #541 - make sure connection state is properly reset when BlockingConnection._check_state_on_disconnect raises ConnectionClosed. This supplements the previously-merged PR #450 by getting the connection into consistent state. (Vitaly Kruglikov) + - 71bc0eb - Remove unused self.fd attribute from BaseConnection (Vitaly Kruglikov) + - 8c08f93 - PIKA-532 Removed unnecessary params (Vitaly Kruglikov) + - 6052ecf - PIKA-532 Fix bug in BlockingConnection._handle_timeout that was preventing _on_connection_closed from being called when not closing. 
(Vitaly Kruglikov) + - 562aa15 - pika: callback: Display exception message when callback fails. (Stuart Longland) + - 452995c - Typo fix in connection.py (Andrew) + - 361c0ad - Added some missing yields (Robert Weidlich) + - 0ab5a60 - Added complete example for python twisted service (Robert Weidlich) + - 4429110 - Add deployment and webhooks (Gavin M. Roy) + - 7e50302 - Fix has_content style in codegen (Andrew Grigorev) + - 28c2214 - Fix the trove categorization (Gavin M. Roy) + - de8b545 - Ensure frames can not be interspersed on send (Gavin M. Roy) + - 8fe6bdd - Fix heartbeat behaviour after connection failure. (Kyösti Herrala) + - c123472 - Updating BlockingChannel.basic_get doc (it does not receive a callback like the rest of the adapters) (Roberto Decurnex) + - b5f52fb - Fix number of arguments passed to _on_return callback (Axel Eirola) + - 765139e - Lower default TIMEOUT to 0.01 (bra-fsn) + - 6cc22a5 - Fix confirmation on reconnects (bra-fsn) + - f4faf0a - asynchronous publisher and subscriber examples refactored to follow the StepDown rule (Riccardo Cirimelli) + +0.9.14 - 2014-07-11 +------------------- + +`0.9.14 `_ + + - 57fe43e - fix test to generate a correct range of random ints (ml) + - 0d68dee - fix async watcher for libev_connection (ml) + - 01710ad - Use default username and password if not specified in URLParameters (Sean Dwyer) + - fae328e - documentation typo (Jeff Fein-Worton) + - afbc9e0 - libev_connection: reset_io_watcher (ml) + - 24332a2 - Fix the manifest (Gavin M. Roy) + - acdfdef - Remove useless test (Gavin M. Roy) + - 7918e1a - Skip libev tests if pyev is not installed or if they are being run in pypy (Gavin M. Roy) + - bb583bf - Remove the deprecated test (Gavin M. Roy) + - aecf3f2 - Don't reject a message if the channel is not open (Gavin M. Roy) + - e37f336 - Remove UTF-8 decoding in spec (Gavin M. Roy) + - ddc35a9 - Update the unittest to reflect removal of force binary (Gavin M. Roy) + - fea2476 - PEP8 cleanup (Gavin M. Roy) + - 9b97956 - Remove force_binary (Gavin M. Roy) + - a42dd90 - Whitespace required (Gavin M. Roy) + - 85867ea - Update the content_frame_dispatcher tests to reflect removal of auto-cast utf-8 (Gavin M. Roy) + - 5a4bd5d - Remove unicode casting (Gavin M. Roy) + - efea53d - Remove force binary and unicode casting (Gavin M. Roy) + - e918d15 - Add methods to remove deprecation warnings from asyncore (Gavin M. Roy) + - 117f62d - Add a coveragerc to ignore the auto generated pika.spec (Gavin M. Roy) + - 52f4485 - Remove pypy tests from travis for now (Gavin M. Roy) + - c3aa958 - Update README.rst (Gavin M. Roy) + - 3e2319f - Delete README.md (Gavin M. Roy) + - c12b0f1 - Move to RST (Gavin M. Roy) + - 704f5be - Badging updates (Gavin M. Roy) + - 7ae33ca - Update for coverage info (Gavin M. Roy) + - ae7ca86 - add libev_adapter_tests.py; modify .travis.yml to install libev and pyev (ml) + - f86aba5 - libev_connection: add **kwargs to _handle_event; suppress default_ioloop reuse warning (ml) + - 603f1cf - async_test_base: add necessary args to _on_cconn_closed (ml) + - 3422007 - add libev_adapter_tests.py (ml) + - 6cbab0c - removed relative imports and importing urlparse from urllib.parse for py3+ (a-tal) + - f808464 - libev_connection: add async watcher; add optional parameters to add_timeout (ml) + - c041c80 - Remove ev all together for now (Gavin M. Roy) + - 9408388 - Update the test descriptions and timeout (Gavin M. Roy) + - 1b552e0 - Increase timeout (Gavin M. Roy) + - 69a1f46 - Remove the pyev requirement for 2.6 testing (Gavin M. 
Roy) + - fe062d2 - Update package name (Gavin M. Roy) + - 611ad0e - Distribute the LICENSE and README.md (#350) (Gavin M. Roy) + - df5e1d8 - Ensure that the entire frame is written using socket.sendall (#349) (Gavin M. Roy) + - 69ec8cf - Move the libev install to before_install (Gavin M. Roy) + - a75f693 - Update test structure (Gavin M. Roy) + - 636b424 - Update things to ignore (Gavin M. Roy) + - b538c68 - Add tox, nose.cfg, update testing config (Gavin M. Roy) + - a0e7063 - add some tests to increase coverage of pika.connection (Charles Law) + - c76d9eb - Address issue #459 (Gavin M. Roy) + - 86ad2db - Raise exception if positional arg for parameters isn't an instance of Parameters (Gavin M. Roy) + - 14d08e1 - Fix for python 2.6 (Gavin M. Roy) + - bd388a3 - Use the first unused channel number addressing #404, #460 (Gavin M. Roy) + - e7676e6 - removing a debug that was left in last commit (James Mutton) + - 6c93b38 - Fixing connection-closed behavior to detect on attempt to publish (James Mutton) + - c3f0356 - Initialize bytes_written in _handle_write() (Jonathan Kirsch) + - 4510e95 - Fix _handle_write() may not send full frame (Jonathan Kirsch) + - 12b793f - fixed Tornado Consumer example to successfully reconnect (Yang Yang) + - f074444 - remove forgotten import of ordereddict (Pedro Abranches) + - 1ba0aea - fix last merge (Pedro Abranches) + - 10490a6 - change timeouts structure to list to maintain scheduling order (Pedro Abranches) + - 7958394 - save timeouts in ordered dict instead of dict (Pedro Abranches) + - d2746bf - URLParameters and ConnectionParameters accept unicode strings (Allard Hoeve) + - 596d145 - previous fix for AttributeError made parent and child class methods identical, remove duplication (James Mutton) + - 42940dd - UrlParameters Docs: fixed amqps scheme examples (Riccardo Cirimelli) + - 43904ff - Dont test this in PyPy due to sort order issue (Gavin M. Roy) + - d7d293e - Don't leave __repr__ sorting up to chance (Gavin M. Roy) + - 848c594 - Add integration test to travis and fix invocation (Gavin M. Roy) + - 2678275 - Add pypy to travis tests (Gavin M. Roy) + - 1877f3d - Also addresses issue #419 (Gavin M. Roy) + - 470c245 - Address issue #419 (Gavin M. Roy) + - ca3cb59 - Address issue #432 (Gavin M. Roy) + - a3ff6f2 - Default frame max should be AMQP FRAME_MAX (Gavin M. Roy) + - ff3d5cb - Remove max consumer tag test due to change in code. (Gavin M. Roy) + - 6045dda - Catch KeyError (#437) to ensure that an exception is not raised in a race condition (Gavin M. Roy) + - 0b4d53a - Address issue #441 (Gavin M. Roy) + - 180e7c4 - Update license and related files (Gavin M. Roy) + - 256ed3d - Added Jython support. (Erik Olof Gunnar Andersson) + - f73c141 - experimental work around for recursion issue. (Erik Olof Gunnar Andersson) + - a623f69 - Prevent #436 by iterating the keys and not the dict (Gavin M. Roy) + - 755fcae - Add support for authentication_failure_close, connection.blocked (Gavin M. Roy) + - c121243 - merge upstream master (Michael Laing) + - a08dc0d - add arg to channel.basic_consume (Pedro Abranches) + - 10b136d - Documentation fix (Anton Ryzhov) + - 9313307 - Fixed minor markup errors. 
(Jorge Puente Sarrín) + - fb3e3cf - Fix the spelling of UnsupportedAMQPFieldException (Garrett Cooper) + - 03d5da3 - connection.py: Propagate the force_channel keyword parameter to methods involved in channel creation (Michael Laing) + - 7bbcff5 - Documentation fix for basic_publish (JuhaS) + - 01dcea7 - Expose no_ack and exclusive to BlockingChannel.consume (Jeff Tang) + - d39b6aa - Fix BlockingChannel.basic_consume does not block on non-empty queues (Juhyeong Park) + - 6e1d295 - fix for issue 391 and issue 307 (Qi Fan) + - d9ffce9 - Update parameters.rst (cacovsky) + - 6afa41e - Add additional badges (Gavin M. Roy) + - a255925 - Fix return value on dns resolution issue (Laurent Eschenauer) + - 3f7466c - libev_connection: tweak docs (Michael Laing) + - 0aaed93 - libev_connection: Fix varable naming (Michael Laing) + - 0562d08 - libev_connection: Fix globals warning (Michael Laing) + - 22ada59 - libev_connection: use globals to track sigint and sigterm watchers as they are created globally within libev (Michael Laing) + - 2649b31 - Move badge [skip ci] (Gavin M. Roy) + - f70eea1 - Remove pypy and installation attempt of pyev (Gavin M. Roy) + - f32e522 - Conditionally skip external connection adapters if lib is not installed (Gavin M. Roy) + - cce97c5 - Only install pyev on python 2.7 (Gavin M. Roy) + - ff84462 - Add travis ci support (Gavin M. Roy) + - cf971da - lib_evconnection: improve signal handling; add callback (Michael Laing) + - 9adb269 - bugfix in returning a list in Py3k (Alex Chandel) + - c41d5b9 - update exception syntax for Py3k (Alex Chandel) + - c8506f1 - fix _adapter_connect (Michael Laing) + - 67cb660 - Add LibevConnection to README (Michael Laing) + - 1f9e72b - Propagate low-level connection errors to the AMQPConnectionError. (Bjorn Sandberg) + - e1da447 - Avoid race condition in _on_getok on successive basic_get() when clearing out callbacks (Jeff) + - 7a09979 - Add support for upcoming Connection.Blocked/Unblocked (Gavin M. Roy) + - 53cce88 - TwistedChannel correctly handles multi-argument deferreds. (eivanov) + - 66f8ace - Use uuid when creating unique consumer tag (Perttu Ranta-aho) + - 4ee2738 - Limit the growth of Channel._cancelled, use deque instead of list. (Perttu Ranta-aho) + - 0369aed - fix adapter references and tweak docs (Michael Laing) + - 1738c23 - retry select.select() on EINTR (Cenk Alti) + - 1e55357 - libev_connection: reset internal state on reconnect (Michael Laing) + - 708559e - libev adapter (Michael Laing) + - a6b7c8b - Prioritize EPollPoller and KQueuePoller over PollPoller and SelectPoller (Anton Ryzhov) + - 53400d3 - Handle socket errors in PollPoller and EPollPoller Correctly check 'select.poll' availability (Anton Ryzhov) + - a6dc969 - Use dict.keys & items instead of iterkeys & iteritems (Alex Chandel) + - 5c1b0d0 - Use print function syntax, in examples (Alex Chandel) + - ac9f87a - Fixed a typo in the name of the Asyncore Connection adapter (Guruprasad) + - dfbba50 - Fixed bug mentioned in Issue #357 (Erik Andersson) + - c906a2d - Drop additional flags when getting info for the hostnames, log errors (#352) (Gavin M. Roy) + - baf23dd - retry poll() on EINTR (Cenk Alti) + - 7cd8762 - Address ticket #352 catching an error when socket.getprotobyname fails (Gavin M. Roy) + - 6c3ec75 - Prep for 0.9.14 (Gavin M. Roy) + - dae7a99 - Bump to 0.9.14p0 (Gavin M. Roy) + - 620edc7 - Use default port and virtual host if omitted in URLParameters (Issue #342) (Gavin M. Roy) + - 42a8787 - Move the exception handling inside the while loop (Gavin M. 
Roy)
+ - 10e0264 - Fix connection back pressure detection issue #347 (Gavin M. Roy)
+ - 0bfd670 - Fixed mistake in commit 3a19d65. (Erik Andersson)
+ - da04bc0 - Fixed Unknown state on disconnect error message generated when closing connections. (Erik Andersson)
+ - 3a19d65 - Alternative solution to fix #345. (Erik Andersson)
+ - abf9fa8 - switch to sendall to send entire frame (Dustin Koupal)
+ - 9ce8ce4 - Fixed the async publisher example to work with reconnections (Raphaël De Giusti)
+ - 511028a - Fix typo in TwistedChannel docstring (cacovsky)
+ - 8b69e5a - calls self._adapter_disconnect() instead of self.disconnect() which doesn't actually exist #294 (Mark Unsworth)
+ - 06a5cf8 - add NullHandler to prevent logging warnings (Cenk Alti)
+ - f404a9a - Fix #337 cannot start ioloop after stop (Ralf Nyren)
+
+0.9.13 - 2013-05-15
+-------------------
+
+`0.9.13 `_
+
+**Major Changes**
+
+- IPv6 Support with thanks to Alessandro Tagliapietra for initial prototype
+- Officially remove support for <= Python 2.5 even though it was broken already
+- Drop pika.simplebuffer.SimpleBuffer in favor of the Python stdlib collections.deque object
+- New default object for receiving content is a "bytes" object, which is a str wrapper in Python 2 but paves the way for Python 3 support
+- New "Raw" mode for frame decoding of content frames (#334), added by Garth Williamson; addresses issues #331 and #229
+- Connection and Disconnection logic refactored, allowing for cleaner separation of protocol logic and socket handling logic as well as connection state management
+- New "on_open_error_callback" argument in creating connection objects and new Connection.add_on_open_error_callback method
+- New Connection.connect method to cleanly allow for reconnection code
+- Support for all AMQP field types, using protocol specified signed/unsigned unpacking
+
+**Backwards Incompatible Changes**
+
+- Method signature for creating connection objects has new argument "on_open_error_callback" which is positionally before "on_close_callback"
+- Internal callback variable names in connection.Connection have been renamed and constants used. If you relied on any of these callbacks outside of their internal use, make sure to check out the new constants.
+- Connection._connect method, which was an internal-only method, is now deprecated and will raise a DeprecationWarning. If you relied on this method, your code needs to change.
+- pika.simplebuffer has been removed
+
+**Bugfixes**
+
+- BlockingConnection consumer generator does not free buffer when exited (#328)
+- Unicode body payloads in the blocking adapter raise an exception (#333)
+- Support "b" short-short-int AMQP data type (#318)
+- Docstring type fix in adapters/select_connection (#316), fix by Rikard Hultén
+- IPv6 not supported (#309)
+- Stop the HeartbeatChecker when connection is closed (#307)
+- Unittest fix for SelectConnection (#336), fix by Erik Andersson
+- Handle condition where no connection or socket exists but SelectConnection needs a timeout for retrying a connection (#322)
+- TwistedAdapter lagging behind BaseConnection changes (#321), fix by Jan Urbański
+
+**Other**
+
+- Refactored documentation
+- Added Twisted Adapter example (#314) by nolinksoft
+
+0.9.12 - 2013-03-18
+-------------------
+
+`0.9.12 `_
+
+**Bugfixes**
+
+- New timeout id hashing was not unique
+
+0.9.11 - 2013-03-17
+-------------------
+
+`0.9.11 `_
+
+**Bugfixes**
+
+- Address inconsistent channel close callback documentation and add the signature
+  change to the TwistedChannel class (#305)
+- Address a missed timeout related internal data structure name change
+  introduced in the SelectConnection 0.9.10 release. Update all connection
+  adapters to use the same signature and docstring (#306).
+
+0.9.10 - 2013-03-16
+-------------------
+
+`0.9.10 `_
+
+**Bugfixes**
+
+- Fix timeout in twisted adapter (Submitted by cellscape)
+- Fix blocking_connection poll timer resolution to milliseconds (Submitted by cellscape)
+- Fix channel._on_close() without a method frame (Submitted by Richard Boulton)
+- Addressed exception on close (Issue #279 - fix by patcpsc)
+- 'messages' not initialized in BlockingConnection.cancel() (Issue #289 - fix by Mik Kocikowski)
+- Make queue_unbind behave like queue_bind (Issue #277)
+- Address closing behavioral issues for connections and channels (Issue #275)
+- Pass a Method frame to Channel._on_close in Connection._on_disconnect (Submitted by Jan Urbański)
+- Fix channel closed callback signature in the Twisted adapter (Submitted by Jan Urbański)
+- Don't stop the IOLoop on connection close in the Twisted adapter (Submitted by Jan Urbański)
+- Update the asynchronous examples to fix reconnecting and have it work
+- Warn if the socket was closed, such as if RabbitMQ dies without a Close frame
+- Fix URLParameters ssl_options (Issue #296)
+- Add state to BlockingConnection addressing (Issue #301)
+- Encode unicode body content prior to publishing (Issue #282)
+- Fix an issue with unicode keys in BasicProperties headers key (Issue #280)
+- Change how timeout ids are generated (Issue #254)
+- Address post close state issues in Channel (Issue #302)
+
+**Behavior changes**
+
+- Change core connection communication behavior to prefer outbound writes over reads, addressing a recursion issue
+- Update connection on close callbacks, changing callback method signature
+- Update channel on close callbacks, changing callback method signature
+- Give more info in the ChannelClosed exception
+- Change the constructor signature for BlockingConnection, block open/close callbacks
+- Disable the use of add_on_open_callback/add_on_close_callback methods in BlockingConnection
+
+
+0.9.9 - 2013-01-29
+------------------
+
+`0.9.9 `_
+
+**Bugfixes**
+
+- Only remove the tornado_connection.TornadoConnection file descriptor from the IOLoop if it's still open (Issue #221)
+- Allow messages with no body (Issue #227)
+- Allow for empty routing keys (Issue #224)
+- Don't raise an exception when trying to send a frame to a closed connection (Issue #229)
+- Only send a Connection.CloseOk if the connection is still open. (Issue #236 - Fix by noleaf)
+- Fix timeout threshold in blocking connection (Issue #232 - Fix by Adam Flynn)
+- Fix closing connection while a channel is still open (Issue #230 - Fix by Adam Flynn)
+- Fixed misleading warning and exception messages in BaseConnection (Issue #237 - Fix by Tristan Penman)
+- Pluralised and altered the wording of the AMQPConnectionError exception (Issue #237 - Fix by Tristan Penman)
+- Fixed _adapter_disconnect in TornadoConnection class (Issue #237 - Fix by Tristan Penman)
+- Fixing hang when closing connection without any channel in BlockingConnection (Issue #244 - Fix by Ales Teska)
+- Remove the process_timeouts() call in SelectConnection (Issue #239)
+- Change the string validation to basestring for host connection parameters (Issue #231)
+- Add a poller to the BlockingConnection to address latency issues introduced in Pika 0.9.8 (Issue #242)
+- reply_code and reply_text are not set in ChannelException (Issue #250)
+- Add the missing constraint parameter for Channel._on_return callback processing (Issue #257 - Fix by patcpsc)
+- Channel callbacks not being removed from callback manager when channel is closed or deleted (Issue #261)
+
+0.9.8 - 2012-11-18
+------------------
+
+`0.9.8 `_
+
+**Bugfixes**
+
+- Channel.queue_declare/BlockingChannel.queue_declare not setting up callbacks properly for empty queue name (Issue #218)
+- Channel.queue_bind/BlockingChannel.queue_bind not allowing empty routing key
+- Connection._on_connection_closed calling wrong method in Channel (Issue #219)
+- Fix tx_commit and tx_rollback bugs in BlockingChannel (Issue #217)
+
+0.9.7 - 2012-11-11
+------------------
+
+`0.9.7 `_
+
+**New features**
+
+- generator-based consumer in BlockingChannel (See :doc:`examples/blocking_consumer_generator` for an example)
+
+**Changes**
+
+- BlockingChannel._send_method will only wait if explicitly told to
+
+**Bugfixes**
+
+- Added the exchange "type" parameter back, but issue a DeprecationWarning
+- Don't require a queue name in Channel.queue_declare()
+- Fixed KeyError when processing timeouts (Issue #215 - Fix by Raphael De Giusti)
+- Don't try to close channels when the connection is closed (Issue #216 - Fix by Charles Law)
+- Don't raise UnexpectedFrame exceptions, log them instead
+- Handle multiple synchronous RPC calls made without waiting for the call result (Issues #192, #204, #211)
+- Typo in docs (Issue #207 - Fix by Luca Wehrstedt)
+- Only sleep on connection failure when retry attempts are > 0 (Issue #200)
+- Bypass _rpc method and just send frames for Basic.Ack, Basic.Nack, Basic.Reject (Issue #205)
+
+0.9.6 - 2012-10-29
+------------------
+
+`0.9.6 `_
+
+**New features**
+
+- URLParameters
+- BlockingChannel.start_consuming() and BlockingChannel.stop_consuming()
+- Delivery Confirmations
+- Improved unittests
+
+**Major bugfix areas**
+
+- Connection handling
+- Blocking functionality in the BlockingConnection
+- SSL
+- UTF-8 Handling
+
+**Removals**
+
+- pika.reconnection_strategies
+- pika.channel.ChannelTransport
+- pika.log
+- pika.template
+- examples directory
+
+0.9.5 - 2011-03-29
+------------------
+
+`0.9.5 `_
+
+**Changelog**
+
+- Scope changes with adapter IOLoops and CallbackManager allowing for cleaner, multi-threaded operation
+- Add support for Confirm.Select with channel.Channel.confirm_delivery()
+- Add examples of delivery confirmation to examples (demo_send_confirmed.py)
+- Update uses of log.warn with warnings.warn for TCP Back-pressure alerting
+- License boilerplate updated to simplify license text in source files
+- Increment the timeout in select_connection.SelectPoller, reducing CPU utilization
+- Bug fix in Heartbeat frame delivery addressing issue #35
+- Remove abuse of pika.log.method_call through a majority of the code
+- Rename of key modules: table to data, frames to frame
+- Cleanup of frame module and related classes
+- Restructure of tests and test runner
+- Update functional tests to respect RABBITMQ_HOST, RABBITMQ_PORT environment variables
+- Bug fixes to reconnection_strategies module
+- Fix the scale of timeout for PollPoller to be specified in milliseconds
+- Remove mutable default arguments in RPC calls
+- Add data type validation to RPC calls
+- Move optional credentials erasing out of connection.Connection into credentials module
+- Add support to allow for additional external credential types
+- Add a NullHandler to prevent the 'No handlers could be found for logger "pika"' error message when not using pika.log in a client app at all.
+- Clean up all examples to make them easier to read and use
+- Move documentation into its own repository https://github.com/pika/documentation
+
+- channel.py
+
+  - Move channel.MAX_CHANNELS constant from connection.CHANNEL_MAX
+  - Add default value of None to ChannelTransport.rpc
+  - Validate callback and acceptable replies parameters in ChannelTransport.RPC
+  - Remove unused connection attribute from Channel
+
+- connection.py
+
+  - Remove unused import of struct
+  - Remove direct import of pika.credentials.PlainCredentials
+  - Change to import pika.credentials
+  - Move CHANNEL_MAX to channel.MAX_CHANNELS
+  - Change ConnectionParameters initialization parameter heartbeat to boolean
+  - Validate all inbound parameter types in ConnectionParameters
+  - Remove the Connection._erase_credentials stub method in favor of letting the Credentials object deal with that itself.
+  - Warn if the credentials object intends on erasing the credentials and a reconnection strategy other than NullReconnectionStrategy is specified.
+  - Change the default types for callback and acceptable_replies in Connection._rpc
+  - Validate the callback and acceptable_replies data types in Connection._rpc
+
+- adapters.blocking_connection.BlockingConnection
+
+  - Addition of _adapter_disconnect to blocking_connection.BlockingConnection
+  - Add timeout methods to BlockingConnection addressing issue #41
+  - BlockingConnection didn't allow you to register more than one consumer callback because basic_consume was overridden to block immediately. New behavior allows you to do so.
+  - Removed overriding of base basic_consume and basic_cancel methods. Now uses underlying Channel versions of those methods.
+  - Added start_consuming() method to BlockingChannel to start the consumption loop.
+  - Updated stop_consuming() to iterate through all the registered consumers in self._consumers and issue a basic_cancel.
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/CONTRIBUTING.md b/NodeRed/NodeRedFiles/pika-0.13.1/CONTRIBUTING.md
new file mode 100644
index 000000000..d856697ca
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/CONTRIBUTING.md
@@ -0,0 +1,68 @@
+# Contributing
+
+## Test Coverage
+
+To contribute to Pika, please make sure that any new features or changes
+to existing functionality **include test coverage**.
+
+*Pull requests that add or change code without coverage have a much lower chance
+of being accepted.*
+
+
+## Prerequisites
+
+The Pika test suite has a couple of requirements:
+
+ * Dependencies from `test-requirements.txt` are installed
+ * A RabbitMQ node with all defaults is running on `localhost:5672`
+
+
+## Installing Dependencies
+
+To install the dependencies needed to run Pika tests, use
+
+    pip install -r test-requirements.txt
+
+which on Python 3 might look like this:
+
+    pip3 install -r test-requirements.txt
+
+
+## Running Tests
+
+To run all test suites, use
+
+    nosetests
+
+Note that some tests are OS-specific (e.g. epoll on Linux
+or kqueue on MacOS and BSD). Those will be skipped
+automatically.
+
+If you would like to run TLS/SSL tests, use the following procedure:
+
+* Create a `rabbitmq.conf` file:
+
+  ```
+  sed -e "s#PIKA_DIR#$PWD#g" ./testdata/rabbitmq.conf.in > ./testdata/rabbitmq.conf
+  ```
+
+* Start RabbitMQ and use the configuration file you just created. An example command
+  that works with the `generic-unix` package is as follows:
+
+  ```
+  $ RABBITMQ_CONFIG_FILE=/path/to/pika/testdata/rabbitmq.conf ./sbin/rabbitmq-server
+  ```
+
+* Run the tests indicating that TLS/SSL connections should be used:
+
+  ```
+  PIKA_TEST_TLS=true nosetests
+  ```
+
+
+## Code Formatting
+
+Please format your code using [yapf](http://pypi.python.org/pypi/yapf)
+with ``google`` style prior to issuing your pull request. *Note: only format those
+lines that you have changed in your pull request. If you format an entire file and
+change code outside of the scope of your PR, it will likely be rejected.*
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/LICENSE b/NodeRed/NodeRedFiles/pika-0.13.1/LICENSE
new file mode 100644
index 000000000..0aed110fe
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2009-2017, Tony Garnock-Jones, Gavin M. Roy, Pivotal and others.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of the Pika project nor the names of its contributors may be used
+   to endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/MANIFEST.in b/NodeRed/NodeRedFiles/pika-0.13.1/MANIFEST.in new file mode 100644 index 000000000..9c8317c45 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/MANIFEST.in @@ -0,0 +1,2 @@ +include LICENSE +include README.rst \ No newline at end of file diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/README.rst b/NodeRed/NodeRedFiles/pika-0.13.1/README.rst new file mode 100644 index 000000000..ac39d70f2 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/README.rst @@ -0,0 +1,157 @@ +Pika +==== +Pika is a RabbitMQ (AMQP-0-9-1) client library for Python. + +|Version| |Python versions| |Status| |Coverage| |License| |Docs| + +Introduction +------------- +Pika is a pure-Python implementation of the AMQP 0-9-1 protocol including RabbitMQ's +extensions. + +- Python 2.7 and 3.4+ are supported. + +- Since threads aren't appropriate to every situation, it doesn't + require threads. It takes care not to forbid them, either. The same + goes for greenlets, callbacks, continuations and generators. It is + not necessarily thread-safe however, and your mileage will vary. + +- People may be using direct sockets, plain old `select()`, + or any of the wide variety of ways of getting network events to and from a + python application. Pika tries to stay compatible with all of these, and to + make adapting it to a new environment as simple as possible. + +Documentation +------------- +Pika's documentation can be found at `https://pika.readthedocs.io `_ + +Example +------- +Here is the most simple example of use, sending a message with the BlockingConnection adapter: + +.. code :: python + + import pika + connection = pika.BlockingConnection() + channel = connection.channel() + channel.basic_publish(exchange='example', + routing_key='test', + body='Test Message') + connection.close() + +And an example of writing a blocking consumer: + +.. code :: python + + import pika + connection = pika.BlockingConnection() + channel = connection.channel() + + for method_frame, properties, body in channel.consume('test'): + + # Display the message parts and ack the message + print(method_frame, properties, body) + channel.basic_ack(method_frame.delivery_tag) + + # Escape out of the loop after 10 messages + if method_frame.delivery_tag == 10: + break + + # Cancel the consumer and return any pending messages + requeued_messages = channel.cancel() + print('Requeued %i messages' % requeued_messages) + connection.close() + +Pika provides the following adapters +------------------------------------ + +- AsyncioConnection - adapter for the Python3 AsyncIO event loop +- BlockingConnection - enables blocking, synchronous operation on top of library for simple uses +- SelectConnection - fast asynchronous adapter +- TornadoConnection - adapter for use with the Tornado IO Loop http://tornadoweb.org +- TwistedConnection - adapter for use with the Twisted asynchronous package http://twistedmatrix.com/ + +Requesting message ACKs from another thread +------------------------------------------- +The single-threaded usage constraint of an individual Pika connection adapter +instance may result in a dropped AMQP/stream connection due to AMQP heartbeat +timeout in consumers that take a long time to process an incoming message. A +common solution is to delegate processing of the incoming messages to another +thread, while the connection adapter's thread continues to service its ioloop's +message pump, permitting AMQP heartbeats and other I/O to be serviced in a +timely fashion. 
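+The sketch below is one minimal way to structure that hand-off. It is an
+illustration under a few assumptions rather than a complete program: it presumes
+an existing ``BlockingConnection`` named ``connection``, a consuming channel
+named ``channel``, a placeholder work function ``do_work``, and the
+``ack_message`` helper shown a little further below.
+
+.. code :: python
+
+    import functools
+    import threading
+
+    def process_message(connection, channel, delivery_tag, body):
+        # Runs on a worker thread, so the connection adapter's thread stays
+        # free to service heartbeats and other I/O in the meantime.
+        do_work(body)  # placeholder for the application's actual processing
+
+        # Request that the ACK happen on the ioloop thread; ack_message is
+        # the helper defined below.
+        cb = functools.partial(ack_message, channel, delivery_tag)
+        connection.add_callback_threadsafe(cb)
+
+    def on_message(channel, method_frame, properties, body):
+        # Consumer callback: hand the delivery off and return promptly.
+        worker = threading.Thread(
+            target=process_message,
+            args=(connection, channel, method_frame.delivery_tag, body))
+        worker.start()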
+ +Messages processed in another thread may not be ACK'ed directly from that thread, +since all accesses to the connection adapter instance must be from a single +thread - the thread that is running the adapter's ioloop. However, this may be +accomplished by requesting a callback to be executed in the adapter's ioloop +thread. For example, the callback function's implementation might look like this: + +.. code :: python + + def ack_message(channel, delivery_tag): + """Note that `channel` must be the same pika channel instance via which + the message being ACKed was retrieved (AMQP protocol constraint). + """ + if channel.is_open: + channel.basic_ack(delivery_tag) + else: + # Channel is already closed, so we can't ACK this message; + # log and/or do something that makes sense for your app in this case. + pass + +The code running in the other thread may request the `ack_message()` function +to be executed in the connection adapter's ioloop thread using an +adapter-specific mechanism: + +- :py:class:`pika.BlockingConnection` abstracts its ioloop from the application + and thus exposes :py:meth:`pika.BlockingConnection.add_callback_threadsafe()`. + Refer to this method's docstring for additional information. For example: + + .. code :: python + + connection.add_callback_threadsafe(functools.partial(ack_message, channel, delivery_tag)) + +- When using a non-blocking connection adapter, such as +:py:class:`pika.adapters.asyncio_connection.AsyncioConnection` or +:py:class:`pika.SelectConnection`, you use the underlying asynchronous +framework's native API for requesting an ioloop-bound callback from +another thread. For example, `SelectConnection`'s `IOLoop` provides +`add_callback_threadsafe()`, `Tornado`'s `IOLoop` has +`add_callback()`, while `asyncio`'s event loop exposes +`call_soon_threadsafe()`. + +This threadsafe callback request mechanism may also be used to delegate +publishing of messages, etc., from a background thread to the connection adapter's +thread. + +Contributing +------------ +To contribute to pika, please make sure that any new features or changes +to existing functionality **include test coverage**. + +*Pull requests that add or change code without coverage will most likely be rejected.* + +Additionally, please format your code using `yapf `_ +with ``google`` style prior to issuing your pull request. *Note: only format those +lines that you have changed in your pull request. If you format an entire file and +change code outside of the scope of your PR, it will likely be rejected.* + +.. |Version| image:: https://img.shields.io/pypi/v/pika.svg? + :target: http://badge.fury.io/py/pika + +.. |Python versions| image:: https://img.shields.io/pypi/pyversions/pika.svg + :target: https://pypi.python.org/pypi/pika + +.. |Status| image:: https://img.shields.io/travis/pika/pika.svg? + :target: https://travis-ci.org/pika/pika + +.. |Coverage| image:: https://img.shields.io/codecov/c/github/pika/pika.svg? + :target: https://codecov.io/github/pika/pika?branch=master + +.. |License| image:: https://img.shields.io/pypi/l/pika.svg? + :target: https://pika.readthedocs.io + +.. 
|Docs| image:: https://readthedocs.org/projects/pika/badge/?version=stable
   :target: https://pika.readthedocs.io
   :alt: Documentation Status
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/appveyor.yml b/NodeRed/NodeRedFiles/pika-0.13.1/appveyor.yml
new file mode 100644
index 000000000..13259f809
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/appveyor.yml
@@ -0,0 +1,107 @@
+# Windows build and test of Pika
+
+environment:
+  erlang_download_url: "http://erlang.org/download/otp_win64_19.3.exe"
+  erlang_exe_path: "C:\\Users\\appveyor\\erlang_19.3.exe"
+  erlang_home_dir: "C:\\Users\\appveyor\\erlang"
+  erlang_erts_version: "erts-8.3"
+
+  rabbitmq_version: 3.7.4
+  rabbitmq_installer_download_url: "https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.7.4/rabbitmq-server-3.7.4.exe"
+  rabbitmq_installer_path: "C:\\Users\\appveyor\\rabbitmq-server-3.7.4.exe"
+
+  matrix:
+    - PYTHON_ARCH: "32"
+      PYTHONHOME: "C:\\Python27"
+      PIKA_TEST_TLS: false
+    - PYTHON_ARCH: "32"
+      PYTHONHOME: "C:\\Python27"
+      PIKA_TEST_TLS: true
+
+
+cache:
+  # RabbitMQ is a pretty big package, so caching it in hopes of expediting the
+  # runtime
+  - "%erlang_exe_path%"
+  - "%rabbitmq_installer_path%"
+
+
+install:
+  - SET PYTHONPATH=%PYTHONHOME%
+  - SET PATH=%PYTHONHOME%\Scripts;%PYTHONHOME%;%PATH%
+
+  # For diagnostics
+  - ECHO %PYTHONPATH%
+  - ECHO %PATH%
+  - python --version
+
+  - ECHO Upgrading pip...
+  - python -m pip install --upgrade pip setuptools
+  - pip --version
+
+  - ECHO Installing wheel...
+  - pip install wheel
+
+
+build_script:
+  - ECHO Building distributions...
+  - python setup.py sdist bdist bdist_wheel
+  - DIR /s *.whl
+
+
+artifacts:
+  - path: 'dist\*.whl'
+    name: pika wheel
+
+
+before_test:
+  # Install test requirements
+  - ECHO Installing pika...
+  - python setup.py install
+
+  - ECHO Installing pika test requirements...
+  - pip install -r test-requirements.txt
+
+  # List contents of C:\ to help debug caching of rabbitmq artifacts
+  # - DIR C:\
+
+  - ps: $webclient=New-Object System.Net.WebClient
+
+  - ECHO Downloading Erlang...
+  - ps: if (-Not (Test-Path "$env:erlang_exe_path")) { $webclient.DownloadFile("$env:erlang_download_url", "$env:erlang_exe_path") } else { Write-Host "Found" $env:erlang_exe_path "in cache." }
+
+  - ECHO Installing Erlang...
+  - start /B /WAIT %erlang_exe_path% /S /D=%erlang_home_dir%
+  - set ERLANG_HOME=%erlang_home_dir%
+
+  - ECHO Downloading RabbitMQ...
+  - ps: if (-Not (Test-Path "$env:rabbitmq_installer_path")) { $webclient.DownloadFile("$env:rabbitmq_installer_download_url", "$env:rabbitmq_installer_path") } else { Write-Host "Found" $env:rabbitmq_installer_path "in cache." }
+
+  - ECHO Creating directory %AppData%\RabbitMQ...
+  - ps: New-Item -ItemType Directory -ErrorAction Continue -Path "$env:AppData/RabbitMQ"
+
+  - ECHO Creating RabbitMQ configuration file in %AppData%\RabbitMQ...
+  - ps: Get-Content C:/Projects/pika/testdata/rabbitmq.conf.in | %{ $_ -replace 'PIKA_DIR', 'C:/projects/pika' } | Set-Content -Path "$env:AppData/RabbitMQ/rabbitmq.conf"
+  - ps: Get-Content "$env:AppData/RabbitMQ/rabbitmq.conf"
+
+  - ECHO Creating Erlang cookie files...
+  - ps: '[System.IO.File]::WriteAllText("C:\Users\appveyor\.erlang.cookie", "PIKAISTHEBEST", [System.Text.Encoding]::ASCII)'
+  - ps: '[System.IO.File]::WriteAllText("C:\Windows\System32\config\systemprofile\.erlang.cookie", "PIKAISTHEBEST", [System.Text.Encoding]::ASCII)'
+
+  - ECHO Installing and starting RabbitMQ with default config...
+ - start /B /WAIT %rabbitmq_installer_path% /S + - ps: (Get-Service -Name RabbitMQ).Status + + - ECHO Waiting for epmd to report that RabbitMQ has started... + - ps: 'C:\projects\pika\testdata\wait-epmd.ps1' + - ps: 'C:\projects\pika\testdata\wait-rabbitmq.ps1' + + - ECHO Getting RabbitMQ status... + - cmd /c "C:\Program Files\RabbitMQ Server\rabbitmq_server-%rabbitmq_version%\sbin\rabbitmqctl.bat" status + + +test_script: + - nosetests + +# Since Pika is source-only there's no need to deploy from Windows +deploy: false diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/Makefile b/NodeRed/NodeRedFiles/pika-0.13.1/docs/Makefile new file mode 100644 index 000000000..f7b78b37a --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." 
+ +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pika.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pika.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/pika" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pika" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/conf.py b/NodeRed/NodeRedFiles/pika-0.13.1/docs/conf.py
new file mode 100644
index 000000000..bb1aa36dc
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/conf.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+import sys
+sys.path.insert(0, '../')
+#needs_sphinx = '1.0'
+
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
+              'sphinx.ext.intersphinx']
+
+intersphinx_mapping = {'python': ('https://docs.python.org/3/',
+                                  'https://docs.python.org/3/objects.inv'),
+                       'tornado': ('http://www.tornadoweb.org/en/stable/',
+                                   'http://www.tornadoweb.org/en/stable/objects.inv')}
+
+templates_path = ['_templates']
+
+source_suffix = '.rst'
+master_doc = 'index'
+
+project = 'pika'
+copyright = '2009-2017, Tony Garnock-Jones, Gavin M. Roy, Pivotal Software, Inc and contributors.'
+
+import pika
+release = pika.__version__
+version = '.'.join(release.split('.')[0:1])
+
+exclude_patterns = ['_build']
+add_function_parentheses = True
+add_module_names = True
+show_authors = True
+pygments_style = 'sphinx'
+modindex_common_prefix = ['pika']
+html_theme = 'default'
+html_static_path = ['_static']
+htmlhelp_basename = 'pikadoc'
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/contributors.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/contributors.rst
new file mode 100644
index 000000000..9cd50ef20
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/contributors.rst
@@ -0,0 +1,104 @@
+Contributors
+============
+The following people have directly contributed code by way of new features and/or bug fixes to Pika:
+
+ - Gavin M. Roy
+ - Tony Garnock-Jones
+ - Vitaly Kruglikov
+ - Michael Laing
+ - Marek Majkowski
+ - Jan Urbański
+ - Brian K. Jones
+ - Ask Solem
+ - ml
+ - Will
+ - atatsu
+ - Fredrik Svensson
+ - Pedro Abranches
+ - Kyösti Herrala
+ - Erik Andersson
+ - Charles Law
+ - Alex Chandel
+ - Tristan Penman
+ - Raphaël De Giusti
+ - Jozef Van Eenbergen
+ - Josh Braegger
+ - Jason J. W. Williams
+ - James Mutton
+ - Cenk Alti
+ - Asko Soukka
+ - Antti Haapala
+ - Anton Ryzhov
+ - cellscape
+ - cacovsky
+ - bra-fsn
+ - ateska
+ - Roey Berman
+ - Robert Weidlich
+ - Riccardo Cirimelli
+ - Perttu Ranta-aho
+ - Pau Gargallo
+ - Kane
+ - Kamil Kisiel
+ - Jonty Wareing
+ - Jonathan Kirsch
+ - Jacek 'Forger' Całusiński
+ - Garth Williamson
+ - Erik Olof Gunnar Andersson
+ - David Strauss
+ - Anton V. Yanchenko
+ - Alexey Myasnikov
+ - Alessandro Tagliapietra
+ - Adam Flynn
+ - skftn
+ - saarni
+ - pavlobaron
+ - nonleaf
+ - markcf
+ - george y
+ - eivanov
+ - bstemshorn
+ - a-tal
+ - Yang Yang
+ - Stuart Longland
+ - Sigurd Høgsbro
+ - Sean Dwyer
+ - Samuel Stauffer
+ - Roberto Decurnex
+ - Rikard Hultén
+ - Richard Boulton
+ - Ralf Nyren
+ - Qi Fan
+ - Peter Magnusson
+ - Pankrat
+ - Olivier Le Thanh Duong
+ - Njal Karevoll
+ - Milan Skuhra
+ - Mik Kocikowski
+ - Michael Kenney
+ - Mark Unsworth
+ - Luca Wehrstedt
+ - Laurent Eschenauer
+ - Lars van de Kerkhof
+ - Kyösti Herrala
+ - Juhyeong Park
+ - JuhaS
+ - Josh Hansen
+ - Jorge Puente Sarrín
+ - Jeff Tang
+ - Jeff Fein-Worton
+ - Jeff
+ - Hunter Morris
+ - Guruprasad
+ - Garrett Cooper
+ - Frank Slaughter
+ - Dustin Koupal
+ - Bjorn Sandberg
+ - Axel Eirola
+ - Andrew Smith
+ - Andrew Grigorev
+ - Andrew
+ - Allard Hoeve
+ - A.Shaposhnikov
+
+*Contributors listed by commit count.*
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples.rst
new file mode 100644
index 000000000..5df6740c8
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples.rst
@@ -0,0 +1,23 @@
+Usage Examples
+==============
+
+Pika supports various modes of use, from the synchronous BlockingConnection adapter to the various asynchronous connection adapters. The following examples illustrate the ways that you can use Pika in your projects.
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   examples/using_urlparameters
+   examples/connecting_async
+   examples/blocking_basic_get
+   examples/blocking_consume
+   examples/blocking_consumer_generator
+   examples/comparing_publishing_sync_async
+   examples/blocking_delivery_confirmations
+   examples/blocking_publish_mandatory
+   examples/asynchronous_consumer_example
+   examples/asynchronous_publisher_example
+   examples/twisted_example
+   examples/tornado_consumer
+   examples/tls_mutual_authentication
+   examples/tls_server_authentication
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asynchronous_consumer_example.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asynchronous_consumer_example.rst
new file mode 100644
index 000000000..8ec3f25f7
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asynchronous_consumer_example.rst
@@ -0,0 +1,357 @@
+Asynchronous consumer example
+=============================
+The following example implements a consumer that will respond to RPC commands sent from RabbitMQ. For example, it will reconnect if RabbitMQ closes the connection and will shut down if RabbitMQ cancels the consumer or closes the channel. While it may look intimidating, each method is very short and represents an individual action that a consumer can do.
+
+consumer.py::
+
+    # -*- coding: utf-8 -*-
+
+    import logging
+    import pika
+
+    LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
+                  '-35s %(lineno) -5d: %(message)s')
+    LOGGER = logging.getLogger(__name__)
+
+
+    class ExampleConsumer(object):
+        """This is an example consumer that will handle unexpected interactions
+        with RabbitMQ such as channel and connection closures.
+
+        If RabbitMQ closes the connection, it will reopen it. You should
+        look at the output, as there are limited reasons why the connection may
+        be closed, which usually are tied to permission related issues or
+        socket timeouts.
+
+        If the channel is closed, it will indicate a problem with one of the
+        commands that were issued and that should surface in the output as well.
+ + """ + EXCHANGE = 'message' + EXCHANGE_TYPE = 'topic' + QUEUE = 'text' + ROUTING_KEY = 'example.text' + + def __init__(self, amqp_url): + """Create a new instance of the consumer class, passing in the AMQP + URL used to connect to RabbitMQ. + + :param str amqp_url: The AMQP url to connect with + + """ + self._connection = None + self._channel = None + self._closing = False + self._consumer_tag = None + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. + + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return pika.SelectConnection(pika.URLParameters(self._url), + self.on_connection_open, + stop_ioloop_on_close=False) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.add_on_connection_close_callback() + self.open_channel() + + def add_on_connection_close_callback(self): + """This method adds an on close callback that will be invoked by pika + when RabbitMQ closes the connection to the publisher unexpectedly. + + """ + LOGGER.info('Adding connection close callback') + self._connection.add_on_close_callback(self.on_connection_closed) + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. + + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._closing: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self.reconnect) + + def reconnect(self): + """Will be invoked by the IOLoop timer if the connection is + closed. See the on_connection_closed method. + + """ + # This is the old connection IOLoop instance, stop its ioloop + self._connection.ioloop.stop() + + if not self._closing: + + # Create a new connection + self._connection = self.connect() + + # There is now a new connection, needs a new ioloop to run + self._connection.ioloop.start() + + def open_channel(self): + """Open a new channel with RabbitMQ by issuing the Channel.Open RPC + command. When RabbitMQ responds that the channel is open, the + on_channel_open callback will be invoked by pika. + + """ + LOGGER.info('Creating a new channel') + self._connection.channel(on_open_callback=self.on_channel_open) + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. 
+ + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. + + :param pika.channel.Channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel %i was closed: (%s) %s', + channel, reply_code, reply_text) + self._connection.close() + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. + + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. + + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. + + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def on_bindok(self, unused_frame): + """Invoked by pika when the Queue.Bind method has completed. At this + point we will start consuming messages by calling start_consuming + which will invoke the needed RPC commands to start the process. + + :param pika.frame.Method unused_frame: The Queue.BindOk response frame + + """ + LOGGER.info('Queue bound') + self.start_consuming() + + def start_consuming(self): + """This method sets up the consumer by first calling + add_on_cancel_callback so that the object is notified if RabbitMQ + cancels the consumer. 
It then issues the Basic.Consume RPC command + which returns the consumer tag that is used to uniquely identify the + consumer with RabbitMQ. We keep the value to use it when we want to + cancel consuming. The on_message method is passed in as a callback pika + will invoke when a message is fully received. + + """ + LOGGER.info('Issuing consumer related RPC commands') + self.add_on_cancel_callback() + self._consumer_tag = self._channel.basic_consume(self.on_message, + self.QUEUE) + + def add_on_cancel_callback(self): + """Add a callback that will be invoked if RabbitMQ cancels the consumer + for some reason. If RabbitMQ does cancel the consumer, + on_consumer_cancelled will be invoked by pika. + + """ + LOGGER.info('Adding consumer cancellation callback') + self._channel.add_on_cancel_callback(self.on_consumer_cancelled) + + def on_consumer_cancelled(self, method_frame): + """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer + receiving messages. + + :param pika.frame.Method method_frame: The Basic.Cancel frame + + """ + LOGGER.info('Consumer was cancelled remotely, shutting down: %r', + method_frame) + if self._channel: + self._channel.close() + + def on_message(self, unused_channel, basic_deliver, properties, body): + """Invoked by pika when a message is delivered from RabbitMQ. The + channel is passed for your convenience. The basic_deliver object that + is passed in carries the exchange, routing key, delivery tag and + a redelivered flag for the message. The properties passed in is an + instance of BasicProperties with the message properties and the body + is the message that was sent. + + :param pika.channel.Channel unused_channel: The channel object + :param pika.Spec.Basic.Deliver: basic_deliver method + :param pika.Spec.BasicProperties: properties + :param str|unicode body: The message body + + """ + LOGGER.info('Received message # %s from %s: %s', + basic_deliver.delivery_tag, properties.app_id, body) + self.acknowledge_message(basic_deliver.delivery_tag) + + def acknowledge_message(self, delivery_tag): + """Acknowledge the message delivery from RabbitMQ by sending a + Basic.Ack RPC method for the delivery tag. + + :param int delivery_tag: The delivery tag from the Basic.Deliver frame + + """ + LOGGER.info('Acknowledging message %s', delivery_tag) + self._channel.basic_ack(delivery_tag) + + def stop_consuming(self): + """Tell RabbitMQ that you would like to stop consuming by sending the + Basic.Cancel RPC command. + + """ + if self._channel: + LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ') + self._channel.basic_cancel(self.on_cancelok, self._consumer_tag) + + def on_cancelok(self, unused_frame): + """This method is invoked by pika when RabbitMQ acknowledges the + cancellation of a consumer. At this point we will close the channel. + This will invoke the on_channel_closed method once the channel has been + closed, which will in-turn close the connection. + + :param pika.frame.Method unused_frame: The Basic.CancelOk frame + + """ + LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer') + self.close_channel() + + def close_channel(self): + """Call to close the channel with RabbitMQ cleanly by issuing the + Channel.Close RPC command. + + """ + LOGGER.info('Closing the channel') + self._channel.close() + + def run(self): + """Run the example consumer by connecting to RabbitMQ and then + starting the IOLoop to block and allow the SelectConnection to operate. 
+ + """ + self._connection = self.connect() + self._connection.ioloop.start() + + def stop(self): + """Cleanly shutdown the connection to RabbitMQ by stopping the consumer + with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok + will be invoked by pika, which will then closing the channel and + connection. The IOLoop is started again because this method is invoked + when CTRL-C is pressed raising a KeyboardInterrupt exception. This + exception stops the IOLoop which needs to be running for pika to + communicate with RabbitMQ. All of the commands issued prior to starting + the IOLoop will be buffered but not processed. + + """ + LOGGER.info('Stopping') + self._closing = True + self.stop_consuming() + self._connection.ioloop.start() + LOGGER.info('Stopped') + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + LOGGER.info('Closing connection') + self._connection.close() + + + def main(): + logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) + example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F') + try: + example.run() + except KeyboardInterrupt: + example.stop() + + + if __name__ == '__main__': + main() + diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asynchronous_publisher_example.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asynchronous_publisher_example.rst new file mode 100644 index 000000000..887148db0 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asynchronous_publisher_example.rst @@ -0,0 +1,359 @@ +Asynchronous publisher example +============================== +The following example implements a publisher that will respond to RPC commands sent from RabbitMQ and uses delivery confirmations. It will reconnect if RabbitMQ closes the connection and will shutdown if RabbitMQ closes the channel. While it may look intimidating, each method is very short and represents a individual actions that a publisher can do. + +publisher.py:: + + # -*- coding: utf-8 -*- + + import logging + import pika + import json + + LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') + LOGGER = logging.getLogger(__name__) + + + class ExamplePublisher(object): + """This is an example publisher that will handle unexpected interactions + with RabbitMQ such as channel and connection closures. + + If RabbitMQ closes the connection, it will reopen it. You should + look at the output, as there are limited reasons why the connection may + be closed, which usually are tied to permission related issues or + socket timeouts. + + It uses delivery confirmations and illustrates one way to keep track of + messages that have been sent and if they've been confirmed by RabbitMQ. + + """ + EXCHANGE = 'message' + EXCHANGE_TYPE = 'topic' + PUBLISH_INTERVAL = 1 + QUEUE = 'text' + ROUTING_KEY = 'example.text' + + def __init__(self, amqp_url): + """Setup the example publisher object, passing in the URL we will use + to connect to RabbitMQ. + + :param str amqp_url: The URL for connecting to RabbitMQ + + """ + self._connection = None + self._channel = None + + self._deliveries = None + self._acked = None + self._nacked = None + self._message_number = None + + self._stopping = False + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. 
If you want the reconnection to work, make + sure you set stop_ioloop_on_close to False, which is not the default + behavior of this adapter. + + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return pika.SelectConnection(pika.URLParameters(self._url), + on_open_callback=self.on_connection_open, + on_close_callback=self.on_connection_closed, + stop_ioloop_on_close=False) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.open_channel() + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. + + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._stopping: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self._connection.ioloop.stop) + + def open_channel(self): + """This method will open a new channel with RabbitMQ by issuing the + Channel.Open RPC command. When RabbitMQ confirms the channel is open + by sending the Channel.OpenOK RPC reply, the on_channel_open method + will be invoked. + + """ + LOGGER.info('Creating a new channel') + self._connection.channel(on_open_callback=self.on_channel_open) + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. + + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. + + :param pika.channel.Channel channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text) + self._channel = None + if not self._stopping: + self._connection.close() + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. 
+ + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. + + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. + + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. + + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def on_bindok(self, unused_frame): + """This method is invoked by pika when it receives the Queue.BindOk + response from RabbitMQ. Since we know we're now setup and bound, it's + time to start publishing.""" + LOGGER.info('Queue bound') + self.start_publishing() + + def start_publishing(self): + """This method will enable delivery confirmations and schedule the + first message to be sent to RabbitMQ + + """ + LOGGER.info('Issuing consumer related RPC commands') + self.enable_delivery_confirmations() + self.schedule_next_message() + + def enable_delivery_confirmations(self): + """Send the Confirm.Select RPC method to RabbitMQ to enable delivery + confirmations on the channel. The only way to turn this off is to close + the channel and create a new one. + + When the message is confirmed from RabbitMQ, the + on_delivery_confirmation method will be invoked passing in a Basic.Ack + or Basic.Nack method from RabbitMQ that will indicate which messages it + is confirming or rejecting. + + """ + LOGGER.info('Issuing Confirm.Select RPC command') + self._channel.confirm_delivery(self.on_delivery_confirmation) + + def on_delivery_confirmation(self, method_frame): + """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC + command, passing in either a Basic.Ack or Basic.Nack frame with + the delivery tag of the message that was published. The delivery tag + is an integer counter indicating the message number that was sent + on the channel via Basic.Publish. Here we're just doing house keeping + to keep track of stats and remove message numbers that we expect + a delivery confirmation of from the list used to keep track of messages + that are pending confirmation. 
+ + :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame + + """ + confirmation_type = method_frame.method.NAME.split('.')[1].lower() + LOGGER.info('Received %s for delivery tag: %i', + confirmation_type, + method_frame.method.delivery_tag) + if confirmation_type == 'ack': + self._acked += 1 + elif confirmation_type == 'nack': + self._nacked += 1 + self._deliveries.remove(method_frame.method.delivery_tag) + LOGGER.info('Published %i messages, %i have yet to be confirmed, ' + '%i were acked and %i were nacked', + self._message_number, len(self._deliveries), + self._acked, self._nacked) + + def schedule_next_message(self): + """If we are not closing our connection to RabbitMQ, schedule another + message to be delivered in PUBLISH_INTERVAL seconds. + + """ + LOGGER.info('Scheduling next message for %0.1f seconds', + self.PUBLISH_INTERVAL) + self._connection.add_timeout(self.PUBLISH_INTERVAL, + self.publish_message) + + def publish_message(self): + """If the class is not stopping, publish a message to RabbitMQ, + appending a list of deliveries with the message number that was sent. + This list will be used to check for delivery confirmations in the + on_delivery_confirmations method. + + Once the message has been sent, schedule another message to be sent. + The main reason I put scheduling in was just so you can get a good idea + of how the process is flowing by slowing down and speeding up the + delivery intervals by changing the PUBLISH_INTERVAL constant in the + class. + + """ + if self._channel is None or not self._channel.is_open: + return + + hdrs = {u'مفتاح': u' قيمة', + u'键': u'值', + u'キー': u'値'} + properties = pika.BasicProperties(app_id='example-publisher', + content_type='application/json', + headers=hdrs) + + message = u'مفتاح قيمة 键 值 キー 値' + self._channel.basic_publish(self.EXCHANGE, self.ROUTING_KEY, + json.dumps(message, ensure_ascii=False), + properties) + self._message_number += 1 + self._deliveries.append(self._message_number) + LOGGER.info('Published message # %i', self._message_number) + self.schedule_next_message() + + def run(self): + """Run the example code by connecting and then starting the IOLoop. + + """ + while not self._stopping: + self._connection = None + self._deliveries = [] + self._acked = 0 + self._nacked = 0 + self._message_number = 0 + + try: + self._connection = self.connect() + self._connection.ioloop.start() + except KeyboardInterrupt: + self.stop() + if (self._connection is not None and + not self._connection.is_closed): + # Finish closing + self._connection.ioloop.start() + + LOGGER.info('Stopped') + + def stop(self): + """Stop the example by closing the channel and connection. We + set a flag here so that we stop scheduling new messages to be + published. The IOLoop is started because this method is + invoked by the Try/Catch below when KeyboardInterrupt is caught. + Starting the IOLoop again will allow the publisher to cleanly + disconnect from RabbitMQ. + + """ + LOGGER.info('Stopping') + self._stopping = True + self.close_channel() + self.close_connection() + + def close_channel(self): + """Invoke this command to close the channel with RabbitMQ by sending + the Channel.Close RPC command. 
+ + """ + if self._channel is not None: + LOGGER.info('Closing the channel') + self._channel.close() + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + if self._connection is not None: + LOGGER.info('Closing connection') + self._connection.close() + + + def main(): + logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) + + # Connect to localhost:5672 as guest with the password guest and virtual host "/" (%2F) + example = ExamplePublisher('amqp://guest:guest@localhost:5672/%2F?connection_attempts=3&heartbeat_interval=3600') + example.run() + + + if __name__ == '__main__': + main() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asyncio_consumer.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asyncio_consumer.rst new file mode 100644 index 000000000..1ea654ae7 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/asyncio_consumer.rst @@ -0,0 +1,355 @@ +Asyncio Consumer +================ +The following example implements a consumer using the +:class:`Asyncio adapter ` for the +`Asyncio library `_ that will respond to RPC commands sent + from RabbitMQ. For example, it will reconnect if RabbitMQ closes the connection and will shutdown if + RabbitMQ cancels the consumer or closes the channel. While it may look intimidating, each method is + very short and represents a individual actions that a consumer can do. + +consumer.py:: + + from pika import adapters + import pika + import logging + + LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') + + LOGGER = logging.getLogger(__name__) + + + class ExampleConsumer(object): + """This is an example consumer that will handle unexpected interactions + with RabbitMQ such as channel and connection closures. + + If RabbitMQ closes the connection, it will reopen it. You should + look at the output, as there are limited reasons why the connection may + be closed, which usually are tied to permission related issues or + socket timeouts. + + If the channel is closed, it will indicate a problem with one of the + commands that were issued and that should surface in the output as well. + + """ + EXCHANGE = 'message' + EXCHANGE_TYPE = 'topic' + QUEUE = 'text' + ROUTING_KEY = 'example.text' + + def __init__(self, amqp_url): + """Create a new instance of the consumer class, passing in the AMQP + URL used to connect to RabbitMQ. + + :param str amqp_url: The AMQP url to connect with + + """ + self._connection = None + self._channel = None + self._closing = False + self._consumer_tag = None + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. + + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return adapters.asyncio_connection.AsyncioConnection(pika.URLParameters(self._url), + self.on_connection_open) + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + LOGGER.info('Closing connection') + self._connection.close() + + def add_on_connection_close_callback(self): + """This method adds an on close callback that will be invoked by pika + when RabbitMQ closes the connection to the publisher unexpectedly. 
+ + """ + LOGGER.info('Adding connection close callback') + self._connection.add_on_close_callback(self.on_connection_closed) + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. + + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._closing: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self.reconnect) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.add_on_connection_close_callback() + self.open_channel() + + def reconnect(self): + """Will be invoked by the IOLoop timer if the connection is + closed. See the on_connection_closed method. + + """ + if not self._closing: + + # Create a new connection + self._connection = self.connect() + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. + + :param pika.channel.Channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel %i was closed: (%s) %s', + channel, reply_code, reply_text) + self._connection.close() + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. + + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. 
+ + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. + + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. + + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def add_on_cancel_callback(self): + """Add a callback that will be invoked if RabbitMQ cancels the consumer + for some reason. If RabbitMQ does cancel the consumer, + on_consumer_cancelled will be invoked by pika. + + """ + LOGGER.info('Adding consumer cancellation callback') + self._channel.add_on_cancel_callback(self.on_consumer_cancelled) + + def on_consumer_cancelled(self, method_frame): + """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer + receiving messages. + + :param pika.frame.Method method_frame: The Basic.Cancel frame + + """ + LOGGER.info('Consumer was cancelled remotely, shutting down: %r', + method_frame) + if self._channel: + self._channel.close() + + def acknowledge_message(self, delivery_tag): + """Acknowledge the message delivery from RabbitMQ by sending a + Basic.Ack RPC method for the delivery tag. + + :param int delivery_tag: The delivery tag from the Basic.Deliver frame + + """ + LOGGER.info('Acknowledging message %s', delivery_tag) + self._channel.basic_ack(delivery_tag) + + def on_message(self, unused_channel, basic_deliver, properties, body): + """Invoked by pika when a message is delivered from RabbitMQ. The + channel is passed for your convenience. The basic_deliver object that + is passed in carries the exchange, routing key, delivery tag and + a redelivered flag for the message. The properties passed in is an + instance of BasicProperties with the message properties and the body + is the message that was sent. + + :param pika.channel.Channel unused_channel: The channel object + :param pika.Spec.Basic.Deliver: basic_deliver method + :param pika.Spec.BasicProperties: properties + :param str|unicode body: The message body + + """ + LOGGER.info('Received message # %s from %s: %s', + basic_deliver.delivery_tag, properties.app_id, body) + self.acknowledge_message(basic_deliver.delivery_tag) + + def on_cancelok(self, unused_frame): + """This method is invoked by pika when RabbitMQ acknowledges the + cancellation of a consumer. At this point we will close the channel. + This will invoke the on_channel_closed method once the channel has been + closed, which will in-turn close the connection. 
+ + :param pika.frame.Method unused_frame: The Basic.CancelOk frame + + """ + LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer') + self.close_channel() + + def stop_consuming(self): + """Tell RabbitMQ that you would like to stop consuming by sending the + Basic.Cancel RPC command. + + """ + if self._channel: + LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ') + self._channel.basic_cancel(self.on_cancelok, self._consumer_tag) + + def start_consuming(self): + """This method sets up the consumer by first calling + add_on_cancel_callback so that the object is notified if RabbitMQ + cancels the consumer. It then issues the Basic.Consume RPC command + which returns the consumer tag that is used to uniquely identify the + consumer with RabbitMQ. We keep the value to use it when we want to + cancel consuming. The on_message method is passed in as a callback pika + will invoke when a message is fully received. + + """ + LOGGER.info('Issuing consumer related RPC commands') + self.add_on_cancel_callback() + self._consumer_tag = self._channel.basic_consume(self.on_message, + self.QUEUE) + + def on_bindok(self, unused_frame): + """Invoked by pika when the Queue.Bind method has completed. At this + point we will start consuming messages by calling start_consuming + which will invoke the needed RPC commands to start the process. + + :param pika.frame.Method unused_frame: The Queue.BindOk response frame + + """ + LOGGER.info('Queue bound') + self.start_consuming() + + def close_channel(self): + """Call to close the channel with RabbitMQ cleanly by issuing the + Channel.Close RPC command. + + """ + LOGGER.info('Closing the channel') + self._channel.close() + + def open_channel(self): + """Open a new channel with RabbitMQ by issuing the Channel.Open RPC + command. When RabbitMQ responds that the channel is open, the + on_channel_open callback will be invoked by pika. + + """ + LOGGER.info('Creating a new channel') + self._connection.channel(on_open_callback=self.on_channel_open) + + def run(self): + """Run the example consumer by connecting to RabbitMQ and then + starting the IOLoop to block and allow the SelectConnection to operate. + + """ + self._connection = self.connect() + self._connection.ioloop.start() + + def stop(self): + """Cleanly shutdown the connection to RabbitMQ by stopping the consumer + with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok + will be invoked by pika, which will then closing the channel and + connection. The IOLoop is started again because this method is invoked + when CTRL-C is pressed raising a KeyboardInterrupt exception. This + exception stops the IOLoop which needs to be running for pika to + communicate with RabbitMQ. All of the commands issued prior to starting + the IOLoop will be buffered but not processed. 
+ + """ + LOGGER.info('Stopping') + self._closing = True + self.stop_consuming() + self._connection.ioloop.start() + LOGGER.info('Stopped') + + + def main(): + logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) + example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F') + try: + example.run() + except KeyboardInterrupt: + example.stop() + + + if __name__ == '__main__': + main() + diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_basic_get.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_basic_get.rst new file mode 100644 index 000000000..d679ea825 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_basic_get.rst @@ -0,0 +1,23 @@ +Using the Blocking Connection to get a message from RabbitMQ +============================================================ + +.. _example_blocking_basic_get: + +The :py:meth:`BlockingChannel.basic_get ` method will return a tuple with the members. + +If the server returns a message, the first item in the tuple will be a :class:`pika.spec.Basic.GetOk` object with the current message count, the redelivered flag, the routing key that was used to put the message in the queue, and the exchange the message was published to. The second item will be a :py:class:`~pika.spec.BasicProperties` object and the third will be the message body. + +If the server did not return a message a tuple of None, None, None will be returned. + +Example of getting a message and acknowledging it:: + + import pika + + connection = pika.BlockingConnection() + channel = connection.channel() + method_frame, header_frame, body = channel.basic_get('test') + if method_frame: + print(method_frame, header_frame, body) + channel.basic_ack(method_frame.delivery_tag) + else: + print('No message returned') diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_consume.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_consume.rst new file mode 100644 index 000000000..85852e460 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_consume.rst @@ -0,0 +1,29 @@ +Using the Blocking Connection to consume messages from RabbitMQ +=============================================================== + +.. _example_blocking_basic_consume: + +The :py:meth:`BlockingChannel.basic_consume ` method assign a callback method to be called every time that RabbitMQ delivers messages to your consuming application. + +When pika calls your method, it will pass in the channel, a :py:class:`pika.spec.Basic.Deliver` object with the delivery tag, the redelivered flag, the routing key that was used to put the message in the queue, and the exchange the message was published to. The third argument will be a :py:class:`pika.spec.BasicProperties` object and the last will be the message body. 
+
+Example of consuming messages and acknowledging them::
+
+    import pika
+
+
+    def on_message(channel, method_frame, header_frame, body):
+        print(method_frame.delivery_tag)
+        print(body)
+        print()
+        channel.basic_ack(delivery_tag=method_frame.delivery_tag)
+
+
+    connection = pika.BlockingConnection()
+    channel = connection.channel()
+    channel.basic_consume(on_message, 'test')
+    try:
+        channel.start_consuming()
+    except KeyboardInterrupt:
+        channel.stop_consuming()
+    connection.close()
\ No newline at end of file
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_consumer_generator.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_consumer_generator.rst
new file mode 100644
index 000000000..f875a1095
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_consumer_generator.rst
@@ -0,0 +1,73 @@
+Using the BlockingChannel.consume generator to consume messages
+===============================================================
+
+.. _example_blocking_consumer_generator:
+
+The :py:meth:`BlockingChannel.consume <pika.adapters.blocking_connection.BlockingChannel.consume>` method is a generator that will return a tuple of method, properties and body.
+
+When you escape out of the loop, be sure to call ``channel.cancel()`` to return any unprocessed messages.
+
+Example of consuming messages and acknowledging them::
+
+    import pika
+
+    connection = pika.BlockingConnection()
+    channel = connection.channel()
+
+    # Get ten messages and break out
+    for method_frame, properties, body in channel.consume('test'):
+
+        # Display the message parts
+        print(method_frame)
+        print(properties)
+        print(body)
+
+        # Acknowledge the message
+        channel.basic_ack(method_frame.delivery_tag)
+
+        # Escape out of the loop after 10 messages
+        if method_frame.delivery_tag == 10:
+            break
+
+    # Cancel the consumer and return any pending messages
+    requeued_messages = channel.cancel()
+    print('Requeued %i messages' % requeued_messages)
+
+    # Close the channel and the connection
+    channel.close()
+    connection.close()
+
+If you have pending messages in the test queue, your output should look something like::
+
+    (pika)gmr-0x02:pika gmr$ python blocking_nack.py
+    <Basic.Deliver frame repr> <BasicProperties frame repr>
+    Hello World!
+    (...repeated for each of the ten consumed messages...)
+    Requeued 1894 messages
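+
+The generator can also be given an ``inactivity_timeout`` so the loop wakes up periodically even when the queue is idle. A minimal sketch (the timeout handling shown is an assumption to check against your pika version, which may yield ``None`` or a ``(None, None, None)`` tuple when the timeout expires)::
+
+    for event in channel.consume('test', inactivity_timeout=1):
+        if event is None or event[0] is None:
+            continue  # no message arrived within the timeout window
+        method_frame, properties, body = event
+        channel.basic_ack(method_frame.delivery_tag)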
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_delivery_confirmations.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_delivery_confirmations.rst
new file mode 100644
index 000000000..ade888f58
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_delivery_confirmations.rst
@@ -0,0 +1,28 @@
+Using Delivery Confirmations with the BlockingConnection
+========================================================
+
+The following code demonstrates how to turn on delivery confirmations with the BlockingConnection and how to check for confirmation from RabbitMQ::
+
+    import pika
+
+    # Open a connection to RabbitMQ on localhost using all default parameters
+    connection = pika.BlockingConnection()
+
+    # Open the channel
+    channel = connection.channel()
+
+    # Declare the queue
+    channel.queue_declare(queue="test", durable=True, exclusive=False, auto_delete=False)
+
+    # Turn on delivery confirmations
+    channel.confirm_delivery()
+
+    # Send a message
+    if channel.basic_publish(exchange='test',
+                             routing_key='test',
+                             body='Hello World!',
+                             properties=pika.BasicProperties(content_type='text/plain',
+                                                             delivery_mode=1)):
+        print('Message publish was confirmed')
+    else:
+        print('Message could not be confirmed')
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_publish_mandatory.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_publish_mandatory.rst
new file mode 100644
index 000000000..800cf667d
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/blocking_publish_mandatory.rst
@@ -0,0 +1,29 @@
+Ensuring message delivery with the mandatory flag
+=================================================
+
+The following example demonstrates how to check if a message is delivered by setting the mandatory flag and checking the return result when using the BlockingConnection::
+
+    import pika
+
+    # Open a connection to RabbitMQ on localhost using all default parameters
+    connection = pika.BlockingConnection()
+
+    # Open the channel
+    channel = connection.channel()
+
+    # Declare the queue
+    channel.queue_declare(queue="test", durable=True, exclusive=False, auto_delete=False)
+
+    # Enable delivery confirmations
+    channel.confirm_delivery()
+
+    # Send a message
+    if channel.basic_publish(exchange='test',
+                             routing_key='test',
+                             body='Hello World!',
+                             properties=pika.BasicProperties(content_type='text/plain',
+                                                             delivery_mode=1),
+                             mandatory=True):
+        print('Message was published')
+    else:
+        print('Message was returned')
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/comparing_publishing_sync_async.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/comparing_publishing_sync_async.rst
new file mode 100644
index 000000000..89c48faa7
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/comparing_publishing_sync_async.rst
@@ -0,0 +1,64 @@
+Comparing Message Publishing with BlockingConnection and SelectConnection
+=========================================================================
+
+For those doing simple, non-asynchronous programming, :py:class:`~pika.adapters.blocking_connection.BlockingConnection` proves to be the easiest way to get up and running with Pika to publish messages.
+
+In the following example, a connection is made to RabbitMQ listening on port *5672* on *localhost* using the username *guest* and password *guest* and virtual host */*.
Once connected, a channel is opened and a message is published to the *test_exchange* exchange using the *test_routing_key* routing key. The BasicProperties value passed in sets the message to delivery mode *1* (non-persisted) with a content-type of *text/plain*. Once the message is published, the connection is closed:: + + import pika + + parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F') + + connection = pika.BlockingConnection(parameters) + + channel = connection.channel() + + channel.basic_publish('test_exchange', + 'test_routing_key', + 'message body value', + pika.BasicProperties(content_type='text/plain', + delivery_mode=1)) + + connection.close() + + +In contrast, using :py:meth:`pika.adapters.select_connection.SelectConnection` and the other asynchronous adapters is more complicated and less pythonic, but when used with other asynchronous services can have tremendous performance improvements. In the following code example, all of the same parameters and values are used as were used in the previous example:: + + import pika + + # Step #3 + def on_open(connection): + + connection.channel(on_channel_open) + + # Step #4 + def on_channel_open(channel): + + channel.basic_publish('test_exchange', + 'test_routing_key', + 'message body value', + pika.BasicProperties(content_type='text/plain', + delivery_mode=1)) + + connection.close() + + # Step #1: Connect to RabbitMQ + parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F') + + connection = pika.SelectConnection(parameters=parameters, + on_open_callback=on_open) + + try: + + # Step #2 - Block on the IOLoop + connection.ioloop.start() + + # Catch a Keyboard Interrupt to make sure that the connection is closed cleanly + except KeyboardInterrupt: + + # Gracefully close the connection + connection.close() + + # Start the IOLoop again so Pika can communicate, it will stop on its own when the connection is closed + connection.ioloop.start() + diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/connecting_async.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/connecting_async.rst new file mode 100644 index 000000000..125de3c96 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/connecting_async.rst @@ -0,0 +1,49 @@ +Connecting to RabbitMQ with Callback-Passing Style +================================================== + +When you connect to RabbitMQ with an asynchronous adapter, you are writing event +oriented code. The connection adapter will block on the IOLoop that is watching +to see when pika should read data from and write data to RabbitMQ. Because you're +now blocking on the IOLoop, you will receive callback notifications when specific +events happen. + +Example Code +------------ +In the example, there are three steps that take place: + +1. Setup the connection to RabbitMQ +2. Start the IOLoop +3. Once connected, the on_open method will be called by Pika with a handle to + the connection. In this method, a new channel will be opened on the connection. +4. 
Once the channel is opened, you can do your other actions, whether they be + publishing messages, consuming messages or other RabbitMQ related activities.:: + + import pika + + # Step #3 + def on_open(connection): + connection.channel(on_channel_open) + + # Step #4 + def on_channel_open(channel): + channel.basic_publish('exchange_name', + 'routing_key', + 'Test Message', + pika.BasicProperties(content_type='text/plain', + type='example')) + + # Step #1: Connect to RabbitMQ + connection = pika.SelectConnection(on_open_callback=on_open) + + try: + # Step #2 - Block on the IOLoop + connection.ioloop.start() + + # Catch a Keyboard Interrupt to make sure that the connection is closed cleanly + except KeyboardInterrupt: + + # Gracefully close the connection + connection.close() + + # Start the IOLoop again so Pika can communicate, it will stop on its own when the connection is closed + connection.ioloop.start() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/direct_reply_to.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/direct_reply_to.rst new file mode 100644 index 000000000..3d8f6d8ec --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/direct_reply_to.rst @@ -0,0 +1,81 @@ +Direct reply-to example +============================== +The following example demonstrates the use of the RabbitMQ "Direct reply-to" feature via `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html for more info about this feature. + +direct_reply_to.py:: + + # -*- coding: utf-8 -*- + + """ + This example demonstrates the RabbitMQ "Direct reply-to" usage via + `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html + for more info about this feature. + """ + import pika + + + SERVER_QUEUE = 'rpc.server.queue' + + + def main(): + """ Here, Client sends "Marco" to RPC Server, and RPC Server replies with + "Polo". + + NOTE Normally, the server would be running separately from the client, but + in this very simple example both are running in the same thread and sharing + connection and channel. + + """ + with pika.BlockingConnection() as conn: + channel = conn.channel() + + # Set up server + + channel.queue_declare(queue=SERVER_QUEUE, + exclusive=True, + auto_delete=True) + channel.basic_consume(on_server_rx_rpc_request, queue=SERVER_QUEUE) + + + # Set up client + + # NOTE Client must create its consumer and publish RPC requests on the + # same channel to enable the RabbitMQ broker to make the necessary + # associations. + # + # Also, client must create the consumer *before* starting to publish the + # RPC requests. + # + # Client must create its consumer with no_ack=True, because the reply-to + # queue isn't real. 
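+        #
+        # ('amq.rabbitmq.reply-to' is a pseudo-queue: the broker routes replies
+        # straight back over this channel and never creates a server-side queue.)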
+
+        channel.basic_consume(on_client_rx_reply_from_server,
+                              queue='amq.rabbitmq.reply-to',
+                              no_ack=True)
+        channel.basic_publish(
+            exchange='',
+            routing_key=SERVER_QUEUE,
+            body='Marco',
+            properties=pika.BasicProperties(reply_to='amq.rabbitmq.reply-to'))
+
+        channel.start_consuming()
+
+
+    def on_server_rx_rpc_request(ch, method_frame, properties, body):
+        print('RPC Server got request:', body)
+
+        ch.basic_publish('', routing_key=properties.reply_to, body='Polo')
+
+        ch.basic_ack(delivery_tag=method_frame.delivery_tag)
+
+        print('RPC Server says good bye')
+
+
+    def on_client_rx_reply_from_server(ch, method_frame, properties, body):
+        print('RPC Client got reply:', body)
+
+        # NOTE A real client might want to make additional RPC requests, but in this
+        # simple example we're closing the channel after getting our first reply
+        # to force control to return from channel.start_consuming()
+        print('RPC Client says bye')
+        ch.close()
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/heartbeat_and_blocked_timeouts.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/heartbeat_and_blocked_timeouts.rst
new file mode 100644
index 000000000..d7469a100
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/heartbeat_and_blocked_timeouts.rst
@@ -0,0 +1,37 @@
+Ensuring well-behaved connection with heartbeat and blocked-connection timeouts
+===============================================================================
+
+
+This example demonstrates explicit setting of heartbeat and blocked connection timeouts.
+
+Starting with RabbitMQ 3.5.5, the broker's default heartbeat timeout decreased from 580 seconds to 60 seconds. As a result, applications that perform lengthy processing in the same thread that also runs their Pika connection may experience unexpected dropped connections due to heartbeat timeout. Here, we specify an explicit lower bound for heartbeat timeout.
+
+When the RabbitMQ broker is running out of certain resources, such as memory and disk space, it may block connections that are performing resource-consuming operations, such as publishing messages. Once a connection is blocked, RabbitMQ stops reading from that connection's socket, so no commands from the client will get through to the broker on that connection until the broker unblocks it. A blocked connection may last for an indefinite period of time, stalling the connection and possibly resulting in a hang (e.g., in BlockingConnection) until the connection is unblocked. Blocked Connection Timeout is intended to interrupt (i.e., drop) a connection that has been blocked longer than the given timeout value.
+
+Example of configuring heartbeat and blocked-connection timeouts::
+
+    import pika
+
+
+    def main():
+
+        # NOTE: These parameters work with all Pika connection types
+        params = pika.ConnectionParameters(heartbeat_interval=600,
+                                           blocked_connection_timeout=300)
+
+        conn = pika.BlockingConnection(params)
+
+        chan = conn.channel()
+
+        chan.basic_publish('', 'my-alphabet-queue', "abc")
+
+        # If publish causes the connection to become blocked, then this conn.close()
+        # would hang until the connection is unblocked, if ever. However, the
+        # blocked_connection_timeout connection parameter would interrupt the wait,
+        # resulting in ConnectionClosed exception from BlockingConnection (or the
+        # on_connection_closed callback call in an asynchronous adapter)
+        conn.close()
+
+
+    if __name__ == '__main__':
+        main()
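+
+If the lengthy processing can be broken into smaller chunks, another option is to hand control back to pika between chunks so heartbeats keep being serviced. A sketch, where ``work_chunks`` and ``do_work`` are hypothetical stand-ins for your own units of work::
+
+    import pika
+
+    conn = pika.BlockingConnection(
+        pika.ConnectionParameters(heartbeat_interval=600,
+                                  blocked_connection_timeout=300))
+
+    for chunk in work_chunks:        # hypothetical iterable of short work units
+        do_work(chunk)               # hypothetical processing function
+        conn.process_data_events()   # lets pika read frames and answer heartbeats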
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tls_mutual_authentication.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tls_mutual_authentication.rst
new file mode 100644
index 000000000..9cd8decd1
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tls_mutual_authentication.rst
@@ -0,0 +1,61 @@
+TLS parameters example
+=============================
+This example demonstrates a TLS session with RabbitMQ using mutual authentication.
+
+It was tested against RabbitMQ 3.6.10, using Python 3.6.1 and pre-release Pika `0.11.0`.
+
+Note the use of `ssl_version=ssl.PROTOCOL_TLSv1`. Recent versions of RabbitMQ disable older versions of
+SSL due to security vulnerabilities.
+
+See https://www.rabbitmq.com/ssl.html for certificate creation and RabbitMQ SSL configuration instructions.
+
+
+tls_example.py::
+
+    import ssl
+    import pika
+    import logging
+
+    logging.basicConfig(level=logging.INFO)
+
+    cp = pika.ConnectionParameters(
+        ssl=True,
+        ssl_options=dict(
+            ssl_version=ssl.PROTOCOL_TLSv1,
+            ca_certs="/Users/me/tls-gen/basic/testca/cacert.pem",
+            keyfile="/Users/me/tls-gen/basic/client/key.pem",
+            certfile="/Users/me/tls-gen/basic/client/cert.pem",
+            cert_reqs=ssl.CERT_REQUIRED))
+
+    conn = pika.BlockingConnection(cp)
+    ch = conn.channel()
+    print(ch.queue_declare("sslq"))
+    ch.publish("", "sslq", "abc")
+    print(ch.basic_get("sslq"))
+
+
+rabbitmq.config::
+
+    %% Both the client and rabbitmq server were running on the same machine, a MacBookPro laptop.
+    %%
+    %% rabbitmq.config was created in its default location for OS X: /usr/local/etc/rabbitmq/rabbitmq.config.
+    %%
+    %% The contents of the example rabbitmq.config are for demonstration purposes only. See https://www.rabbitmq.com/ssl.html for instructions about creating the test certificates and the contents of rabbitmq.config.
+
+
+    [
+      {rabbit,
+        [
+          {ssl_listeners, [{"127.0.0.1", 5671}]},
+
+          %% Configuring SSL.
+          %% See http://www.rabbitmq.com/ssl.html for full documentation.
+          %%
+          {ssl_options, [{cacertfile, "/Users/me/tls-gen/basic/testca/cacert.pem"},
+                         {certfile,   "/Users/me/tls-gen/basic/server/cert.pem"},
+                         {keyfile,    "/Users/me/tls-gen/basic/server/key.pem"},
+                         {verify,     verify_peer},
+                         {fail_if_no_peer_cert, true}]}
+        ]
+      }
+    ].
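+
+Before involving pika at all, the certificate pair and the broker's TLS listener can be sanity-checked directly from Python's standard library. A hedged sketch, reusing the file locations above and assuming the server certificate's CN/SAN matches the hostname passed in::
+
+    import socket
+    import ssl
+
+    ctx = ssl.create_default_context(cafile="/Users/me/tls-gen/basic/testca/cacert.pem")
+    ctx.load_cert_chain("/Users/me/tls-gen/basic/client/cert.pem",
+                        "/Users/me/tls-gen/basic/client/key.pem")
+    with ctx.wrap_socket(socket.create_connection(("127.0.0.1", 5671)),
+                         server_hostname="127.0.0.1") as tls_sock:
+        print(tls_sock.version())  # e.g. 'TLSv1.2' once the handshake succeeds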
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tls_server_uathentication.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tls_server_uathentication.rst
new file mode 100644
index 000000000..2cdb4ecd7
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tls_server_uathentication.rst
@@ -0,0 +1,60 @@
+TLS parameters example
+=============================
+This example demonstrates a TLS session with RabbitMQ using server authentication.
+
+It was tested against RabbitMQ 3.6.10, using Python 3.6.1 and pre-release Pika `0.11.0`.
+
+Note the use of `ssl_version=ssl.PROTOCOL_TLSv1`. Recent versions of RabbitMQ disable older versions of
+SSL due to security vulnerabilities.
+
+See https://www.rabbitmq.com/ssl.html for certificate creation and RabbitMQ SSL configuration instructions.
+
+
+tls_example.py::
+
+    import ssl
+    import pika
+    import logging
+
+    logging.basicConfig(level=logging.INFO)
+
+    cp = pika.ConnectionParameters(
+        ssl=True,
+        ssl_options=dict(
+            ssl_version=ssl.PROTOCOL_TLSv1,
+            ca_certs="/Users/me/tls-gen/basic/testca/cacert.pem",
+            cert_reqs=ssl.CERT_REQUIRED))
+
+    conn = pika.BlockingConnection(cp)
+    ch = conn.channel()
+    print(ch.queue_declare("sslq"))
+    ch.publish("", "sslq", "abc")
+    print(ch.basic_get("sslq"))
+
+
+rabbitmq.config::
+
+    %% Both the client and rabbitmq server were running on the same machine, a MacBookPro laptop.
+    %%
+    %% rabbitmq.config was created in its default location for OS X: /usr/local/etc/rabbitmq/rabbitmq.config.
+    %%
+    %% The contents of the example rabbitmq.config are for demonstration purposes only. See https://www.rabbitmq.com/ssl.html for instructions about creating the test certificates and the contents of rabbitmq.config.
+    %%
+    %% Note that the {fail_if_no_peer_cert,false} option states that RabbitMQ should accept clients that don't have a certificate to send to the broker, but through the {verify,verify_peer} option, we state that if the client does send a certificate to the broker, the broker must be able to establish a chain of trust to it.
+
+    [
+      {rabbit,
+        [
+          {ssl_listeners, [{"127.0.0.1", 5671}]},
+
+          %% Configuring SSL.
+          %% See http://www.rabbitmq.com/ssl.html for full documentation.
+          %%
+          {ssl_options, [{cacertfile, "/Users/me/tls-gen/basic/testca/cacert.pem"},
+                         {certfile,   "/Users/me/tls-gen/basic/server/cert.pem"},
+                         {keyfile,    "/Users/me/tls-gen/basic/server/key.pem"},
+                         {verify,     verify_peer},
+                         {fail_if_no_peer_cert, false}]}
+        ]
+      }
+    ].
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tornado_consumer.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tornado_consumer.rst
new file mode 100644
index 000000000..0dae22181
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/tornado_consumer.rst
@@ -0,0 +1,349 @@
+Tornado Consumer
+================
+The following example implements a consumer using the :class:`Tornado adapter <pika.adapters.tornado_connection.TornadoConnection>` for the `Tornado framework <http://tornadoweb.org>`_ that will respond to RPC commands sent from RabbitMQ. For example, it will reconnect if RabbitMQ closes the connection and will shutdown if RabbitMQ cancels the consumer or closes the channel. While it may look intimidating, each method is very short and represents an individual action that a consumer can do.
+
+consumer.py::
+
+    from pika import adapters
+    import pika
+    import logging
+
+    LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
+                  '-35s %(lineno) -5d: %(message)s')
+    LOGGER = logging.getLogger(__name__)
+
+
+    class ExampleConsumer(object):
+        """This is an example consumer that will handle unexpected interactions
+        with RabbitMQ such as channel and connection closures.
+
+        If RabbitMQ closes the connection, it will reopen it. You should
+        look at the output, as there are limited reasons why the connection may
+        be closed, which usually are tied to permission related issues or
+        socket timeouts.
+
+        If the channel is closed, it will indicate a problem with one of the
+        commands that were issued and that should surface in the output as well.
+
+        """
+        EXCHANGE = 'message'
+        EXCHANGE_TYPE = 'topic'
+        QUEUE = 'text'
+        ROUTING_KEY = 'example.text'
+
+        def __init__(self, amqp_url):
+            """Create a new instance of the consumer class, passing in the AMQP
+            URL used to connect to RabbitMQ.
+ + :param str amqp_url: The AMQP url to connect with + + """ + self._connection = None + self._channel = None + self._closing = False + self._consumer_tag = None + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. + + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return adapters.tornado_connection.TornadoConnection(pika.URLParameters(self._url), + self.on_connection_open) + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + LOGGER.info('Closing connection') + self._connection.close() + + def add_on_connection_close_callback(self): + """This method adds an on close callback that will be invoked by pika + when RabbitMQ closes the connection to the publisher unexpectedly. + + """ + LOGGER.info('Adding connection close callback') + self._connection.add_on_close_callback(self.on_connection_closed) + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. + + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._closing: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self.reconnect) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.add_on_connection_close_callback() + self.open_channel() + + def reconnect(self): + """Will be invoked by the IOLoop timer if the connection is + closed. See the on_connection_closed method. + + """ + if not self._closing: + + # Create a new connection + self._connection = self.connect() + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. + + :param pika.channel.Channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel %i was closed: (%s) %s', + channel, reply_code, reply_text) + self._connection.close() + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. 
+ + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. + + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. + + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. + + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def add_on_cancel_callback(self): + """Add a callback that will be invoked if RabbitMQ cancels the consumer + for some reason. If RabbitMQ does cancel the consumer, + on_consumer_cancelled will be invoked by pika. + + """ + LOGGER.info('Adding consumer cancellation callback') + self._channel.add_on_cancel_callback(self.on_consumer_cancelled) + + def on_consumer_cancelled(self, method_frame): + """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer + receiving messages. + + :param pika.frame.Method method_frame: The Basic.Cancel frame + + """ + LOGGER.info('Consumer was cancelled remotely, shutting down: %r', + method_frame) + if self._channel: + self._channel.close() + + def acknowledge_message(self, delivery_tag): + """Acknowledge the message delivery from RabbitMQ by sending a + Basic.Ack RPC method for the delivery tag. + + :param int delivery_tag: The delivery tag from the Basic.Deliver frame + + """ + LOGGER.info('Acknowledging message %s', delivery_tag) + self._channel.basic_ack(delivery_tag) + + def on_message(self, unused_channel, basic_deliver, properties, body): + """Invoked by pika when a message is delivered from RabbitMQ. The + channel is passed for your convenience. The basic_deliver object that + is passed in carries the exchange, routing key, delivery tag and + a redelivered flag for the message. The properties passed in is an + instance of BasicProperties with the message properties and the body + is the message that was sent. 
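+
+            Note that this example acknowledges each message immediately via
+            acknowledge_message; a real consumer might do its work first and
+            acknowledge only once that work succeeds.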
+
+            :param pika.channel.Channel unused_channel: The channel object
+            :param pika.Spec.Basic.Deliver: basic_deliver method
+            :param pika.Spec.BasicProperties: properties
+            :param str|unicode body: The message body
+
+            """
+            LOGGER.info('Received message # %s from %s: %s',
+                        basic_deliver.delivery_tag, properties.app_id, body)
+            self.acknowledge_message(basic_deliver.delivery_tag)
+
+        def on_cancelok(self, unused_frame):
+            """This method is invoked by pika when RabbitMQ acknowledges the
+            cancellation of a consumer. At this point we will close the channel.
+            This will invoke the on_channel_closed method once the channel has
+            been closed, which will in turn close the connection.
+
+            :param pika.frame.Method unused_frame: The Basic.CancelOk frame
+
+            """
+            LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
+            self.close_channel()
+
+        def stop_consuming(self):
+            """Tell RabbitMQ that you would like to stop consuming by sending the
+            Basic.Cancel RPC command.
+
+            """
+            if self._channel:
+                LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
+                self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
+
+        def start_consuming(self):
+            """This method sets up the consumer by first calling
+            add_on_cancel_callback so that the object is notified if RabbitMQ
+            cancels the consumer. It then issues the Basic.Consume RPC command
+            which returns the consumer tag that is used to uniquely identify the
+            consumer with RabbitMQ. We keep the value to use it when we want to
+            cancel consuming. The on_message method is passed in as a callback
+            that pika will invoke when a message is fully received.
+
+            """
+            LOGGER.info('Issuing consumer related RPC commands')
+            self.add_on_cancel_callback()
+            self._consumer_tag = self._channel.basic_consume(self.on_message,
+                                                             self.QUEUE)
+
+        def on_bindok(self, unused_frame):
+            """Invoked by pika when the Queue.Bind method has completed. At this
+            point we will start consuming messages by calling start_consuming
+            which will invoke the needed RPC commands to start the process.
+
+            :param pika.frame.Method unused_frame: The Queue.BindOk response frame
+
+            """
+            LOGGER.info('Queue bound')
+            self.start_consuming()
+
+        def close_channel(self):
+            """Call to close the channel with RabbitMQ cleanly by issuing the
+            Channel.Close RPC command.
+
+            """
+            LOGGER.info('Closing the channel')
+            self._channel.close()
+
+        def open_channel(self):
+            """Open a new channel with RabbitMQ by issuing the Channel.Open RPC
+            command. When RabbitMQ responds that the channel is open, the
+            on_channel_open callback will be invoked by pika.
+
+            """
+            LOGGER.info('Creating a new channel')
+            self._connection.channel(on_open_callback=self.on_channel_open)
+
+        def run(self):
+            """Run the example consumer by connecting to RabbitMQ and then
+            starting the IOLoop to block and allow the SelectConnection to operate.
+
+            """
+            self._connection = self.connect()
+            self._connection.ioloop.start()
+
+        def stop(self):
+            """Cleanly shut down the connection to RabbitMQ by stopping the
+            consumer with RabbitMQ. When RabbitMQ confirms the cancellation,
+            on_cancelok will be invoked by pika, which will then close the
+            channel and connection. The IOLoop is started again because this
+            method is invoked when CTRL-C is pressed, raising a
+            KeyboardInterrupt exception. This exception stops the IOLoop which
+            needs to be running for pika to communicate with RabbitMQ. All of
+            the commands issued prior to starting the IOLoop will be buffered
+            but not processed.
+ + """ + LOGGER.info('Stopping') + self._closing = True + self.stop_consuming() + self._connection.ioloop.start() + LOGGER.info('Stopped') + + + def main(): + logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) + example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F') + try: + example.run() + except KeyboardInterrupt: + example.stop() + + + if __name__ == '__main__': + main() + diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/twisted_example.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/twisted_example.rst new file mode 100644 index 000000000..e4a36f8a1 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/twisted_example.rst @@ -0,0 +1,49 @@ +Twisted Consumer Example +======================== +Example of writing a consumer using the :py:class:`Twisted connection adapter `:: + + # -*- coding:utf-8 -*- + + import pika + from pika import exceptions + from pika.adapters import twisted_connection + from twisted.internet import defer, reactor, protocol,task + + + @defer.inlineCallbacks + def run(connection): + + channel = yield connection.channel() + + exchange = yield channel.exchange_declare(exchange='topic_link', exchange_type='topic') + + queue = yield channel.queue_declare(queue='hello', auto_delete=False, exclusive=False) + + yield channel.queue_bind(exchange='topic_link',queue='hello',routing_key='hello.world') + + yield channel.basic_qos(prefetch_count=1) + + queue_object, consumer_tag = yield channel.basic_consume(queue='hello',no_ack=False) + + l = task.LoopingCall(read, queue_object) + + l.start(0.01) + + + @defer.inlineCallbacks + def read(queue_object): + + ch,method,properties,body = yield queue_object.get() + + if body: + print(body) + + yield ch.basic_ack(delivery_tag=method.delivery_tag) + + + parameters = pika.ConnectionParameters() + cc = protocol.ClientCreator(reactor, twisted_connection.TwistedProtocolConnection, parameters) + d = cc.connectTCP('hostname', 5672) + d.addCallback(lambda protocol: protocol.ready) + d.addCallback(run) + reactor.run() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/using_urlparameters.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/using_urlparameters.rst new file mode 100644 index 000000000..b9f73cd8d --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/examples/using_urlparameters.rst @@ -0,0 +1,68 @@ +Using URLParameters +=================== +Pika has two methods of encapsulating the data that lets it know how to connect +to RabbitMQ, :py:class:`pika.connection.ConnectionParameters` and :py:class:`pika.connection.URLParameters`. + +.. note:: + If you're connecting to RabbitMQ on localhost on port 5672, with the default virtual host of */* and the default username and password of *guest* and *guest*, you do not need to specify connection parameters when connecting. + +Using :py:class:`pika.connection.URLParameters` is an easy way to minimize the +variables required to connect to RabbitMQ and supports all of the directives +that :py:class:`pika.connection.ConnectionParameters` supports. + +The following is the format for the URLParameters connection value:: + + scheme://username:password@host:port/virtual_host?key=value&key=value + +As you can see, by default, the scheme (amqp, amqps), username, password, host, port and virtual host make up the core of the URL and any other parameter is passed in as query string values. 
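+
+For example, here is a minimal sketch (assuming a RabbitMQ broker on localhost with the default *guest* credentials) that parses such a URL and opens a blocking connection::
+
+    import pika
+
+    # %2F is the URL-encoded default virtual host "/"
+    parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')
+
+    # Open a connection and a channel, then clean up
+    connection = pika.BlockingConnection(parameters)
+    channel = connection.channel()
+    connection.close()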
+
+Example Connection URLs
+-----------------------
+
+The default connection URL connects to the / virtual host as guest using the guest password on localhost port 5672. Note the forward slash in the URL is encoded to %2F::
+
+    amqp://guest:guest@localhost:5672/%2F
+
+Connect to a host *rabbit1* as the user *www-data* using the password *rabbit_pwd* on the virtual host *web_messages*::
+
+    amqp://www-data:rabbit_pwd@rabbit1/web_messages
+
+Connecting via SSL is pretty easy too. To connect via SSL for the previous example, simply change the scheme to *amqps*. If you do not specify a port, Pika will use the default SSL port of 5671::
+
+    amqps://www-data:rabbit_pwd@rabbit1/web_messages
+
+If you're looking to tweak other parameters, such as enabling heartbeats, simply add the key/value pair as a query string value. The following builds upon the SSL connection, enabling heartbeats every 30 seconds::
+
+    amqps://www-data:rabbit_pwd@rabbit1/web_messages?heartbeat=30
+
+
+Options that are available as query string values:
+
+- backpressure_detection: Pass in a value of *t* to enable backpressure detection; it is disabled by default.
+- channel_max: Alter the default channel maximum by passing in a 32-bit integer value here.
+- connection_attempts: Alter the default of 1 connection attempt by passing in an integer value here.
+- frame_max: Alter the default frame maximum size value by passing in a long integer value [#f1]_.
+- heartbeat: Pass a value greater than zero to enable heartbeats between the server and your application. The integer value you pass here will be the number of seconds between heartbeats.
+- locale: Set the locale of the client using underscore delimited posix Locale code in ll_CC format (en_US, pt_BR, de_DE).
+- retry_delay: The number of seconds to wait before attempting to reconnect on a failed connection, if connection_attempts is > 0.
+- socket_timeout: Change the default socket timeout duration from 0.25 seconds to another integer or float value. Adjust with caution.
+- ssl_options: A URL-encoded dict of values for the SSL connection. The available keys are:
+   - ca_certs
+   - cert_reqs
+   - certfile
+   - keyfile
+   - ssl_version
+
+For information on what the ssl_options can be set to, see the `official Python documentation `_. Here is an example of setting the client certificate and key::
+
+    amqp://www-data:rabbit_pwd@rabbit1/web_messages?heartbeat=30&ssl_options=%7B%27keyfile%27%3A+%27%2Fetc%2Fssl%2Fmykey.pem%27%2C+%27certfile%27%3A+%27%2Fetc%2Fssl%2Fmycert.pem%27%7D
+
+The following example demonstrates how to generate the ssl_options string with `Python's urllib `_::
+
+    import urllib
+    urllib.urlencode({'ssl_options': {'certfile': '/etc/ssl/mycert.pem', 'keyfile': '/etc/ssl/mykey.pem'}})
+
+
+.. rubric:: Footnotes
+
+.. [#f1] The AMQP specification states that a server can reject a request for a frame size larger than the value it passes during content negotiation.
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/faq.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/faq.rst
new file mode 100644
index 000000000..f70ef5528
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/faq.rst
@@ -0,0 +1,18 @@
+Frequently Asked Questions
+--------------------------
+
+- Is Pika thread safe?
+
+  Pika does not have any notion of threading in the code. If you want to use Pika with threading, make sure you have a Pika connection per thread, created in that thread.
It is not safe to share one Pika connection across threads, with one exception: you may call the connection method `add_callback_threadsafe` from another thread to schedule a callback within an active pika connection. + +- How do I report a bug with Pika? + + The `main Pika repository `_ is hosted on `Github `_ and we use the Issue tracker at `https://github.com/pika/pika/issues `_. + +- Is there a mailing list for Pika? + + Yes, Pika's mailing list is available `on Google Groups `_ and the email address is pika-python@googlegroups.com, though traditionally questions about Pika have been asked on the `RabbitMQ-Discuss mailing list `_. + +- How can I contribute to Pika? + + You can `fork the project on Github `_ and issue `Pull Requests `_ when you believe you have something solid to be added to the main repository. diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/index.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/index.rst new file mode 100644 index 000000000..7f7677864 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/index.rst @@ -0,0 +1,37 @@ +Introduction to Pika +==================== +Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library. + +If you have not developed with Pika or RabbitMQ before, the :doc:`intro` documentation is a good place to get started. + +Installing Pika +--------------- +Pika is available for download via PyPI and may be installed using easy_install or pip:: + + pip install pika + +or:: + + easy_install pika + +To install from source, run "python setup.py install" in the root source directory. + +Using Pika +---------- +.. toctree:: + :glob: + :maxdepth: 1 + + intro + modules/index + examples + faq + contributors + version_history + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/intro.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/intro.rst new file mode 100644 index 000000000..ab701879d --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/intro.rst @@ -0,0 +1,125 @@ +Introduction to Pika +==================== + +IO and Event Looping +-------------------- +As AMQP is a two-way RPC protocol where the client can send requests to the server and the server can send requests to a client, Pika implements or extends IO loops in each of its asynchronous connection adapters. These IO loops are blocking methods which loop and listen for events. Each asynchronous adapter follows the same standard for invoking the IO loop. The IO loop is created when the connection adapter is created. To start an IO loop for any given adapter, call the ``connection.ioloop.start()`` method. + +If you are using an external IO loop such as Tornado's :class:`~tornado.ioloop.IOLoop` you invoke it normally and then add the Pika Tornado adapter to it. + +Example:: + + import pika + + def on_open(connection): + # Invoked when the connection is open + pass + + # Create our connection object, passing in the on_open method + connection = pika.SelectConnection(on_open_callback=on_open) + + try: + # Loop so we can communicate with RabbitMQ + connection.ioloop.start() + except KeyboardInterrupt: + # Gracefully close the connection + connection.close() + # Loop until we're fully closed, will stop on its own + connection.ioloop.start() + +.. 
_intro_to_cps: + +Continuation-Passing Style +-------------------------- + +Interfacing with Pika asynchronously is done by passing in callback methods you would like to have invoked when a certain event completes. For example, if you are going to declare a queue, you pass in a method that will be called when the RabbitMQ server returns a `Queue.DeclareOk `_ response. + +In our example below we use the following five easy steps: + +#. We start by creating our connection object, then starting our event loop. +#. When we are connected, the *on_connected* method is called. In that method we create a channel. +#. When the channel is created, the *on_channel_open* method is called. In that method we declare a queue. +#. When the queue is declared successfully, *on_queue_declared* is called. In that method we call :py:meth:`channel.basic_consume ` telling it to call the handle_delivery for each message RabbitMQ delivers to us. +#. When RabbitMQ has a message to send us, it calls the handle_delivery method passing the AMQP Method frame, Header frame, and Body. + +.. NOTE:: + Step #1 is on line #28 and Step #2 is on line #6. This is so that Python knows about the functions we'll call in Steps #2 through #5. + +.. _cps_example: + +Example:: + + import pika + + # Create a global channel variable to hold our channel object in + channel = None + + # Step #2 + def on_connected(connection): + """Called when we are fully connected to RabbitMQ""" + # Open a channel + connection.channel(on_channel_open) + + # Step #3 + def on_channel_open(new_channel): + """Called when our channel has opened""" + global channel + channel = new_channel + channel.queue_declare(queue="test", durable=True, exclusive=False, auto_delete=False, callback=on_queue_declared) + + # Step #4 + def on_queue_declared(frame): + """Called when RabbitMQ has told us our Queue has been declared, frame is the response from RabbitMQ""" + channel.basic_consume(handle_delivery, queue='test') + + # Step #5 + def handle_delivery(channel, method, header, body): + """Called when we receive a message from RabbitMQ""" + print(body) + + # Step #1: Connect to RabbitMQ using the default parameters + parameters = pika.ConnectionParameters() + connection = pika.SelectConnection(parameters, on_connected) + + try: + # Loop so we can communicate with RabbitMQ + connection.ioloop.start() + except KeyboardInterrupt: + # Gracefully close the connection + connection.close() + # Loop until we're fully closed, will stop on its own + connection.ioloop.start() + +Credentials +----------- +The :mod:`pika.credentials` module provides the mechanism by which you pass the username and password to the :py:class:`ConnectionParameters ` class when it is created. + +Example:: + + import pika + credentials = pika.PlainCredentials('username', 'password') + parameters = pika.ConnectionParameters(credentials=credentials) + +.. _connection_parameters: + +Connection Parameters +--------------------- +There are two types of connection parameter classes in Pika to allow you to pass the connection information into a connection adapter, :class:`ConnectionParameters ` and :class:`URLParameters `. Both classes share the same default connection values. + + +.. _intro_to_backpressure: + +TCP Backpressure +---------------- + +As of RabbitMQ 2.0, client side `Channel.Flow `_ has been removed [#f1]_. Instead, the RabbitMQ broker uses TCP Backpressure to slow your client if it is delivering messages too fast. 
If you pass in backpressure_detection into your connection parameters, Pika attempts to help you handle this situation by providing a mechanism by which you may be notified if Pika has noticed too many frames have yet to be delivered. By registering a callback function with the :py:meth:`add_backpressure_callback ` method of any connection adapter, your function will be called when Pika sees that a backlog of 10 times the average frame size you have been sending has been exceeded. You may tweak the notification multiplier value by calling the :py:meth:`set_backpressure_multiplier ` method passing any integer value. + +Example:: + + import pika + + parameters = pika.URLParameters('amqp://guest:guest@rabbit-server1:5672/%2F?backpressure_detection=t') + +.. rubric:: Footnotes + +.. [#f1] "more effective flow control mechanism that does not require cooperation from clients and reacts quickly to prevent the broker from exhausting memory - see http://www.rabbitmq.com/extensions.html#memsup" from http://lists.rabbitmq.com/pipermail/rabbitmq-announce/attachments/20100825/2c672695/attachment.txt diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/asyncio.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/asyncio.rst new file mode 100644 index 000000000..2e58db450 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/asyncio.rst @@ -0,0 +1,9 @@ +asyncio Connection Adapter +========================== +.. automodule:: pika.adapters.asyncio_connection + +Be sure to check out the :doc:`asynchronous examples ` including the asyncio specific :doc:`consumer ` example. + +.. autoclass:: pika.adapters.asyncio_connection.AsyncioConnection + :members: + :inherited-members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/blocking.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/blocking.rst new file mode 100644 index 000000000..81b2eb504 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/blocking.rst @@ -0,0 +1,13 @@ +BlockingConnection +------------------ +.. automodule:: pika.adapters.blocking_connection + +Be sure to check out examples in :doc:`/examples`. + +.. autoclass:: pika.adapters.blocking_connection.BlockingConnection + :members: + :inherited-members: + +.. autoclass:: pika.adapters.blocking_connection.BlockingChannel + :members: + :inherited-members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/index.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/index.rst new file mode 100644 index 000000000..7bc694da4 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/index.rst @@ -0,0 +1,15 @@ +Connection Adapters +=================== +Pika uses connection adapters to provide a flexible method for adapting pika's +core communication to different IOLoop implementations. In addition to asynchronous adapters, there is the :class:`BlockingConnection ` adapter that provides a more idiomatic procedural approach to using Pika. + +Adapters +-------- +.. toctree:: + :glob: + :maxdepth: 1 + + blocking + select + tornado + twisted diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/select.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/select.rst new file mode 100644 index 000000000..e02b57135 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/select.rst @@ -0,0 +1,7 @@ +Select Connection Adapter +========================== +.. automodule:: pika.adapters.select_connection + +.. 
autoclass:: pika.adapters.select_connection.SelectConnection + :members: + :inherited-members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/tornado.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/tornado.rst new file mode 100644 index 000000000..97784a5c5 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/tornado.rst @@ -0,0 +1,9 @@ +Tornado Connection Adapter +========================== +.. automodule:: pika.adapters.tornado_connection + +Be sure to check out the :doc:`asynchronous examples ` including the Tornado specific :doc:`consumer ` example. + +.. autoclass:: pika.adapters.tornado_connection.TornadoConnection + :members: + :inherited-members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/twisted.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/twisted.rst new file mode 100644 index 000000000..434201ced --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/adapters/twisted.rst @@ -0,0 +1,15 @@ +Twisted Connection Adapter +========================== +.. automodule:: pika.adapters.twisted_connection + +.. autoclass:: pika.adapters.twisted_connection.TwistedConnection + :members: + :inherited-members: + +.. autoclass:: pika.adapters.twisted_connection.TwistedProtocolConnection + :members: + :inherited-members: + +.. autoclass:: pika.adapters.twisted_connection.TwistedChannel + :members: + :inherited-members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/channel.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/channel.rst new file mode 100644 index 000000000..eb729c564 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/channel.rst @@ -0,0 +1,10 @@ +Channel +======= +.. automodule:: pika.channel + +Channel +------- +.. autoclass:: Channel + :members: + :inherited-members: + :member-order: bysource diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/connection.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/connection.rst new file mode 100644 index 000000000..de42f5c72 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/connection.rst @@ -0,0 +1,7 @@ +Connection +---------- +The :class:`~pika.connection.Connection` class implements the base behavior +that all connection adapters extend. + +.. autoclass:: pika.connection.Connection + :members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/credentials.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/credentials.rst new file mode 100644 index 000000000..94a2de54e --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/credentials.rst @@ -0,0 +1,18 @@ +Authentication Credentials +========================== +.. automodule:: pika.credentials + +PlainCredentials +---------------- +.. autoclass:: PlainCredentials + :members: + :inherited-members: + :noindex: + +ExternalCredentials +------------------- +.. autoclass:: ExternalCredentials + :members: + :inherited-members: + :noindex: + diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/exceptions.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/exceptions.rst new file mode 100644 index 000000000..3bb3afdac --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/exceptions.rst @@ -0,0 +1,5 @@ +Exceptions +========== +.. 
automodule:: pika.exceptions + :members: + :undoc-members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/index.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/index.rst new file mode 100644 index 000000000..33f5b0c72 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/index.rst @@ -0,0 +1,21 @@ +Core Class and Module Documentation +=================================== +For the end user, Pika is organized into a small set of objects for all communication with RabbitMQ. + +- A :doc:`connection adapter ` is used to connect to RabbitMQ and manages the connection. +- :doc:`Connection parameters ` are used to instruct the :class:`~pika.connection.Connection` object how to connect to RabbitMQ. +- :doc:`credentials` are used to encapsulate all authentication information for the :class:`~pika.connection.ConnectionParameters` class. +- A :class:`~pika.channel.Channel` object is used to communicate with RabbitMQ via the AMQP RPC methods. +- :doc:`exceptions` are raised at various points when using Pika when something goes wrong. + +.. toctree:: + :hidden: + :maxdepth: 1 + + adapters/index + channel + connection + credentials + exceptions + parameters + spec diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/parameters.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/parameters.rst new file mode 100644 index 000000000..fa05ed094 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/parameters.rst @@ -0,0 +1,42 @@ +Connection Parameters +===================== +To maintain flexibility in how you specify the connection information required for your applications to properly connect to RabbitMQ, pika implements two classes for encapsulating the information, :class:`~pika.connection.ConnectionParameters` and :class:`~pika.connection.URLParameters`. + +ConnectionParameters +-------------------- +The classic object for specifying all of the connection parameters required to connect to RabbitMQ, :class:`~pika.connection.ConnectionParameters` provides attributes for tweaking every possible connection option. + +Example:: + + import pika + + # Set the connection parameters to connect to rabbit-server1 on port 5672 + # on the / virtual host using the username "guest" and password "guest" + credentials = pika.PlainCredentials('guest', 'guest') + parameters = pika.ConnectionParameters('rabbit-server1', + 5672, + '/', + credentials) + +.. autoclass:: pika.connection.ConnectionParameters + :members: + :inherited-members: + :member-order: bysource + +URLParameters +------------- +The :class:`~pika.connection.URLParameters` class allows you to pass in an AMQP URL when creating the object and supports the host, port, virtual host, ssl, username and password in the base URL and other options are passed in via query parameters. + +Example:: + + import pika + + # Set the connection parameters to connect to rabbit-server1 on port 5672 + # on the / virtual host using the username "guest" and password "guest" + parameters = pika.URLParameters('amqp://guest:guest@rabbit-server1:5672/%2F') + +.. autoclass:: pika.connection.URLParameters + :members: + :inherited-members: + :member-order: bysource + diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/spec.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/spec.rst new file mode 100644 index 000000000..d494300a8 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/modules/spec.rst @@ -0,0 +1,8 @@ +pika.spec +========= + +.. 
automodule:: pika.spec + :members: + :inherited-members: + :member-order: bysource + :undoc-members: diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/docs/version_history.rst b/NodeRed/NodeRedFiles/pika-0.13.1/docs/version_history.rst new file mode 100644 index 000000000..8a7578d77 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/docs/version_history.rst @@ -0,0 +1,760 @@ +Version History +=============== + +0.13.1 2019-03-07 +----------------- + +`GitHub milestone `_ + +0.13.0 2019-01-17 +----------------- + +`GitHub milestone `_ + +- `AsyncioConnection`, `TornadoConnection` and `TwistedProtocolConnection` are no longer auto-imported (`PR `_) +- Python `3.7` support (`Issue `_) + +0.12.0 2018-06-19 +----------------- + +`GitHub milestone `_ + +This is an interim release prior to version `1.0.0`. It includes the following backported pull requests and commits from the `master` branch: + +- `PR #908 `_ +- `PR #910 `_ +- `PR #918 `_ +- `PR #920 `_ +- `PR #924 `_ +- `PR #937 `_ +- `PR #938 `_ +- `PR #933 `_ +- `PR #940 `_ +- `PR #932 `_ +- `PR #928 `_ +- `PR #934 `_ +- `PR #915 `_ +- `PR #946 `_ +- `PR #947 `_ +- `PR #952 `_ +- `PR #956 `_ +- `PR #966 `_ +- `PR #975 `_ +- `PR #978 `_ +- `PR #981 `_ +- `PR #994 `_ +- `PR #1007 `_ +- `PR #1045 `_ (manually backported) +- `PR #1011 `_ + +Commits: + +Travis CI fail fast - 3f0e739 + +New features: + +`BlockingConnection` now supports the `add_callback_threadsafe` method which allows a function to be executed correctly on the IO loop thread. The main use-case for this is as follows: + +- Application sets up a thread for `BlockingConnection` and calls `basic_consume` on it +- When a message is received, work is done on another thread +- When the work is done, the worker uses `connection.add_callback_threadsafe` to call the `basic_ack` method on the channel instance. + +Please see `examples/basic_consumer_threaded.py` for an example. As always, `SelectConnection` and a fully async consumer/publisher is the preferred method of using Pika. + +Heartbeats are now sent at an interval equal to 1/2 of the negotiated idle connection timeout. RabbitMQ's default timeout value is 60 seconds, so heartbeats will be sent at a 30 second interval. In addition, Pika's check for an idle connection will be done at an interval equal to the timeout value plus 5 seconds to allow for delays. This results in an interval of 65 seconds by default. + +0.11.2 2017-11-30 +----------------- + +`GitHub milestone `_ + +`0.11.2 `_ + +- Remove `+` character from platform releases string (`PR `_) + +0.11.1 2017-11-27 +----------------- + +`GitHub milestone `_ + +`0.11.1 `_ + +- Fix `BlockingConnection` to ensure event loop exits (`PR `_) +- Heartbeat timeouts will use the client value if specified (`PR `_) +- Allow setting some common TCP options (`PR `_) +- Errors when decoding Unicode are ignored (`PR `_) +- Fix large number encoding (`PR `_) + +0.11.0 2017-07-29 +----------------- + +`GitHub milestone `_ + +`0.11.0 `_ + + - Simplify Travis CI configuration for OS X. + - Add `asyncio` connection adapter for Python 3.4 and newer. + - Connection failures that occur after the socket is opened and before the + AMQP connection is ready to go are now reported by calling the connection + error callback. Previously these were not consistently reported. + - In BaseConnection.close, call _handle_ioloop_stop only if the connection is + already closed to allow the asynchronous close operation to complete + gracefully. 
+ - Pass error information from failed socket connection to user callbacks + on_open_error_callback and on_close_callback with result_code=-1. + - ValueError is raised when a completion callback is passed to an asynchronous + (nowait) Channel operation. It's an application error to pass a non-None + completion callback with an asynchronous request, because this callback can + never be serviced in the asynchronous scenario. + - `Channel.basic_reject` fixed to allow `delivery_tag` to be of type `long` + as well as `int`. (by quantum5) + - Implemented support for blocked connection timeouts in + `pika.connection.Connection`. This feature is available to all pika adapters. + See `pika.connection.ConnectionParameters` docstring to learn more about + `blocked_connection_timeout` configuration. + - Deprecated the `heartbeat_interval` arg in `pika.ConnectionParameters` in + favor of the `heartbeat` arg for consistency with the other connection + parameters classes `pika.connection.Parameters` and `pika.URLParameters`. + - When the `port` arg is not set explicitly in `ConnectionParameters` + constructor, but the `ssl` arg is set explicitly, then set the port value to + to the default AMQP SSL port if SSL is enabled, otherwise to the default + AMQP plaintext port. + - `URLParameters` will raise ValueError if a non-empty URL scheme other than + {amqp | amqps | http | https} is specified. + - `InvalidMinimumFrameSize` and `InvalidMaximumFrameSize` exceptions are + deprecated. pika.connection.Parameters.frame_max property setter now raises + the standard `ValueError` exception when the value is out of bounds. + - Removed deprecated parameter `type` in `Channel.exchange_declare` and + `BlockingChannel.exchange_declare` in favor of the `exchange_type` arg that + doesn't overshadow the builtin `type` keyword. + - Channel.close() on OPENING channel transitions it to CLOSING instead of + raising ChannelClosed. + - Channel.close() on CLOSING channel raises `ChannelAlreadyClosing`; used to + raise `ChannelClosed`. + - Connection.channel() raises `ConnectionClosed` if connection is not in OPEN + state. + - When performing graceful close on a channel and `Channel.Close` from broker + arrives while waiting for CloseOk, don't release the channel number until + CloseOk arrives to avoid race condition that may lead to a new channel + receiving the CloseOk that was destined for the closing channel. + - The `backpressure_detection` option of `ConnectionParameters` and + `URLParameters` property is DEPRECATED in favor of `Connection.Blocked` and + `Connection.Unblocked`. See `Connection.add_on_connection_blocked_callback`. + +0.10.0 2015-09-02 +----------------- + +`0.10.0 `_ + + - a9bf96d - LibevConnection: Fixed dict chgd size during iteration (Michael Laing) + - 388c55d - SelectConnection: Fixed KeyError exceptions in IOLoop timeout executions (Shinji Suzuki) + - 4780de3 - BlockingConnection: Add support to make BlockingConnection a Context Manager (@reddec) + +0.10.0b2 2015-07-15 +------------------- + + - f72b58f - Fixed failure to purge _ConsumerCancellationEvt from BlockingChannel._pending_events during basic_cancel. 
(Vitaly Kruglikov) + +0.10.0b1 2015-07-10 +------------------- + +High-level summary of notable changes: + +- Change to 3-Clause BSD License +- Python 3.x support +- Over 150 commits from 19 contributors +- Refactoring of SelectConnection ioloop +- This major release contains certain non-backward-compatible API changes as + well as significant performance improvements in the `BlockingConnection` + adapter. +- Non-backward-compatible changes in `Channel.add_on_return_callback` callback's + signature. +- The `AsyncoreConnection` adapter was retired + +**Details** + +Python 3.x: this release introduces python 3.x support. Tested on Python 3.3 +and 3.4. + +`AsyncoreConnection`: Retired this legacy adapter to reduce maintenance burden; +the recommended replacement is the `SelectConnection` adapter. + +`SelectConnection`: ioloop was refactored for compatibility with other ioloops. + +`Channel.add_on_return_callback`: The callback is now passed the individual +parameters channel, method, properties, and body instead of a tuple of those +values for congruence with other similar callbacks. + +`BlockingConnection`: This adapter underwent a makeover under the hood and +gained significant performance improvements as well as enhanced timer +resolution. It is now implemented as a client of the `SelectConnection` adapter. + +Below is an overview of the `BlockingConnection` and `BlockingChannel` API +changes: + + - Recursion: the new implementation eliminates callback recursion that + sometimes blew out the stack in the legacy implementation (e.g., + publish -> consumer_callback -> publish -> consumer_callback, etc.). While + `BlockingConnection.process_data_events` and `BlockingConnection.sleep` may + still be called from the scope of the blocking adapter's callbacks in order + to process pending I/O, additional callbacks will be suppressed whenever + `BlockingConnection.process_data_events` and `BlockingConnection.sleep` are + nested in any combination; in that case, the callback information will be + bufferred and dispatched once nesting unwinds and control returns to the + level-zero dispatcher. + - `BlockingConnection.connect`: this method was removed in favor of the + constructor as the only way to establish connections; this reduces + maintenance burden, while improving reliability of the adapter. + - `BlockingConnection.process_data_events`: added the optional parameter + `time_limit`. + - `BlockingConnection.add_on_close_callback`: removed; legacy raised + `NotImplementedError`. + - `BlockingConnection.add_on_open_callback`: removed; legacy raised + `NotImplementedError`. + - `BlockingConnection.add_on_open_error_callback`: removed; legacy raised + `NotImplementedError`. + - `BlockingConnection.add_backpressure_callback`: not supported + - `BlockingConnection.set_backpressure_multiplier`: not supported + - `BlockingChannel.add_on_flow_callback`: not supported; per docstring in + channel.py: "Note that newer versions of RabbitMQ will not issue this but + instead use TCP backpressure". + - `BlockingChannel.flow`: not supported + - `BlockingChannel.force_data_events`: removed as it is no longer necessary + following redesign of the adapter. 
+ - Removed the `nowait` parameter from `BlockingChannel` methods, forcing + `nowait=False` (former API default) in the implementation; this is more + suitable for the blocking nature of the adapter and its error-reporting + strategy; this concerns the following methods: `basic_cancel`, + `confirm_delivery`, `exchange_bind`, `exchange_declare`, `exchange_delete`, + `exchange_unbind`, `queue_bind`, `queue_declare`, `queue_delete`, and + `queue_purge`. + - `BlockingChannel.basic_cancel`: returns a sequence instead of None; for a + `no_ack=True` consumer, `basic_cancel` returns a sequence of pending + messages that arrived before broker confirmed the cancellation. + - `BlockingChannel.consume`: added new optional kwargs `arguments` and + `inactivity_timeout`. Also, raises ValueError if the consumer creation + parameters don't match those used to create the existing queue consumer + generator, if any; this happens when you break out of the consume loop, then + call `BlockingChannel.consume` again with different consumer-creation args + without first cancelling the previous queue consumer generator via + `BlockingChannel.cancel`. The legacy implementation would silently resume + consuming from the existing queue consumer generator even if the subsequent + `BlockingChannel.consume` was invoked with a different queue name, etc. + - `BlockingChannel.cancel`: returns 0; the legacy implementation tried to + return the number of requeued messages, but this number was not accurate + as it didn't include the messages returned by the Channel class; this count + is not generally useful, so returning 0 is a reasonable replacement. + - `BlockingChannel.open`: removed in favor of having a single mechanism for + creating a channel (`BlockingConnection.channel`); this reduces maintenance + burden, while improving reliability of the adapter. + - `BlockingChannel.confirm_delivery`: raises UnroutableError when unroutable + messages that were sent prior to this call are returned before we receive + Confirm.Select-ok. + - `BlockingChannel.basic_publish: always returns True when delivery + confirmation is not enabled (publisher-acks = off); the legacy implementation + returned a bool in this case if `mandatory=True` to indicate whether the + message was delivered; however, this was non-deterministic, because + Basic.Return is asynchronous and there is no way to know how long to wait + for it or its absence. The legacy implementation returned None when + publishing with publisher-acks = off and `mandatory=False`. The new + implementation always returns True when publishing while + publisher-acks = off. + - `BlockingChannel.publish`: a new alternate method (vs. `basic_publish`) for + publishing a message with more detailed error reporting via UnroutableError + and NackError exceptions. + - `BlockingChannel.start_consuming`: raises pika.exceptions.RecursionError if + called from the scope of a `BlockingConnection` or `BlockingChannel` + callback. + - `BlockingChannel.get_waiting_message_count`: new method; returns the number + of messages that may be retrieved from the current queue consumer generator + via `BasicChannel.consume` without blocking. + +**Commits** + + - 5aaa753 - Fixed SSL import and removed no_ack=True in favor of explicit AMQP message handling based on deferreds (skftn) + - 7f222c2 - Add checkignore for codeclimate (Gavin M. 
Roy) + - 4dec370 - Implemented BlockingChannel.flow; Implemented BlockingConnection.add_on_connection_blocked_callback; Implemented BlockingConnection.add_on_connection_unblocked_callback. (Vitaly Kruglikov) + - 4804200 - Implemented blocking adapter acceptance test for exchange-to-exchange binding. Added rudimentary validation of BasicProperties passthru in blocking adapter publish tests. Updated CHANGELOG. (Vitaly Kruglikov) + - 4ec07fd - Fixed sending of data in TwistedProtocolConnection (Vitaly Kruglikov) + - a747fb3 - Remove my copyright from forward_server.py test utility. (Vitaly Kruglikov) + - 94246d2 - Return True from basic_publish when pubacks is off. Implemented more blocking adapter accceptance tests. (Vitaly Kruglikov) + - 3ce013d - PIKA-609 Wait for broker to dispatch all messages to client before cancelling consumer in TestBasicCancelWithNonAckableConsumer and TestBasicCancelWithAckableConsumer (Vitaly Kruglikov) + - 293f778 - Created CHANGELOG entry for release 0.10.0. Fixed up callback documentation for basic_get, basic_consume, and add_on_return_callback. (Vitaly Kruglikov) + - 16d360a - Removed the legacy AsyncoreConnection adapter in favor of the recommended SelectConnection adapter. (Vitaly Kruglikov) + - 240a82c - Defer creation of poller's event loop interrupt socket pair until start is called, because some SelectConnection users (e.g., BlockingConnection adapter) don't use the event loop, and these sockets would just get reported as resource leaks. (Vitaly Kruglikov) + - aed5cae - Added EINTR loops in select_connection pollers. Addressed some pylint findings, including an error or two. Wrap socket.send and socket.recv calls in EINTR loops Use the correct exception for socket.error and select.error and get errno depending on python version. (Vitaly Kruglikov) + - 498f1be - Allow passing exchange, queue and routing_key as text, handle short strings as text in python3 (saarni) + - 9f7f243 - Restored basic_consume, basic_cancel, and add_on_cancel_callback (Vitaly Kruglikov) + - 18c9909 - Reintroduced BlockingConnection.process_data_events. (Vitaly Kruglikov) + - 4b25cb6 - Fixed BlockingConnection/BlockingChannel acceptance and unit tests (Vitaly Kruglikov) + - bfa932f - Facilitate proper connection state after BasicConnection._adapter_disconnect (Vitaly Kruglikov) + - 9a09268 - Fixed BlockingConnection test that was failing with ConnectionClosed error. (Vitaly Kruglikov) + - 5a36934 - Copied synchronous_connection.py from pika-synchronous branch Fixed pylint findings Integrated SynchronousConnection with the new ioloop in SelectConnection Defined dedicated message classes PolledMessage and ConsumerMessage and moved from BlockingChannel to module-global scope. Got rid of nowait args from BlockingChannel public API methods Signal unroutable messages via UnroutableError exception. Signal Nack'ed messages via NackError exception. These expose more information about the failure than legacy basic_publich API. Removed set_timeout and backpressure callback methods Restored legacy `is_open`, etc. property names (Vitaly Kruglikov) + - 6226dc0 - Remove deprecated --use-mirrors (Gavin M. Roy) + - 1a7112f - Raise ConnectionClosed when sending a frame with no connection (#439) (Gavin M. Roy) + - 9040a14 - Make delivery_tag non-optional (#498) (Gavin M. Roy) + - 86aabc2 - Bump version (Gavin M. Roy) + - 562075a - Update a few testing things (Gavin M. 
Roy) + - 4954d38 - use unicode_type in blocking_connection.py (Antti Haapala) + - 133d6bc - Let Travis install ordereddict for Python 2.6, and ttest 3.3, 3.4 too. (Antti Haapala) + - 0d2287d - Pika Python 3 support (Antti Haapala) + - 3125c79 - SSLWantRead is not supported before python 2.7.9 and 3.3 (Will) + - 9a9c46c - Fixed TestDisconnectDuringConnectionStart: it turns out that depending on callback order, it might get either ProbableAuthenticationError or ProbableAccessDeniedError. (Vitaly Kruglikov) + - cd8c9b0 - A fix the write starvation problem that we see with tornado and pika (Will) + - 8654fbc - SelectConnection - make interrupt socketpair non-blocking (Will) + - 4f3666d - Added copyright in forward_server.py and fixed NameError bug (Vitaly Kruglikov) + - f8ebbbc - ignore docs (Gavin M. Roy) + - a344f78 - Updated codeclimate config (Gavin M. Roy) + - 373c970 - Try and fix pathing issues in codeclimate (Gavin M. Roy) + - 228340d - Ignore codegen (Gavin M. Roy) + - 4db0740 - Add a codeclimate config (Gavin M. Roy) + - 7e989f9 - Slight code re-org, usage comment and better naming of test file. (Will) + - 287be36 - Set up _kqueue member of KQueuePoller before calling super constructor to avoid exception due to missing _kqueue member. Call `self._map_event(event)` instead of `self._map_event(event.filter)`, because `KQueuePoller._map_event()` assumes it's getting an event, not an event filter. (Vitaly Kruglikov) + - 62810fb - Fix issue #412: reset BlockingConnection._read_poller in BlockingConnection._adapter_disconnect() to guard against accidental access to old file descriptor. (Vitaly Kruglikov) + - 03400ce - Rationalise adapter acceptance tests (Will) + - 9414153 - Fix bug selecting non epoll poller (Will) + - 4f063df - Use user heartbeat setting if server proposes none (Pau Gargallo) + - 9d04d6e - Deactivate heartbeats when heartbeat_interval is 0 (Pau Gargallo) + - a52a608 - Bug fix and review comments. (Will) + - e3ebb6f - Fix incorrect x-expires argument in acceptance tests (Will) + - 294904e - Get BlockingConnection into consistent state upon loss of TCP/IP connection with broker and implement acceptance tests for those cases. (Vitaly Kruglikov) + - 7f91a68 - Make SelectConnection behave like an ioloop (Will) + - dc9db2b - Perhaps 5 seconds is too agressive for travis (Gavin M. Roy) + - c23e532 - Lower the stuck test timeout (Gavin M. Roy) + - 1053ebc - Late night bug (Gavin M. Roy) + - cd6c1bf - More BaseConnection._handle_error cleanup (Gavin M. Roy) + - a0ff21c - Fix the test to work with Python 2.6 (Gavin M. Roy) + - 748e8aa - Remove pypy for now (Gavin M. Roy) + - 1c921c1 - Socket close/shutdown cleanup (Gavin M. Roy) + - 5289125 - Formatting update from PR (Gavin M. Roy) + - d235989 - Be more specific when calling getaddrinfo (Gavin M. Roy) + - b5d1b31 - Reflect the method name change in pika.callback (Gavin M. Roy) + - df7d3b7 - Cleanup BlockingConnection in a few places (Gavin M. Roy) + - cd99e1c - Rename method due to use in BlockingConnection (Gavin M. Roy) + - 7e0d1b3 - Use google style with yapf instead of pep8 (Gavin M. Roy) + - 7dc9bab - Refactor socket writing to not use sendall #481 (Gavin M. Roy) + - 4838789 - Dont log the fd #521 (Gavin M. Roy) + - 765107d - Add Connection.Blocked callback registration methods #476 (Gavin M. Roy) + - c15b5c1 - Fix _blocking typo pointed out in #513 (Gavin M. Roy) + - 759ac2c - yapf of codegen (Gavin M. Roy) + - 9dadd77 - yapf cleanup of codegen and spec (Gavin M. 
Roy) + - ddba7ce - Do not reject consumers with no_ack=True #486 #530 (Gavin M. Roy) + - 4528a1a - yapf reformatting of tests (Gavin M. Roy) + - e7b6d73 - Remove catching AttributError (#531) (Gavin M. Roy) + - 41ea5ea - Update README badges [skip ci] (Gavin M. Roy) + - 6af987b - Add note on contributing (Gavin M. Roy) + - 161fc0d - yapf formatting cleanup (Gavin M. Roy) + - edcb619 - Add PYPY to travis testing (Gavin M. Roy) + - 2225771 - Change the coverage badge (Gavin M. Roy) + - 8f7d451 - Move to codecov from coveralls (Gavin M. Roy) + - b80407e - Add confirm_delivery to example (Andrew Smith) + - 6637212 - Update base_connection.py (bstemshorn) + - 1583537 - #544 get_waiting_message_count() (markcf) + - 0c9be99 - Fix #535: pass expected reply_code and reply_text from method frame to Connection._on_disconnect from Connection._on_connection_closed (Vitaly Kruglikov) + - d11e73f - Propagate ConnectionClosed exception out of BlockingChannel._send_method() and log ConnectionClosed in BlockingConnection._on_connection_closed() (Vitaly Kruglikov) + - 63d2951 - Fix #541 - make sure connection state is properly reset when BlockingConnection._check_state_on_disconnect raises ConnectionClosed. This supplements the previously-merged PR #450 by getting the connection into consistent state. (Vitaly Kruglikov) + - 71bc0eb - Remove unused self.fd attribute from BaseConnection (Vitaly Kruglikov) + - 8c08f93 - PIKA-532 Removed unnecessary params (Vitaly Kruglikov) + - 6052ecf - PIKA-532 Fix bug in BlockingConnection._handle_timeout that was preventing _on_connection_closed from being called when not closing. (Vitaly Kruglikov) + - 562aa15 - pika: callback: Display exception message when callback fails. (Stuart Longland) + - 452995c - Typo fix in connection.py (Andrew) + - 361c0ad - Added some missing yields (Robert Weidlich) + - 0ab5a60 - Added complete example for python twisted service (Robert Weidlich) + - 4429110 - Add deployment and webhooks (Gavin M. Roy) + - 7e50302 - Fix has_content style in codegen (Andrew Grigorev) + - 28c2214 - Fix the trove categorization (Gavin M. Roy) + - de8b545 - Ensure frames can not be interspersed on send (Gavin M. Roy) + - 8fe6bdd - Fix heartbeat behaviour after connection failure. (Kyösti Herrala) + - c123472 - Updating BlockingChannel.basic_get doc (it does not receive a callback like the rest of the adapters) (Roberto Decurnex) + - b5f52fb - Fix number of arguments passed to _on_return callback (Axel Eirola) + - 765139e - Lower default TIMEOUT to 0.01 (bra-fsn) + - 6cc22a5 - Fix confirmation on reconnects (bra-fsn) + - f4faf0a - asynchronous publisher and subscriber examples refactored to follow the StepDown rule (Riccardo Cirimelli) + +0.9.14 - 2014-07-11 +------------------- + +`0.9.14 `_ + + - 57fe43e - fix test to generate a correct range of random ints (ml) + - 0d68dee - fix async watcher for libev_connection (ml) + - 01710ad - Use default username and password if not specified in URLParameters (Sean Dwyer) + - fae328e - documentation typo (Jeff Fein-Worton) + - afbc9e0 - libev_connection: reset_io_watcher (ml) + - 24332a2 - Fix the manifest (Gavin M. Roy) + - acdfdef - Remove useless test (Gavin M. Roy) + - 7918e1a - Skip libev tests if pyev is not installed or if they are being run in pypy (Gavin M. Roy) + - bb583bf - Remove the deprecated test (Gavin M. Roy) + - aecf3f2 - Don't reject a message if the channel is not open (Gavin M. Roy) + - e37f336 - Remove UTF-8 decoding in spec (Gavin M. 
Roy) + - ddc35a9 - Update the unittest to reflect removal of force binary (Gavin M. Roy) + - fea2476 - PEP8 cleanup (Gavin M. Roy) + - 9b97956 - Remove force_binary (Gavin M. Roy) + - a42dd90 - Whitespace required (Gavin M. Roy) + - 85867ea - Update the content_frame_dispatcher tests to reflect removal of auto-cast utf-8 (Gavin M. Roy) + - 5a4bd5d - Remove unicode casting (Gavin M. Roy) + - efea53d - Remove force binary and unicode casting (Gavin M. Roy) + - e918d15 - Add methods to remove deprecation warnings from asyncore (Gavin M. Roy) + - 117f62d - Add a coveragerc to ignore the auto generated pika.spec (Gavin M. Roy) + - 52f4485 - Remove pypy tests from travis for now (Gavin M. Roy) + - c3aa958 - Update README.rst (Gavin M. Roy) + - 3e2319f - Delete README.md (Gavin M. Roy) + - c12b0f1 - Move to RST (Gavin M. Roy) + - 704f5be - Badging updates (Gavin M. Roy) + - 7ae33ca - Update for coverage info (Gavin M. Roy) + - ae7ca86 - add libev_adapter_tests.py; modify .travis.yml to install libev and pyev (ml) + - f86aba5 - libev_connection: add **kwargs to _handle_event; suppress default_ioloop reuse warning (ml) + - 603f1cf - async_test_base: add necessary args to _on_cconn_closed (ml) + - 3422007 - add libev_adapter_tests.py (ml) + - 6cbab0c - removed relative imports and importing urlparse from urllib.parse for py3+ (a-tal) + - f808464 - libev_connection: add async watcher; add optional parameters to add_timeout (ml) + - c041c80 - Remove ev all together for now (Gavin M. Roy) + - 9408388 - Update the test descriptions and timeout (Gavin M. Roy) + - 1b552e0 - Increase timeout (Gavin M. Roy) + - 69a1f46 - Remove the pyev requirement for 2.6 testing (Gavin M. Roy) + - fe062d2 - Update package name (Gavin M. Roy) + - 611ad0e - Distribute the LICENSE and README.md (#350) (Gavin M. Roy) + - df5e1d8 - Ensure that the entire frame is written using socket.sendall (#349) (Gavin M. Roy) + - 69ec8cf - Move the libev install to before_install (Gavin M. Roy) + - a75f693 - Update test structure (Gavin M. Roy) + - 636b424 - Update things to ignore (Gavin M. Roy) + - b538c68 - Add tox, nose.cfg, update testing config (Gavin M. Roy) + - a0e7063 - add some tests to increase coverage of pika.connection (Charles Law) + - c76d9eb - Address issue #459 (Gavin M. Roy) + - 86ad2db - Raise exception if positional arg for parameters isn't an instance of Parameters (Gavin M. Roy) + - 14d08e1 - Fix for python 2.6 (Gavin M. Roy) + - bd388a3 - Use the first unused channel number addressing #404, #460 (Gavin M. 
Roy) + - e7676e6 - removing a debug that was left in last commit (James Mutton) + - 6c93b38 - Fixing connection-closed behavior to detect on attempt to publish (James Mutton) + - c3f0356 - Initialize bytes_written in _handle_write() (Jonathan Kirsch) + - 4510e95 - Fix _handle_write() may not send full frame (Jonathan Kirsch) + - 12b793f - fixed Tornado Consumer example to successfully reconnect (Yang Yang) + - f074444 - remove forgotten import of ordereddict (Pedro Abranches) + - 1ba0aea - fix last merge (Pedro Abranches) + - 10490a6 - change timeouts structure to list to maintain scheduling order (Pedro Abranches) + - 7958394 - save timeouts in ordered dict instead of dict (Pedro Abranches) + - d2746bf - URLParameters and ConnectionParameters accept unicode strings (Allard Hoeve) + - 596d145 - previous fix for AttributeError made parent and child class methods identical, remove duplication (James Mutton) + - 42940dd - UrlParameters Docs: fixed amqps scheme examples (Riccardo Cirimelli) + - 43904ff - Dont test this in PyPy due to sort order issue (Gavin M. Roy) + - d7d293e - Don't leave __repr__ sorting up to chance (Gavin M. Roy) + - 848c594 - Add integration test to travis and fix invocation (Gavin M. Roy) + - 2678275 - Add pypy to travis tests (Gavin M. Roy) + - 1877f3d - Also addresses issue #419 (Gavin M. Roy) + - 470c245 - Address issue #419 (Gavin M. Roy) + - ca3cb59 - Address issue #432 (Gavin M. Roy) + - a3ff6f2 - Default frame max should be AMQP FRAME_MAX (Gavin M. Roy) + - ff3d5cb - Remove max consumer tag test due to change in code. (Gavin M. Roy) + - 6045dda - Catch KeyError (#437) to ensure that an exception is not raised in a race condition (Gavin M. Roy) + - 0b4d53a - Address issue #441 (Gavin M. Roy) + - 180e7c4 - Update license and related files (Gavin M. Roy) + - 256ed3d - Added Jython support. (Erik Olof Gunnar Andersson) + - f73c141 - experimental work around for recursion issue. (Erik Olof Gunnar Andersson) + - a623f69 - Prevent #436 by iterating the keys and not the dict (Gavin M. Roy) + - 755fcae - Add support for authentication_failure_close, connection.blocked (Gavin M. Roy) + - c121243 - merge upstream master (Michael Laing) + - a08dc0d - add arg to channel.basic_consume (Pedro Abranches) + - 10b136d - Documentation fix (Anton Ryzhov) + - 9313307 - Fixed minor markup errors. (Jorge Puente Sarrín) + - fb3e3cf - Fix the spelling of UnsupportedAMQPFieldException (Garrett Cooper) + - 03d5da3 - connection.py: Propagate the force_channel keyword parameter to methods involved in channel creation (Michael Laing) + - 7bbcff5 - Documentation fix for basic_publish (JuhaS) + - 01dcea7 - Expose no_ack and exclusive to BlockingChannel.consume (Jeff Tang) + - d39b6aa - Fix BlockingChannel.basic_consume does not block on non-empty queues (Juhyeong Park) + - 6e1d295 - fix for issue 391 and issue 307 (Qi Fan) + - d9ffce9 - Update parameters.rst (cacovsky) + - 6afa41e - Add additional badges (Gavin M. Roy) + - a255925 - Fix return value on dns resolution issue (Laurent Eschenauer) + - 3f7466c - libev_connection: tweak docs (Michael Laing) + - 0aaed93 - libev_connection: Fix varable naming (Michael Laing) + - 0562d08 - libev_connection: Fix globals warning (Michael Laing) + - 22ada59 - libev_connection: use globals to track sigint and sigterm watchers as they are created globally within libev (Michael Laing) + - 2649b31 - Move badge [skip ci] (Gavin M. Roy) + - f70eea1 - Remove pypy and installation attempt of pyev (Gavin M. 
Roy) + - f32e522 - Conditionally skip external connection adapters if lib is not installed (Gavin M. Roy) + - cce97c5 - Only install pyev on python 2.7 (Gavin M. Roy) + - ff84462 - Add travis ci support (Gavin M. Roy) + - cf971da - lib_evconnection: improve signal handling; add callback (Michael Laing) + - 9adb269 - bugfix in returning a list in Py3k (Alex Chandel) + - c41d5b9 - update exception syntax for Py3k (Alex Chandel) + - c8506f1 - fix _adapter_connect (Michael Laing) + - 67cb660 - Add LibevConnection to README (Michael Laing) + - 1f9e72b - Propagate low-level connection errors to the AMQPConnectionError. (Bjorn Sandberg) + - e1da447 - Avoid race condition in _on_getok on successive basic_get() when clearing out callbacks (Jeff) + - 7a09979 - Add support for upcoming Connection.Blocked/Unblocked (Gavin M. Roy) + - 53cce88 - TwistedChannel correctly handles multi-argument deferreds. (eivanov) + - 66f8ace - Use uuid when creating unique consumer tag (Perttu Ranta-aho) + - 4ee2738 - Limit the growth of Channel._cancelled, use deque instead of list. (Perttu Ranta-aho) + - 0369aed - fix adapter references and tweak docs (Michael Laing) + - 1738c23 - retry select.select() on EINTR (Cenk Alti) + - 1e55357 - libev_connection: reset internal state on reconnect (Michael Laing) + - 708559e - libev adapter (Michael Laing) + - a6b7c8b - Prioritize EPollPoller and KQueuePoller over PollPoller and SelectPoller (Anton Ryzhov) + - 53400d3 - Handle socket errors in PollPoller and EPollPoller Correctly check 'select.poll' availability (Anton Ryzhov) + - a6dc969 - Use dict.keys & items instead of iterkeys & iteritems (Alex Chandel) + - 5c1b0d0 - Use print function syntax, in examples (Alex Chandel) + - ac9f87a - Fixed a typo in the name of the Asyncore Connection adapter (Guruprasad) + - dfbba50 - Fixed bug mentioned in Issue #357 (Erik Andersson) + - c906a2d - Drop additional flags when getting info for the hostnames, log errors (#352) (Gavin M. Roy) + - baf23dd - retry poll() on EINTR (Cenk Alti) + - 7cd8762 - Address ticket #352 catching an error when socket.getprotobyname fails (Gavin M. Roy) + - 6c3ec75 - Prep for 0.9.14 (Gavin M. Roy) + - dae7a99 - Bump to 0.9.14p0 (Gavin M. Roy) + - 620edc7 - Use default port and virtual host if omitted in URLParameters (Issue #342) (Gavin M. Roy) + - 42a8787 - Move the exception handling inside the while loop (Gavin M. Roy) + - 10e0264 - Fix connection back pressure detection issue #347 (Gavin M. Roy) + - 0bfd670 - Fixed mistake in commit 3a19d65. (Erik Andersson) + - da04bc0 - Fixed Unknown state on disconnect error message generated when closing connections. (Erik Andersson) + - 3a19d65 - Alternative solution to fix #345. 
(Erik Andersson) + - abf9fa8 - switch to sendall to send entire frame (Dustin Koupal) + - 9ce8ce4 - Fixed the async publisher example to work with reconnections (Raphaël De Giusti) + - 511028a - Fix typo in TwistedChannel docstring (cacovsky) + - 8b69e5a - calls self._adapter_disconnect() instead of self.disconnect() which doesn't actually exist #294 (Mark Unsworth) + - 06a5cf8 - add NullHandler to prevent logging warnings (Cenk Alti) + - f404a9a - Fix #337 cannot start ioloop after stop (Ralf Nyren) + +0.9.13 - 2013-05-15 +------------------- + +`0.9.13 `_ + +**Major Changes** + +- IPv6 Support with thanks to Alessandro Tagliapietra for initial prototype +- Officially remove support for <= Python 2.5 even though it was broken already +- Drop pika.simplebuffer.SimpleBuffer in favor of the Python stdlib collections.deque object +- New default object for receiving content is a "bytes" object which is a str wrapper in Python 2, but paves way for Python 3 support +- New "Raw" mode for frame decoding content frames (#334) addresses issues #331, #229 added by Garth Williamson +- Connection and Disconnection logic refactored, allowing for cleaner separation of protocol logic and socket handling logic as well as connection state management +- New "on_open_error_callback" argument in creating connection objects and new Connection.add_on_open_error_callback method +- New Connection.connect method to cleanly allow for reconnection code +- Support for all AMQP field types, using protocol specified signed/unsigned unpacking + +**Backwards Incompatible Changes** + +- Method signature for creating connection objects has new argument "on_open_error_callback" which is positionally before "on_close_callback" +- Internal callback variable names in connection.Connection have been renamed and constants used. If you relied on any of these callbacks outside of their internal use, make sure to check out the new constants. +- Connection._connect method, which was an internal only method is now deprecated and will raise a DeprecationWarning. If you relied on this method, your code needs to change. +- pika.simplebuffer has been removed + +**Bugfixes** + +- BlockingConnection consumer generator does not free buffer when exited (#328) +- Unicode body payloads in the blocking adapter raises exception (#333) +- Support "b" short-short-int AMQP data type (#318) +- Docstring type fix in adapters/select_connection (#316) fix by Rikard Hultén +- IPv6 not supported (#309) +- Stop the HeartbeatChecker when connection is closed (#307) +- Unittest fix for SelectConnection (#336) fix by Erik Andersson +- Handle condition where no connection or socket exists but SelectConnection needs a timeout for retrying a connection (#322) +- TwistedAdapter lagging behind BaseConnection changes (#321) fix by Jan Urbański + +**Other** + +- Refactored documentation +- Added Twisted Adapter example (#314) by nolinksoft + +0.9.12 - 2013-03-18 +------------------- + +`0.9.12 `_ + +**Bugfixes** + +- New timeout id hashing was not unique + +0.9.11 - 2013-03-17 +------------------- + +`0.9.11 `_ + +**Bugfixes** + +- Address inconsistent channel close callback documentation and add the signature + change to the TwistedChannel class (#305) +- Address a missed timeout related internal data structure name change + introduced in the SelectConnection 0.9.10 release. Update all connection + adapters to use same signature and docstring (#306). 
+ +0.9.10 - 2013-03-16 +------------------- + +`0.9.10 `_ + +**Bugfixes** + +- Fix timeout in twisted adapter (Submitted by cellscape) +- Fix blocking_connection poll timer resolution to milliseconds (Submitted by cellscape) +- Fix channel._on_close() without a method frame (Submitted by Richard Boulton) +- Addressed exception on close (Issue #279 - fix by patcpsc) +- 'messages' not initialized in BlockingConnection.cancel() (Issue #289 - fix by Mik Kocikowski) +- Make queue_unbind behave like queue_bind (Issue #277) +- Address closing behavioral issues for connections and channels (Issue #275) +- Pass a Method frame to Channel._on_close in Connection._on_disconnect (Submitted by Jan Urbański) +- Fix channel closed callback signature in the Twisted adapter (Submitted by Jan Urbański) +- Don't stop the IOLoop on connection close for in the Twisted adapter (Submitted by Jan Urbański) +- Update the asynchronous examples to fix reconnecting and have it work +- Warn if the socket was closed such as if RabbitMQ dies without a Close frame +- Fix URLParameters ssl_options (Issue #296) +- Add state to BlockingConnection addressing (Issue #301) +- Encode unicode body content prior to publishing (Issue #282) +- Fix an issue with unicode keys in BasicProperties headers key (Issue #280) +- Change how timeout ids are generated (Issue #254) +- Address post close state issues in Channel (Issue #302) + +** Behavior changes ** + +- Change core connection communication behavior to prefer outbound writes over reads, addressing a recursion issue +- Update connection on close callbacks, changing callback method signature +- Update channel on close callbacks, changing callback method signature +- Give more info in the ChannelClosed exception +- Change the constructor signature for BlockingConnection, block open/close callbacks +- Disable the use of add_on_open_callback/add_on_close_callback methods in BlockingConnection + + +0.9.9 - 2013-01-29 +------------------ + +`0.9.9 `_ + +**Bugfixes** + +- Only remove the tornado_connection.TornadoConnection file descriptor from the IOLoop if it's still open (Issue #221) +- Allow messages with no body (Issue #227) +- Allow for empty routing keys (Issue #224) +- Don't raise an exception when trying to send a frame to a closed connection (Issue #229) +- Only send a Connection.CloseOk if the connection is still open. 
(Issue #236 - Fix by noleaf)
+- Fix timeout threshold in blocking connection (Issue #232 - Fix by Adam Flynn)
+- Fix closing connection while a channel is still open (Issue #230 - Fix by Adam Flynn)
+- Fixed misleading warning and exception messages in BaseConnection (Issue #237 - Fix by Tristan Penman)
+- Pluralised and altered the wording of the AMQPConnectionError exception (Issue #237 - Fix by Tristan Penman)
+- Fixed _adapter_disconnect in TornadoConnection class (Issue #237 - Fix by Tristan Penman)
+- Fixing hang when closing connection without any channel in BlockingConnection (Issue #244 - Fix by Ales Teska)
+- Remove the process_timeouts() call in SelectConnection (Issue #239)
+- Change the string validation to basestring for host connection parameters (Issue #231)
+- Add a poller to the BlockingConnection to address latency issues introduced in Pika 0.9.8 (Issue #242)
+- reply_code and reply_text are not set in ChannelException (Issue #250)
+- Add the missing constraint parameter for Channel._on_return callback processing (Issue #257 - Fix by patcpsc)
+- Channel callbacks not being removed from callback manager when channel is closed or deleted (Issue #261)
+
+0.9.8 - 2012-11-18
+------------------
+
+`0.9.8 `_
+
+**Bugfixes**
+
+- Channel.queue_declare/BlockingChannel.queue_declare not setting up callbacks properly for empty queue name (Issue #218)
+- Channel.queue_bind/BlockingChannel.queue_bind not allowing empty routing key
+- Connection._on_connection_closed calling wrong method in Channel (Issue #219)
+- Fix tx_commit and tx_rollback bugs in BlockingChannel (Issue #217)
+
+0.9.7 - 2012-11-11
+------------------
+
+`0.9.7 `_
+
+**New features**
+
+- generator based consumer in BlockingChannel (See :doc:`examples/blocking_consumer_generator` for example)
+
+**Changes**
+
+- BlockingChannel._send_method will only wait if explicitly told to
+
+**Bugfixes**
+
+- Added the exchange "type" parameter back but issue a DeprecationWarning
+- Don't require a queue name in Channel.queue_declare()
+- Fixed KeyError when processing timeouts (Issue #215 - Fix by Raphaël De Giusti)
+- Don't try and close channels when the connection is closed (Issue #216 - Fix by Charles Law)
+- Don't raise UnexpectedFrame exceptions, log them instead
+- Handle multiple synchronous RPC calls made without waiting for the call result (Issues #192, #204, #211)
+- Typo in docs (Issue #207 - Fix by Luca Wehrstedt)
+- Only sleep on connection failure when retry attempts are > 0 (Issue #200)
+- Bypass _rpc method and just send frames for Basic.Ack, Basic.Nack, Basic.Reject (Issue #205)
+
+0.9.6 - 2012-10-29
+------------------
+
+`0.9.6 `_
+
+**New features**
+
+- URLParameters
+- BlockingChannel.start_consuming() and BlockingChannel.stop_consuming()
+- Delivery Confirmations
+- Improved unittests
+
+**Major bugfix areas**
+
+- Connection handling
+- Blocking functionality in the BlockingConnection
+- SSL
+- UTF-8 Handling
+
+**Removals**
+
+- pika.reconnection_strategies
+- pika.channel.ChannelTransport
+- pika.log
+- pika.template
+- examples directory
+
+0.9.5 - 2011-03-29
+------------------
+
+`0.9.5 `_
+
+**Changelog**
+
+- Scope changes with adapter IOLoops and CallbackManager allowing for cleaner, multi-threaded operation
+- Add support for Confirm.Select with channel.Channel.confirm_delivery()
+- Add examples of delivery confirmation to examples (demo_send_confirmed.py)
+- Update uses of log.warn with warning.warn for TCP Back-pressure alerting
+- License boilerplate updated to simplify license text in
source files +- Increment the timeout in select_connection.SelectPoller reducing CPU utilization +- Bug fix in Heartbeat frame delivery addressing issue #35 +- Remove abuse of pika.log.method_call through a majority of the code +- Rename of key modules: table to data, frames to frame +- Cleanup of frame module and related classes +- Restructure of tests and test runner +- Update functional tests to respect RABBITMQ_HOST, RABBITMQ_PORT environment variables +- Bug fixes to reconnection_strategies module +- Fix the scale of timeout for PollPoller to be specified in milliseconds +- Remove mutable default arguments in RPC calls +- Add data type validation to RPC calls +- Move optional credentials erasing out of connection.Connection into credentials module +- Add support to allow for additional external credential types +- Add a NullHandler to prevent the 'No handlers could be found for logger "pika"' error message when not using pika.log in a client app at all. +- Clean up all examples to make them easier to read and use +- Move documentation into its own repository https://github.com/pika/documentation + +- channel.py + + - Move channel.MAX_CHANNELS constant from connection.CHANNEL_MAX + - Add default value of None to ChannelTransport.rpc + - Validate callback and acceptable replies parameters in ChannelTransport.RPC + - Remove unused connection attribute from Channel + +- connection.py + + - Remove unused import of struct + - Remove direct import of pika.credentials.PlainCredentials + - Change to import pika.credentials + - Move CHANNEL_MAX to channel.MAX_CHANNELS + - Change ConnectionParameters initialization parameter heartbeat to boolean + - Validate all inbound parameter types in ConnectionParameters + - Remove the Connection._erase_credentials stub method in favor of letting the Credentials object deal with that itself. + - Warn if the credentials object intends on erasing the credentials and a reconnection strategy other than NullReconnectionStrategy is specified. + - Change the default types for callback and acceptable_replies in Connection._rpc + - Validate the callback and acceptable_replies data types in Connection._rpc + +- adapters.blocking_connection.BlockingConnection + + - Addition of _adapter_disconnect to blocking_connection.BlockingConnection + - Add timeout methods to BlockingConnection addressing issue #41 + - BlockingConnection didn't allow you register more than one consumer callback because basic_consume was overridden to block immediately. New behavior allows you to do so. + - Removed overriding of base basic_consume and basic_cancel methods. Now uses underlying Channel versions of those methods. + - Added start_consuming() method to BlockingChannel to start the consumption loop. + - Updated stop_consuming() to iterate through all the registered consumers in self._consumers and issue a basic_cancel. 
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/asynchronous_consumer_example.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/asynchronous_consumer_example.py new file mode 100644 index 000000000..58662e32e --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/asynchronous_consumer_example.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- + +import logging +import pika + +LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') +LOGGER = logging.getLogger(__name__) + + +class ExampleConsumer(object): + """This is an example consumer that will handle unexpected interactions + with RabbitMQ such as channel and connection closures. + + If RabbitMQ closes the connection, it will reopen it. You should + look at the output, as there are limited reasons why the connection may + be closed, which usually are tied to permission related issues or + socket timeouts. + + If the channel is closed, it will indicate a problem with one of the + commands that were issued and that should surface in the output as well. + + """ + EXCHANGE = 'message' + EXCHANGE_TYPE = 'topic' + QUEUE = 'text' + ROUTING_KEY = 'example.text' + + def __init__(self, amqp_url): + """Create a new instance of the consumer class, passing in the AMQP + URL used to connect to RabbitMQ. + + :param str amqp_url: The AMQP url to connect with + + """ + self._connection = None + self._channel = None + self._closing = False + self._consumer_tag = None + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. + + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return pika.SelectConnection(pika.URLParameters(self._url), + self.on_connection_open, + stop_ioloop_on_close=False) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.add_on_connection_close_callback() + self.open_channel() + + def add_on_connection_close_callback(self): + """This method adds an on close callback that will be invoked by pika + when RabbitMQ closes the connection to the publisher unexpectedly. + + """ + LOGGER.info('Adding connection close callback') + self._connection.add_on_close_callback(self.on_connection_closed) + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. + + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._closing: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self.reconnect) + + def reconnect(self): + """Will be invoked by the IOLoop timer if the connection is + closed. See the on_connection_closed method. 
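+        Note that this creates a brand-new connection object (and with it a
+        new IOLoop) rather than reusing the closed one.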
+ + """ + # This is the old connection IOLoop instance, stop its ioloop + self._connection.ioloop.stop() + + if not self._closing: + + # Create a new connection + self._connection = self.connect() + + # There is now a new connection, needs a new ioloop to run + self._connection.ioloop.start() + + def open_channel(self): + """Open a new channel with RabbitMQ by issuing the Channel.Open RPC + command. When RabbitMQ responds that the channel is open, the + on_channel_open callback will be invoked by pika. + + """ + LOGGER.info('Creating a new channel') + self._connection.channel(on_open_callback=self.on_channel_open) + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. + + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. + + :param pika.channel.Channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel %i was closed: (%s) %s', + channel, reply_code, reply_text) + self._connection.close() + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. + + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. + + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. 
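+        Queue.Bind is what ties the queue to the exchange for our routing
+        key; without the binding, published messages would never reach the
+        queue.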
+ + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def on_bindok(self, unused_frame): + """Invoked by pika when the Queue.Bind method has completed. At this + point we will start consuming messages by calling start_consuming + which will invoke the needed RPC commands to start the process. + + :param pika.frame.Method unused_frame: The Queue.BindOk response frame + + """ + LOGGER.info('Queue bound') + self.start_consuming() + + def start_consuming(self): + """This method sets up the consumer by first calling + add_on_cancel_callback so that the object is notified if RabbitMQ + cancels the consumer. It then issues the Basic.Consume RPC command + which returns the consumer tag that is used to uniquely identify the + consumer with RabbitMQ. We keep the value to use it when we want to + cancel consuming. The on_message method is passed in as a callback pika + will invoke when a message is fully received. + + """ + LOGGER.info('Issuing consumer related RPC commands') + self.add_on_cancel_callback() + self._consumer_tag = self._channel.basic_consume(self.on_message, + self.QUEUE) + + def add_on_cancel_callback(self): + """Add a callback that will be invoked if RabbitMQ cancels the consumer + for some reason. If RabbitMQ does cancel the consumer, + on_consumer_cancelled will be invoked by pika. + + """ + LOGGER.info('Adding consumer cancellation callback') + self._channel.add_on_cancel_callback(self.on_consumer_cancelled) + + def on_consumer_cancelled(self, method_frame): + """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer + receiving messages. + + :param pika.frame.Method method_frame: The Basic.Cancel frame + + """ + LOGGER.info('Consumer was cancelled remotely, shutting down: %r', + method_frame) + if self._channel: + self._channel.close() + + def on_message(self, unused_channel, basic_deliver, properties, body): + """Invoked by pika when a message is delivered from RabbitMQ. The + channel is passed for your convenience. The basic_deliver object that + is passed in carries the exchange, routing key, delivery tag and + a redelivered flag for the message. The properties passed in is an + instance of BasicProperties with the message properties and the body + is the message that was sent. + + :param pika.channel.Channel unused_channel: The channel object + :param pika.Spec.Basic.Deliver: basic_deliver method + :param pika.Spec.BasicProperties: properties + :param str|unicode body: The message body + + """ + LOGGER.info('Received message # %s from %s: %s', + basic_deliver.delivery_tag, properties.app_id, body) + self.acknowledge_message(basic_deliver.delivery_tag) + + def acknowledge_message(self, delivery_tag): + """Acknowledge the message delivery from RabbitMQ by sending a + Basic.Ack RPC method for the delivery tag. + + :param int delivery_tag: The delivery tag from the Basic.Deliver frame + + """ + LOGGER.info('Acknowledging message %s', delivery_tag) + self._channel.basic_ack(delivery_tag) + + def stop_consuming(self): + """Tell RabbitMQ that you would like to stop consuming by sending the + Basic.Cancel RPC command. 
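+        RabbitMQ confirms the cancel with a Basic.CancelOk frame, which pika
+        hands to the on_cancelok callback passed below.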
+
+        """
+        if self._channel:
+            LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
+            self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
+
+    def on_cancelok(self, unused_frame):
+        """This method is invoked by pika when RabbitMQ acknowledges the
+        cancellation of a consumer. At this point we will close the channel.
+        This will invoke the on_channel_closed method once the channel has been
+        closed, which will in turn close the connection.
+
+        :param pika.frame.Method unused_frame: The Basic.CancelOk frame
+
+        """
+        LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
+        self.close_channel()
+
+    def close_channel(self):
+        """Call to close the channel with RabbitMQ cleanly by issuing the
+        Channel.Close RPC command.
+
+        """
+        LOGGER.info('Closing the channel')
+        self._channel.close()
+
+    def run(self):
+        """Run the example consumer by connecting to RabbitMQ and then
+        starting the IOLoop to block and allow the SelectConnection to operate.
+
+        """
+        self._connection = self.connect()
+        self._connection.ioloop.start()
+
+    def stop(self):
+        """Cleanly shut down the connection to RabbitMQ by stopping the consumer
+        with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
+        will be invoked by pika, which will then close the channel and
+        connection. The IOLoop is started again because this method is invoked
+        when CTRL-C is pressed raising a KeyboardInterrupt exception. This
+        exception stops the IOLoop which needs to be running for pika to
+        communicate with RabbitMQ. All of the commands issued prior to starting
+        the IOLoop will be buffered but not processed.
+
+        """
+        LOGGER.info('Stopping')
+        self._closing = True
+        self.stop_consuming()
+        self._connection.ioloop.start()
+        LOGGER.info('Stopped')
+
+    def close_connection(self):
+        """This method closes the connection to RabbitMQ."""
+        LOGGER.info('Closing connection')
+        self._connection.close()
+
+
+def main():
+    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
+    example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F')
+    try:
+        example.run()
+    except KeyboardInterrupt:
+        example.stop()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/asynchronous_publisher_example.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/asynchronous_publisher_example.py
new file mode 100644
index 000000000..5ffbc89c3
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/asynchronous_publisher_example.py
@@ -0,0 +1,353 @@
+# -*- coding: utf-8 -*-
+
+import logging
+import pika
+import json
+
+LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
+              '-35s %(lineno) -5d: %(message)s')
+LOGGER = logging.getLogger(__name__)
+
+
+class ExamplePublisher(object):
+    """This is an example publisher that will handle unexpected interactions
+    with RabbitMQ such as channel and connection closures.
+
+    If RabbitMQ closes the connection, it will reopen it. You should
+    look at the output, as there are limited reasons why the connection may
+    be closed, which usually are tied to permission related issues or
+    socket timeouts.
+
+    It uses delivery confirmations and illustrates one way to keep track of
+    messages that have been sent and if they've been confirmed by RabbitMQ.
+
+    """
+    EXCHANGE = 'message'
+    EXCHANGE_TYPE = 'topic'
+    PUBLISH_INTERVAL = 1
+    QUEUE = 'text'
+    ROUTING_KEY = 'example.text'
+
+    def __init__(self, amqp_url):
+        """Set up the example publisher object, passing in the URL we will use
+        to connect to RabbitMQ.
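+        The delivery-tracking attributes are initialized to None here and are
+        reset to working values in run() at the start of every connection
+        attempt.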
+ + :param str amqp_url: The URL for connecting to RabbitMQ + + """ + self._connection = None + self._channel = None + + self._deliveries = None + self._acked = None + self._nacked = None + self._message_number = None + + self._stopping = False + self._url = amqp_url + + def connect(self): + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. If you want the reconnection to work, make + sure you set stop_ioloop_on_close to False, which is not the default + behavior of this adapter. + + :rtype: pika.SelectConnection + + """ + LOGGER.info('Connecting to %s', self._url) + return pika.SelectConnection(pika.URLParameters(self._url), + on_open_callback=self.on_connection_open, + on_close_callback=self.on_connection_closed, + stop_ioloop_on_close=False) + + def on_connection_open(self, unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. + + :type unused_connection: pika.SelectConnection + + """ + LOGGER.info('Connection opened') + self.open_channel() + + def on_connection_closed(self, connection, reply_code, reply_text): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. + + :param pika.connection.Connection connection: The closed connection obj + :param int reply_code: The server provided reply_code if given + :param str reply_text: The server provided reply_text if given + + """ + self._channel = None + if self._stopping: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', + reply_code, reply_text) + self._connection.add_timeout(5, self._connection.ioloop.stop) + + def open_channel(self): + """This method will open a new channel with RabbitMQ by issuing the + Channel.Open RPC command. When RabbitMQ confirms the channel is open + by sending the Channel.OpenOK RPC reply, the on_channel_open method + will be invoked. + + """ + LOGGER.info('Creating a new channel') + self._connection.channel(on_open_callback=self.on_channel_open) + + def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. + + :param pika.channel.Channel channel: The channel object + + """ + LOGGER.info('Channel opened') + self._channel = channel + self.add_on_channel_close_callback() + self.setup_exchange(self.EXCHANGE) + + def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ + LOGGER.info('Adding channel close callback') + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reply_code, reply_text): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. In this case, we'll close the connection + to shutdown the object. 
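+        Unlike the consumer example, the connection is only closed here when
+        we are not already in the process of stopping.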
+ + :param pika.channel.Channel channel: The closed channel + :param int reply_code: The numeric reason the channel was closed + :param str reply_text: The text reason the channel was closed + + """ + LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text) + self._channel = None + if not self._stopping: + self._connection.close() + + def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ + LOGGER.info('Declaring exchange %s', exchange_name) + self._channel.exchange_declare(self.on_exchange_declareok, + exchange_name, + self.EXCHANGE_TYPE) + + def on_exchange_declareok(self, unused_frame): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. + + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + + """ + LOGGER.info('Exchange declared') + self.setup_queue(self.QUEUE) + + def setup_queue(self, queue_name): + """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC + command. When it is complete, the on_queue_declareok method will + be invoked by pika. + + :param str|unicode queue_name: The name of the queue to declare. + + """ + LOGGER.info('Declaring queue %s', queue_name) + self._channel.queue_declare(self.on_queue_declareok, queue_name) + + def on_queue_declareok(self, method_frame): + """Method invoked by pika when the Queue.Declare RPC call made in + setup_queue has completed. In this method we will bind the queue + and exchange together with the routing key by issuing the Queue.Bind + RPC command. When this command is complete, the on_bindok method will + be invoked by pika. + + :param pika.frame.Method method_frame: The Queue.DeclareOk frame + + """ + LOGGER.info('Binding %s to %s with %s', + self.EXCHANGE, self.QUEUE, self.ROUTING_KEY) + self._channel.queue_bind(self.on_bindok, self.QUEUE, + self.EXCHANGE, self.ROUTING_KEY) + + def on_bindok(self, unused_frame): + """This method is invoked by pika when it receives the Queue.BindOk + response from RabbitMQ. Since we know we're now setup and bound, it's + time to start publishing.""" + LOGGER.info('Queue bound') + self.start_publishing() + + def start_publishing(self): + """This method will enable delivery confirmations and schedule the + first message to be sent to RabbitMQ + + """ + LOGGER.info('Issuing consumer related RPC commands') + self.enable_delivery_confirmations() + self.schedule_next_message() + + def enable_delivery_confirmations(self): + """Send the Confirm.Select RPC method to RabbitMQ to enable delivery + confirmations on the channel. The only way to turn this off is to close + the channel and create a new one. + + When the message is confirmed from RabbitMQ, the + on_delivery_confirmation method will be invoked passing in a Basic.Ack + or Basic.Nack method from RabbitMQ that will indicate which messages it + is confirming or rejecting. + + """ + LOGGER.info('Issuing Confirm.Select RPC command') + self._channel.confirm_delivery(self.on_delivery_confirmation) + + def on_delivery_confirmation(self, method_frame): + """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC + command, passing in either a Basic.Ack or Basic.Nack frame with + the delivery tag of the message that was published. The delivery tag + is an integer counter indicating the message number that was sent + on the channel via Basic.Publish. 
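+        Delivery tags start at 1 and increment by one for every message
+        published on the channel.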
Here we're just doing house keeping + to keep track of stats and remove message numbers that we expect + a delivery confirmation of from the list used to keep track of messages + that are pending confirmation. + + :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame + + """ + confirmation_type = method_frame.method.NAME.split('.')[1].lower() + LOGGER.info('Received %s for delivery tag: %i', + confirmation_type, + method_frame.method.delivery_tag) + if confirmation_type == 'ack': + self._acked += 1 + elif confirmation_type == 'nack': + self._nacked += 1 + self._deliveries.remove(method_frame.method.delivery_tag) + LOGGER.info('Published %i messages, %i have yet to be confirmed, ' + '%i were acked and %i were nacked', + self._message_number, len(self._deliveries), + self._acked, self._nacked) + + def schedule_next_message(self): + """If we are not closing our connection to RabbitMQ, schedule another + message to be delivered in PUBLISH_INTERVAL seconds. + + """ + LOGGER.info('Scheduling next message for %0.1f seconds', + self.PUBLISH_INTERVAL) + self._connection.add_timeout(self.PUBLISH_INTERVAL, + self.publish_message) + + def publish_message(self): + """If the class is not stopping, publish a message to RabbitMQ, + appending a list of deliveries with the message number that was sent. + This list will be used to check for delivery confirmations in the + on_delivery_confirmations method. + + Once the message has been sent, schedule another message to be sent. + The main reason I put scheduling in was just so you can get a good idea + of how the process is flowing by slowing down and speeding up the + delivery intervals by changing the PUBLISH_INTERVAL constant in the + class. + + """ + if self._channel is None or not self._channel.is_open: + return + + hdrs = {u'مفتاح': u' قيمة', + u'键': u'值', + u'キー': u'値'} + properties = pika.BasicProperties(app_id='example-publisher', + content_type='application/json', + headers=hdrs) + + message = u'مفتاح قيمة 键 值 キー 値' + self._channel.basic_publish(self.EXCHANGE, self.ROUTING_KEY, + json.dumps(message, ensure_ascii=False), + properties) + self._message_number += 1 + self._deliveries.append(self._message_number) + LOGGER.info('Published message # %i', self._message_number) + self.schedule_next_message() + + def run(self): + """Run the example code by connecting and then starting the IOLoop. + + """ + while not self._stopping: + self._connection = None + self._deliveries = [] + self._acked = 0 + self._nacked = 0 + self._message_number = 0 + + try: + self._connection = self.connect() + self._connection.ioloop.start() + except KeyboardInterrupt: + self.stop() + if (self._connection is not None and + not self._connection.is_closed): + # Finish closing + self._connection.ioloop.start() + + LOGGER.info('Stopped') + + def stop(self): + """Stop the example by closing the channel and connection. We + set a flag here so that we stop scheduling new messages to be + published. The IOLoop is started because this method is + invoked by the Try/Catch below when KeyboardInterrupt is caught. + Starting the IOLoop again will allow the publisher to cleanly + disconnect from RabbitMQ. + + """ + LOGGER.info('Stopping') + self._stopping = True + self.close_channel() + self.close_connection() + + def close_channel(self): + """Invoke this command to close the channel with RabbitMQ by sending + the Channel.Close RPC command. 
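+        The None check below makes this safe to call even if a channel was
+        never opened.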
+ + """ + if self._channel is not None: + LOGGER.info('Closing the channel') + self._channel.close() + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + if self._connection is not None: + LOGGER.info('Closing connection') + self._connection.close() + + +def main(): + logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) + + # Connect to localhost:5672 as guest with the password guest and virtual host "/" (%2F) + example = ExamplePublisher('amqp://guest:guest@localhost:5672/%2F?connection_attempts=3&heartbeat_interval=3600') + example.run() + + +if __name__ == '__main__': + main() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/basic_consumer_threaded.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/basic_consumer_threaded.py new file mode 100644 index 000000000..ba521c171 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/basic_consumer_threaded.py @@ -0,0 +1,68 @@ +import functools +import logging +import pika +import threading +import time + +LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') +LOGGER = logging.getLogger(__name__) + +logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) + +def ack_message(channel, delivery_tag): + """Note that `channel` must be the same pika channel instance via which + the message being ACKed was retrieved (AMQP protocol constraint). + """ + if channel.is_open: + channel.basic_ack(delivery_tag) + else: + # Channel is already closed, so we can't ACK this message; + # log and/or do something that makes sense for your app in this case. + pass + +def do_work(connection, channel, delivery_tag, body): + thread_id = threading.get_ident() + fmt1 = 'Thread id: {} Delivery tag: {} Message body: {}' + LOGGER.info(fmt1.format(thread_id, delivery_tag, body)) + # Sleeping to simulate 10 seconds of work + time.sleep(10) + cb = functools.partial(ack_message, channel, delivery_tag) + connection.add_callback_threadsafe(cb) + +def on_message(channel, method_frame, header_frame, body, args): + (connection, threads) = args + delivery_tag = method_frame.delivery_tag + t = threading.Thread(target=do_work, args=(connection, channel, delivery_tag, body)) + t.start() + threads.append(t) + +credentials = pika.PlainCredentials('guest', 'guest') +# Note: sending a short heartbeat to prove that heartbeats are still +# sent even though the worker simulates long-running work +parameters = pika.ConnectionParameters('localhost', credentials=credentials, heartbeat=5) +connection = pika.BlockingConnection(parameters) + +channel = connection.channel() +channel.exchange_declare(exchange="test_exchange", exchange_type="direct", passive=False, durable=True, auto_delete=False) +channel.queue_declare(queue="standard", auto_delete=True) +channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key") +# Note: prefetch is set to 1 here as an example only and to keep the number of threads created +# to a reasonable amount. 
In production you will want to test with different prefetch values +# to find which one provides the best performance and usability for your solution +channel.basic_qos(prefetch_count=1) + +threads = [] +on_message_callback = functools.partial(on_message, args=(connection, threads)) +channel.basic_consume(on_message_callback, 'standard') + +try: + channel.start_consuming() +except KeyboardInterrupt: + channel.stop_consuming() + +# Wait for all to complete +for thread in threads: + thread.join() + +connection.close() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/confirmation.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/confirmation.py new file mode 100644 index 000000000..231147044 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/confirmation.py @@ -0,0 +1,47 @@ +import pika +from pika import spec +import logging + +ITERATIONS = 100 + +logging.basicConfig(level=logging.INFO) + +confirmed = 0 +errors = 0 +published = 0 + +def on_open(connection): + connection.channel(on_channel_open) + + +def on_channel_open(channel): + global published + channel.confirm_delivery(on_delivery_confirmation) + for iteration in xrange(0, ITERATIONS): + channel.basic_publish('test', 'test.confirm', + 'message body value', + pika.BasicProperties(content_type='text/plain', + delivery_mode=1)) + published += 1 + +def on_delivery_confirmation(frame): + global confirmed, errors + if isinstance(frame.method, spec.Basic.Ack): + confirmed += 1 + logging.info('Received confirmation: %r', frame.method) + else: + logging.error('Received negative confirmation: %r', frame.method) + errors += 1 + if (confirmed + errors) == ITERATIONS: + logging.info('All confirmations received, published %i, confirmed %i with %i errors', published, confirmed, errors) + connection.close() + +parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F?connection_attempts=50') +connection = pika.SelectConnection(parameters=parameters, + on_open_callback=on_open) + +try: + connection.ioloop.start() +except KeyboardInterrupt: + connection.close() + connection.ioloop.start() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/consume.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/consume.py new file mode 100644 index 000000000..26e4620f6 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/consume.py @@ -0,0 +1,44 @@ +"""Basic message consumer example""" +import functools +import logging +import pika + +LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') +LOGGER = logging.getLogger(__name__) + +logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) + +def on_message(chan, method_frame, _header_frame, body, userdata=None): + """Called when a message is received. 
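+    The userdata value arrives via the functools.partial binding set up in
+    main() below.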
Log message and ack it.""" + LOGGER.info('Userdata: %s Message body: %s', userdata, body) + chan.basic_ack(delivery_tag=method_frame.delivery_tag) + +def main(): + """Main method.""" + credentials = pika.PlainCredentials('guest', 'guest') + parameters = pika.ConnectionParameters('localhost', credentials=credentials) + connection = pika.BlockingConnection(parameters) + + channel = connection.channel() + channel.exchange_declare(exchange="test_exchange", + exchange_type="direct", + passive=False, + durable=True, + auto_delete=False) + channel.queue_declare(queue="standard", auto_delete=True) + channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key") + channel.basic_qos(prefetch_count=1) + + on_message_callback = functools.partial(on_message, userdata='on_message_userdata') + channel.basic_consume(on_message_callback, 'standard') + + try: + channel.start_consuming() + except KeyboardInterrupt: + channel.stop_consuming() + + connection.close() + +if __name__ == '__main__': + main() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/consumer_queued.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/consumer_queued.py new file mode 100644 index 000000000..f0d527f8a --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/consumer_queued.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import pika +import json +import threading + + +buffer = [] +lock = threading.Lock() + +print('pika version: %s' % pika.__version__) + + +connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost')) + +main_channel = connection.channel() +consumer_channel = connection.channel() +bind_channel = connection.channel() + +if pika.__version__=='0.9.5': + main_channel.exchange_declare(exchange='com.micex.sten', type='direct') + main_channel.exchange_declare(exchange='com.micex.lasttrades', type='direct') +else: + main_channel.exchange_declare(exchange='com.micex.sten', exchange_type='direct') + main_channel.exchange_declare(exchange='com.micex.lasttrades', exchange_type='direct') + +queue = main_channel.queue_declare(exclusive=True).method.queue +queue_tickers = main_channel.queue_declare(exclusive=True).method.queue + +main_channel.queue_bind(exchange='com.micex.sten', queue=queue, routing_key='order.stop.create') + + + +def process_buffer(): + if not lock.acquire(False): + print('locked!') + return + try: + while len(buffer): + body = buffer.pop(0) + + ticker = None + if 'ticker' in body['data']['params']['condition']: ticker = body['data']['params']['condition']['ticker'] + if not ticker: continue + + print('got ticker %s, gonna bind it...' 
% ticker) + bind_channel.queue_bind(exchange='com.micex.lasttrades', queue=queue_tickers, routing_key=str(ticker)) + print('ticker %s binded ok' % ticker) + finally: + lock.release() + + +def callback(ch, method, properties, body): + body = json.loads(body)['order.stop.create'] + buffer.append(body) + process_buffer() + + +consumer_channel.basic_consume(callback, + queue=queue, no_ack=True) + +try: + consumer_channel.start_consuming() +finally: + connection.close() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/consumer_simple.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/consumer_simple.py new file mode 100644 index 000000000..6866f7675 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/consumer_simple.py @@ -0,0 +1,57 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import pika +import json + + +print('pika version: %s' % pika.__version__) + + +connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost')) + +main_channel = connection.channel() +consumer_channel = connection.channel() +bind_channel = connection.channel() + +if pika.__version__=='0.9.5': + main_channel.exchange_declare(exchange='com.micex.sten', type='direct') + main_channel.exchange_declare(exchange='com.micex.lasttrades', type='direct') +else: + main_channel.exchange_declare(exchange='com.micex.sten', exchange_type='direct') + main_channel.exchange_declare(exchange='com.micex.lasttrades', exchange_type='direct') + +queue = main_channel.queue_declare(exclusive=True).method.queue +queue_tickers = main_channel.queue_declare(exclusive=True).method.queue + +main_channel.queue_bind(exchange='com.micex.sten', queue=queue, routing_key='order.stop.create') + + +def hello(): + print('Hello world') + +connection.add_timeout(5, hello) + + +def callback(ch, method, properties, body): + body = json.loads(body)['order.stop.create'] + + ticker = None + if 'ticker' in body['data']['params']['condition']: ticker = body['data']['params']['condition']['ticker'] + if not ticker: return + + print('got ticker %s, gonna bind it...' % ticker) + bind_channel.queue_bind(exchange='com.micex.lasttrades', queue=queue_tickers, routing_key=str(ticker)) + print('ticker %s binded ok' % ticker) + + +import logging +logging.basicConfig(level=logging.INFO) + +consumer_channel.basic_consume(callback, + queue=queue, no_ack=True) + +try: + consumer_channel.start_consuming() +finally: + connection.close() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/direct_reply_to.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/direct_reply_to.py new file mode 100644 index 000000000..43173dc1d --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/direct_reply_to.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- + +""" +This example demonstrates RabbitMQ's "Direct reply-to" usage via +`pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html +for more info about this feature. +""" + +import pika + + +SERVER_QUEUE = 'rpc.server.queue' + + +def main(): + """ Here, Client sends "Marco" to RPC Server, and RPC Server replies with + "Polo". + + NOTE Normally, the server would be running separately from the client, but + in this very simple example both are running in the same thread and sharing + connection and channel. 
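+    The client reads replies from the pseudo-queue 'amq.rabbitmq.reply-to';
+    RabbitMQ routes the server's response straight back over this same
+    connection without creating a real queue.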
+
+    """
+    with pika.BlockingConnection() as conn:
+        channel = conn.channel()
+
+        # Set up server
+
+        channel.queue_declare(queue=SERVER_QUEUE,
+                              exclusive=True,
+                              auto_delete=True)
+        channel.basic_consume(on_server_rx_rpc_request, queue=SERVER_QUEUE)
+
+
+        # Set up client
+
+        # NOTE Client must create its consumer and publish RPC requests on the
+        # same channel to enable the RabbitMQ broker to make the necessary
+        # associations.
+        #
+        # Also, client must create the consumer *before* starting to publish the
+        # RPC requests.
+        #
+        # Client must create its consumer with no_ack=True, because the reply-to
+        # queue isn't real.
+
+        channel.basic_consume(on_client_rx_reply_from_server,
+                              queue='amq.rabbitmq.reply-to',
+                              no_ack=True)
+        channel.basic_publish(
+            exchange='',
+            routing_key=SERVER_QUEUE,
+            body='Marco',
+            properties=pika.BasicProperties(reply_to='amq.rabbitmq.reply-to'))
+
+        channel.start_consuming()
+
+
+def on_server_rx_rpc_request(ch, method_frame, properties, body):
+    print 'RPC Server got request:', body
+
+    ch.basic_publish('', routing_key=properties.reply_to, body='Polo')
+
+    ch.basic_ack(delivery_tag=method_frame.delivery_tag)
+
+    print 'RPC Server says good bye'
+
+
+def on_client_rx_reply_from_server(ch, method_frame, properties, body):
+    print 'RPC Client got reply:', body
+
+    # NOTE A real client might want to make additional RPC requests, but in this
+    # simple example we're closing the channel after getting our first reply
+    # to force control to return from channel.start_consuming()
+    print 'RPC Client says bye'
+    ch.close()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/heartbeat_and_blocked_timeouts.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/heartbeat_and_blocked_timeouts.py
new file mode 100644
index 000000000..3fe9a9956
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/heartbeat_and_blocked_timeouts.py
@@ -0,0 +1,48 @@
+"""
+This example demonstrates explicit setting of heartbeat and blocked connection
+timeouts.
+
+Starting with RabbitMQ 3.5.5, the broker's default heartbeat timeout decreased
+from 580 seconds to 60 seconds. As a result, applications that perform lengthy
+processing in the same thread that also runs their Pika connection may
+experience unexpected dropped connections due to heartbeat timeout. Here, we
+specify an explicit lower bound for heartbeat timeout.
+
+When the RabbitMQ broker is running out of certain resources, such as memory
+and disk space, it may block connections that are performing resource-consuming
+operations, such as publishing messages. Once a connection is blocked, RabbitMQ
+stops reading from that connection's socket, so no commands from the client will
+get through to the broker on that connection until the broker unblocks it. A
+blocked connection may last for an indefinite period of time, stalling the
+connection and possibly resulting in a hang (e.g., in BlockingConnection) until
+the connection is unblocked. Blocked Connection Timeout is intended to interrupt
+(i.e., drop) a connection that has been blocked longer than the given timeout
+value.
+""" + + +import pika + + +def main(): + + # NOTE: These paramerers work with all Pika connection types + params = pika.ConnectionParameters(heartbeat_interval=600, + blocked_connection_timeout=300) + + conn = pika.BlockingConnection(params) + + chan = conn.channel() + + chan.basic_publish('', 'my-alphabet-queue', "abc") + + # If publish causes the connection to become blocked, then this conn.close() + # would hang until the connection is unblocked, if ever. However, the + # blocked_connection_timeout connection parameter would interrupt the wait, + # resulting in ConnectionClosed exception from BlockingConnection (or the + # on_connection_closed callback call in an asynchronous adapter) + conn.close() + + +if __name__ == '__main__': + main() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/producer.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/producer.py new file mode 100644 index 000000000..11bc7e87b --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/producer.py @@ -0,0 +1,42 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import pika +import json +import random + +print('pika version: %s' % pika.__version__) + +connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost')) +main_channel = connection.channel() + +if pika.__version__=='0.9.5': + main_channel.exchange_declare(exchange='com.micex.sten', type='direct') + main_channel.exchange_declare(exchange='com.micex.lasttrades', type='direct') +else: + main_channel.exchange_declare(exchange='com.micex.sten', exchange_type='direct') + main_channel.exchange_declare(exchange='com.micex.lasttrades', exchange_type='direct') + +tickers = {'MXSE.EQBR.LKOH': (1933, 1940), + 'MXSE.EQBR.MSNG': (1.35, 1.45), + 'MXSE.EQBR.SBER': (90, 92), + 'MXSE.EQNE.GAZP': (156, 162), + 'MXSE.EQNE.PLZL': (1025, 1040), + 'MXSE.EQNL.VTBR': (0.05, 0.06)} + + +def getticker(): return list(tickers.keys())[random.randrange(0,len(tickers)-1)] + +_COUNT_ = 10 + +for i in range(0,_COUNT_): + ticker = getticker() + msg = {'order.stop.create':{'data':{'params':{'condition':{'ticker':ticker}}}}} + main_channel.basic_publish(exchange='com.micex.sten', + routing_key='order.stop.create', + body=json.dumps(msg), + properties=pika.BasicProperties(content_type='application/json') + ) + print('send ticker %s' % ticker) + +connection.close() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/publish.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/publish.py new file mode 100644 index 000000000..d31ad9068 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/publish.py @@ -0,0 +1,32 @@ +import pika +import logging + +logging.basicConfig(level=logging.DEBUG) + +credentials = pika.PlainCredentials('guest', 'guest') +parameters = pika.ConnectionParameters('localhost', credentials=credentials) +connection = pika.BlockingConnection(parameters) +channel = connection.channel() +channel.exchange_declare(exchange="test_exchange", exchange_type="direct", + passive=False, durable=True, auto_delete=False) + +print("Sending message to create a queue") +channel.basic_publish('test_exchange', 'standard_key', 'queue:group', + pika.BasicProperties(content_type='text/plain', + delivery_mode=1)) + +connection.sleep(5) + +print("Sending text message to group") +channel.basic_publish('test_exchange', 'group_key', 'Message to group_key', + pika.BasicProperties(content_type='text/plain', + delivery_mode=1)) + +connection.sleep(5) + +print("Sending text message") +channel.basic_publish('test_exchange', 'standard_key', 'Message to standard_key', + 
pika.BasicProperties(content_type='text/plain', + delivery_mode=1)) + +connection.close() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/send.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/send.py new file mode 100644 index 000000000..57098f8f9 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/send.py @@ -0,0 +1,41 @@ +import pika +import time +import logging + +logging.basicConfig(level=logging.DEBUG) + +ITERATIONS = 100 + +connection = pika.BlockingConnection(pika.URLParameters('amqp://guest:guest@localhost:5672/%2F?heartbeat_interval=1')) +channel = connection.channel() + +def closeit(): + print('Close it') + connection.close() + +connection.add_timeout(5, closeit) + +connection.sleep(100) + +""" +channel.confirm_delivery() +start_time = time.time() + +for x in range(0, ITERATIONS): + if not channel.basic_publish(exchange='test', + routing_key='', + body='Test 123', + properties=pika.BasicProperties(content_type='text/plain', + app_id='test', + delivery_mode=1)): + print('Delivery not confirmed') + else: + print('Confirmed delivery') + +channel.close() +connection.close() + +duration = time.time() - start_time +print("Published %i messages in %.4f seconds (%.2f messages per second)" % (ITERATIONS, duration, (ITERATIONS/duration))) + +""" diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/examples/twisted_service.py b/NodeRed/NodeRedFiles/pika-0.13.1/examples/twisted_service.py new file mode 100644 index 000000000..235f1e897 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/examples/twisted_service.py @@ -0,0 +1,209 @@ +""" +# -*- coding:utf-8 -*- +# based on: +# - txamqp-helpers by Dan Siemon (March 2010) +# http://git.coverfire.com/?p=txamqp-twistd.git;a=tree +# - Post by Brian Chandler +# https://groups.google.com/forum/#!topic/pika-python/o_deVmGondk +# - Pika Documentation +# https://pika.readthedocs.io/en/latest/examples/twisted_example.html + + +Fire up this test application via `twistd -ny twisted_service.py` + +The application will answer to requests to exchange "foobar" and any of the +routing_key values: "request1", "request2", or "request3" +with messages to the same exchange, but with routing_key "response" + +When a routing_key of "task" is used on the exchange "foobar", +the application can asynchronously run a maximum of 2 tasks at once +as defined by PREFETCH_COUNT +""" + +import pika +from pika import spec +from pika import exceptions +from pika.adapters import twisted_connection + +from twisted.internet import protocol +from twisted.application import internet +from twisted.application import service +from twisted.internet.defer import inlineCallbacks +from twisted.internet import ssl, defer, task +from twisted.python import log +from twisted.internet import reactor + +PREFETCH_COUNT = 2 + +class PikaService(service.MultiService): + name = 'amqp' + + def __init__(self, parameter): + service.MultiService.__init__(self) + self.parameters = parameter + + def startService(self): + self.connect() + service.MultiService.startService(self) + + def getFactory(self): + if len(self.services) > 0: + return self.services[0].factory + + def connect(self): + f = PikaFactory(self.parameters) + if self.parameters.ssl: + s = ssl.ClientContextFactory() + serv = internet.SSLClient(host=self.parameters.host, port=self.parameters.port, factory=f, contextFactory=s) + else: + serv = internet.TCPClient(host=self.parameters.host, port=self.parameters.port, factory=f) + serv.factory = f + f.service = serv + name = '%s%s:%d' % ('ssl:' if self.parameters.ssl else '', 
self.parameters.host, self.parameters.port) + serv.__repr__ = lambda : '<AMQP Connection to %s>' % name + serv.setName(name) + serv.parent = self + self.addService(serv) + + +class PikaProtocol(twisted_connection.TwistedProtocolConnection): + connected = False + name = 'AMQP:Protocol' + + @inlineCallbacks + def connected(self, connection): + self.channel = yield connection.channel() + yield self.channel.basic_qos(prefetch_count=PREFETCH_COUNT) + self.connected = True + for (exchange, routing_key, callback,) in self.factory.read_list: + yield self.setup_read(exchange, routing_key, callback) + + self.send() + + @inlineCallbacks + def read(self, exchange, routing_key, callback): + """Add an exchange to the list of exchanges to read from.""" + if self.connected: + yield self.setup_read(exchange, routing_key, callback) + + @inlineCallbacks + def setup_read(self, exchange, routing_key, callback): + """This function does the work to read from an exchange.""" + if not exchange == '': + yield self.channel.exchange_declare(exchange=exchange, exchange_type='topic', durable=True, auto_delete=False) + + yield self.channel.queue_declare(queue=routing_key, durable=True) + yield self.channel.queue_bind(queue=routing_key, exchange=exchange, routing_key=routing_key) + + (queue, consumer_tag,) = yield self.channel.basic_consume(queue=routing_key, no_ack=False) + d = queue.get() + d.addCallback(self._read_item, queue, callback) + d.addErrback(self._read_item_err) + + def _read_item(self, item, queue, callback): + """Callback function which is called when an item is read.""" + d = queue.get() + d.addCallback(self._read_item, queue, callback) + d.addErrback(self._read_item_err) + (channel, deliver, props, msg,) = item + + log.msg('%s (%s): %s' % (deliver.exchange, deliver.routing_key, repr(msg)), system='Pika:<=') + d = defer.maybeDeferred(callback, item) + d.addCallbacks( + lambda _: channel.basic_ack(deliver.delivery_tag), + lambda _: channel.basic_nack(deliver.delivery_tag) + ) + + def _read_item_err(self, error): + print(error) + + def send(self): + """If connected, send all waiting messages.""" + if self.connected: + while len(self.factory.queued_messages) > 0: + (exchange, r_key, message,) = self.factory.queued_messages.pop(0) + self.send_message(exchange, r_key, message) + + @inlineCallbacks + def send_message(self, exchange, routing_key, msg): + """Send a single message.""" + log.msg('%s (%s): %s' % (exchange, routing_key, repr(msg)), system='Pika:=>') + yield self.channel.exchange_declare(exchange=exchange, exchange_type='topic', durable=True, auto_delete=False) + prop = spec.BasicProperties(delivery_mode=2) + try: + yield self.channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg, properties=prop) + except Exception as error: + log.msg('Error while sending message: %s' % error, system=self.name) + + +class PikaFactory(protocol.ReconnectingClientFactory): + name = 'AMQP:Factory' + + def __init__(self, parameters): + self.parameters = parameters + self.client = None + self.queued_messages = [] + self.read_list = [] + + def startedConnecting(self, connector): + log.msg('Started to connect.', system=self.name) + + def buildProtocol(self, addr): + self.resetDelay() + log.msg('Connected', system=self.name) + self.client = PikaProtocol(self.parameters) + self.client.factory = self + self.client.ready.addCallback(self.client.connected) + return self.client + + def clientConnectionLost(self, connector, reason): + log.msg('Lost connection.
Reason: %s' % reason, system=self.name) + protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason) + + def clientConnectionFailed(self, connector, reason): + log.msg('Connection failed. Reason: %s' % reason, system=self.name) + protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason) + + def send_message(self, exchange = None, routing_key = None, message = None): + self.queued_messages.append((exchange, routing_key, message)) + if self.client is not None: + self.client.send() + + def read_messages(self, exchange, routing_key, callback): + """Configure an exchange to be read from.""" + self.read_list.append((exchange, routing_key, callback)) + if self.client is not None: + self.client.read(exchange, routing_key, callback) + + +application = service.Application("pikaapplication") + +ps = PikaService(pika.ConnectionParameters(host="localhost", virtual_host="/", credentials=pika.PlainCredentials("guest", "guest"))) +ps.setServiceParent(application) + + +class TestService(service.Service): + + def task(self, msg): + """ + Method for a time consuming task. + + This function must return a deferred. If it is successful, + a `basic.ack` will be sent to AMQP. If the task was not completed a + `basic.nack` will be sent. In this example it will always return + successfully after a 2 second pause. + """ + return task.deferLater(reactor, 2, lambda: log.msg("task completed")) + + def respond(self, msg): + self.amqp.send_message('foobar', 'response', msg[3]) + + def startService(self): + self.amqp = self.parent.getServiceNamed("amqp").getFactory() + self.amqp.read_messages("foobar", "request1", self.respond) + self.amqp.read_messages("foobar", "request2", self.respond) + self.amqp.read_messages("foobar", "request3", self.respond) + self.amqp.read_messages("foobar", "task", self.task) + +ts = TestService() +ts.setServiceParent(application) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/__init__.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/__init__.py new file mode 100644 index 000000000..f21dc8b67 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/__init__.py @@ -0,0 +1,17 @@ +__version__ = '0.13.1' + +import logging +from logging import NullHandler + +# Add NullHandler to prevent logging warnings +logging.getLogger(__name__).addHandler(NullHandler()) + +from pika.connection import ConnectionParameters +from pika.connection import URLParameters +from pika.connection import SSLOptions +from pika.credentials import PlainCredentials +from pika.spec import BasicProperties + +from pika.adapters import BaseConnection +from pika.adapters import BlockingConnection +from pika.adapters import SelectConnection diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/__init__.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/__init__.py new file mode 100644 index 000000000..236a98246 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/__init__.py @@ -0,0 +1,21 @@ +""" +Connection Adapters +=================== + +Pika provides multiple adapters to connect to RabbitMQ: + +- adapters.asyncio_connection.AsyncioConnection: Native Python3 AsyncIO use +- adapters.blocking_connection.BlockingConnection: Enables blocking, + synchronous operation on top of library for simple uses. +- adapters.select_connection.SelectConnection: A native event based connection + adapter that implements select, kqueue, poll and epoll. +- adapters.tornado_connection.TornadoConnection: Connection adapter for use + with the Tornado web framework.
+- adapters.twisted_connection.TwistedConnection: Connection adapter for use + with the Twisted framework + +""" +from pika.adapters.base_connection import BaseConnection +from pika.adapters.blocking_connection import BlockingConnection +from pika.adapters.select_connection import SelectConnection +from pika.adapters.select_connection import IOLoop diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/asyncio_connection.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/asyncio_connection.py new file mode 100644 index 000000000..57cb2d443 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/asyncio_connection.py @@ -0,0 +1,254 @@ +"""Use pika with the Asyncio EventLoop""" +import asyncio +from functools import partial + +from pika.adapters import base_connection + + +class IOLoopAdapter: + def __init__(self, loop): + """ + Basic adapter for asyncio event loop + + :type loop: asyncio.AbstractEventLoop + :param loop: Asyncio Loop + + """ + self.loop = loop + + self.handlers = {} + self.readers = set() + self.writers = set() + + def close(self): + """Release ioloop's resources. + + This method is intended to be called by the application or test code + only after the ioloop's outermost `start()` call returns. After calling + `close()`, no other interaction with the closed instance of ioloop + should be performed. + + """ + self.loop.close() + + def add_timeout(self, deadline, callback_method): + """Add the callback_method to the EventLoop timer to fire after deadline + seconds. Returns a Handle to the timeout. + + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + :rtype: asyncio.Handle + + """ + return self.loop.call_later(deadline, callback_method) + + @staticmethod + def remove_timeout(handle): + """ + Cancel asyncio.Handle + + :type handle: asyncio.Handle + :rtype: bool + """ + return handle.cancel() + + def add_callback_threadsafe(self, callback): + """Requests a call to the given function as soon as possible in the + context of this IOLoop's thread. + + NOTE: This is the only thread-safe method offered by the IOLoop adapter. + All other manipulations of the IOLoop adapter and its parent connection + must be performed from the connection's thread. + + For example, a thread may request a call to the + `channel.basic_ack` method of a connection that is running in a + different thread via + + ``` + connection.add_callback_threadsafe( + functools.partial(channel.basic_ack, delivery_tag=...)) + ``` + + :param method callback: The callback method; must be callable. + + """ + self.loop.call_soon_threadsafe(callback) + + def add_handler(self, fd, cb, event_state): + """ Registers the given handler to receive the given events for ``fd``. + + The ``fd`` argument is an integer file descriptor. + + The ``event_state`` argument is a bitwise or of the constants + ``base_connection.BaseConnection.READ``, ``base_connection.BaseConnection.WRITE``, + and ``base_connection.BaseConnection.ERROR``. 
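+ +        A hypothetical registration (assumes ``sock`` is a connected +        non-blocking socket and ``on_events(fd, events)`` is the handler +        callback): + +            adapter = IOLoopAdapter(asyncio.get_event_loop()) +            adapter.add_handler(sock.fileno(), +                                on_events, +                                base_connection.BaseConnection.READ)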
+ + """ + + if fd in self.handlers: + raise ValueError("fd {} added twice".format(fd)) + self.handlers[fd] = cb + + if event_state & base_connection.BaseConnection.READ: + self.loop.add_reader( + fd, + partial( + cb, + fd=fd, + events=base_connection.BaseConnection.READ + ) + ) + self.readers.add(fd) + + if event_state & base_connection.BaseConnection.WRITE: + self.loop.add_writer( + fd, + partial( + cb, + fd=fd, + events=base_connection.BaseConnection.WRITE + ) + ) + self.writers.add(fd) + + def remove_handler(self, fd): + """ Stop listening for events on ``fd``. """ + + if fd not in self.handlers: + return + + if fd in self.readers: + self.loop.remove_reader(fd) + self.readers.remove(fd) + + if fd in self.writers: + self.loop.remove_writer(fd) + self.writers.remove(fd) + + del self.handlers[fd] + + def update_handler(self, fd, event_state): + if event_state & base_connection.BaseConnection.READ: + if fd not in self.readers: + self.loop.add_reader( + fd, + partial( + self.handlers[fd], + fd=fd, + events=base_connection.BaseConnection.READ + ) + ) + self.readers.add(fd) + else: + if fd in self.readers: + self.loop.remove_reader(fd) + self.readers.remove(fd) + + if event_state & base_connection.BaseConnection.WRITE: + if fd not in self.writers: + self.loop.add_writer( + fd, + partial( + self.handlers[fd], + fd=fd, + events=base_connection.BaseConnection.WRITE + ) + ) + self.writers.add(fd) + else: + if fd in self.writers: + self.loop.remove_writer(fd) + self.writers.remove(fd) + + + def start(self): + """ Start Event Loop """ + if self.loop.is_running(): + return + + self.loop.run_forever() + + def stop(self): + """ Stop Event Loop """ + if self.loop.is_closed(): + return + + self.loop.stop() + + +class AsyncioConnection(base_connection.BaseConnection): + """ The AsyncioConnection runs on the Asyncio EventLoop. + + :param pika.connection.Parameters parameters: Connection parameters + :param on_open_callback: The method to call when the connection is open + :type on_open_callback: method + :param on_open_error_callback: Method to call if the connection cant be opened + :type on_open_error_callback: method + :param asyncio.AbstractEventLoop loop: By default asyncio.get_event_loop() + + """ + def __init__(self, + parameters=None, + on_open_callback=None, + on_open_error_callback=None, + on_close_callback=None, + stop_ioloop_on_close=False, + custom_ioloop=None): + """ Create a new instance of the AsyncioConnection class, connecting + to RabbitMQ automatically + + :param pika.connection.Parameters parameters: Connection parameters + :param on_open_callback: The method to call when the connection is open + :type on_open_callback: method + :param on_open_error_callback: Method to call if the connection cant be opened + :type on_open_error_callback: method + :param asyncio.AbstractEventLoop loop: By default asyncio.get_event_loop() + + """ + self.sleep_counter = 0 + self.loop = custom_ioloop or asyncio.get_event_loop() + self.ioloop = IOLoopAdapter(self.loop) + + super().__init__( + parameters, on_open_callback, + on_open_error_callback, + on_close_callback, self.ioloop, + stop_ioloop_on_close=stop_ioloop_on_close, + ) + + def _adapter_connect(self): + """Connect to the remote socket, adding the socket to the EventLoop if + connected. 
+ + :rtype: bool + + """ + error = super()._adapter_connect() + + if not error: + self.ioloop.add_handler( + self.socket.fileno(), + self._handle_events, + self.event_state, + ) + + return error + + def _adapter_disconnect(self): + """Disconnect from the RabbitMQ broker""" + + if self.socket: + self.ioloop.remove_handler( + self.socket.fileno() + ) + + super()._adapter_disconnect() + + def _handle_disconnect(self): + # No other way to handle exceptions.ProbableAuthenticationError + try: + super()._handle_disconnect() + super()._handle_write() + except Exception as e: + # FIXME: Pass None or other constant instead "-1" + self._on_disconnect(-1, e) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/base_connection.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/base_connection.py new file mode 100644 index 000000000..60078e5e3 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/base_connection.py @@ -0,0 +1,582 @@ +"""Base class extended by connection adapters. This extends the +connection.Connection class to encapsulate connection behavior but still +isolate socket and low level communication. + +""" +import errno +import logging +import socket +import ssl + +import pika.compat +import pika.tcp_socket_opts + +from pika import __version__ +from pika import connection +from pika.compat import SOCKET_ERROR +from pika.compat import SOL_TCP + +LOGGER = logging.getLogger(__name__) + + +class BaseConnection(connection.Connection): + """BaseConnection class that should be extended by connection adapters""" + + # Use epoll's constants to keep life easy + READ = 0x0001 + WRITE = 0x0004 + ERROR = 0x0008 + + ERRORS_TO_ABORT = [ + errno.EBADF, errno.ECONNABORTED, errno.EPIPE, errno.ETIMEDOUT + ] + ERRORS_TO_IGNORE = [errno.EWOULDBLOCK, errno.EAGAIN, errno.EINTR] + DO_HANDSHAKE = True + WARN_ABOUT_IOLOOP = False + + def __init__(self, + parameters=None, + on_open_callback=None, + on_open_error_callback=None, + on_close_callback=None, + ioloop=None, + stop_ioloop_on_close=True): + """Create a new instance of the Connection object. + + :param pika.connection.Parameters parameters: Connection parameters + :param method on_open_callback: Method to call on connection open + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) + :param object ioloop: IOLoop object to use + :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected + :raises: RuntimeError + :raises: ValueError + + """ + if parameters and not isinstance(parameters, connection.Parameters): + raise ValueError( + 'Expected instance of Parameters, not %r' % (parameters,)) + + # Let the developer know we could not import SSL + if parameters and parameters.ssl and not ssl: + raise RuntimeError("SSL specified but it is not available") + self.base_events = self.READ | self.ERROR + self.event_state = self.base_events + self.ioloop = ioloop + self.socket = None + self.stop_ioloop_on_close = stop_ioloop_on_close + self.write_buffer = None + super(BaseConnection, + self).__init__(parameters, on_open_callback, + on_open_error_callback, on_close_callback) + + def __repr__(self): + + def get_socket_repr(sock): + """Return socket info suitable for use in repr""" + if sock is None: + return None + + sockname = None + peername = None + try: + sockname = sock.getsockname() + except SOCKET_ERROR: + # closed? 
+ pass + else: + try: + peername = sock.getpeername() + except SOCKET_ERROR: + # not connected? + pass + + return '%s->%s' % (sockname, peername) + + return ('<%s %s socket=%s params=%s>' % + (self.__class__.__name__, + self._STATE_NAMES[self.connection_state], + get_socket_repr(self.socket), self.params)) + + def add_timeout(self, deadline, callback_method): + """Add the callback_method to the IOLoop timer to fire after deadline + seconds. Returns a handle to the timeout + + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + :rtype: str + + """ + return self.ioloop.add_timeout(deadline, callback_method) + + def close(self, reply_code=200, reply_text='Normal shutdown'): + """Disconnect from RabbitMQ. If there are any open channels, it will + attempt to close them prior to fully disconnecting. Channels which + have active consumers will attempt to send a Basic.Cancel to RabbitMQ + to cleanly stop the delivery of messages prior to closing the channel. + + :param int reply_code: The code number for the close + :param str reply_text: The text reason for the close + + """ + try: + super(BaseConnection, self).close(reply_code, reply_text) + finally: + if self.is_closed: + self._handle_ioloop_stop() + + def remove_timeout(self, timeout_id): + """Remove the timeout from the IOLoop by the ID returned from + add_timeout. + + :rtype: str + + """ + self.ioloop.remove_timeout(timeout_id) + + def add_callback_threadsafe(self, callback): + """Requests a call to the given function as soon as possible in the + context of this connection's IOLoop thread. + + NOTE: This is the only thread-safe method offered by the connection. All + other manipulations of the connection must be performed from the + connection's thread. + + For example, a thread may request a call to the + `channel.basic_ack` method of a connection that is running in a + different thread via + + ``` + connection.add_callback_threadsafe( + functools.partial(channel.basic_ack, delivery_tag=...)) + ``` + + :param method callback: The callback method; must be callable. + + """ + if not callable(callback): + raise TypeError( + 'callback must be a callable, but got %r' % (callback,)) + + self.ioloop.add_callback_threadsafe(callback) + + def _adapter_connect(self): + """Connect to the RabbitMQ broker, returning True if connected. 
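+ +        (Note: despite the wording above, this implementation signals success +        by returning None; any non-None result is an error, as the :returns: +        description below states.)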
+ + :returns: error string or exception instance on error; None on success + + """ + # Get the addresses for the socket, supporting IPv4 & IPv6 + while True: + try: + addresses = self._getaddrinfo( + self.params.host, self.params.port, 0, socket.SOCK_STREAM, + socket.IPPROTO_TCP) + break + except SOCKET_ERROR as error: + if error.errno == errno.EINTR: + continue + + LOGGER.critical('Could not get addresses to use: %s (%s)', + error, self.params.host) + return error + + # If the socket is created and connected, continue on + error = "No socket addresses available" + for sock_addr in addresses: + error = self._create_and_connect_to_socket(sock_addr) + if not error: + # Make the socket non-blocking after the connect + self.socket.setblocking(0) + return None + self._cleanup_socket() + + # Failed to connect + return error + + def _adapter_disconnect(self): + """Invoked if the connection is being told to disconnect""" + try: + self._cleanup_socket() + finally: + self._handle_ioloop_stop() + + def _cleanup_socket(self): + """Close the socket cleanly""" + if self.socket: + try: + self.socket.shutdown(socket.SHUT_RDWR) + except SOCKET_ERROR: + pass + self.socket.close() + self.socket = None + + def _create_and_connect_to_socket(self, sock_addr_tuple): + """Create socket and connect to it, using SSL if enabled. + + :returns: error string on failure; None on success + """ + self.socket = self._create_tcp_connection_socket( + sock_addr_tuple[0], sock_addr_tuple[1], sock_addr_tuple[2]) + self.socket.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1) + self.socket.settimeout(self.params.socket_timeout) + pika.tcp_socket_opts.set_sock_opts(self.params.tcp_options, self.socket) + + # Wrap socket if using SSL + if self.params.ssl: + self.socket = self._wrap_socket(self.socket) + ssl_text = " with SSL" + else: + ssl_text = "" + + LOGGER.info('Pika version %s connecting to %s:%s%s', + __version__, + sock_addr_tuple[4][0], + sock_addr_tuple[4][1], ssl_text) + + # Connect to the socket + try: + self.socket.connect(sock_addr_tuple[4]) + except socket.timeout: + error = 'Connection to %s:%s failed: timeout' % ( + sock_addr_tuple[4][0], sock_addr_tuple[4][1]) + LOGGER.error(error) + return error + except SOCKET_ERROR as error: + error = 'Connection to %s:%s failed: %s' % (sock_addr_tuple[4][0], + sock_addr_tuple[4][1], + error) + LOGGER.error(error) + return error + + # Handle SSL Connection Negotiation + if self.params.ssl and self.DO_HANDSHAKE: + try: + self._do_ssl_handshake() + except ssl.SSLError as error: + error = 'SSL connection to %s:%s failed: %s' % ( + sock_addr_tuple[4][0], sock_addr_tuple[4][1], error) + LOGGER.error(error) + return error + # Made it this far + return None + + @staticmethod + def _create_tcp_connection_socket(sock_family, sock_type, sock_proto): + """ Create TCP/IP stream socket for AMQP connection + + :param int sock_family: socket family + :param int sock_type: socket type + :param int sock_proto: socket protocol number + + NOTE We break this out to make it easier to patch in mock tests + """ + return socket.socket(sock_family, sock_type, sock_proto) + + def _do_ssl_handshake(self): + """Perform SSL handshaking, copied from python stdlib test_ssl.py. + + """ + if not self.DO_HANDSHAKE: + return + while True: + try: + self.socket.do_handshake() + break + # TODO should be using SSLWantReadError, etc. directly + except ssl.SSLError as err: + # TODO these exc are for non-blocking sockets, but ours isn't + # at this stage, so it's not clear why we have this. 
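+                # (For reference: the WANT_READ/WANT_WRITE branches below +                # record the readiness the TLS layer asked for in event_state; +                # the enclosing while-loop then retries do_handshake().)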
+ if err.args[0] == ssl.SSL_ERROR_WANT_READ: + self.event_state = self.READ + elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: + self.event_state = self.WRITE + else: + raise + self._manage_event_state() + + @staticmethod + def _getaddrinfo(host, port, family, socktype, proto): + """Wrap `socket.getaddrinfo` to make it easier to patch for unit tests + """ + return socket.getaddrinfo(host, port, family, socktype, proto) + + @staticmethod + def _get_error_code(error_value): + """Get the error code from the error_value accounting for Python + version differences. + + :rtype: int + + """ + if not error_value: + return None + + return error_value.errno + + def _flush_outbound(self): + """Have the state manager schedule the necessary I/O. + """ + # NOTE: We don't call _handle_write() from this context, because pika + # code was not designed to be writing to (or reading from) the socket + # from any methods, except from ioloop handler callbacks. Many methods + # in pika core and adapters do not deal gracefully with connection + # errors occurring in their context; e.g., Connection.channel (pika + # issue #659), Connection._on_connection_tune (if connection loss is + # detected in _send_connection_tune_ok, before _send_connection_open is + # called), etc., etc., etc. + self._manage_event_state() + + def _handle_ioloop_stop(self): + """Invoked when the connection is closed to determine if the IOLoop + should be stopped or not. + + """ + if self.stop_ioloop_on_close and self.ioloop: + self.ioloop.stop() + elif self.WARN_ABOUT_IOLOOP: + LOGGER.warning('Connection is closed but not stopping IOLoop') + + def _handle_error(self, error_value): + """Internal error handling method. Here we expect a socket error + coming in and will handle different socket errors differently. + + :param int|object error_value: The inbound error + + """ + # TODO doesn't seem right: docstring defines error_value as int|object, + # but _get_error_code expects a falsie or an exception-like object + error_code = self._get_error_code(error_value) + + if not error_code: + LOGGER.critical("Tried to handle an error where no error existed") + return + + # Ok errors, just continue what we were doing before + if error_code in self.ERRORS_TO_IGNORE: + LOGGER.debug("Ignoring %s", error_code) + return + + # Socket is no longer connected, abort + elif error_code in self.ERRORS_TO_ABORT: + LOGGER.error("Fatal Socket Error: %r", error_value) + + elif self.params.ssl and isinstance(error_value, ssl.SSLError): + + if error_value.args[0] == ssl.SSL_ERROR_WANT_READ: + # TODO doesn't seem right: this logic updates event state, but + # the logic at the bottom unconditionaly disconnects anyway. + self.event_state = self.READ + elif error_value.args[0] == ssl.SSL_ERROR_WANT_WRITE: + self.event_state = self.WRITE + else: + LOGGER.error("SSL Socket error: %r", error_value) + + else: + # Haven't run into this one yet, log it. + LOGGER.error("Socket Error: %s", error_code) + + # Disconnect from our IOLoop and let Connection know what's up + self._on_terminate(connection.InternalCloseReasons.SOCKET_ERROR, + repr(error_value)) + + def _handle_timeout(self): + """Handle a socket timeout in read or write. + We don't do anything in the non-blocking handlers because we + only have the socket in a blocking state during connect.""" + LOGGER.warning("Unexpected socket timeout") + + def _handle_events(self, fd, events, error=None, write_only=False): + """Handle IO/Event loop events, processing them. 
+ + :param int fd: The file descriptor for the events + :param int events: Events from the IO/Event loop + :param int error: Was an error specified; TODO none of the current + adapters appear to be able to pass the `error` arg - is it needed? + :param bool write_only: Only handle write events + + """ + if not self.socket: + LOGGER.error('Received events on closed socket: %r', fd) + return + + if self.socket and (events & self.WRITE): + self._handle_write() + self._manage_event_state() + + if self.socket and not write_only and (events & self.READ): + self._handle_read() + + if (self.socket and write_only and (events & self.READ) and + (events & self.ERROR)): + error_msg = ('BAD libc: Write-Only but Read+Error. ' + 'Assume socket disconnected.') + LOGGER.error(error_msg) + self._on_terminate(connection.InternalCloseReasons.SOCKET_ERROR, + error_msg) + + if self.socket and (events & self.ERROR): + LOGGER.error('Error event %r, %r', events, error) + self._handle_error(error) + + def _handle_read(self): + """Read from the socket and call our on_data_available with the data.""" + try: + while True: + try: + if self.params.ssl: + data = self.socket.read(self._buffer_size) + else: + data = self.socket.recv(self._buffer_size) + + break + except SOCKET_ERROR as error: + if error.errno == errno.EINTR: + continue + else: + raise + + except socket.timeout: + self._handle_timeout() + return 0 + + except ssl.SSLError as error: + if error.args[0] == ssl.SSL_ERROR_WANT_READ: + # ssl wants more data but there is nothing currently + # available in the socket, wait for it to become readable. + return 0 + return self._handle_error(error) + + except SOCKET_ERROR as error: + if error.errno in (errno.EAGAIN, errno.EWOULDBLOCK): + return 0 + return self._handle_error(error) + + # Empty data, should disconnect + if not data or data == 0: + LOGGER.error('Read empty data, calling disconnect') + return self._on_terminate( + connection.InternalCloseReasons.SOCKET_ERROR, "EOF") + + # Pass the data into our top level frame dispatching method + self._on_data_available(data) + return len(data) + + def _handle_write(self): + """Try and write as much as we can, if we get blocked requeue + what's left""" + total_bytes_sent = 0 + try: + while self.outbound_buffer: + frame = self.outbound_buffer.popleft() + while True: + try: + num_bytes_sent = self.socket.send(frame) + break + except SOCKET_ERROR as error: + if error.errno == errno.EINTR: + continue + else: + raise + + total_bytes_sent += num_bytes_sent + if num_bytes_sent < len(frame): + LOGGER.debug("Partial write, requeing remaining data") + self.outbound_buffer.appendleft(frame[num_bytes_sent:]) + break + + except socket.timeout: + # Will only come here if the socket is blocking + LOGGER.debug("socket timeout, requeuing frame") + self.outbound_buffer.appendleft(frame) + self._handle_timeout() + + except ssl.SSLError as error: + if error.args[0] == ssl.SSL_ERROR_WANT_WRITE: + # In Python 3.5+, SSLSocket.send raises this if the socket is + # not currently able to write. Handle this just like an + # EWOULDBLOCK socket error. 
+ LOGGER.debug("Would block, requeuing frame") + self.outbound_buffer.appendleft(frame) + else: + return self._handle_error(error) + + except SOCKET_ERROR as error: + if error.errno in (errno.EAGAIN, errno.EWOULDBLOCK): + LOGGER.debug("Would block, requeuing frame") + self.outbound_buffer.appendleft(frame) + else: + return self._handle_error(error) + + return total_bytes_sent + + def _init_connection_state(self): + """Initialize or reset all of our internal state variables for a given + connection. If we disconnect and reconnect, all of our state needs to + be wiped. + + """ + super(BaseConnection, self)._init_connection_state() + self.base_events = self.READ | self.ERROR + self.event_state = self.base_events + self.socket = None + + def _manage_event_state(self): + """Manage the bitmask for reading/writing/error which is used by the + io/event handler to specify when there is an event such as a read or + write. + + """ + if self.outbound_buffer: + if not self.event_state & self.WRITE: + self.event_state |= self.WRITE + self.ioloop.update_handler(self.socket.fileno(), + self.event_state) + elif self.event_state & self.WRITE: + self.event_state = self.base_events + self.ioloop.update_handler(self.socket.fileno(), self.event_state) + + def _wrap_socket(self, sock): + """Wrap the socket for connecting over SSL. This allows the user to use + a dict for the usual SSL options or an SSLOptions object for more + advanced control. + + :rtype: ssl.SSLSocket + + """ + ssl_options = self.params.ssl_options or {} + # our wrapped return sock + ssl_sock = None + + if isinstance(ssl_options, connection.SSLOptions): + context = ssl.SSLContext(ssl_options.ssl_version) + context.verify_mode = ssl_options.verify_mode + if ssl_options.certfile is not None: + context.load_cert_chain( + certfile=ssl_options.certfile, + keyfile=ssl_options.keyfile, + password=ssl_options.key_password) + + # only one of either cafile or capath have to defined + if ssl_options.cafile is not None or ssl_options.capath is not None: + context.load_verify_locations( + cafile=ssl_options.cafile, + capath=ssl_options.capath, + cadata=ssl_options.cadata) + + if ssl_options.ciphers is not None: + context.set_ciphers(ssl_options.ciphers) + + ssl_sock = context.wrap_socket( + sock, + server_side=ssl_options.server_side, + do_handshake_on_connect=ssl_options.do_handshake_on_connect, + suppress_ragged_eofs=ssl_options.suppress_ragged_eofs, + server_hostname=ssl_options.server_hostname) + else: + ssl_sock = ssl.wrap_socket( + sock, do_handshake_on_connect=self.DO_HANDSHAKE, **ssl_options) + + return ssl_sock diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/blocking_connection.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/blocking_connection.py new file mode 100644 index 000000000..9acf9c927 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/blocking_connection.py @@ -0,0 +1,2616 @@ +"""The blocking connection adapter module implements blocking semantics on top +of Pika's core AMQP driver. While most of the asynchronous expectations are +removed when using the blocking connection adapter, it attempts to remain true +to the asynchronous RPC nature of the AMQP protocol, supporting server sent +RPC commands. + +The user facing classes in the module consist of the +:py:class:`~pika.adapters.blocking_connection.BlockingConnection` +and the :class:`~pika.adapters.blocking_connection.BlockingChannel` +classes. 
+ +""" +# Suppress too-many-lines +# pylint: disable=C0302 + +# Disable "access to protected member warnings: this wrapper implementation is +# a friend of those instances +# pylint: disable=W0212 + +from collections import namedtuple, deque +import contextlib +import functools +import logging +import time + +import pika.channel +from pika import compat +from pika import exceptions +import pika.spec +# NOTE: import SelectConnection after others to avoid circular depenency +from pika.adapters.select_connection import SelectConnection + + +LOGGER = logging.getLogger(__name__) + + +class _CallbackResult(object): + """ CallbackResult is a non-thread-safe implementation for receiving + callback results; INTERNAL USE ONLY! + """ + __slots__ = ('_value_class', '_ready', '_values') + def __init__(self, value_class=None): + """ + :param callable value_class: only needed if the CallbackResult + instance will be used with + `set_value_once` and `append_element`. + *args and **kwargs of the value setter + methods will be passed to this class. + + """ + self._value_class = value_class + self._ready = None + self._values = None + self.reset() + + def reset(self): + """Reset value, but not _value_class""" + self._ready = False + self._values = None + + def __bool__(self): + """ Called by python runtime to implement truth value testing and the + built-in operation bool(); NOTE: python 3.x + """ + return self.is_ready() + + # python 2.x version of __bool__ + __nonzero__ = __bool__ + + def __enter__(self): + """ Entry into context manager that automatically resets the object + on exit; this usage pattern helps garbage-collection by eliminating + potential circular references. + """ + return self + + def __exit__(self, *args, **kwargs): + """Reset value""" + self.reset() + + def is_ready(self): + """ + :returns: True if the object is in a signaled state + """ + return self._ready + + @property + def ready(self): + """True if the object is in a signaled state""" + return self._ready + + def signal_once(self, *_args, **_kwargs): + """ Set as ready + + :raises AssertionError: if result was already signalled + """ + assert not self._ready, '_CallbackResult was already set' + self._ready = True + + def set_value_once(self, *args, **kwargs): + """ Set as ready with value; the value may be retrieved via the `value` + property getter + + :raises AssertionError: if result was already set + """ + self.signal_once() + try: + self._values = (self._value_class(*args, **kwargs),) + except Exception: + LOGGER.error( + "set_value_once failed: value_class=%r; args=%r; kwargs=%r", + self._value_class, args, kwargs) + raise + + def append_element(self, *args, **kwargs): + """Append an element to values""" + assert not self._ready or isinstance(self._values, list), ( + '_CallbackResult state is incompatible with append_element: ' + 'ready=%r; values=%r' % (self._ready, self._values)) + + try: + value = self._value_class(*args, **kwargs) + except Exception: + LOGGER.error( + "append_element failed: value_class=%r; args=%r; kwargs=%r", + self._value_class, args, kwargs) + raise + + if self._values is None: + self._values = [value] + else: + self._values.append(value) + + self._ready = True + + + @property + def value(self): + """ + :returns: a reference to the value that was set via `set_value_once` + :raises AssertionError: if result was not set or value is incompatible + with `set_value_once` + """ + assert self._ready, '_CallbackResult was not set' + assert isinstance(self._values, tuple) and len(self._values) == 1, ( + 
'_CallbackResult value is incompatible with set_value_once: %r' + % (self._values,)) + + return self._values[0] + + + @property + def elements(self): + """ + :returns: a reference to the list containing one or more elements that + were added via `append_element` + :raises AssertionError: if result was not set or value is incompatible + with `append_element` + """ + assert self._ready, '_CallbackResult was not set' + assert isinstance(self._values, list) and self._values, ( + '_CallbackResult value is incompatible with append_element: %r' + % (self._values,)) + + return self._values + + +class _IoloopTimerContext(object): + """Context manager for registering and safely unregistering a + SelectConnection ioloop-based timer + """ + + def __init__(self, duration, connection): + """ + :param float duration: non-negative timer duration in seconds + :param SelectConnection connection: + """ + assert hasattr(connection, 'add_timeout'), connection + self._duration = duration + self._connection = connection + self._callback_result = _CallbackResult() + self._timer_id = None + + def __enter__(self): + """Register a timer""" + self._timer_id = self._connection.add_timeout( + self._duration, + self._callback_result.signal_once) + return self + + def __exit__(self, *_args, **_kwargs): + """Unregister timer if it hasn't fired yet""" + if not self._callback_result: + self._connection.remove_timeout(self._timer_id) + + def is_ready(self): + """ + :returns: True if timer has fired, False otherwise + """ + return self._callback_result.is_ready() + + +class _TimerEvt(object): + """Represents a timer created via `BlockingConnection.add_timeout`""" + __slots__ = ('timer_id', '_callback') + + def __init__(self, callback): + """ + :param callback: see callback_method in `BlockingConnection.add_timeout` + """ + self._callback = callback + + # Will be set to timer id returned from the underlying implementation's + # `add_timeout` method + self.timer_id = None + + def __repr__(self): + return '<%s timer_id=%s callback=%s>' % (self.__class__.__name__, + self.timer_id, self._callback) + + def dispatch(self): + """Dispatch the user's callback method""" + self._callback() + + +class _ConnectionBlockedUnblockedEvtBase(object): + """Base class for `_ConnectionBlockedEvt` and `_ConnectionUnblockedEvt`""" + __slots__ = ('_callback', '_method_frame') + + def __init__(self, callback, method_frame): + """ + :param callback: see callback_method parameter in + `BlockingConnection.add_on_connection_blocked_callback` and + `BlockingConnection.add_on_connection_unblocked_callback` + :param pika.frame.Method method_frame: with method_frame.method of type + `pika.spec.Connection.Blocked` or `pika.spec.Connection.Unblocked` + """ + self._callback = callback + self._method_frame = method_frame + + def __repr__(self): + return '<%s callback=%s, frame=%s>' % (self.__class__.__name__, + self._callback, + self._method_frame) + + def dispatch(self): + """Dispatch the user's callback method""" + self._callback(self._method_frame) + + +class _ConnectionBlockedEvt(_ConnectionBlockedUnblockedEvtBase): + """Represents a Connection.Blocked notification from RabbitMQ broker`""" + pass + + +class _ConnectionUnblockedEvt(_ConnectionBlockedUnblockedEvtBase): + """Represents a Connection.Unblocked notification from RabbitMQ broker`""" + pass + + +class BlockingConnection(object): + """The BlockingConnection creates a layer on top of Pika's asynchronous core + providing methods that will block until their expected response has + returned. 
Due to the asynchronous nature of the `Basic.Deliver` and + `Basic.Return` calls from RabbitMQ to your application, you can still + implement continuation-passing style asynchronous methods if you'd like to + receive messages from RabbitMQ using + :meth:`basic_consume <BlockingChannel.basic_consume>` or if you want to be + notified of a delivery failure when using + :meth:`basic_publish <BlockingChannel.basic_publish>`. + + For more information about communicating with the blocking_connection + adapter, be sure to check out the + :class:`BlockingChannel <BlockingChannel>` class which implements the + :class:`Channel <pika.channel.Channel>` based communication for the + blocking_connection adapter. + + To prevent recursion/reentrancy, the blocking connection and channel + implementations queue asynchronously-delivered events received + in nested context (e.g., while waiting for `BlockingConnection.channel` or + `BlockingChannel.queue_declare` to complete), dispatching them synchronously + once nesting returns to the desired context. This concerns all callbacks, + such as those registered via `BlockingConnection.add_timeout`, + `BlockingConnection.add_on_connection_blocked_callback`, + `BlockingConnection.add_on_connection_unblocked_callback`, + `BlockingChannel.basic_consume`, etc. + + Blocked Connection deadlock avoidance: when RabbitMQ becomes low on + resources, it emits Connection.Blocked (AMQP extension) to the client + connection when client makes a resource-consuming request on that connection + or its channel (e.g., `Basic.Publish`); subsequently, RabbitMQ suspends + processing requests from that connection until the affected resources are + restored. See http://www.rabbitmq.com/connection-blocked.html. This + may impact `BlockingConnection` and `BlockingChannel` operations in a + way that users might not be expecting. For example, if the user dispatches + `BlockingChannel.basic_publish` in non-publisher-confirmation mode while + RabbitMQ is in this low-resource state followed by a synchronous request + (e.g., `BlockingConnection.channel`, `BlockingChannel.consume`, + `BlockingChannel.basic_consume`, etc.), the synchronous request will block + indefinitely (until Connection.Unblocked) waiting for RabbitMQ to reply. If + the blocked state persists for a long time, the blocking operation will + appear to hang. In this state, `BlockingConnection` instance and its + channels will not dispatch user callbacks. SOLUTION: To break this potential + deadlock, applications may configure the `blocked_connection_timeout` + connection parameter when instantiating `BlockingConnection`. Upon blocked + connection timeout, this adapter will raise ConnectionClosed exception with + first exception arg of + `pika.connection.InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT`. See + `pika.connection.ConnectionParameters` documentation to learn more about + `blocked_connection_timeout` configuration. + + """ + # Connection-opened callback args + _OnOpenedArgs = namedtuple('BlockingConnection__OnOpenedArgs', + 'connection') + + # Connection-establishment error callback args + _OnOpenErrorArgs = namedtuple('BlockingConnection__OnOpenErrorArgs', + 'connection error') + + # Connection-closing callback args + _OnClosedArgs = namedtuple('BlockingConnection__OnClosedArgs', + 'connection reason_code reason_text') + + # Channel-opened callback args + _OnChannelOpenedArgs = namedtuple( + 'BlockingConnection__OnChannelOpenedArgs', + 'channel') + + def __init__(self, parameters=None, _impl_class=None): + """Create a new instance of the Connection object.
+ + :param pika.connection.Parameters parameters: Connection parameters + :param _impl_class: for tests/debugging only; implementation class; + None=default + + :raises RuntimeError: + + """ + # Used by the _acquire_event_dispatch decorator; when already greater + # than 0, event dispatch is already acquired higher up the call stack + self._event_dispatch_suspend_depth = 0 + + # Connection-specific events that are ready for dispatch: _TimerEvt, + # _ConnectionBlockedEvt, _ConnectionUnblockedEvt + self._ready_events = deque() + + # Channel numbers of channels that are requesting a call to their + # BlockingChannel._dispatch_events method; See + # `_request_channel_dispatch` + self._channels_pending_dispatch = set() + + # Receives on_open_callback args from Connection + self._opened_result = _CallbackResult(self._OnOpenedArgs) + + # Receives on_open_error_callback args from Connection + self._open_error_result = _CallbackResult(self._OnOpenErrorArgs) + + # Receives on_close_callback args from Connection + self._closed_result = _CallbackResult(self._OnClosedArgs) + + # Set to True when when user calls close() on the connection + # NOTE: this is a workaround to detect socket error because + # on_close_callback passes reason_code=0 when called due to socket error + self._user_initiated_close = False + + impl_class = _impl_class or SelectConnection + self._impl = impl_class( + parameters=parameters, + on_open_callback=self._opened_result.set_value_once, + on_open_error_callback=self._open_error_result.set_value_once, + on_close_callback=self._closed_result.set_value_once, + stop_ioloop_on_close=False) + + self._impl.ioloop.activate_poller() + + self._process_io_for_connection_setup() + + def __repr__(self): + return '<%s impl=%r>' % (self.__class__.__name__, self._impl) + + def _cleanup(self): + """Clean up members that might inhibit garbage collection""" + self._impl.ioloop.close() + self._ready_events.clear() + self._opened_result.reset() + self._open_error_result.reset() + self._closed_result.reset() + + @contextlib.contextmanager + def _acquire_event_dispatch(self): + """ Context manager that controls access to event dispatcher for + preventing reentrancy. + + The "as" value is True if the managed code block owns the event + dispatcher and False if caller higher up in the call stack already owns + it. Only managed code that gets ownership (got True) is permitted to + dispatch + """ + try: + # __enter__ part + self._event_dispatch_suspend_depth += 1 + yield self._event_dispatch_suspend_depth == 1 + finally: + # __exit__ part + self._event_dispatch_suspend_depth -= 1 + + def _process_io_for_connection_setup(self): + """ Perform follow-up processing for connection setup request: flush + connection output and process input while waiting for connection-open + or connection-error. + + :raises AMQPConnectionError: on connection open error + """ + if not self._open_error_result.ready: + self._flush_output(self._opened_result.is_ready, + self._open_error_result.is_ready) + + if self._open_error_result.ready: + try: + exception_or_message = self._open_error_result.value.error + if isinstance(exception_or_message, Exception): + raise exception_or_message + raise exceptions.AMQPConnectionError(exception_or_message) + finally: + self._cleanup() + + assert self._opened_result.ready + assert self._opened_result.value.connection is self._impl + + def _flush_output(self, *waiters): + """ Flush output and process input while waiting for any of the given + callbacks to return true. 
The wait is aborted upon connection-close. + Otherwise, processing continues until the output is flushed AND at least + one of the callbacks returns true. If there are no callbacks, then + processing ends when all output is flushed. + + :param waiters: sequence of zero or more callables taking no args and + returning true when it's time to stop processing. + Their results are OR'ed together. + """ + if self.is_closed: + raise exceptions.ConnectionClosed() + + # Conditions for terminating the processing loop: + # connection closed + # OR + # empty outbound buffer and no waiters + # OR + # empty outbound buffer and any waiter is ready + is_done = (lambda: + self._closed_result.ready or + (not self._impl.outbound_buffer and + (not waiters or any(ready() for ready in waiters)))) + + # Process I/O until our completion condition is satisified + while not is_done(): + self._impl.ioloop.poll() + self._impl.ioloop.process_timeouts() + + if self._open_error_result.ready or self._closed_result.ready: + try: + if not self._user_initiated_close: + if self._open_error_result.ready: + maybe_exception = self._open_error_result.value.error + LOGGER.error('Connection open failed - %r', + maybe_exception) + if isinstance(maybe_exception, Exception): + raise maybe_exception + else: + raise exceptions.ConnectionClosed(maybe_exception) + else: + result = self._closed_result.value + LOGGER.error('Connection close detected; result=%r', + result) + raise exceptions.ConnectionClosed(result.reason_code, + result.reason_text) + else: + LOGGER.info('Connection closed; result=%r', + self._closed_result.value) + finally: + self._cleanup() + + def _request_channel_dispatch(self, channel_number): + """Called by BlockingChannel instances to request a call to their + _dispatch_events method or to terminate `process_data_events`; + BlockingConnection will honor these requests from a safe context. + + :param int channel_number: positive channel number to request a call + to the channel's `_dispatch_events`; a negative channel number to + request termination of `process_data_events` + """ + self._channels_pending_dispatch.add(channel_number) + + def _dispatch_channel_events(self): + """Invoke the `_dispatch_events` method on open channels that requested + it + """ + if not self._channels_pending_dispatch: + return + + with self._acquire_event_dispatch() as dispatch_acquired: + if not dispatch_acquired: + # Nested dispatch or dispatch blocked higher in call stack + return + + candidates = list(self._channels_pending_dispatch) + self._channels_pending_dispatch.clear() + + for channel_number in candidates: + if channel_number < 0: + # This was meant to terminate process_data_events + continue + + try: + impl_channel = self._impl._channels[channel_number] + except KeyError: + continue + + if impl_channel.is_open: + impl_channel._get_cookie()._dispatch_events() + + def _on_timer_ready(self, evt): + """Handle expiry of a timer that was registered via `add_timeout` + + :param _TimerEvt evt: + + """ + self._ready_events.append(evt) + + def _on_threadsafe_callback(self, user_callback): + """Handle callback that was registered via `add_callback_threadsafe`. + + :param user_callback: callback passed to `add_callback_threadsafe` by + the application. 
+ + """ + # Turn it into a 0-delay timeout to take advantage of our existing logic + # that deals with reentrancy + self.add_timeout(0, user_callback) + + + def _on_connection_blocked(self, user_callback, method_frame): + """Handle Connection.Blocked notification from RabbitMQ broker + + :param callable user_callback: callback_method passed to + `add_on_connection_blocked_callback` + :param pika.frame.Method method_frame: method frame having `method` + member of type `pika.spec.Connection.Blocked` + """ + self._ready_events.append( + _ConnectionBlockedEvt(user_callback, method_frame)) + + def _on_connection_unblocked(self, user_callback, method_frame): + """Handle Connection.Unblocked notification from RabbitMQ broker + + :param callable user_callback: callback_method passed to + `add_on_connection_unblocked_callback` + :param pika.frame.Method method_frame: method frame having `method` + member of type `pika.spec.Connection.Blocked` + """ + self._ready_events.append( + _ConnectionUnblockedEvt(user_callback, method_frame)) + + def _dispatch_connection_events(self): + """Dispatch ready connection events""" + if not self._ready_events: + return + + with self._acquire_event_dispatch() as dispatch_acquired: + if not dispatch_acquired: + # Nested dispatch or dispatch blocked higher in call stack + return + + # Limit dispatch to the number of currently ready events to avoid + # getting stuck in this loop + for _ in compat.xrange(len(self._ready_events)): + try: + evt = self._ready_events.popleft() + except IndexError: + # Some events (e.g., timers) must have been cancelled + break + + evt.dispatch() + + def add_on_connection_blocked_callback(self, callback_method): + """Add a callback to be notified when RabbitMQ has sent a + `Connection.Blocked` frame indicating that RabbitMQ is low on + resources. Publishers can use this to voluntarily suspend publishing, + instead of relying on back pressure throttling. The callback + will be passed the `Connection.Blocked` method frame. + + See also `ConnectionParameters.blocked_connection_timeout`. + + :param method callback_method: Callback to call on `Connection.Blocked`, + having the signature `callback_method(pika.frame.Method)`, where the + method frame's `method` member is of type + `pika.spec.Connection.Blocked` + + """ + self._impl.add_on_connection_blocked_callback( + functools.partial(self._on_connection_blocked, callback_method)) + + def add_on_connection_unblocked_callback(self, callback_method): + """Add a callback to be notified when RabbitMQ has sent a + `Connection.Unblocked` frame letting publishers know it's ok + to start publishing again. The callback will be passed the + `Connection.Unblocked` method frame. + + :param method callback_method: Callback to call on + `Connection.Unblocked`, having the signature + `callback_method(pika.frame.Method)`, where the method frame's + `method` member is of type `pika.spec.Connection.Unblocked` + + """ + self._impl.add_on_connection_unblocked_callback( + functools.partial(self._on_connection_unblocked, callback_method)) + + def add_timeout(self, deadline, callback_method): + """Create a single-shot timer to fire after deadline seconds. Do not + confuse with Tornado's timeout where you pass in the time you want to + have your callback called. Only pass in the seconds until it's to be + called. + + NOTE: the timer callbacks are dispatched only in the scope of + specially-designated methods: see + `BlockingConnection.process_data_events` and + `BlockingChannel.start_consuming`. 
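+
+        Example (a sketch; assumes `connection` is an open
+        `BlockingConnection`)::
+
+            def on_timer():
+                print 'fired 5 seconds after registration'
+
+            connection.add_timeout(5, on_timer)
+
+            # timer callbacks fire only from within dispatch methods such
+            # as process_data_events
+            connection.process_data_events(time_limit=6)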
+ + :param float deadline: The number of seconds to wait to call callback + :param callable callback_method: The callback method with the signature + callback_method() + + :returns: opaque timer id + + """ + if not callable(callback_method): + raise ValueError( + 'callback_method parameter must be callable, but got %r' + % (callback_method,)) + + evt = _TimerEvt(callback=callback_method) + timer_id = self._impl.add_timeout( + deadline, + functools.partial(self._on_timer_ready, evt)) + evt.timer_id = timer_id + + return timer_id + + def add_callback_threadsafe(self, callback): + """Requests a call to the given function as soon as possible in the + context of this connection's thread. + + NOTE: This is the only thread-safe method in `BlockingConnection`. All + other manipulations of `BlockingConnection` must be performed from the + connection's thread. + + For example, a thread may request a call to the + `BlockingChannel.basic_ack` method of a `BlockingConnection` that is + running in a different thread via + + ``` + connection.add_callback_threadsafe( + functools.partial(channel.basic_ack, delivery_tag=...)) + ``` + + NOTE: if you know that the requester is running on the same thread as + the connection it is more efficient to use the + `BlockingConnection.add_timeout()` method with a deadline of 0. + + :param method callback: The callback method; must be callable + + """ + self._impl.add_callback_threadsafe( + functools.partial(self._on_threadsafe_callback, callback)) + + def remove_timeout(self, timeout_id): + """Remove a timer if it's still in the timeout stack + + :param timeout_id: The opaque timer id to remove + + """ + # Remove from the impl's timeout stack + self._impl.remove_timeout(timeout_id) + + # Remove from ready events, if the timer fired already + for i, evt in enumerate(self._ready_events): + if isinstance(evt, _TimerEvt) and evt.timer_id == timeout_id: + index_to_remove = i + break + else: + # Not found + return + + del self._ready_events[index_to_remove] + + def close(self, reply_code=200, reply_text='Normal shutdown'): + """Disconnect from RabbitMQ. If there are any open channels, it will + attempt to close them prior to fully disconnecting. Channels which + have active consumers will attempt to send a Basic.Cancel to RabbitMQ + to cleanly stop the delivery of messages prior to closing the channel. + + :param int reply_code: The code number for the close + :param str reply_text: The text reason for the close + + """ + if self.is_closed: + LOGGER.debug('Close called on closed connection (%s): %s', + reply_code, reply_text) + return + + LOGGER.info('Closing connection (%s): %s', reply_code, reply_text) + + self._user_initiated_close = True + + # Close channels that remain opened + for impl_channel in pika.compat.dictvalues(self._impl._channels): + channel = impl_channel._get_cookie() + if channel.is_open: + try: + channel.close(reply_code, reply_text) + except exceptions.ChannelClosed as exc: + # Log and suppress broker-closed channel + LOGGER.warning('Got ChannelClosed while closing channel ' + 'from connection.close: %r', exc) + + # Close the connection + self._impl.close(reply_code, reply_text) + + self._flush_output(self._closed_result.is_ready) + + def process_data_events(self, time_limit=0): + """Will make sure that data events are processed. Dispatches timer and + channel callbacks if not called from the scope of BlockingConnection or + BlockingChannel callback. Your app can block on this method. 
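+
+        Example of a simple service loop (a sketch; `connection` is assumed
+        to be an open `BlockingConnection`)::
+
+            while True:
+                # dispatch ready timer and channel callbacks, processing
+                # I/O for at most one second per iteration
+                connection.process_data_events(time_limit=1)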
+ + :param float time_limit: suggested upper bound on processing time in + seconds. The actual blocking time depends on the granularity of the + underlying ioloop. Zero means return as soon as possible. None means + there is no limit on processing time and the function will block + until I/O produces actionable events. Defaults to 0 for backward + compatibility. This parameter is NEW in pika 0.10.0. + """ + with self._acquire_event_dispatch() as dispatch_acquired: + # Check if we can actually process pending events + common_terminator = lambda: bool(dispatch_acquired and + (self._channels_pending_dispatch or self._ready_events)) + if time_limit is None: + self._flush_output(common_terminator) + else: + with _IoloopTimerContext(time_limit, self._impl) as timer: + self._flush_output(timer.is_ready, common_terminator) + + if self._ready_events: + self._dispatch_connection_events() + + if self._channels_pending_dispatch: + self._dispatch_channel_events() + + def sleep(self, duration): + """A safer way to sleep than calling time.sleep() directly that would + keep the adapter from ignoring frames sent from the broker. The + connection will "sleep" or block the number of seconds specified in + duration in small intervals. + + :param float duration: The time to sleep in seconds + + """ + assert duration >= 0, duration + + deadline = time.time() + duration + time_limit = duration + # Process events at least once + while True: + self.process_data_events(time_limit) + time_limit = deadline - time.time() + if time_limit <= 0: + break + + def channel(self, channel_number=None): + """Create a new channel with the next available channel number or pass + in a channel number to use. Must be non-zero if you would like to + specify but it is recommended that you let Pika manage the channel + numbers. + + :rtype: pika.adapters.blocking_connection.BlockingChannel + """ + with _CallbackResult(self._OnChannelOpenedArgs) as opened_args: + impl_channel = self._impl.channel( + on_open_callback=opened_args.set_value_once, + channel_number=channel_number) + + # Create our proxy channel + channel = BlockingChannel(impl_channel, self) + + # Link implementation channel with our proxy channel + impl_channel._set_cookie(channel) + + # Drive I/O until Channel.Open-ok + channel._flush_output(opened_args.is_ready) + + return channel + + def __enter__(self): + # Prepare `with` context + return self + + def __exit__(self, exc_type, value, traceback): + # Close connection after `with` context + self.close() + + # + # Connections state properties + # + + @property + def is_closed(self): + """ + Returns a boolean reporting the current connection state. + """ + return self._impl.is_closed + + @property + def is_closing(self): + """ + Returns True if connection is in the process of closing due to + client-initiated `close` request, but closing is not yet complete. + """ + return self._impl.is_closing + + @property + def is_open(self): + """ + Returns a boolean reporting the current connection state. + """ + return self._impl.is_open + + # + # Properties that reflect server capabilities for the current connection + # + + @property + def basic_nack_supported(self): + """Specifies if the server supports basic.nack on the active connection. + + :rtype: bool + + """ + return self._impl.basic_nack + + @property + def consumer_cancel_notify_supported(self): + """Specifies if the server supports consumer cancel notification on the + active connection. 
+ + :rtype: bool + + """ + return self._impl.consumer_cancel_notify + + @property + def exchange_exchange_bindings_supported(self): + """Specifies if the active connection supports exchange to exchange + bindings. + + :rtype: bool + + """ + return self._impl.exchange_exchange_bindings + + @property + def publisher_confirms_supported(self): + """Specifies if the active connection can use publisher confirmations. + + :rtype: bool + + """ + return self._impl.publisher_confirms + + # Legacy property names for backward compatibility + basic_nack = basic_nack_supported + consumer_cancel_notify = consumer_cancel_notify_supported + exchange_exchange_bindings = exchange_exchange_bindings_supported + publisher_confirms = publisher_confirms_supported + + +class _ChannelPendingEvt(object): + """Base class for BlockingChannel pending events""" + pass + + +class _ConsumerDeliveryEvt(_ChannelPendingEvt): + """This event represents consumer message delivery `Basic.Deliver`; it + contains method, properties, and body of the delivered message. + """ + + __slots__ = ('method', 'properties', 'body') + + def __init__(self, method, properties, body): + """ + :param spec.Basic.Deliver method: NOTE: consumer_tag and delivery_tag + are valid only within source channel + :param spec.BasicProperties properties: message properties + :param body: message body; empty string if no body + :type body: str or unicode + """ + self.method = method + self.properties = properties + self.body = body + + +class _ConsumerCancellationEvt(_ChannelPendingEvt): + """This event represents server-initiated consumer cancellation delivered to + client via Basic.Cancel. After receiving Basic.Cancel, there will be no + further deliveries for the consumer identified by `consumer_tag` in + `Basic.Cancel` + """ + + __slots__ = ('method_frame',) + + def __init__(self, method_frame): + """ + :param pika.frame.Method method_frame: method frame with method of type + `spec.Basic.Cancel` + """ + self.method_frame = method_frame + + def __repr__(self): + return '<%s method_frame=%r>' % (self.__class__.__name__, + self.method_frame) + + @property + def method(self): + """method of type spec.Basic.Cancel""" + return self.method_frame.method + + +class _ReturnedMessageEvt(_ChannelPendingEvt): + """This event represents a message returned by broker via `Basic.Return`""" + + __slots__ = ('callback', 'channel', 'method', 'properties', 'body') + + def __init__(self, callback, channel, method, properties, body): + """ + :param callable callback: user's callback, having the signature + callback(channel, method, properties, body), where + channel: pika.Channel + method: pika.spec.Basic.Return + properties: pika.spec.BasicProperties + body: str, unicode, or bytes (python 3.x) + + :param pika.Channel channel: + :param pika.spec.Basic.Return method: + :param pika.spec.BasicProperties properties: + :param body: str, unicode, or bytes (python 3.x) + """ + self.callback = callback + self.channel = channel + self.method = method + self.properties = properties + self.body = body + + def __repr__(self): + return ('<%s callback=%r channel=%r method=%r properties=%r ' + 'body=%.300r>') % (self.__class__.__name__, self.callback, + self.channel, self.method, self.properties, + self.body) + + def dispatch(self): + """Dispatch user's callback""" + self.callback(self.channel, self.method, self.properties, self.body) + + +class ReturnedMessage(object): + """Represents a message returned via Basic.Return in publish-acknowledgments + mode + """ + + __slots__ = ('method', 
'properties', 'body')
+
+    def __init__(self, method, properties, body):
+        """
+        :param spec.Basic.Return method:
+        :param spec.BasicProperties properties: message properties
+        :param body: message body; empty string if no body
+        :type body: str or unicode
+        """
+        self.method = method
+        self.properties = properties
+        self.body = body
+
+
+class _ConsumerInfo(object):
+    """Information about an active consumer"""
+
+    __slots__ = ('consumer_tag', 'no_ack', 'consumer_cb',
+                 'alternate_event_sink', 'state')
+
+    # Consumer states
+    SETTING_UP = 1
+    ACTIVE = 2
+    TEARING_DOWN = 3
+    CANCELLED_BY_BROKER = 4
+
+    def __init__(self, consumer_tag, no_ack, consumer_cb=None,
+                 alternate_event_sink=None):
+        """
+        NOTE: exactly one of consumer_cb/alternate_event_sink must be non-None.
+
+        :param str consumer_tag:
+        :param bool no_ack: the no-ack value for the consumer
+        :param callable consumer_cb: The function for dispatching messages to
+            the user, having the signature:
+                consumer_callback(channel, method, properties, body)
+                    channel: BlockingChannel
+                    method: spec.Basic.Deliver
+                    properties: spec.BasicProperties
+                    body: str or unicode
+        :param callable alternate_event_sink: if specified, _ConsumerDeliveryEvt
+            and _ConsumerCancellationEvt objects will be diverted to this
+            callback instead of being deposited in the channel's
+            `_pending_events` container. Signature:
+                alternate_event_sink(evt)
+        """
+        assert (consumer_cb is None) != (alternate_event_sink is None), (
+            'exactly one of consumer_cb/alternate_event_sink must be non-None',
+            consumer_cb, alternate_event_sink)
+        self.consumer_tag = consumer_tag
+        self.no_ack = no_ack
+        self.consumer_cb = consumer_cb
+        self.alternate_event_sink = alternate_event_sink
+        self.state = self.SETTING_UP
+
+    @property
+    def setting_up(self):
+        """True if in SETTING_UP state"""
+        return self.state == self.SETTING_UP
+
+    @property
+    def active(self):
+        """True if in ACTIVE state"""
+        return self.state == self.ACTIVE
+
+    @property
+    def tearing_down(self):
+        """True if in TEARING_DOWN state"""
+        return self.state == self.TEARING_DOWN
+
+    @property
+    def cancelled_by_broker(self):
+        """True if in CANCELLED_BY_BROKER state"""
+        return self.state == self.CANCELLED_BY_BROKER
+
+
+class _QueueConsumerGeneratorInfo(object):
+    """Container for information about the active queue consumer generator"""
+    __slots__ = ('params', 'consumer_tag', 'pending_events')
+
+    def __init__(self, params, consumer_tag):
+        """
+        :param tuple params: a three-tuple (queue, no_ack, exclusive) that was
+            used to create the queue consumer
+        :param str consumer_tag: consumer tag
+        """
+        self.params = params
+        self.consumer_tag = consumer_tag
+
+        # Holds pending events of types _ConsumerDeliveryEvt and
+        # _ConsumerCancellationEvt
+        self.pending_events = deque()
+
+    def __repr__(self):
+        return '<%s params=%r consumer_tag=%r>' % (
+            self.__class__.__name__, self.params, self.consumer_tag)
+
+
+class BlockingChannel(object):
+    """The BlockingChannel implements blocking semantics for most things that
+    one would use callback-passing-style for with the
+    :py:class:`~pika.channel.Channel` class. In addition,
+    the `BlockingChannel` class implements a :term:`generator` that allows
+    you to :doc:`consume messages `
+    without using callbacks. 
+ + Example of creating a BlockingChannel:: + + import pika + + # Create our connection object + connection = pika.BlockingConnection() + + # The returned object will be a synchronous channel + channel = connection.channel() + + """ + + # Used as value_class with _CallbackResult for receiving Basic.GetOk args + _RxMessageArgs = namedtuple( + 'BlockingChannel__RxMessageArgs', + [ + 'channel', # implementation pika.Channel instance + 'method', # Basic.GetOk + 'properties', # pika.spec.BasicProperties + 'body' # str, unicode, or bytes (python 3.x) + ]) + + + # For use as value_class with any _CallbackResult that expects method_frame + # as the only arg + _MethodFrameCallbackResultArgs = namedtuple( + 'BlockingChannel__MethodFrameCallbackResultArgs', + 'method_frame') + + # Broker's basic-ack/basic-nack args when delivery confirmation is enabled; + # may concern a single or multiple messages + _OnMessageConfirmationReportArgs = namedtuple( + 'BlockingChannel__OnMessageConfirmationReportArgs', + 'method_frame') + + # Parameters for broker-initiated Channel.Close request: reply_code + # holds the broker's non-zero error code and reply_text holds the + # corresponding error message text. + _OnChannelClosedByBrokerArgs = namedtuple( + 'BlockingChannel__OnChannelClosedByBrokerArgs', + 'method_frame') + + # For use as value_class with _CallbackResult expecting Channel.Flow + # confirmation. + _FlowOkCallbackResultArgs = namedtuple( + 'BlockingChannel__FlowOkCallbackResultArgs', + 'active' # True if broker will start or continue sending; False if not + ) + + _CONSUMER_CANCELLED_CB_KEY = 'blocking_channel_consumer_cancelled' + + def __init__(self, channel_impl, connection): + """Create a new instance of the Channel + + :param channel_impl: Channel implementation object as returned from + SelectConnection.channel() + :param BlockingConnection connection: The connection object + + """ + self._impl = channel_impl + self._connection = connection + + # A mapping of consumer tags to _ConsumerInfo for active consumers + self._consumer_infos = dict() + + # Queue consumer generator generator info of type + # _QueueConsumerGeneratorInfo created by BlockingChannel.consume + self._queue_consumer_generator = None + + # Whether RabbitMQ delivery confirmation has been enabled + self._delivery_confirmation = False + + # Receives message delivery confirmation report (Basic.ack or + # Basic.nack) from broker when delivery confirmations are enabled + self._message_confirmation_result = _CallbackResult( + self._OnMessageConfirmationReportArgs) + + # deque of pending events: _ConsumerDeliveryEvt and + # _ConsumerCancellationEvt objects that will be returned by + # `BlockingChannel.get_event()` + self._pending_events = deque() + + # Holds a ReturnedMessage object representing a message received via + # Basic.Return in publisher-acknowledgments mode. 
+ self._puback_return = None + + # Receives Basic.ConsumeOk reply from server + self._basic_consume_ok_result = _CallbackResult() + + # Receives the broker-inititated Channel.Close parameters + self._channel_closed_by_broker_result = _CallbackResult( + self._OnChannelClosedByBrokerArgs) + + # Receives args from Basic.GetEmpty response + # http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.get + self._basic_getempty_result = _CallbackResult( + self._MethodFrameCallbackResultArgs) + + self._impl.add_on_cancel_callback(self._on_consumer_cancelled_by_broker) + + self._impl.add_callback( + self._basic_consume_ok_result.signal_once, + replies=[pika.spec.Basic.ConsumeOk], + one_shot=False) + + self._impl.add_callback( + self._on_channel_closed, + replies=[pika.spec.Channel.Close], + one_shot=True) + + self._impl.add_callback( + self._basic_getempty_result.set_value_once, + replies=[pika.spec.Basic.GetEmpty], + one_shot=False) + + LOGGER.info("Created channel=%s", self.channel_number) + + def __int__(self): + """Return the channel object as its channel number + + NOTE: inherited from legacy BlockingConnection; might be error-prone; + use `channel_number` property instead. + + :rtype: int + + """ + return self.channel_number + + def __repr__(self): + return '<%s impl=%r>' % (self.__class__.__name__, self._impl) + + def __enter__(self): + return self + + def __exit__(self, exc_type, value, traceback): + try: + self.close() + except exceptions.ChannelClosed: + pass + + def _cleanup(self): + """Clean up members that might inhibit garbage collection""" + self._message_confirmation_result.reset() + self._pending_events = deque() + self._consumer_infos = dict() + + @property + def channel_number(self): + """Channel number""" + return self._impl.channel_number + + @property + def connection(self): + """The channel's BlockingConnection instance""" + return self._connection + + @property + def is_closed(self): + """Returns True if the channel is closed. + + :rtype: bool + + """ + return self._impl.is_closed + + @property + def is_closing(self): + """Returns True if client-initiated closing of the channel is in + progress. + + :rtype: bool + + """ + return self._impl.is_closing + + @property + def is_open(self): + """Returns True if the channel is open. + + :rtype: bool + + """ + return self._impl.is_open + + _ALWAYS_READY_WAITERS = ((lambda: True), ) + + def _flush_output(self, *waiters): + """ Flush output and process input while waiting for any of the given + callbacks to return true. The wait is aborted upon channel-close or + connection-close. + Otherwise, processing continues until the output is flushed AND at least + one of the callbacks returns true. If there are no callbacks, then + processing ends when all output is flushed. + + :param waiters: sequence of zero or more callables taking no args and + returning true when it's time to stop processing. + Their results are OR'ed together. 
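+
+        For example, `self._flush_output(result.is_ready)` returns once all
+        output is flushed and `result.is_ready()` returns true; it raises
+        `ChannelClosed` or `ConnectionClosed` instead if the channel or
+        connection is closed while waiting.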
+        """
+        if self.is_closed:
+            raise exceptions.ChannelClosed()
+
+        if not waiters:
+            waiters = self._ALWAYS_READY_WAITERS
+
+        self._connection._flush_output(
+            self._channel_closed_by_broker_result.is_ready,
+            *waiters)
+
+        if self._channel_closed_by_broker_result:
+            # Channel was force-closed by broker
+            self._cleanup()
+            method = (
+                self._channel_closed_by_broker_result.value.method_frame.method)
+            raise exceptions.ChannelClosed(method.reply_code, method.reply_text)
+
+    def _on_puback_message_returned(self, channel, method, properties, body):
+        """Called as the result of Basic.Return from broker in
+        publisher-acknowledgements mode. Saves the info as a ReturnedMessage
+        instance in self._puback_return.
+
+        :param pika.Channel channel: our self._impl channel
+        :param pika.spec.Basic.Return method:
+        :param pika.spec.BasicProperties properties: message properties
+        :param body: returned message body; empty string if no body
+        :type body: str, unicode
+
+        """
+        assert channel is self._impl, (
+            channel.channel_number, self.channel_number)
+
+        assert isinstance(method, pika.spec.Basic.Return), method
+        assert isinstance(properties, pika.spec.BasicProperties), (
+            properties)
+
+        LOGGER.warning(
+            "Published message was returned: _delivery_confirmation=%s; "
+            "channel=%s; method=%r; properties=%r; body_size=%s; "
+            "body_prefix=%.255r", self._delivery_confirmation,
+            channel.channel_number, method, properties,
+            len(body) if body is not None else None, body)
+
+        self._puback_return = ReturnedMessage(method, properties, body)
+
+    def _add_pending_event(self, evt):
+        """Append an event to the channel's list of events that are ready for
+        dispatch to the user and signal our connection that this channel is
+        ready for event dispatch
+
+        :param _ChannelPendingEvt evt: an event derived from _ChannelPendingEvt
+        """
+        self._pending_events.append(evt)
+        self.connection._request_channel_dispatch(self.channel_number)
+
+    def _on_channel_closed(self, method_frame):
+        """Called by impl when a channel is closed by the broker
+        via Channel.Close
+
+        :param pika.frame.Method method_frame: method frame with `method`
+            member of type `spec.Channel.Close` carrying the broker's
+            reply_code and reply_text
+
+        """
+        LOGGER.debug('_on_channel_closed_by_broker %s', method_frame)
+        self._channel_closed_by_broker_result.set_value_once(method_frame)
+        channel_number = method_frame.channel_number
+        self.connection._request_channel_dispatch(-channel_number)
+        self._cleanup()
+        method = method_frame.method
+        raise exceptions.ChannelClosed(method.reply_code,
+                                       method.reply_text)
+
+    def _on_consumer_cancelled_by_broker(self, method_frame):
+        """Called by impl when broker cancels consumer via Basic.Cancel.
+
+        This is a RabbitMQ-specific feature. The circumstances include deletion
+        of the queue being consumed as well as failure of an HA node
+        responsible for the queue being consumed. 
+ + :param pika.frame.Method method_frame: method frame with the + `spec.Basic.Cancel` method + + """ + evt = _ConsumerCancellationEvt(method_frame) + + consumer = self._consumer_infos[method_frame.method.consumer_tag] + + # Don't interfere with client-initiated cancellation flow + if not consumer.tearing_down: + consumer.state = _ConsumerInfo.CANCELLED_BY_BROKER + + if consumer.alternate_event_sink is not None: + consumer.alternate_event_sink(evt) + else: + self._add_pending_event(evt) + + def _on_consumer_message_delivery(self, _channel, method, properties, body): + """Called by impl when a message is delivered for a consumer + + :param Channel channel: The implementation channel object + :param spec.Basic.Deliver method: + :param pika.spec.BasicProperties properties: message properties + :param body: delivered message body; empty string if no body + :type body: str, unicode, or bytes (python 3.x) + + """ + evt = _ConsumerDeliveryEvt(method, properties, body) + + consumer = self._consumer_infos[method.consumer_tag] + + if consumer.alternate_event_sink is not None: + consumer.alternate_event_sink(evt) + else: + self._add_pending_event(evt) + + def _on_consumer_generator_event(self, evt): + """Sink for the queue consumer generator's consumer events; append the + event to queue consumer generator's pending events buffer. + + :param evt: an object of type _ConsumerDeliveryEvt or + _ConsumerCancellationEvt + """ + self._queue_consumer_generator.pending_events.append(evt) + # Schedule termination of connection.process_data_events using a + # negative channel number + self.connection._request_channel_dispatch(-self.channel_number) + + def _cancel_all_consumers(self): + """Cancel all consumers. + + NOTE: pending non-ackable messages will be lost; pending ackable + messages will be rejected. + + """ + if self._consumer_infos: + LOGGER.debug('Cancelling %i consumers', len(self._consumer_infos)) + + if self._queue_consumer_generator is not None: + # Cancel queue consumer generator + self.cancel() + + # Cancel consumers created via basic_consume + for consumer_tag in pika.compat.dictkeys(self._consumer_infos): + self.basic_cancel(consumer_tag) + + def _dispatch_events(self): + """Called by BlockingConnection to dispatch pending events. + + `BlockingChannel` schedules this callback via + `BlockingConnection._request_channel_dispatch` + """ + while self._pending_events: + evt = self._pending_events.popleft() + + if type(evt) is _ConsumerDeliveryEvt: + consumer_info = self._consumer_infos[evt.method.consumer_tag] + consumer_info.consumer_cb(self, evt.method, evt.properties, + evt.body) + + elif type(evt) is _ConsumerCancellationEvt: + del self._consumer_infos[evt.method_frame.method.consumer_tag] + + self._impl.callbacks.process(self.channel_number, + self._CONSUMER_CANCELLED_CB_KEY, + self, + evt.method_frame) + else: + evt.dispatch() + + + def close(self, reply_code=0, reply_text="Normal shutdown"): + """Will invoke a clean shutdown of the channel with the AMQP Broker. 
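+
+        Example (a sketch; `connection` is assumed to be an open
+        `BlockingConnection`)::
+
+            channel = connection.channel()
+            try:
+                channel.basic_publish(exchange='',
+                                      routing_key='test',
+                                      body='hello')
+            finally:
+                channel.close()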
+ + :param int reply_code: The reply code to close the channel with + :param str reply_text: The reply text to close the channel with + + """ + LOGGER.debug('Channel.close(%s, %s)', reply_code, reply_text) + + # Cancel remaining consumers + self._cancel_all_consumers() + + # Close the channel + try: + with _CallbackResult() as close_ok_result: + self._impl.add_callback(callback=close_ok_result.signal_once, + replies=[pika.spec.Channel.CloseOk], + one_shot=True) + + self._impl.close(reply_code=reply_code, reply_text=reply_text) + self._flush_output(close_ok_result.is_ready) + finally: + self._cleanup() + + def flow(self, active): + """Turn Channel flow control off and on. + + NOTE: RabbitMQ doesn't support active=False; per + https://www.rabbitmq.com/specification.html: "active=false is not + supported by the server. Limiting prefetch with basic.qos provides much + better control" + + For more information, please reference: + + http://www.rabbitmq.com/amqp-0-9-1-reference.html#channel.flow + + :param bool active: Turn flow on (True) or off (False) + + :returns: True if broker will start or continue sending; False if not + :rtype: bool + + """ + with _CallbackResult(self._FlowOkCallbackResultArgs) as flow_ok_result: + self._impl.flow(callback=flow_ok_result.set_value_once, + active=active) + self._flush_output(flow_ok_result.is_ready) + return flow_ok_result.value.active + + def add_on_cancel_callback(self, callback): + """Pass a callback function that will be called when Basic.Cancel + is sent by the broker. The callback function should receive a method + frame parameter. + + :param callable callback: a callable for handling broker's Basic.Cancel + notification with the call signature: callback(method_frame) + where method_frame is of type `pika.frame.Method` with method of + type `spec.Basic.Cancel` + + """ + self._impl.callbacks.add(self.channel_number, + self._CONSUMER_CANCELLED_CB_KEY, + callback, + one_shot=False) + + def add_on_return_callback(self, callback): + """Pass a callback function that will be called when a published + message is rejected and returned by the server via `Basic.Return`. + + :param callable callback: The method to call on callback with the + signature callback(channel, method, properties, body), where + channel: pika.Channel + method: pika.spec.Basic.Return + properties: pika.spec.BasicProperties + body: str, unicode, or bytes (python 3.x) + + """ + self._impl.add_on_return_callback( + lambda _channel, method, properties, body: ( + self._add_pending_event( + _ReturnedMessageEvt( + callback, self, method, properties, body)))) + + def basic_consume(self, + consumer_callback, + queue, + no_ack=False, + exclusive=False, + consumer_tag=None, + arguments=None): + """Sends the AMQP command Basic.Consume to the broker and binds messages + for the consumer_tag to the consumer callback. If you do not pass in + a consumer_tag, one will be automatically generated for you. Returns + the consumer tag. + + NOTE: the consumer callbacks are dispatched only in the scope of + specially-designated methods: see + `BlockingConnection.process_data_events` and + `BlockingChannel.start_consuming`. 
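+
+        Example (a sketch; assumes a queue named 'test' already exists)::
+
+            def on_message(channel, method, properties, body):
+                print body
+                channel.basic_ack(delivery_tag=method.delivery_tag)
+
+            channel.basic_consume(on_message, 'test')
+            channel.start_consuming()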
+
+        For more information about Basic.Consume, see:
+        http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.consume
+
+        :param callable consumer_callback: The function for dispatching
+            messages to the user, having the signature:
+                consumer_callback(channel, method, properties, body)
+                    channel: BlockingChannel
+                    method: spec.Basic.Deliver
+                    properties: spec.BasicProperties
+                    body: str or unicode
+        :param queue: The queue to consume from
+        :type queue: str or unicode
+        :param bool no_ack: Tell the broker to not expect a response (i.e.,
+            no ack/nack)
+        :param bool exclusive: Don't allow other consumers on the queue
+        :param consumer_tag: You may specify your own consumer tag; if left
+            empty, a consumer tag will be generated automatically
+        :type consumer_tag: str or unicode
+        :param dict arguments: Custom key/value pair arguments for the consumer
+        :returns: consumer tag
+        :rtype: str
+
+        :raises pika.exceptions.DuplicateConsumerTag: if consumer with given
+            consumer_tag is already present.
+
+        """
+        if not callable(consumer_callback):
+            raise ValueError('consumer callback must be callable; got %r'
+                             % consumer_callback)
+
+        return self._basic_consume_impl(
+            queue=queue,
+            no_ack=no_ack,
+            exclusive=exclusive,
+            consumer_tag=consumer_tag,
+            arguments=arguments,
+            consumer_callback=consumer_callback)
+
+    def _basic_consume_impl(self,
+                            queue,
+                            no_ack,
+                            exclusive,
+                            consumer_tag,
+                            arguments=None,
+                            consumer_callback=None,
+                            alternate_event_sink=None):
+        """The low-level implementation used by `basic_consume` and `consume`.
+        See `basic_consume` docstring for more info.
+
+        NOTE: exactly one of consumer_callback/alternate_event_sink must be
+        non-None.
+
+        This method has one additional parameter, alternate_event_sink, over
+        the args described in `basic_consume`.
+
+        :param callable alternate_event_sink: if specified, _ConsumerDeliveryEvt
+            and _ConsumerCancellationEvt objects will be diverted to this
+            callback instead of being deposited in the channel's
+            `_pending_events` container. Signature:
+                alternate_event_sink(evt)
+
+        :raises pika.exceptions.DuplicateConsumerTag: if consumer with given
+            consumer_tag is already present. 
+ + """ + if (consumer_callback is None) == (alternate_event_sink is None): + raise ValueError( + ('exactly one of consumer_callback/alternate_event_sink must ' + 'be non-None', consumer_callback, alternate_event_sink)) + + if not consumer_tag: + # Need a consumer tag to register consumer info before sending + # request to broker, because I/O might dispatch incoming messages + # immediately following Basic.Consume-ok before _flush_output + # returns + consumer_tag = self._impl._generate_consumer_tag() + + if consumer_tag in self._consumer_infos: + raise exceptions.DuplicateConsumerTag(consumer_tag) + + # Create new consumer + self._consumer_infos[consumer_tag] = _ConsumerInfo( + consumer_tag, + no_ack=no_ack, + consumer_cb=consumer_callback, + alternate_event_sink=alternate_event_sink) + + try: + with self._basic_consume_ok_result as ok_result: + tag = self._impl.basic_consume( + consumer_callback=self._on_consumer_message_delivery, + queue=queue, + no_ack=no_ack, + exclusive=exclusive, + consumer_tag=consumer_tag, + arguments=arguments) + + assert tag == consumer_tag, (tag, consumer_tag) + + self._flush_output(ok_result.is_ready) + except Exception: + # If channel was closed, self._consumer_infos will be empty + if consumer_tag in self._consumer_infos: + del self._consumer_infos[consumer_tag] + # Schedule termination of connection.process_data_events using a + # negative channel number + self.connection._request_channel_dispatch(-self.channel_number) + raise + + # NOTE: Consumer could get cancelled by broker immediately after opening + # (e.g., queue getting deleted externally) + if self._consumer_infos[consumer_tag].setting_up: + self._consumer_infos[consumer_tag].state = _ConsumerInfo.ACTIVE + + return consumer_tag + + def basic_cancel(self, consumer_tag): + """This method cancels a consumer. This does not affect already + delivered messages, but it does mean the server will not send any more + messages for that consumer. The client may receive an arbitrary number + of messages in between sending the cancel method and receiving the + cancel-ok reply. + + NOTE: When cancelling a no_ack=False consumer, this implementation + automatically Nacks and suppresses any incoming messages that have not + yet been dispatched to the consumer's callback. However, when cancelling + a no_ack=True consumer, this method will return any pending messages + that arrived before broker confirmed the cancellation. + + :param str consumer_tag: Identifier for the consumer; the result of + passing a consumer_tag that was created on another channel is + undefined (bad things will happen) + + :returns: (NEW IN pika 0.10.0) empty sequence for a no_ack=False + consumer; for a no_ack=True consumer, returns a (possibly empty) + sequence of pending messages that arrived before broker confirmed + the cancellation (this is done instead of via consumer's callback in + order to prevent reentrancy/recursion. 
Each message is four-tuple: + (channel, method, properties, body) + channel: BlockingChannel + method: spec.Basic.Deliver + properties: spec.BasicProperties + body: str or unicode + """ + try: + consumer_info = self._consumer_infos[consumer_tag] + except KeyError: + LOGGER.warning( + "User is attempting to cancel an unknown consumer=%s; " + "already cancelled by user or broker?", consumer_tag) + return [] + + try: + # Assertion failure here is most likely due to reentrance + assert consumer_info.active or consumer_info.cancelled_by_broker, ( + consumer_info.state) + + # Assertion failure here signals disconnect between consumer state + # in BlockingChannel and Channel + assert (consumer_info.cancelled_by_broker or + consumer_tag in self._impl._consumers), consumer_tag + + no_ack = consumer_info.no_ack + + consumer_info.state = _ConsumerInfo.TEARING_DOWN + + with _CallbackResult() as cancel_ok_result: + # Nack pending messages for no_ack=False consumer + if not no_ack: + pending_messages = self._remove_pending_deliveries( + consumer_tag) + if pending_messages: + # NOTE: we use impl's basic_reject to avoid the + # possibility of redelivery before basic_cancel takes + # control of nacking. + # NOTE: we can't use basic_nack with the multiple option + # to avoid nacking messages already held by our client. + for message in pending_messages: + self._impl.basic_reject(message.method.delivery_tag, + requeue=True) + + # Cancel the consumer; impl takes care of rejecting any + # additional deliveries that arrive for a no_ack=False + # consumer + self._impl.basic_cancel( + callback=cancel_ok_result.signal_once, + consumer_tag=consumer_tag, + nowait=False) + + # Flush output and wait for Basic.Cancel-ok or + # broker-initiated Basic.Cancel + self._flush_output( + cancel_ok_result.is_ready, + lambda: consumer_tag not in self._impl._consumers) + + if no_ack: + # Return pending messages for no_ack=True consumer + return [ + (evt.method, evt.properties, evt.body) + for evt in self._remove_pending_deliveries(consumer_tag)] + else: + # impl takes care of rejecting any incoming deliveries during + # cancellation + messages = self._remove_pending_deliveries(consumer_tag) + assert not messages, messages + + return [] + finally: + # NOTE: The entry could be purged if channel or connection closes + if consumer_tag in self._consumer_infos: + del self._consumer_infos[consumer_tag] + # Schedule termination of connection.process_data_events using a + # negative channel number + self.connection._request_channel_dispatch(-self.channel_number) + + def _remove_pending_deliveries(self, consumer_tag): + """Extract _ConsumerDeliveryEvt objects destined for the given consumer + from pending events, discarding the _ConsumerCancellationEvt, if any + + :param str consumer_tag: + + :returns: a (possibly empty) sequence of _ConsumerDeliveryEvt destined + for the given consumer tag + """ + remaining_events = deque() + unprocessed_messages = [] + while self._pending_events: + evt = self._pending_events.popleft() + if type(evt) is _ConsumerDeliveryEvt: + if evt.method.consumer_tag == consumer_tag: + unprocessed_messages.append(evt) + continue + if type(evt) is _ConsumerCancellationEvt: + if evt.method_frame.method.consumer_tag == consumer_tag: + # A broker-initiated Basic.Cancel must have arrived + # before our cancel request completed + continue + + remaining_events.append(evt) + + self._pending_events = remaining_events + + return unprocessed_messages + + def start_consuming(self): + """Processes I/O events and dispatches timers 
and `basic_consume` + callbacks until all consumers are cancelled. + + NOTE: this blocking function may not be called from the scope of a + pika callback, because dispatching `basic_consume` callbacks from this + context would constitute recursion. + + :raises pika.exceptions.RecursionError: if called from the scope of a + `BlockingConnection` or `BlockingChannel` callback + + """ + # Check if called from the scope of an event dispatch callback + with self.connection._acquire_event_dispatch() as dispatch_allowed: + if not dispatch_allowed: + raise exceptions.RecursionError( + 'start_consuming may not be called from the scope of ' + 'another BlockingConnection or BlockingChannel callback') + + # Process events as long as consumers exist on this channel + while self._consumer_infos: + self.connection.process_data_events(time_limit=None) + + def stop_consuming(self, consumer_tag=None): + """ Cancels all consumers, signalling the `start_consuming` loop to + exit. + + NOTE: pending non-ackable messages will be lost; pending ackable + messages will be rejected. + + """ + if consumer_tag: + self.basic_cancel(consumer_tag) + else: + self._cancel_all_consumers() + + def consume(self, queue, no_ack=False, + exclusive=False, arguments=None, + inactivity_timeout=None): + """Blocking consumption of a queue instead of via a callback. This + method is a generator that yields each message as a tuple of method, + properties, and body. The active generator iterator terminates when the + consumer is cancelled by client via `BlockingChannel.cancel()` or by + broker. + + Example: + + for method, properties, body in channel.consume('queue'): + print body + channel.basic_ack(method.delivery_tag) + + You should call `BlockingChannel.cancel()` when you escape out of the + generator loop. + + If you don't cancel this consumer, then next call on the same channel + to `consume()` with the exact same (queue, no_ack, exclusive) parameters + will resume the existing consumer generator; however, calling with + different parameters will result in an exception. + + :param queue: The queue name to consume + :type queue: str or unicode + :param bool no_ack: Tell the broker to not expect a ack/nack response + :param bool exclusive: Don't allow other consumers on the queue + :param dict arguments: Custom key/value pair arguments for the consumer + :param float inactivity_timeout: if a number is given (in + seconds), will cause the method to yield (None, None, None) after the + given period of inactivity; this permits for pseudo-regular maintenance + activities to be carried out by the user while waiting for messages + to arrive. If None is given (default), then the method blocks until + the next event arrives. NOTE that timing granularity is limited by + the timer resolution of the underlying implementation. + NEW in pika 0.10.0. + + :yields: tuple(spec.Basic.Deliver, spec.BasicProperties, str or unicode) + + :raises ValueError: if consumer-creation parameters don't match those + of the existing queue consumer generator, if any. 
+ NEW in pika 0.10.0 + """ + params = (queue, no_ack, exclusive) + + if self._queue_consumer_generator is not None: + if params != self._queue_consumer_generator.params: + raise ValueError( + 'Consume with different params not allowed on existing ' + 'queue consumer generator; previous params: %r; ' + 'new params: %r' + % (self._queue_consumer_generator.params, + (queue, no_ack, exclusive))) + else: + LOGGER.debug('Creating new queue consumer generator; params: %r', + params) + # Need a consumer tag to register consumer info before sending + # request to broker, because I/O might pick up incoming messages + # in addition to Basic.Consume-ok + consumer_tag = self._impl._generate_consumer_tag() + + self._queue_consumer_generator = _QueueConsumerGeneratorInfo( + params, + consumer_tag) + + try: + self._basic_consume_impl( + queue=queue, + no_ack=no_ack, + exclusive=exclusive, + consumer_tag=consumer_tag, + arguments=arguments, + alternate_event_sink=self._on_consumer_generator_event) + except Exception: + self._queue_consumer_generator = None + raise + + LOGGER.info('Created new queue consumer generator %r', + self._queue_consumer_generator) + + while self._queue_consumer_generator is not None: + if self._queue_consumer_generator.pending_events: + evt = self._queue_consumer_generator.pending_events.popleft() + if type(evt) is _ConsumerCancellationEvt: + # Consumer was cancelled by broker + self._queue_consumer_generator = None + break + else: + yield (evt.method, evt.properties, evt.body) + continue + + # Wait for a message to arrive + if inactivity_timeout is None: + self.connection.process_data_events(time_limit=None) + continue + + # Wait with inactivity timeout + wait_start_time = time.time() + wait_deadline = wait_start_time + inactivity_timeout + delta = inactivity_timeout + + while (self._queue_consumer_generator is not None and + not self._queue_consumer_generator.pending_events): + self.connection.process_data_events(time_limit=delta) + + if not self._queue_consumer_generator: + # Consumer was cancelled by client + break + + if self._queue_consumer_generator.pending_events: + # Got message(s) + break + + delta = wait_deadline - time.time() + if delta <= 0.0: + # Signal inactivity timeout + yield (None, None, None) + break + + def get_waiting_message_count(self): + """Returns the number of messages that may be retrieved from the current + queue consumer generator via `BlockingChannel.consume` without blocking. + NEW in pika 0.10.0 + + :rtype: int + """ + if self._queue_consumer_generator is not None: + pending_events = self._queue_consumer_generator.pending_events + count = len(pending_events) + if count and type(pending_events[-1]) is _ConsumerCancellationEvt: + count -= 1 + else: + count = 0 + + return count + + def cancel(self): + """Cancel the queue consumer created by `BlockingChannel.consume`, + rejecting all pending ackable messages. + + NOTE: If you're looking to cancel a consumer issued with + BlockingChannel.basic_consume then you should call + BlockingChannel.basic_cancel. + + :return int: The number of messages requeued by Basic.Nack. 
+ NEW in 0.10.0: returns 0 + + """ + if self._queue_consumer_generator is None: + LOGGER.warning('cancel: queue consumer generator is inactive ' + '(already cancelled by client or broker?)') + return 0 + + try: + _, no_ack, _ = self._queue_consumer_generator.params + if not no_ack: + # Reject messages held by queue consumer generator; NOTE: we + # can't use basic_nack with the multiple option to avoid nacking + # messages already held by our client. + pending_events = self._queue_consumer_generator.pending_events + for _ in compat.xrange(self.get_waiting_message_count()): + evt = pending_events.popleft() + self._impl.basic_reject(evt.method.delivery_tag, + requeue=True) + + self.basic_cancel(self._queue_consumer_generator.consumer_tag) + finally: + self._queue_consumer_generator = None + + # Return 0 for compatibility with legacy implementation; the number of + # nacked messages is not meaningful since only messages consumed with + # no_ack=False may be nacked, and those arriving after calling + # basic_cancel will be rejected automatically by impl channel, so we'll + # never know how many of those were nacked. + return 0 + + def basic_ack(self, delivery_tag=0, multiple=False): + """Acknowledge one or more messages. When sent by the client, this + method acknowledges one or more messages delivered via the Deliver or + Get-Ok methods. When sent by server, this method acknowledges one or + more messages published with the Publish method on a channel in + confirm mode. The acknowledgement can be for a single message or a + set of messages up to and including a specific message. + + :param int delivery-tag: The server-assigned delivery tag + :param bool multiple: If set to True, the delivery tag is treated as + "up to and including", so that multiple messages + can be acknowledged with a single method. If set + to False, the delivery tag refers to a single + message. If the multiple field is 1, and the + delivery tag is zero, this indicates + acknowledgement of all outstanding messages. + """ + self._impl.basic_ack(delivery_tag=delivery_tag, multiple=multiple) + self._flush_output() + + def basic_nack(self, delivery_tag=None, multiple=False, requeue=True): + """This method allows a client to reject one or more incoming messages. + It can be used to interrupt and cancel large incoming messages, or + return untreatable messages to their original queue. + + :param int delivery-tag: The server-assigned delivery tag + :param bool multiple: If set to True, the delivery tag is treated as + "up to and including", so that multiple messages + can be acknowledged with a single method. If set + to False, the delivery tag refers to a single + message. If the multiple field is 1, and the + delivery tag is zero, this indicates + acknowledgement of all outstanding messages. + :param bool requeue: If requeue is true, the server will attempt to + requeue the message. If requeue is false or the + requeue attempt fails the messages are discarded or + dead-lettered. + + """ + self._impl.basic_nack(delivery_tag=delivery_tag, multiple=multiple, + requeue=requeue) + self._flush_output() + + def basic_get(self, queue=None, no_ack=False): + """Get a single message from the AMQP broker. Returns a sequence with + the method frame, message properties, and body. 
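+
+        Example (a sketch; polls a queue named 'test')::
+
+            method, properties, body = channel.basic_get('test')
+            if method is None:
+                print 'queue was empty'
+            else:
+                print body
+                channel.basic_ack(delivery_tag=method.delivery_tag)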
+ + :param queue: Name of queue to get a message from + :type queue: str or unicode + :param bool no_ack: Tell the broker to not expect a reply + :returns: a three-tuple; (None, None, None) if the queue was empty; + otherwise (method, properties, body); NOTE: body may be None + :rtype: (None, None, None)|(spec.Basic.GetOk, + spec.BasicProperties, + str or unicode or None) + """ + assert not self._basic_getempty_result + # NOTE: nested with for python 2.6 compatibility + with _CallbackResult(self._RxMessageArgs) as get_ok_result: + with self._basic_getempty_result: + self._impl.basic_get(callback=get_ok_result.set_value_once, + queue=queue, + no_ack=no_ack) + self._flush_output(get_ok_result.is_ready, + self._basic_getempty_result.is_ready) + if get_ok_result: + evt = get_ok_result.value + return evt.method, evt.properties, evt.body + else: + assert self._basic_getempty_result, ( + "wait completed without GetOk and GetEmpty") + return None, None, None + + def basic_publish(self, exchange, routing_key, body, + properties=None, mandatory=False, immediate=False): + """Publish to the channel with the given exchange, routing key and body. + Returns a boolean value indicating the success of the operation. + + This is the legacy BlockingChannel method for publishing. See also + `BlockingChannel.publish` that provides more information about failures. + + For more information on basic_publish and what the parameters do, see: + + http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish + + NOTE: mandatory and immediate may be enabled even without delivery + confirmation, but in the absence of delivery confirmation the + synchronous implementation has no way to know how long to wait for + the Basic.Return or lack thereof. + + :param exchange: The exchange to publish to + :type exchange: str or unicode + :param routing_key: The routing key to bind on + :type routing_key: str or unicode + :param body: The message body; empty string if no body + :type body: str or unicode + :param pika.spec.BasicProperties properties: message properties + :param bool mandatory: The mandatory flag + :param bool immediate: The immediate flag + + :returns: True if delivery confirmation is not enabled (NEW in pika + 0.10.0); otherwise returns False if the message could not be + delivered (Basic.nack and/or Basic.Return) and True if the message + was delivered (Basic.ack and no Basic.Return) + """ + try: + self.publish(exchange, routing_key, body, properties, + mandatory, immediate) + except (exceptions.NackError, exceptions.UnroutableError): + return False + else: + return True + + def publish(self, exchange, routing_key, body, + properties=None, mandatory=False, immediate=False): + """Publish to the channel with the given exchange, routing key, and + body. Unlike the legacy `BlockingChannel.basic_publish`, this method + provides more information about failures via exceptions. + + For more information on basic_publish and what the parameters do, see: + + http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish + + NOTE: mandatory and immediate may be enabled even without delivery + confirmation, but in the absence of delivery confirmation the + synchronous implementation has no way to know how long to wait for + the Basic.Return. 
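+
+        Example with delivery confirmation enabled (a sketch)::
+
+            channel.confirm_delivery()
+            try:
+                channel.publish(exchange='',
+                                routing_key='test',
+                                body='hello',
+                                mandatory=True)
+            except pika.exceptions.UnroutableError:
+                print 'message was returned as unroutable'
+            except pika.exceptions.NackError:
+                print 'message was nacked by the broker'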
+ + :param exchange: The exchange to publish to + :type exchange: str or unicode + :param routing_key: The routing key to bind on + :type routing_key: str or unicode + :param body: The message body; empty string if no body + :type body: str or unicode + :param pika.spec.BasicProperties properties: message properties + :param bool mandatory: The mandatory flag + :param bool immediate: The immediate flag + + :raises UnroutableError: raised when a message published in + publisher-acknowledgments mode (see + `BlockingChannel.confirm_delivery`) is returned via `Basic.Return` + followed by `Basic.Ack`. + :raises NackError: raised when a message published in + publisher-acknowledgements mode is Nack'ed by the broker. See + `BlockingChannel.confirm_delivery`. + + """ + if self._delivery_confirmation: + # In publisher-acknowledgments mode + with self._message_confirmation_result: + self._impl.basic_publish(exchange=exchange, + routing_key=routing_key, + body=body, + properties=properties, + mandatory=mandatory, + immediate=immediate) + + self._flush_output(self._message_confirmation_result.is_ready) + conf_method = (self._message_confirmation_result.value + .method_frame + .method) + + if isinstance(conf_method, pika.spec.Basic.Nack): + # Broker was unable to process the message due to internal + # error + LOGGER.warning( + "Message was Nack'ed by broker: nack=%r; channel=%s; " + "exchange=%s; routing_key=%s; mandatory=%r; " + "immediate=%r", conf_method, self.channel_number, + exchange, routing_key, mandatory, immediate) + if self._puback_return is not None: + returned_messages = [self._puback_return] + self._puback_return = None + else: + returned_messages = [] + raise exceptions.NackError(returned_messages) + + else: + assert isinstance(conf_method, pika.spec.Basic.Ack), ( + conf_method) + + if self._puback_return is not None: + # Unroutable message was returned + messages = [self._puback_return] + self._puback_return = None + raise exceptions.UnroutableError(messages) + else: + # In non-publisher-acknowledgments mode + self._impl.basic_publish(exchange=exchange, + routing_key=routing_key, + body=body, + properties=properties, + mandatory=mandatory, + immediate=immediate) + self._flush_output() + + def basic_qos(self, prefetch_size=0, prefetch_count=0, all_channels=False): + """Specify quality of service. This method requests a specific quality + of service. The QoS can be specified for the current channel or for all + channels on the connection. The client can request that messages be sent + in advance so that when the client finishes processing a message, the + following message is already held locally, rather than needing to be + sent down the channel. Prefetching gives a performance improvement. + + :param int prefetch_size: This field specifies the prefetch window + size. The server will send a message in + advance if it is equal to or smaller in size + than the available prefetch size (and also + falls into other prefetch limits). May be set + to zero, meaning "no specific limit", + although other prefetch limits may still + apply. The prefetch-size is ignored if the + no-ack option is set in the consumer. + :param int prefetch_count: Specifies a prefetch window in terms of whole + messages. This field may be used in + combination with the prefetch-size field; a + message will only be sent in advance if both + prefetch windows (and those at the channel + and connection level) allow it. The + prefetch-count is ignored if the no-ack + option is set in the consumer. 
+        :param bool all_channels: Should the QoS apply to all channels
+
+        """
+        with _CallbackResult() as qos_ok_result:
+            self._impl.basic_qos(callback=qos_ok_result.signal_once,
+                                 prefetch_size=prefetch_size,
+                                 prefetch_count=prefetch_count,
+                                 all_channels=all_channels)
+            self._flush_output(qos_ok_result.is_ready)
+
+    def basic_recover(self, requeue=False):
+        """This method asks the server to redeliver all unacknowledged messages
+        on a specified channel. Zero or more messages may be redelivered. This
+        method replaces the asynchronous Recover.
+
+        :param bool requeue: If False, the message will be redelivered to the
+                             original recipient. If True, the server will
+                             attempt to requeue the message, potentially then
+                             delivering it to an alternative subscriber.
+
+        """
+        with _CallbackResult() as recover_ok_result:
+            self._impl.basic_recover(callback=recover_ok_result.signal_once,
+                                     requeue=requeue)
+            self._flush_output(recover_ok_result.is_ready)
+
+    def basic_reject(self, delivery_tag=None, requeue=True):
+        """Reject an incoming message. This method allows a client to reject a
+        message. It can be used to interrupt and cancel large incoming messages,
+        or return untreatable messages to their original queue.
+
+        :param int delivery_tag: The server-assigned delivery tag
+        :param bool requeue: If requeue is true, the server will attempt to
+                             requeue the message. If requeue is false or the
+                             requeue attempt fails the messages are discarded or
+                             dead-lettered.
+
+        """
+        self._impl.basic_reject(delivery_tag=delivery_tag, requeue=requeue)
+        self._flush_output()
+
+    def confirm_delivery(self):
+        """Turn on RabbitMQ-proprietary Confirm mode in the channel.
+
+        For more information see:
+            http://www.rabbitmq.com/extensions.html#confirms
+        """
+        if self._delivery_confirmation:
+            LOGGER.error('confirm_delivery: confirmation was already enabled '
+                         'on channel=%s', self.channel_number)
+            return
+
+        with _CallbackResult() as select_ok_result:
+            self._impl.add_callback(callback=select_ok_result.signal_once,
+                                    replies=[pika.spec.Confirm.SelectOk],
+                                    one_shot=True)
+
+            self._impl.confirm_delivery(
+                callback=self._message_confirmation_result.set_value_once,
+                nowait=False)
+
+            self._flush_output(select_ok_result.is_ready)
+
+        self._delivery_confirmation = True
+
+        # Unroutable messages returned after this point will be in the context
+        # of publisher acknowledgments
+        self._impl.add_on_return_callback(self._on_puback_message_returned)
+
+    def exchange_declare(self, exchange=None,
+                         exchange_type='direct', passive=False, durable=False,
+                         auto_delete=False, internal=False,
+                         arguments=None):
+        """This method creates an exchange if it does not already exist, and if
+        the exchange exists, verifies that it is of the correct and expected
+        class.
+
+        If passive is set, the server will reply with Declare-Ok if the
+        exchange already exists with the same name; if the exchange does not
+        already exist, the server MUST raise a channel exception with reply
+        code 404 (not found).
+
+        :param exchange: The exchange name consists of a non-empty sequence of
+                          these characters: letters, digits, hyphen, underscore,
+                          period, or colon.
+ :type exchange: str or unicode + :param str exchange_type: The exchange type to use + :param bool passive: Perform a declare or just check to see if it exists + :param bool durable: Survive a reboot of RabbitMQ + :param bool auto_delete: Remove when no more queues are bound to it + :param bool internal: Can only be published to by other exchanges + :param dict arguments: Custom key/value pair arguments for the exchange + + :returns: Method frame from the Exchange.Declare-ok response + :rtype: `pika.frame.Method` having `method` attribute of type + `spec.Exchange.DeclareOk` + + """ + with _CallbackResult( + self._MethodFrameCallbackResultArgs) as declare_ok_result: + self._impl.exchange_declare( + callback=declare_ok_result.set_value_once, + exchange=exchange, + exchange_type=exchange_type, + passive=passive, + durable=durable, + auto_delete=auto_delete, + internal=internal, + nowait=False, + arguments=arguments) + + self._flush_output(declare_ok_result.is_ready) + return declare_ok_result.value.method_frame + + def exchange_delete(self, exchange=None, if_unused=False): + """Delete the exchange. + + :param exchange: The exchange name + :type exchange: str or unicode + :param bool if_unused: only delete if the exchange is unused + + :returns: Method frame from the Exchange.Delete-ok response + :rtype: `pika.frame.Method` having `method` attribute of type + `spec.Exchange.DeleteOk` + + """ + with _CallbackResult( + self._MethodFrameCallbackResultArgs) as delete_ok_result: + self._impl.exchange_delete( + callback=delete_ok_result.set_value_once, + exchange=exchange, + if_unused=if_unused, + nowait=False) + + self._flush_output(delete_ok_result.is_ready) + return delete_ok_result.value.method_frame + + def exchange_bind(self, destination=None, source=None, routing_key='', + arguments=None): + """Bind an exchange to another exchange. + + :param destination: The destination exchange to bind + :type destination: str or unicode + :param source: The source exchange to bind to + :type source: str or unicode + :param routing_key: The routing key to bind on + :type routing_key: str or unicode + :param dict arguments: Custom key/value pair arguments for the binding + + :returns: Method frame from the Exchange.Bind-ok response + :rtype: `pika.frame.Method` having `method` attribute of type + `spec.Exchange.BindOk` + + """ + with _CallbackResult(self._MethodFrameCallbackResultArgs) as \ + bind_ok_result: + self._impl.exchange_bind( + callback=bind_ok_result.set_value_once, + destination=destination, + source=source, + routing_key=routing_key, + nowait=False, + arguments=arguments) + + self._flush_output(bind_ok_result.is_ready) + return bind_ok_result.value.method_frame + + def exchange_unbind(self, destination=None, source=None, routing_key='', + arguments=None): + """Unbind an exchange from another exchange. 
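+
+        A minimal usage sketch (assumes an open `BlockingChannel` named
+        `channel`; the exchange names are illustrative):
+
+        ```
+        channel.exchange_bind(destination='dest-x', source='source-x',
+                              routing_key='key')
+        channel.exchange_unbind(destination='dest-x', source='source-x',
+                                routing_key='key')
+        ```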
+
+        :param destination: The destination exchange to unbind
+        :type destination: str or unicode
+        :param source: The source exchange to unbind from
+        :type source: str or unicode
+        :param routing_key: The routing key to unbind
+        :type routing_key: str or unicode
+        :param dict arguments: Custom key/value pair arguments for the binding
+
+        :returns: Method frame from the Exchange.Unbind-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Exchange.UnbindOk`
+
+        """
+        with _CallbackResult(
+                self._MethodFrameCallbackResultArgs) as unbind_ok_result:
+            self._impl.exchange_unbind(
+                callback=unbind_ok_result.set_value_once,
+                destination=destination,
+                source=source,
+                routing_key=routing_key,
+                nowait=False,
+                arguments=arguments)
+
+            self._flush_output(unbind_ok_result.is_ready)
+            return unbind_ok_result.value.method_frame
+
+    def queue_declare(self, queue='', passive=False, durable=False,
+                      exclusive=False, auto_delete=False,
+                      arguments=None):
+        """Declare queue, create if needed. This method creates or checks a
+        queue. When creating a new queue the client can specify various
+        properties that control the durability of the queue and its contents,
+        and the level of sharing for the queue.
+
+        Leave the queue name empty for an auto-named queue in RabbitMQ
+
+        :param queue: The queue name
+        :type queue: str or unicode; if empty string, the broker will create a
+          unique queue name;
+        :param bool passive: Only check to see if the queue exists and raise
+          `ChannelClosed` if it doesn't;
+        :param bool durable: Survive reboots of the broker
+        :param bool exclusive: Only allow access by the current connection
+        :param bool auto_delete: Delete after consumer cancels or disconnects
+        :param dict arguments: Custom key/value arguments for the queue
+
+        :returns: Method frame from the Queue.Declare-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Queue.DeclareOk`
+
+        """
+        with _CallbackResult(self._MethodFrameCallbackResultArgs) as \
+                declare_ok_result:
+            self._impl.queue_declare(
+                callback=declare_ok_result.set_value_once,
+                queue=queue,
+                passive=passive,
+                durable=durable,
+                exclusive=exclusive,
+                auto_delete=auto_delete,
+                nowait=False,
+                arguments=arguments)
+
+            self._flush_output(declare_ok_result.is_ready)
+            return declare_ok_result.value.method_frame
+
+    def queue_delete(self, queue='', if_unused=False, if_empty=False):
+        """Delete a queue from the broker.
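+
+        A minimal usage sketch (assumes an open `BlockingChannel` named
+        `channel`; the queue name is illustrative):
+
+        ```
+        channel.queue_declare(queue='tmp-queue')
+        channel.queue_delete(queue='tmp-queue', if_empty=True)
+        ```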
+
+        :param queue: The queue to delete
+        :type queue: str or unicode
+        :param bool if_unused: only delete if it's unused
+        :param bool if_empty: only delete if the queue is empty
+
+        :returns: Method frame from the Queue.Delete-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Queue.DeleteOk`
+
+        """
+        with _CallbackResult(self._MethodFrameCallbackResultArgs) as \
+                delete_ok_result:
+            self._impl.queue_delete(callback=delete_ok_result.set_value_once,
+                                    queue=queue,
+                                    if_unused=if_unused,
+                                    if_empty=if_empty,
+                                    nowait=False)
+
+            self._flush_output(delete_ok_result.is_ready)
+            return delete_ok_result.value.method_frame
+
+    def queue_purge(self, queue=''):
+        """Purge all of the messages from the specified queue
+
+        :param queue: The queue to purge
+        :type queue: str or unicode
+
+        :returns: Method frame from the Queue.Purge-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Queue.PurgeOk`
+
+        """
+        with _CallbackResult(self._MethodFrameCallbackResultArgs) as \
+                purge_ok_result:
+            self._impl.queue_purge(callback=purge_ok_result.set_value_once,
+                                   queue=queue,
+                                   nowait=False)
+
+            self._flush_output(purge_ok_result.is_ready)
+            return purge_ok_result.value.method_frame
+
+    def queue_bind(self, queue, exchange, routing_key=None,
+                   arguments=None):
+        """Bind the queue to the specified exchange
+
+        :param queue: The queue to bind to the exchange
+        :type queue: str or unicode
+        :param exchange: The source exchange to bind to
+        :type exchange: str or unicode
+        :param routing_key: The routing key to bind on
+        :type routing_key: str or unicode
+        :param dict arguments: Custom key/value pair arguments for the binding
+
+        :returns: Method frame from the Queue.Bind-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Queue.BindOk`
+
+        """
+        with _CallbackResult(
+                self._MethodFrameCallbackResultArgs) as bind_ok_result:
+            self._impl.queue_bind(callback=bind_ok_result.set_value_once,
+                                  queue=queue,
+                                  exchange=exchange,
+                                  routing_key=routing_key,
+                                  nowait=False,
+                                  arguments=arguments)
+
+            self._flush_output(bind_ok_result.is_ready)
+            return bind_ok_result.value.method_frame
+
+    def queue_unbind(self, queue='', exchange=None, routing_key=None,
+                     arguments=None):
+        """Unbind a queue from an exchange.
+
+        :param queue: The queue to unbind from the exchange
+        :type queue: str or unicode
+        :param exchange: The source exchange to unbind from
+        :type exchange: str or unicode
+        :param routing_key: The routing key to unbind
+        :type routing_key: str or unicode
+        :param dict arguments: Custom key/value pair arguments for the binding
+
+        :returns: Method frame from the Queue.Unbind-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Queue.UnbindOk`
+
+        """
+        with _CallbackResult(self._MethodFrameCallbackResultArgs) as \
+                unbind_ok_result:
+            self._impl.queue_unbind(callback=unbind_ok_result.set_value_once,
+                                    queue=queue,
+                                    exchange=exchange,
+                                    routing_key=routing_key,
+                                    arguments=arguments)
+            self._flush_output(unbind_ok_result.is_ready)
+            return unbind_ok_result.value.method_frame
+
+    def tx_select(self):
+        """Select standard transaction mode. This method sets the channel to use
+        standard transactions. The client must use this method at least once on
+        a channel before using the Commit or Rollback methods.
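+
+        A minimal usage sketch (assumes an open `BlockingChannel` named
+        `channel`; the routing key is illustrative):
+
+        ```
+        channel.tx_select()
+        channel.basic_publish(exchange='', routing_key='my-queue',
+                              body='hello')
+        channel.tx_commit()
+        ```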
+
+        :returns: Method frame from the Tx.Select-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Tx.SelectOk`
+
+        """
+        with _CallbackResult(self._MethodFrameCallbackResultArgs) as \
+                select_ok_result:
+            self._impl.tx_select(select_ok_result.set_value_once)
+
+            self._flush_output(select_ok_result.is_ready)
+            return select_ok_result.value.method_frame
+
+    def tx_commit(self):
+        """Commit a transaction.
+
+        :returns: Method frame from the Tx.Commit-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Tx.CommitOk`
+
+        """
+        with _CallbackResult(self._MethodFrameCallbackResultArgs) as \
+                commit_ok_result:
+            self._impl.tx_commit(commit_ok_result.set_value_once)
+
+            self._flush_output(commit_ok_result.is_ready)
+            return commit_ok_result.value.method_frame
+
+    def tx_rollback(self):
+        """Rollback a transaction.
+
+        :returns: Method frame from the Tx.Rollback-ok response
+        :rtype: `pika.frame.Method` having `method` attribute of type
+            `spec.Tx.RollbackOk`
+
+        """
+        with _CallbackResult(self._MethodFrameCallbackResultArgs) as \
+                rollback_ok_result:
+            self._impl.tx_rollback(rollback_ok_result.set_value_once)
+
+            self._flush_output(rollback_ok_result.is_ready)
+            return rollback_ok_result.value.method_frame
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/select_connection.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/select_connection.py
new file mode 100644
index 000000000..29d856989
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/select_connection.py
@@ -0,0 +1,1178 @@
+"""A connection adapter that tries to use the best polling method for the
+platform pika is running on.
+
+"""
+import abc
+import collections
+import errno
+import functools
+import heapq
+import logging
+import select
+import time
+import threading
+
+import pika.compat
+
+from pika.adapters.base_connection import BaseConnection
+
+LOGGER = logging.getLogger(__name__)
+
+# One of select, epoll, kqueue or poll
+SELECT_TYPE = None
+
+# Use epoll's constants to keep life easy
+READ = 0x0001
+WRITE = 0x0004
+ERROR = 0x0008
+
+# Reason for this unconventional dict initialization is the fact that on some
+# platforms select.error is an alias for OSError. We don't want the lambda
+# for select.error to win over one for OSError.
+_SELECT_ERROR_CHECKERS = {}
+if pika.compat.PY3:
+    #InterruptedError is undefined in PY2
+    #pylint: disable=E0602
+    _SELECT_ERROR_CHECKERS[InterruptedError] = lambda e: True
+_SELECT_ERROR_CHECKERS[select.error] = lambda e: e.args[0] == errno.EINTR
+_SELECT_ERROR_CHECKERS[IOError] = lambda e: e.errno == errno.EINTR
+_SELECT_ERROR_CHECKERS[OSError] = lambda e: e.errno == errno.EINTR
+
+# We can reduce the number of elements in the list by looking at super-sub
+# class relationship because only the most generic ones need to be caught.
+# For now the optimization is left out.
+# Following is better but still incomplete.
+#_SELECT_ERRORS = tuple(filter(lambda e: not isinstance(e, OSError),
+#                              _SELECT_ERROR_CHECKERS.keys())
+#                       + [OSError])
+_SELECT_ERRORS = tuple(_SELECT_ERROR_CHECKERS.keys())
+
+
+def _is_resumable(exc):
+    ''' Check if caught exception represents EINTR error.
+ :param exc: exception; must be one of classes in _SELECT_ERRORS ''' + checker = _SELECT_ERROR_CHECKERS.get(exc.__class__, None) + if checker is not None: + return checker(exc) + else: + return False + + +class SelectConnection(BaseConnection): + """An asynchronous connection adapter that attempts to use the fastest + event loop adapter for the given platform. + + """ + + def __init__( + self, # pylint: disable=R0913 + parameters=None, + on_open_callback=None, + on_open_error_callback=None, + on_close_callback=None, + stop_ioloop_on_close=True, + custom_ioloop=None): + """Create a new instance of the Connection object. + + :param pika.connection.Parameters parameters: Connection parameters + :param method on_open_callback: Method to call on connection open + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) + :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected + :param custom_ioloop: Override using the global IOLoop in Tornado + :raises: RuntimeError + + """ + ioloop = custom_ioloop or IOLoop() + super(SelectConnection, self).__init__( + parameters, on_open_callback, on_open_error_callback, + on_close_callback, ioloop, stop_ioloop_on_close) + + def _adapter_connect(self): + """Connect to the RabbitMQ broker, returning True on success, False + on failure. + + :rtype: bool + + """ + error = super(SelectConnection, self)._adapter_connect() + if not error: + self.ioloop.add_handler(self.socket.fileno(), self._handle_events, + self.event_state) + return error + + def _adapter_disconnect(self): + """Disconnect from the RabbitMQ broker""" + if self.socket: + self.ioloop.remove_handler(self.socket.fileno()) + super(SelectConnection, self)._adapter_disconnect() + + +@functools.total_ordering +class _Timeout(object): + """Represents a timeout""" + + __slots__ = ('deadline', 'callback',) + + def __init__(self, deadline, callback): + """ + :param float deadline: timer expiration as non-negative epoch number + :param callable callback: callback to call when timeout expires + :raises ValueError, TypeError: + """ + + if deadline < 0: + raise ValueError( + 'deadline must be non-negative epoch number, but got %r' % + (deadline,)) + + if not callable(callback): + raise TypeError( + 'callback must be a callable, but got %r' % (callback,)) + + self.deadline = deadline + self.callback = callback + + def __eq__(self, other): + """NOTE: not supporting sort stability""" + return self.deadline == other.deadline + + def __lt__(self, other): + """NOTE: not supporting sort stability""" + return self.deadline < other.deadline + + def __le__(self, other): + """NOTE: not supporting sort stability""" + return self.deadline <= other.deadline + + +class _Timer(object): + """Manage timeouts for use in ioloop""" + + # Cancellation count threshold for triggering garbage collection of + # cancelled timers + _GC_CANCELLATION_THRESHOLD = 1024 + + def __init__(self): + self._timeout_heap = [] + + # Number of canceled timeouts on heap; for scheduling garbage + # collection of canceled timeouts + self._num_cancellations = 0 + + def close(self): + """Release resources. 
Don't use the `_Timer` instance after closing + it + """ + # Eliminate potential reference cycles to aid garbage-collection + if self._timeout_heap is not None: + for timeout in self._timeout_heap: + timeout.callback = None + self._timeout_heap = None + + def call_later(self, delay, callback): + """Schedule a one-shot timeout given delay seconds. + + NOTE: you may cancel the timer before dispatch of the callback. Timer + Manager cancels the timer upon dispatch of the callback. + + :param float delay: Non-negative number of seconds from now until + expiration + :param method callback: The callback method, having the signature + `callback()` + + :rtype: _Timeout + :raises ValueError, TypeError + + """ + if delay < 0: + raise ValueError( + 'call_later: delay must be non-negative, but got %r' + % (delay,)) + + now = time.time() + + timeout = _Timeout(now + delay, callback) + + heapq.heappush(self._timeout_heap, timeout) + + LOGGER.debug('call_later: added timeout %r with deadline=%r and ' + 'callback=%r; now=%s; delay=%s', timeout, timeout.deadline, + timeout.callback, now, delay) + + return timeout + + def remove_timeout(self, timeout): + """Cancel the timeout + + :param _Timeout timeout: The timer to cancel + + """ + # NOTE removing from the heap is difficult, so we just deactivate the + # timeout and garbage-collect it at a later time; see discussion + # in http://docs.python.org/library/heapq.html + if timeout.callback is None: + LOGGER.warning( + 'remove_timeout: timeout was already removed or called %r', + timeout) + else: + LOGGER.debug('remove_timeout: removing timeout %r with deadline=%r ' + 'and callback=%r', timeout, timeout.deadline, + timeout.callback) + timeout.callback = None + self._num_cancellations += 1 + + def get_remaining_interval(self): + """Get the interval to the next timeout expiration + + :returns: non-negative number of seconds until next timer expiration; + None if there are no timers + :rtype: float + + """ + if self._timeout_heap: + interval = max(0, self._timeout_heap[0].deadline - time.time()) + else: + interval = None + + return interval + + def process_timeouts(self): + """Process pending timeouts, invoking callbacks for those whose time has + come + + """ + if self._timeout_heap: + now = time.time() + + # Remove ready timeouts from the heap now to prevent IO starvation + # from timeouts added during callback processing + ready_timeouts = [] + + while self._timeout_heap and self._timeout_heap[0].deadline <= now: + timeout = heapq.heappop(self._timeout_heap) + if timeout.callback is not None: + ready_timeouts.append(timeout) + else: + self._num_cancellations -= 1 + + # Invoke ready timeout callbacks + for timeout in ready_timeouts: + if timeout.callback is None: + # Must have been canceled from a prior callback + self._num_cancellations -= 1 + continue + + timeout.callback() + timeout.callback = None + + # Garbage-collect canceled timeouts if they exceed threshold + if (self._num_cancellations >= self._GC_CANCELLATION_THRESHOLD and + self._num_cancellations > (len(self._timeout_heap) >> 1)): + self._num_cancellations = 0 + self._timeout_heap = [t for t in self._timeout_heap + if t.callback is not None] + heapq.heapify(self._timeout_heap) + + +class IOLoop(object): + """Singleton wrapper that decides which type of poller to use, creates an + instance of it in start_poller and keeps the invoking application in a + blocking state by calling the pollers start method. Poller should keep + looping until IOLoop.instance().stop() is called or there is a socket + error. 
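+
+    A minimal usage sketch (`on_open` is a user-supplied callback, not
+    defined here):
+
+    ```
+    connection = SelectConnection(on_open_callback=on_open)
+    try:
+        connection.ioloop.start()
+    except KeyboardInterrupt:
+        connection.close()
+        connection.ioloop.start()  # let the close handshake complete
+    ```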
+ + Passes through all operations to the loaded poller object. + + """ + + def __init__(self): + self._timer = _Timer() + + # Callbacks requested via `add_callback` + self._callbacks = collections.deque() + + self._poller = self._get_poller(self._get_remaining_interval, + self.process_timeouts) + + def close(self): + """Release IOLoop's resources. + + `IOLoop.close` is intended to be called by the application or test code + only after `IOLoop.start()` returns. After calling `close()`, no other + interaction with the closed instance of `IOLoop` should be performed. + + """ + if self._callbacks is not None: + self._poller.close() + self._timer.close() + self._callbacks = None + + @staticmethod + def _get_poller(get_wait_seconds, process_timeouts): + """Determine the best poller to use for this environment and instantiate + it. + + :param get_wait_seconds: Function for getting the maximum number of + seconds to wait for IO for use by the poller + :param process_timeouts: Function for processing timeouts for use by the + poller + + :returns: the instantiated poller instance supporting `_PollerBase` API + """ + + poller = None + + kwargs = dict(get_wait_seconds=get_wait_seconds, + process_timeouts=process_timeouts) + + if hasattr(select, 'epoll'): + if not SELECT_TYPE or SELECT_TYPE == 'epoll': + LOGGER.debug('Using EPollPoller') + poller = EPollPoller(**kwargs) + + if not poller and hasattr(select, 'kqueue'): + if not SELECT_TYPE or SELECT_TYPE == 'kqueue': + LOGGER.debug('Using KQueuePoller') + poller = KQueuePoller(**kwargs) + + if (not poller and hasattr(select, 'poll') and + hasattr(select.poll(), 'modify')): # pylint: disable=E1101 + if not SELECT_TYPE or SELECT_TYPE == 'poll': + LOGGER.debug('Using PollPoller') + poller = PollPoller(**kwargs) + + if not poller: + LOGGER.debug('Using SelectPoller') + poller = SelectPoller(**kwargs) + + return poller + + def add_timeout(self, deadline, callback_method): + """[API] Add the callback_method to the IOLoop timer to fire after + deadline seconds. Returns a handle to the timeout. Do not confuse with + Tornado's timeout where you pass in the time you want to have your + callback called. Only pass in the seconds until it's to be called. + + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + :rtype: str + + """ + return self._timer.call_later(deadline, callback_method) + + def remove_timeout(self, timeout_id): + """[API] Remove a timeout + + :param str timeout_id: The timeout id to remove + + """ + self._timer.remove_timeout(timeout_id) + + def add_callback_threadsafe(self, callback): + """Requests a call to the given function as soon as possible in the + context of this IOLoop's thread. + + NOTE: This is the only thread-safe method in IOLoop. All other + manipulations of IOLoop must be performed from the IOLoop's thread. + + For example, a thread may request a call to the `stop` method of an + ioloop that is running in a different thread via + `ioloop.add_callback_threadsafe(ioloop.stop)` + + NOTE: if you know that the requester is running on the same thread as + the connection it is more efficient to use the + `call_later()` method with a delay of 0. 
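+
+        A minimal sketch of requesting an ioloop shutdown from a worker
+        thread (the `ioloop` name is illustrative):
+
+        ```
+        import threading
+        threading.Thread(
+            target=ioloop.add_callback_threadsafe,
+            args=(ioloop.stop,)).start()
+        ```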
+ + :param method callback: The callback method + + """ + if not callable(callback): + raise TypeError( + 'callback must be a callable, but got %r' % (callback,)) + + # NOTE: `deque.append` is atomic + self._callbacks.append(callback) + + # Wake up the IOLoop which may be running in another thread + self._poller.wake_threadsafe() + + LOGGER.debug('add_callback_threadsafe: added callback=%r', callback) + + def process_timeouts(self): + """[Extension] Process pending callbacks and timeouts, invoking those + whose time has come. Internal use only. + + """ + # Avoid I/O starvation by postponing new callbacks to the next iteration + for _ in pika.compat.xrange(len(self._callbacks)): + self._callbacks.popleft()() + + self._timer.process_timeouts() + + def _get_remaining_interval(self): + """Get the remaining interval to the next callback or timeout + expiration. + + :returns: non-negative number of seconds until next callback or timer + expiration; None if there are no callbacks and timers + :rtype: float + + """ + if self._callbacks: + return 0 + + return self._timer.get_remaining_interval() + + def add_handler(self, fileno, handler, events): + """[API] Add a new fileno to the set to be monitored + + :param int fileno: The file descriptor + :param method handler: What is called when an event happens + :param int events: The event mask using READ, WRITE, ERROR + + """ + self._poller.add_handler(fileno, handler, events) + + def update_handler(self, fileno, events): + """[API] Set the events to the current events + + :param int fileno: The file descriptor + :param int events: The event mask using READ, WRITE, ERROR + + """ + self._poller.update_handler(fileno, events) + + def remove_handler(self, fileno): + """[API] Remove a file descriptor from the set + + :param int fileno: The file descriptor + + """ + self._poller.remove_handler(fileno) + + def start(self): + """[API] Start the main poller loop. It will loop until requested to + exit. See `IOLoop.stop`. + + """ + self._poller.start() + + def stop(self): + """[API] Request exit from the ioloop. The loop is NOT guaranteed to + stop before this method returns. + + To invoke `stop()` safely from a thread other than this IOLoop's thread, + call it via `add_callback_threadsafe`; e.g., + + `ioloop.add_callback_threadsafe(ioloop.stop)` + + """ + self._poller.stop() + + def activate_poller(self): + """[Extension] Activate the poller + + """ + self._thread_id = threading.current_thread().ident + self._poller.activate_poller() + + def deactivate_poller(self): + """[Extension] Deactivate the poller + + """ + self._poller.deactivate_poller() + + def poll(self): + """[Extension] Wait for events of interest on registered file + descriptors until an event of interest occurs or next timer deadline or + `_PollerBase._MAX_POLL_TIMEOUT`, whichever is sooner, and dispatch the + corresponding event handlers. + + """ + self._poller.poll() + + +_AbstractBase = abc.ABCMeta('_AbstractBase', (object,), {}) + + +class _PollerBase(_AbstractBase): # pylint: disable=R0902 + """Base class for select-based IOLoop implementations""" + + # Drop out of the poll loop every _MAX_POLL_TIMEOUT secs as a worst case; + # this is only a backstop value; we will run timeouts when they are + # scheduled. 
+ _MAX_POLL_TIMEOUT = 5 + + # if the poller uses MS override with 1000 + POLL_TIMEOUT_MULT = 1 + + def __init__(self, get_wait_seconds, process_timeouts): + """ + :param get_wait_seconds: Function for getting the maximum number of + seconds to wait for IO for use by the poller + :param process_timeouts: Function for processing timeouts for use by the + poller + + """ + self._get_wait_seconds = get_wait_seconds + self._process_timeouts = process_timeouts + + # We guard access to the waking file descriptors to avoid races from + # closing them while another thread is calling our `wake()` method. + self._waking_mutex = threading.Lock() + + # fd-to-handler function mappings + self._fd_handlers = dict() + + # event-to-fdset mappings + self._fd_events = {READ: set(), WRITE: set(), ERROR: set()} + + self._processing_fd_event_map = {} + + # Reentrancy tracker of the `start` method + self._start_nesting_levels = 0 + + self._stopping = False + + # Create ioloop-interrupt socket pair and register read handler. + self._r_interrupt, self._w_interrupt = self._get_interrupt_pair() + self.add_handler(self._r_interrupt.fileno(), self._read_interrupt, READ) + + def close(self): + """Release poller's resources. + + `close()` is intended to be called after the poller's `start()` method + returns. After calling `close()`, no other interaction with the closed + poller instance should be performed. + + """ + # Unregister and close ioloop-interrupt socket pair; mutual exclusion is + # necessary to avoid race condition with `wake_threadsafe` executing in + # another thread's context + assert self._start_nesting_levels == 0, \ + 'Cannot call close() before start() unwinds.' + + with self._waking_mutex: + if self._w_interrupt is not None: + self.remove_handler(self._r_interrupt.fileno()) # pylint: disable=E1101 + self._r_interrupt.close() + self._r_interrupt = None + self._w_interrupt.close() + self._w_interrupt = None + + self.deactivate_poller() + + self._fd_handlers = None + self._fd_events = None + self._processing_fd_event_map = None + + def wake_threadsafe(self): + """Wake up the poller as soon as possible. As the name indicates, this + method is thread-safe. + + """ + with self._waking_mutex: + if self._w_interrupt is None: + return + + try: + # Send byte to interrupt the poll loop, use send() instead of + # os.write for Windows compatibility + self._w_interrupt.send(b'X') + except pika.compat.SOCKET_ERROR as err: + if err.errno != errno.EWOULDBLOCK: + raise + except Exception as err: + # There's nothing sensible to do here, we'll exit the interrupt + # loop after POLL_TIMEOUT secs in worst case anyway. 
+ LOGGER.warning("Failed to send interrupt to poller: %s", err) + raise + + + def _get_max_wait(self): + """Get the interval to the next timeout event, or a default interval + + :returns: maximum number of self.POLL_TIMEOUT_MULT-scaled time units + to wait for IO events + + """ + delay = self._get_wait_seconds() + if delay is None: + delay = self._MAX_POLL_TIMEOUT + else: + delay = min(delay, self._MAX_POLL_TIMEOUT) + + return delay * self.POLL_TIMEOUT_MULT + + def add_handler(self, fileno, handler, events): + """Add a new fileno to the set to be monitored + + :param int fileno: The file descriptor + :param method handler: What is called when an event happens + :param int events: The event mask using READ, WRITE, ERROR + + """ + self._fd_handlers[fileno] = handler + self._set_handler_events(fileno, events) + + # Inform the derived class + self._register_fd(fileno, events) + + def update_handler(self, fileno, events): + """Set the events to the current events + + :param int fileno: The file descriptor + :param int events: The event mask using READ, WRITE, ERROR + + """ + # Record the change + events_cleared, events_set = self._set_handler_events(fileno, events) + + # Inform the derived class + self._modify_fd_events( + fileno, + events=events, + events_to_clear=events_cleared, + events_to_set=events_set) + + def remove_handler(self, fileno): + """Remove a file descriptor from the set + + :param int fileno: The file descriptor + + """ + try: + del self._processing_fd_event_map[fileno] + except KeyError: + pass + + events_cleared, _ = self._set_handler_events(fileno, 0) + del self._fd_handlers[fileno] + + # Inform the derived class + self._unregister_fd(fileno, events_to_clear=events_cleared) + + def _set_handler_events(self, fileno, events): + """Set the handler's events to the given events; internal to + `_PollerBase`. + + :param int fileno: The file descriptor + :param int events: The event mask (READ, WRITE, ERROR) + + :returns: a 2-tuple (events_cleared, events_set) + """ + events_cleared = 0 + events_set = 0 + + for evt in (READ, WRITE, ERROR): + if events & evt: + if fileno not in self._fd_events[evt]: + self._fd_events[evt].add(fileno) + events_set |= evt + else: + if fileno in self._fd_events[evt]: + self._fd_events[evt].discard(fileno) + events_cleared |= evt + + return events_cleared, events_set + + def activate_poller(self): + """Activate the poller + + """ + # Activate the underlying poller and register current events + self._init_poller() + fd_to_events = collections.defaultdict(int) + for event, file_descriptors in self._fd_events.items(): + for fileno in file_descriptors: + fd_to_events[fileno] |= event + + for fileno, events in fd_to_events.items(): + self._register_fd(fileno, events) + + def deactivate_poller(self): + """Deactivate the poller + + """ + self._uninit_poller() + + def start(self): + """Start the main poller loop. 
It will loop until requested to exit
+
+        """
+        self._start_nesting_levels += 1
+
+        if self._start_nesting_levels == 1:
+            LOGGER.debug('Entering IOLoop')
+
+            # Activate the underlying poller and register current events
+            self.activate_poller()
+
+        else:
+            LOGGER.debug('Reentering IOLoop at nesting level=%s',
+                         self._start_nesting_levels)
+
+        try:
+            # Run event loop
+            while not self._stopping:
+                self.poll()
+                self._process_timeouts()
+
+        finally:
+            self._start_nesting_levels -= 1
+
+            if self._start_nesting_levels == 0:
+                try:
+                    LOGGER.debug('Deactivating poller')
+
+                    # Deactivate the underlying poller
+                    self.deactivate_poller()
+                finally:
+                    self._stopping = False
+            else:
+                LOGGER.debug('Leaving IOLoop with %s nesting levels remaining',
+                             self._start_nesting_levels)
+
+    def stop(self):
+        """Request exit from the ioloop. The loop is NOT guaranteed to stop
+        before this method returns.
+
+        """
+        LOGGER.debug('Stopping IOLoop')
+        self._stopping = True
+
+        self.wake_threadsafe()
+
+    @abc.abstractmethod
+    def poll(self):
+        """Wait for events on the file descriptors of interest.
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def _init_poller(self):
+        """Notify the implementation to allocate the poller resource"""
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def _uninit_poller(self):
+        """Notify the implementation to release the poller resource"""
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def _register_fd(self, fileno, events):
+        """The base class invokes this method to notify the implementation to
+        register the file descriptor with the polling object. The request must
+        be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: The event mask (READ, WRITE, ERROR)
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
+        """The base class invokes this method to notify the implementation to
+        modify an already registered file descriptor. The request must be
+        ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: absolute events (READ, WRITE, ERROR)
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        :param int events_to_set: The events to set (READ, WRITE, ERROR)
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def _unregister_fd(self, fileno, events_to_clear):
+        """The base class invokes this method to notify the implementation to
+        unregister the file descriptor being tracked by the polling object. The
+        request must be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        """
+        raise NotImplementedError
+
+    def _dispatch_fd_events(self, fd_event_map):
+        """ Helper to dispatch callbacks for file descriptors that received
+        events.
+
+        Before doing so we re-calculate the event mask based on what is
+        currently set in case it has been changed under our feet by a
+        previous callback. We also store a reference to the
+        fd_event_map so that we can detect removal of a
+        fileno during processing of another callback and not generate
+        spurious callbacks on it.
+
+        :param dict fd_event_map: Map of fds to events received on them.
+        """
+        # Reset the prior map; if the call is nested, this will suppress the
+        # remaining dispatch in the earlier call.
+        self._processing_fd_event_map.clear()
+
+        self._processing_fd_event_map = fd_event_map
+
+        for fileno in pika.compat.dictkeys(fd_event_map):
+            if fileno not in fd_event_map:
+                # the fileno has been removed from the map under our feet.
+                continue
+
+            events = fd_event_map[fileno]
+            for evt in [READ, WRITE, ERROR]:
+                if fileno not in self._fd_events[evt]:
+                    events &= ~evt
+
+            if events:
+                handler = self._fd_handlers[fileno]
+                handler(fileno, events)
+
+    @staticmethod
+    def _get_interrupt_pair():
+        """ Use a socketpair to be able to interrupt the ioloop if called
+        from another thread. Socketpair() is not supported on some OS (Win)
+        so use a pair of simple TCP sockets instead. The sockets will be
+        closed and garbage collected by python when the ioloop itself is.
+        """
+        return pika.compat._nonblocking_socketpair()  # pylint: disable=W0212
+
+    def _read_interrupt(self, interrupt_fd, events):  # pylint: disable=W0613
+        """ Read the interrupt byte(s). We ignore the event mask as we can only
+        get here if there's data to be read on our fd.
+
+        :param int interrupt_fd: The file descriptor to read from
+        :param int events: (unused) The events generated for this fd
+        """
+        try:
+            # NOTE Use recv instead of os.read for windows compatibility
+            self._r_interrupt.recv(512)  # pylint: disable=E1101
+        except pika.compat.SOCKET_ERROR as err:
+            if err.errno != errno.EAGAIN:
+                raise
+
+
+class SelectPoller(_PollerBase):
+    """Default behavior is to use Select since it's the widest supported and has
+    all of the methods we need for child classes as well. One should only need
+    to override the update_handler and start methods for additional types.
+
+    """
+    # if the poller uses MS specify 1000
+    POLL_TIMEOUT_MULT = 1
+
+    def poll(self):
+        """Wait for events of interest on registered file descriptors until an
+        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
+        whichever is sooner, and dispatch the corresponding event handlers.
+
+        """
+        while True:
+            try:
+                if (self._fd_events[READ] or self._fd_events[WRITE] or
+                        self._fd_events[ERROR]):
+                    read, write, error = select.select(
+                        self._fd_events[READ], self._fd_events[WRITE],
+                        self._fd_events[ERROR], self._get_max_wait())
+                else:
+                    # NOTE When called without any FDs, select fails on
+                    # Windows with error 10022, 'An invalid argument was
+                    # supplied'.
+                    time.sleep(self._get_max_wait())
+                    read, write, error = [], [], []
+                break
+            except _SELECT_ERRORS as error:
+                if _is_resumable(error):
+                    continue
+                else:
+                    raise
+
+        # Build an event bit mask for each fileno we've received an event for
+
+        fd_event_map = collections.defaultdict(int)
+        for fd_set, evt in zip((read, write, error), (READ, WRITE, ERROR)):
+            for fileno in fd_set:
+                fd_event_map[fileno] |= evt
+
+        self._dispatch_fd_events(fd_event_map)
+
+    def _init_poller(self):
+        """Notify the implementation to allocate the poller resource"""
+        # It's a no op in SelectPoller
+        pass
+
+    def _uninit_poller(self):
+        """Notify the implementation to release the poller resource"""
+        # It's a no op in SelectPoller
+        pass
+
+    def _register_fd(self, fileno, events):
+        """The base class invokes this method to notify the implementation to
+        register the file descriptor with the polling object. The request must
+        be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: The event mask using READ, WRITE, ERROR
+        """
+        # It's a no op in SelectPoller
+        pass
+
+    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
+        """The base class invokes this method to notify the implementation to
+        modify an already registered file descriptor. The request must be
+        ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: absolute events (READ, WRITE, ERROR)
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        :param int events_to_set: The events to set (READ, WRITE, ERROR)
+        """
+        # It's a no op in SelectPoller
+        pass
+
+    def _unregister_fd(self, fileno, events_to_clear):
+        """The base class invokes this method to notify the implementation to
+        unregister the file descriptor being tracked by the polling object. The
+        request must be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        """
+        # It's a no op in SelectPoller
+        pass
+
+
+class KQueuePoller(_PollerBase):
+    """KQueuePoller works on BSD-based systems and is faster than select"""
+
+    def __init__(self, get_wait_seconds, process_timeouts):
+        """Create an instance of the KQueuePoller
+        """
+        self._kqueue = None
+        super(KQueuePoller, self).__init__(get_wait_seconds, process_timeouts)
+
+    @staticmethod
+    def _map_event(kevent):
+        """return the event type associated with a kevent object
+
+        :param kevent kevent: a kevent object as returned by kqueue.control()
+
+        """
+        if kevent.filter == select.KQ_FILTER_READ:
+            return READ
+        elif kevent.filter == select.KQ_FILTER_WRITE:
+            return WRITE
+        elif kevent.flags & select.KQ_EV_ERROR:
+            return ERROR
+
+        # Should never happen
+        return None
+
+    def poll(self):
+        """Wait for events of interest on registered file descriptors until an
+        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
+        whichever is sooner, and dispatch the corresponding event handlers.
+
+        """
+        while True:
+            try:
+                kevents = self._kqueue.control(None, 1000,
+                                               self._get_max_wait())
+                break
+            except _SELECT_ERRORS as error:
+                if _is_resumable(error):
+                    continue
+                else:
+                    raise
+
+        fd_event_map = collections.defaultdict(int)
+        for event in kevents:
+            fd_event_map[event.ident] |= self._map_event(event)
+
+        self._dispatch_fd_events(fd_event_map)
+
+    def _init_poller(self):
+        """Notify the implementation to allocate the poller resource"""
+        assert self._kqueue is None
+
+        self._kqueue = select.kqueue()
+
+    def _uninit_poller(self):
+        """Notify the implementation to release the poller resource"""
+        if self._kqueue is not None:
+            self._kqueue.close()
+            self._kqueue = None
+
+    def _register_fd(self, fileno, events):
+        """The base class invokes this method to notify the implementation to
+        register the file descriptor with the polling object. The request must
+        be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: The event mask using READ, WRITE, ERROR
+        """
+        self._modify_fd_events(
+            fileno, events=events, events_to_clear=0, events_to_set=events)
+
+    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
+        """The base class invokes this method to notify the implementation to
+        modify an already registered file descriptor. The request must be
+        ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: absolute events (READ, WRITE, ERROR)
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        :param int events_to_set: The events to set (READ, WRITE, ERROR)
+        """
+        if self._kqueue is None:
+            return
+
+        kevents = list()
+
+        if events_to_clear & READ:
+            kevents.append(
+                select.kevent(
+                    fileno,
+                    filter=select.KQ_FILTER_READ,
+                    flags=select.KQ_EV_DELETE))
+        if events_to_set & READ:
+            kevents.append(
+                select.kevent(
+                    fileno,
+                    filter=select.KQ_FILTER_READ,
+                    flags=select.KQ_EV_ADD))
+        if events_to_clear & WRITE:
+            kevents.append(
+                select.kevent(
+                    fileno,
+                    filter=select.KQ_FILTER_WRITE,
+                    flags=select.KQ_EV_DELETE))
+        if events_to_set & WRITE:
+            kevents.append(
+                select.kevent(
+                    fileno,
+                    filter=select.KQ_FILTER_WRITE,
+                    flags=select.KQ_EV_ADD))
+
+        self._kqueue.control(kevents, 0)
+
+    def _unregister_fd(self, fileno, events_to_clear):
+        """The base class invokes this method to notify the implementation to
+        unregister the file descriptor being tracked by the polling object. The
+        request must be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        """
+        self._modify_fd_events(
+            fileno, events=0, events_to_clear=events_to_clear, events_to_set=0)
+
+
+class PollPoller(_PollerBase):
+    """Poll works on Linux and can have better performance than EPoll in
+    certain scenarios. Both are faster than select.
+
+    """
+    POLL_TIMEOUT_MULT = 1000
+
+    def __init__(self, get_wait_seconds, process_timeouts):
+        """Create an instance of the PollPoller
+
+        """
+        self._poll = None
+        super(PollPoller, self).__init__(get_wait_seconds, process_timeouts)
+
+    @staticmethod
+    def _create_poller():
+        """
+        :rtype: `select.poll`
+        """
+        return select.poll()  # pylint: disable=E1101
+
+    def poll(self):
+        """Wait for events of interest on registered file descriptors until an
+        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
+        whichever is sooner, and dispatch the corresponding event handlers.
+
+        """
+        while True:
+            try:
+                events = self._poll.poll(self._get_max_wait())
+                break
+            except _SELECT_ERRORS as error:
+                if _is_resumable(error):
+                    continue
+                else:
+                    raise
+
+        fd_event_map = collections.defaultdict(int)
+        for fileno, event in events:
+            fd_event_map[fileno] |= event
+
+        self._dispatch_fd_events(fd_event_map)
+
+    def _init_poller(self):
+        """Notify the implementation to allocate the poller resource"""
+        assert self._poll is None
+
+        self._poll = self._create_poller()
+
+    def _uninit_poller(self):
+        """Notify the implementation to release the poller resource"""
+        if self._poll is not None:
+            if hasattr(self._poll, "close"):
+                self._poll.close()
+
+            self._poll = None
+
+    def _register_fd(self, fileno, events):
+        """The base class invokes this method to notify the implementation to
+        register the file descriptor with the polling object. The request must
+        be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: The event mask using READ, WRITE, ERROR
+        """
+        if self._poll is not None:
+            self._poll.register(fileno, events)
+
+    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
+        """The base class invokes this method to notify the implementation to
+        modify an already registered file descriptor. The request must be
+        ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events: absolute events (READ, WRITE, ERROR)
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        :param int events_to_set: The events to set (READ, WRITE, ERROR)
+        """
+        if self._poll is not None:
+            self._poll.modify(fileno, events)
+
+    def _unregister_fd(self, fileno, events_to_clear):
+        """The base class invokes this method to notify the implementation to
+        unregister the file descriptor being tracked by the polling object. The
+        request must be ignored if the poller is not activated.
+
+        :param int fileno: The file descriptor
+        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
+        """
+        if self._poll is not None:
+            self._poll.unregister(fileno)
+
+
+class EPollPoller(PollPoller):
+    """EPoll works on Linux and can have better performance than Poll in
+    certain scenarios. Both are faster than select.
+
+    """
+    POLL_TIMEOUT_MULT = 1
+
+    @staticmethod
+    def _create_poller():
+        """
+        :rtype: `select.epoll`
+        """
+        return select.epoll()  # pylint: disable=E1101
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/tornado_connection.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/tornado_connection.py
new file mode 100644
index 000000000..db34dfd99
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/tornado_connection.py
@@ -0,0 +1,122 @@
+"""Use pika with the Tornado IOLoop"""
+from tornado import ioloop
+import logging
+import time
+
+from pika.adapters import base_connection
+
+LOGGER = logging.getLogger(__name__)
+
+
+class TornadoConnection(base_connection.BaseConnection):
+    """The TornadoConnection runs on the Tornado IOLoop. If you're running the
+    connection in a web app, make sure you set stop_ioloop_on_close to False,
+    which is the default behavior for this adapter, otherwise the web app
+    will stop taking requests.
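+
+    A minimal usage sketch (`on_open` is a user-supplied callback, not
+    defined here):
+
+    ```
+    connection = TornadoConnection(on_open_callback=on_open)
+    ioloop.IOLoop.instance().start()
+    ```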
+ + :param pika.connection.Parameters parameters: Connection parameters + :param on_open_callback: The method to call when the connection is open + :type on_open_callback: method + :param on_open_error_callback: Method to call if the connection cant + be opened + :type on_open_error_callback: method + :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected + :param custom_ioloop: Override using the global IOLoop in Tornado + + """ + WARN_ABOUT_IOLOOP = True + + def __init__(self, + parameters=None, + on_open_callback=None, + on_open_error_callback=None, + on_close_callback=None, + stop_ioloop_on_close=False, + custom_ioloop=None): + """Create a new instance of the TornadoConnection class, connecting + to RabbitMQ automatically + + :param pika.connection.Parameters parameters: Connection parameters + :param on_open_callback: The method to call when the connection is open + :type on_open_callback: method + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) + :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected + :param custom_ioloop: Override using the global IOLoop in Tornado + + """ + self.sleep_counter = 0 + self.ioloop = custom_ioloop or ioloop.IOLoop.instance() + super(TornadoConnection, self).__init__(parameters, on_open_callback, + on_open_error_callback, + on_close_callback, self.ioloop, + stop_ioloop_on_close) + + def _adapter_connect(self): + """Connect to the remote socket, adding the socket to the IOLoop if + connected. + + :rtype: bool + + """ + error = super(TornadoConnection, self)._adapter_connect() + if not error: + self.ioloop.add_handler(self.socket.fileno(), self._handle_events, + self.event_state) + return error + + def _adapter_disconnect(self): + """Disconnect from the RabbitMQ broker""" + if self.socket: + self.ioloop.remove_handler(self.socket.fileno()) + super(TornadoConnection, self)._adapter_disconnect() + + def add_timeout(self, deadline, callback_method): + """Add the callback_method to the IOLoop timer to fire after deadline + seconds. Returns a handle to the timeout. Do not confuse with + Tornado's timeout where you pass in the time you want to have your + callback called. Only pass in the seconds until it's to be called. + + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + :rtype: str + + """ + return self.ioloop.add_timeout(time.time() + deadline, callback_method) + + def remove_timeout(self, timeout_id): + """Remove the timeout from the IOLoop by the ID returned from + add_timeout. + + :rtype: str + + """ + return self.ioloop.remove_timeout(timeout_id) + + def add_callback_threadsafe(self, callback): + """Requests a call to the given function as soon as possible in the + context of this connection's IOLoop thread. + + NOTE: This is the only thread-safe method offered by the connection. All + other manipulations of the connection must be performed from the + connection's thread. + + For example, a thread may request a call to the + `channel.basic_ack` method of a connection that is running in a + different thread via + + ``` + connection.add_callback_threadsafe( + functools.partial(channel.basic_ack, delivery_tag=...)) + ``` + + :param method callback: The callback method; must be callable. 
+
+        """
+        if not callable(callback):
+            raise TypeError(
+                'callback must be a callable, but got %r' % (callback,))
+
+        self.ioloop.add_callback(callback)
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/twisted_connection.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/twisted_connection.py
new file mode 100644
index 000000000..1dac51f44
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/adapters/twisted_connection.py
@@ -0,0 +1,474 @@
+"""Using Pika with a Twisted reactor.
+
+Supports two methods of establishing the connection, using TwistedConnection
+or TwistedProtocolConnection. For details about each method, see the docstrings
+of the corresponding classes.
+
+The interfaces in this module are Deferred-based when possible. This means that
+the connection.channel() method and most of the channel methods return
+Deferreds instead of taking a callback argument and that basic_consume()
+returns a Twisted DeferredQueue where messages from the server will be
+stored. Refer to the docstrings for TwistedConnection.channel() and the
+TwistedChannel class for details.
+
+"""
+import functools
+from twisted.internet import defer, error, reactor
+from twisted.python import log
+
+from pika import connection
+from pika import exceptions
+from pika.adapters import base_connection
+
+
+class ClosableDeferredQueue(defer.DeferredQueue):
+    """
+    Like the normal Twisted DeferredQueue, but after close() is called with an
+    Exception instance all pending Deferreds are errbacked and further attempts
+    to call get() or put() return a Failure wrapping that exception.
+    """
+
+    def __init__(self, size=None, backlog=None):
+        self.closed = None
+        super(ClosableDeferredQueue, self).__init__(size, backlog)
+
+    def put(self, obj):
+        if self.closed:
+            return defer.fail(self.closed)
+        return defer.DeferredQueue.put(self, obj)
+
+    def get(self):
+        if self.closed:
+            return defer.fail(self.closed)
+        return defer.DeferredQueue.get(self)
+
+    def close(self, reason):
+        self.closed = reason
+        while self.waiting:
+            self.waiting.pop().errback(reason)
+        self.pending = []
+
+
+class TwistedChannel(object):
+    """A wrapper around Pika's Channel.
+
+    Channel methods that normally take a callback argument are wrapped to
+    return a Deferred that fires with whatever would be passed to the callback.
+    If the channel gets closed, all pending Deferreds are errbacked with a
+    ChannelClosed exception. The returned Deferreds fire with whatever
+    arguments the callback to the original method would receive.
+
+    The basic_consume method is wrapped in a special way, see its docstring for
+    details.
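+
+    A minimal usage sketch (`channel` is a TwistedChannel; the queue name is
+    illustrative):
+
+    ```
+    d = channel.queue_declare(queue='example', durable=True)
+    d.addCallback(lambda method_frame: log.msg('queue declared'))
+    d.addErrback(log.err)
+    ```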
+ """ + + WRAPPED_METHODS = ('exchange_declare', 'exchange_delete', 'queue_declare', + 'queue_bind', 'queue_purge', 'queue_unbind', 'basic_qos', + 'basic_get', 'basic_recover', 'tx_select', 'tx_commit', + 'tx_rollback', 'flow', 'basic_cancel') + + def __init__(self, channel): + self.__channel = channel + self.__closed = None + self.__calls = set() + self.__consumers = {} + + channel.add_on_close_callback(self.channel_closed) + + def channel_closed(self, channel, reply_code, reply_text): + # enter the closed state + self.__closed = exceptions.ChannelClosed(reply_code, reply_text) + # errback all pending calls + for d in self.__calls: + d.errback(self.__closed) + # close all open queues + for consumers in self.__consumers.values(): + for c in consumers: + c.close(self.__closed) + # release references to stored objects + self.__calls = set() + self.__consumers = {} + + def basic_consume(self, *args, **kwargs): + """Consume from a server queue. Returns a Deferred that fires with a + tuple: (queue_object, consumer_tag). The queue object is an instance of + ClosableDeferredQueue, where data received from the queue will be + stored. Clients should use its get() method to fetch individual + message. + """ + if self.__closed: + return defer.fail(self.__closed) + + queue = ClosableDeferredQueue() + queue_name = kwargs['queue'] + kwargs['consumer_callback'] = lambda *args: queue.put(args) + self.__consumers.setdefault(queue_name, set()).add(queue) + + try: + consumer_tag = self.__channel.basic_consume(*args, **kwargs) + # TODO this except without types would suppress system-exiting + # exceptions, such as SystemExit and KeyboardInterrupt. It should be at + # least `except Exception` and preferably more specific. + except: + return defer.fail() + + return defer.succeed((queue, consumer_tag)) + + def queue_delete(self, *args, **kwargs): + """Wraps the method the same way all the others are wrapped, but removes + the reference to the queue object after it gets deleted on the server. + + """ + wrapped = self.__wrap_channel_method('queue_delete') + queue_name = kwargs['queue'] + + d = wrapped(*args, **kwargs) + return d.addCallback(self.__clear_consumer, queue_name) + + def basic_publish(self, *args, **kwargs): + """Make sure the channel is not closed and then publish. Return a + Deferred that fires with the result of the channel's basic_publish. + + """ + if self.__closed: + return defer.fail(self.__closed) + return defer.succeed(self.__channel.basic_publish(*args, **kwargs)) + + def __wrap_channel_method(self, name): + """Wrap Pika's Channel method to make it return a Deferred that fires + when the method completes and errbacks if the channel gets closed. If + the original method's callback would receive more than one argument, the + Deferred fires with a tuple of argument values. + + """ + method = getattr(self.__channel, name) + + @functools.wraps(method) + def wrapped(*args, **kwargs): + if self.__closed: + return defer.fail(self.__closed) + + d = defer.Deferred() + self.__calls.add(d) + d.addCallback(self.__clear_call, d) + + def single_argument(*args): + """ + Make sure that the deferred is called with a single argument. + In case the original callback fires with more than one, convert + to a tuple. + """ + if len(args) > 1: + d.callback(tuple(args)) + else: + d.callback(*args) + + kwargs['callback'] = single_argument + + try: + method(*args, **kwargs) + # TODO this except without types would suppress system-exiting + # exceptions, such as SystemExit and KeyboardInterrupt. 
It should be + # at least `except Exception` and preferably more specific. + except: + return defer.fail() + return d + + return wrapped + + def __clear_consumer(self, ret, queue_name): + self.__consumers.pop(queue_name, None) + return ret + + def __clear_call(self, ret, d): + self.__calls.discard(d) + return ret + + def __getattr__(self, name): + # Wrap methods defined in WRAPPED_METHODS, forward the rest of accesses + # to the channel. + if name in self.WRAPPED_METHODS: + return self.__wrap_channel_method(name) + return getattr(self.__channel, name) + + +class IOLoopReactorAdapter(object): + """An adapter providing Pika's IOLoop interface using a Twisted reactor. + + Accepts a TwistedConnection object and a Twisted reactor object. + + """ + + def __init__(self, connection, reactor): + self.connection = connection + self.reactor = reactor + self.started = False + + def add_timeout(self, deadline, callback_method): + """Add the callback_method to the IOLoop timer to fire after deadline + seconds. Returns a handle to the timeout. Do not confuse with + Tornado's timeout where you pass in the time you want to have your + callback called. Only pass in the seconds until it's to be called. + + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + :rtype: twisted.internet.interfaces.IDelayedCall + + """ + return self.reactor.callLater(deadline, callback_method) + + def remove_timeout(self, call): + """Remove a call + + :param twisted.internet.interfaces.IDelayedCall call: The call to cancel + + """ + call.cancel() + + def add_callback_threadsafe(self, callback): + """Requests a call to the given function as soon as possible in the + context of this IOLoop's thread. + + NOTE: This is the only thread-safe method offered by the IOLoop adapter. + All other manipulations of the IOLoop adapter and its parent connection + must be performed from the connection's thread. + + For example, a thread may request a call to the + `channel.basic_ack` method of a connection that is running in a + different thread via + + ``` + connection.add_callback_threadsafe( + functools.partial(channel.basic_ack, delivery_tag=...)) + ``` + + :param method callback: The callback method; must be callable. + + """ + self.reactor.callFromThread(callback) + + def stop(self): + # Guard against stopping the reactor multiple times + if not self.started: + return + self.started = False + self.reactor.stop() + + def start(self): + # Guard against starting the reactor multiple times + if self.started: + return + self.started = True + self.reactor.run() + + def remove_handler(self, _): + # The fileno is irrelevant, as it's the connection's job to provide it + # to the reactor when asked to do so. Removing the handler from the + # ioloop is removing it from the reactor in Twisted's parlance. + self.reactor.removeReader(self.connection) + self.reactor.removeWriter(self.connection) + + def update_handler(self, _, event_state): + # Same as in remove_handler, the fileno is irrelevant. First remove the + # connection entirely from the reactor, then add it back depending on + # the event state. + self.reactor.removeReader(self.connection) + self.reactor.removeWriter(self.connection) + + if event_state & self.connection.READ: + self.reactor.addReader(self.connection) + + if event_state & self.connection.WRITE: + self.reactor.addWriter(self.connection) + + +class TwistedConnection(base_connection.BaseConnection): + """A standard Pika connection adapter. 
You instantiate the class passing the + connection parameters and the connected callback and when it gets called + you can start using it. + + The problem is that connection establishing is done using the blocking + socket module. For instance, if the host you are connecting to is behind a + misconfigured firewall that just drops packets, the whole process will + freeze until the connection timeout passes. To work around that problem, + use TwistedProtocolConnection, but read its docstring first. + + Objects of this class get put in the Twisted reactor which will notify them + when the socket connection becomes readable or writable, so apart from + implementing the BaseConnection interface, they also provide Twisted's + IReadWriteDescriptor interface. + + """ + + def __init__(self, + parameters=None, + on_open_callback=None, + on_open_error_callback=None, + on_close_callback=None, + stop_ioloop_on_close=False): + super(TwistedConnection, self).__init__( + parameters=parameters, + on_open_callback=on_open_callback, + on_open_error_callback=on_open_error_callback, + on_close_callback=on_close_callback, + ioloop=IOLoopReactorAdapter(self, reactor), + stop_ioloop_on_close=stop_ioloop_on_close) + + def _adapter_connect(self): + """Connect to the RabbitMQ broker""" + # Connect (blockignly!) to the server + error = super(TwistedConnection, self)._adapter_connect() + if not error: + # Set the I/O events we're waiting for (see IOLoopReactorAdapter + # docstrings for why it's OK to pass None as the file descriptor) + self.ioloop.update_handler(None, self.event_state) + return error + + def _adapter_disconnect(self): + """Called when the adapter should disconnect""" + self.ioloop.remove_handler(None) + self._cleanup_socket() + + def _on_connected(self): + """Call superclass and then update the event state to flush the outgoing + frame out. Commit 50d842526d9f12d32ad9f3c4910ef60b8c301f59 removed a + self._flush_outbound call that was in _send_frame which previously + made this step unnecessary. + + """ + super(TwistedConnection, self)._on_connected() + self._manage_event_state() + + def channel(self, channel_number=None): + """Return a Deferred that fires with an instance of a wrapper around the + Pika Channel class. + + """ + d = defer.Deferred() + base_connection.BaseConnection.channel(self, d.callback, channel_number) + return d.addCallback(TwistedChannel) + + # IReadWriteDescriptor methods + + def fileno(self): + return self.socket.fileno() + + def logPrefix(self): + return "twisted-pika" + + def connectionLost(self, reason): + # If the connection was not closed cleanly, log the error + if not reason.check(error.ConnectionDone): + log.err(reason) + + self._on_terminate(connection.InternalCloseReasons.SOCKET_ERROR, + str(reason)) + + def doRead(self): + self._handle_read() + + def doWrite(self): + self._handle_write() + self._manage_event_state() + + +class TwistedProtocolConnection(base_connection.BaseConnection): + """A hybrid between a Pika Connection and a Twisted Protocol. Allows using + Twisted's non-blocking connectTCP/connectSSL methods for connecting to the + server. + + It has one caveat: TwistedProtocolConnection objects have a ready + instance variable that's a Deferred which fires when the connection is + ready to be used (the initial AMQP handshaking has been done). You *have* + to wait for this Deferred to fire before requesting a channel. + + Since it's Twisted handling connection establishing it does not accept + connect callbacks, you have to implement that within Twisted. 
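+
+    A connection sketch (illustrative; error handling omitted, and the
+    ClientCreator pattern is one common way to build the protocol):
+
+    ```
+    from twisted.internet import protocol, reactor
+
+    cc = protocol.ClientCreator(
+        reactor, TwistedProtocolConnection, pika.ConnectionParameters())
+    d = cc.connectTCP('localhost', 5672)
+    d.addCallback(lambda connection: connection.ready)  # handshake done
+    d.addCallback(lambda connection: connection.channel())
+    ```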
Also remember + that the host, port and ssl values of the connection parameters are ignored + because, yet again, it's Twisted who manages the connection. + + """ + + def __init__(self, parameters=None, on_close_callback=None): + self.ready = defer.Deferred() + super(TwistedProtocolConnection, self).__init__( + parameters=parameters, + on_open_callback=self.connectionReady, + on_open_error_callback=self.connectionFailed, + on_close_callback=on_close_callback, + ioloop=IOLoopReactorAdapter(self, reactor), + stop_ioloop_on_close=False) + + def connect(self): + # The connection is open asynchronously by Twisted, so skip the whole + # connect() part, except for setting the connection state + self._set_connection_state(self.CONNECTION_INIT) + + def _adapter_connect(self): + # Should never be called, as we override connect() and leave the + # building of a TCP connection to Twisted, but implement anyway to keep + # the interface + return False + + def _adapter_disconnect(self): + # Disconnect from the server + self.transport.loseConnection() + + def _flush_outbound(self): + """Override BaseConnection._flush_outbound to send all bufferred data + the Twisted way, by writing to the transport. No need for buffering, + Twisted handles that for us. + """ + while self.outbound_buffer: + self.transport.write(self.outbound_buffer.popleft()) + + def channel(self, channel_number=None): + """Create a new channel with the next available channel number or pass + in a channel number to use. Must be non-zero if you would like to + specify but it is recommended that you let Pika manage the channel + numbers. + + Return a Deferred that fires with an instance of a wrapper around the + Pika Channel class. + + :param int channel_number: The channel number to use, defaults to the + next available. + + """ + d = defer.Deferred() + base_connection.BaseConnection.channel(self, d.callback, channel_number) + return d.addCallback(TwistedChannel) + + # IProtocol methods + + def dataReceived(self, data): + # Pass the bytes to Pika for parsing + self._on_data_available(data) + + def connectionLost(self, reason): + # Let the caller know there's been an error + d, self.ready = self.ready, None + if d: + d.errback(reason) + + def makeConnection(self, transport): + self.transport = transport + self.connectionMade() + + def connectionMade(self): + # Tell everyone we're connected + self._on_connected() + + # Our own methods + + def connectionReady(self, res): + d, self.ready = self.ready, None + if d: + d.callback(res) + + def connectionFailed(self, connection_unused, error_message=None): + d, self.ready = self.ready, None + if d: + attempts = self.params.connection_attempts + exc = exceptions.AMQPConnectionError(attempts) + d.errback(exc) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/amqp_object.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/amqp_object.py new file mode 100644 index 000000000..576a2c412 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/amqp_object.py @@ -0,0 +1,66 @@ +"""Base classes that are extended by low level AMQP frames and higher level +AMQP classes and methods. + +""" + + +class AMQPObject(object): + """Base object that is extended by AMQP low level frames and AMQP classes + and methods. 
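+
+    For example, a hypothetical subclass with NAME = 'Example' whose instance
+    sets self.value = 1 is rendered by __repr__ as <Example(['value=1'])>.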
+ + """ + NAME = 'AMQPObject' + INDEX = None + + def __repr__(self): + items = list() + for key, value in self.__dict__.items(): + if getattr(self.__class__, key, None) != value: + items.append('%s=%s' % (key, value)) + if not items: + return "<%s>" % self.NAME + return "<%s(%s)>" % (self.NAME, sorted(items)) + + +class Class(AMQPObject): + """Is extended by AMQP classes""" + NAME = 'Unextended Class' + + +class Method(AMQPObject): + """Is extended by AMQP methods""" + NAME = 'Unextended Method' + synchronous = False + + def _set_content(self, properties, body): + """If the method is a content frame, set the properties and body to + be carried as attributes of the class. + + :param pika.frame.Properties properties: AMQP Basic Properties + :param body: The message body + :type body: str or unicode + + """ + self._properties = properties + self._body = body + + def get_properties(self): + """Return the properties if they are set. + + :rtype: pika.frame.Properties + + """ + return self._properties + + def get_body(self): + """Return the message body if it is set. + + :rtype: str|unicode + + """ + return self._body + + +class Properties(AMQPObject): + """Class to encompass message properties (AMQP Basic.Properties)""" + NAME = 'Unextended Properties' diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/callback.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/callback.py new file mode 100644 index 000000000..6ac58bd95 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/callback.py @@ -0,0 +1,410 @@ +"""Callback management class, common area for keeping track of all callbacks in +the Pika stack. + +""" +import functools +import logging + +from pika import frame +from pika import amqp_object +from pika.compat import xrange, canonical_str + +LOGGER = logging.getLogger(__name__) + + +def name_or_value(value): + """Will take Frame objects, classes, etc and attempt to return a valid + string identifier for them. + + :param value: The value to sanitize + :type value: pika.amqp_object.AMQPObject|pika.frame.Frame|int|unicode|str + :rtype: str + + """ + # Is it subclass of AMQPObject + try: + if issubclass(value, amqp_object.AMQPObject): + return value.NAME + except TypeError: + pass + + # Is it a Pika frame object? + if isinstance(value, frame.Method): + return value.method.NAME + + # Is it a Pika frame object (go after Method since Method extends this) + if isinstance(value, amqp_object.AMQPObject): + return value.NAME + + # Cast the value to a str (python 2 and python 3); encoding as UTF-8 on Python 2 + return canonical_str(value) + + +def sanitize_prefix(function): + """Automatically call name_or_value on the prefix passed in.""" + + @functools.wraps(function) + def wrapper(*args, **kwargs): + args = list(args) + offset = 1 + if 'prefix' in kwargs: + kwargs['prefix'] = name_or_value(kwargs['prefix']) + elif len(args) - 1 >= offset: + args[offset] = name_or_value(args[offset]) + offset += 1 + if 'key' in kwargs: + kwargs['key'] = name_or_value(kwargs['key']) + elif len(args) - 1 >= offset: + args[offset] = name_or_value(args[offset]) + + return function(*tuple(args), **kwargs) + + return wrapper + + +def check_for_prefix_and_key(function): + """Automatically return false if the key or prefix is not in the callbacks + for the instance. 
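+
+    For example, decorated CallbackManager methods short-circuit instead of
+    raising KeyError when the prefix or key is unknown (illustrative):
+
+    ```
+    cm = CallbackManager()
+    cm.process('1', 'unknown-key', None)  # returns False
+    ```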
+ + """ + + @functools.wraps(function) + def wrapper(*args, **kwargs): + offset = 1 + # Sanitize the prefix + if 'prefix' in kwargs: + prefix = name_or_value(kwargs['prefix']) + else: + prefix = name_or_value(args[offset]) + offset += 1 + + # Make sure to sanitize the key as well + if 'key' in kwargs: + key = name_or_value(kwargs['key']) + else: + key = name_or_value(args[offset]) + + # Make sure prefix and key are in the stack + if prefix not in args[0]._stack or key not in args[0]._stack[prefix]: + return False + + # Execute the method + return function(*args, **kwargs) + + return wrapper + + +class CallbackManager(object): + """CallbackManager is a global callback system designed to be a single place + where Pika can manage callbacks and process them. It should be referenced + by the CallbackManager.instance() method instead of constructing new + instances of it. + + """ + CALLS = 'calls' + ARGUMENTS = 'arguments' + DUPLICATE_WARNING = 'Duplicate callback found for "%s:%s"' + CALLBACK = 'callback' + ONE_SHOT = 'one_shot' + ONLY_CALLER = 'only' + + def __init__(self): + """Create an instance of the CallbackManager""" + self._stack = dict() + + @sanitize_prefix + def add(self, prefix, key, callback, + one_shot=True, + only_caller=None, + arguments=None): + """Add a callback to the stack for the specified key. If the call is + specified as one_shot, it will be removed after being fired + + The prefix is usually the channel number but the class is generic + and prefix and key may be any value. If you pass in only_caller + CallbackManager will restrict processing of the callback to only + the calling function/object that you specify. + + :param prefix: Categorize the callback + :type prefix: str or int + :param key: The key for the callback + :type key: object or str or dict + :param method callback: The callback to call + :param bool one_shot: Remove this callback after it is called + :param object only_caller: Only allow one_caller value to call the + event that fires the callback. + :param dict arguments: Arguments to validate when processing + :rtype: tuple(prefix, key) + + """ + # Prep the stack + if prefix not in self._stack: + self._stack[prefix] = dict() + + if key not in self._stack[prefix]: + self._stack[prefix][key] = list() + + # Check for a duplicate + for callback_dict in self._stack[prefix][key]: + if (callback_dict[self.CALLBACK] == callback and + callback_dict[self.ARGUMENTS] == arguments and + callback_dict[self.ONLY_CALLER] == only_caller): + if callback_dict[self.ONE_SHOT] is True: + callback_dict[self.CALLS] += 1 + LOGGER.debug('Incremented callback reference counter: %r', + callback_dict) + else: + LOGGER.warning(self.DUPLICATE_WARNING, prefix, key) + return prefix, key + + # Create the callback dictionary + callback_dict = self._callback_dict(callback, one_shot, only_caller, + arguments) + self._stack[prefix][key].append(callback_dict) + LOGGER.debug('Added: %r', callback_dict) + return prefix, key + + def clear(self): + """Clear all the callbacks if there are any defined.""" + self._stack = dict() + LOGGER.debug('Callbacks cleared') + + @sanitize_prefix + def cleanup(self, prefix): + """Remove all callbacks from the stack by a prefix. 
Returns True + if keys were there to be removed + + :param str or int prefix: The prefix for keeping track of callbacks with + :rtype: bool + + """ + LOGGER.debug('Clearing out %r from the stack', prefix) + if prefix not in self._stack or not self._stack[prefix]: + return False + del self._stack[prefix] + return True + + @sanitize_prefix + def pending(self, prefix, key): + """Return count of callbacks for a given prefix or key or None + + :param prefix: Categorize the callback + :type prefix: str or int + :param key: The key for the callback + :type key: object or str or dict + :rtype: None or int + + """ + if not prefix in self._stack or not key in self._stack[prefix]: + return None + return len(self._stack[prefix][key]) + + @sanitize_prefix + @check_for_prefix_and_key + def process(self, prefix, key, caller, *args, **keywords): + """Run through and process all the callbacks for the specified keys. + Caller should be specified at all times so that callbacks which + require a specific function to call CallbackManager.process will + not be processed. + + :param prefix: Categorize the callback + :type prefix: str or int + :param key: The key for the callback + :type key: object or str or dict + :param object caller: Who is firing the event + :param list args: Any optional arguments + :param dict keywords: Optional keyword arguments + :rtype: bool + + """ + LOGGER.debug('Processing %s:%s', prefix, key) + if prefix not in self._stack or key not in self._stack[prefix]: + return False + + callbacks = list() + # Check each callback, append it to the list if it should be called + for callback_dict in list(self._stack[prefix][key]): + if self._should_process_callback(callback_dict, caller, list(args)): + callbacks.append(callback_dict[self.CALLBACK]) + if callback_dict[self.ONE_SHOT]: + self._use_one_shot_callback(prefix, key, callback_dict) + + # Call each callback + for callback in callbacks: + LOGGER.debug('Calling %s for "%s:%s"', callback, prefix, key) + try: + callback(*args, **keywords) + except: + LOGGER.exception('Calling %s for "%s:%s" failed', callback, + prefix, key) + raise + return True + + @sanitize_prefix + @check_for_prefix_and_key + def remove(self, prefix, key, callback_value=None, arguments=None): + """Remove a callback from the stack by prefix, key and optionally + the callback itself. If you only pass in prefix and key, all + callbacks for that prefix and key will be removed. + + :param str or int prefix: The prefix for keeping track of callbacks with + :param str key: The callback key + :param method callback_value: The method defined to call on callback + :param dict arguments: Optional arguments to check + :rtype: bool + + """ + if callback_value: + offsets_to_remove = list() + for offset in xrange(len(self._stack[prefix][key]), 0, -1): + callback_dict = self._stack[prefix][key][offset - 1] + + if (callback_dict[self.CALLBACK] == callback_value and + self._arguments_match(callback_dict, [arguments])): + offsets_to_remove.append(offset - 1) + + for offset in offsets_to_remove: + try: + LOGGER.debug('Removing callback #%i: %r', offset, + self._stack[prefix][key][offset]) + del self._stack[prefix][key][offset] + except KeyError: + pass + + self._cleanup_callback_dict(prefix, key) + return True + + @sanitize_prefix + @check_for_prefix_and_key + def remove_all(self, prefix, key): + """Remove all callbacks for the specified prefix and key. 
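+
+        For example (illustrative; the prefix is typically a channel number):
+
+        ```
+        callbacks.remove_all('1', spec.Basic.CancelOk)
+        ```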
+ + :param str prefix: The prefix for keeping track of callbacks with + :param str key: The callback key + + """ + del self._stack[prefix][key] + self._cleanup_callback_dict(prefix, key) + + def _arguments_match(self, callback_dict, args): + """Validate if the arguments passed in match the expected arguments in + the callback_dict. We expect this to be a frame passed in to *args for + process or passed in as a list from remove. + + :param dict callback_dict: The callback dictionary to evaluate against + :param list args: The arguments passed in as a list + + """ + if callback_dict[self.ARGUMENTS] is None: + return True + if not args: + return False + if isinstance(args[0], dict): + return self._dict_arguments_match(args[0], + callback_dict[self.ARGUMENTS]) + return self._obj_arguments_match(args[0].method + if hasattr(args[0], 'method') else + args[0], callback_dict[self.ARGUMENTS]) + + def _callback_dict(self, callback, one_shot, only_caller, arguments): + """Return the callback dictionary. + + :param method callback: The callback to call + :param bool one_shot: Remove this callback after it is called + :param object only_caller: Only allow one_caller value to call the + event that fires the callback. + :rtype: dict + + """ + value = { + self.CALLBACK: callback, + self.ONE_SHOT: one_shot, + self.ONLY_CALLER: only_caller, + self.ARGUMENTS: arguments + } + if one_shot: + value[self.CALLS] = 1 + return value + + def _cleanup_callback_dict(self, prefix, key=None): + """Remove empty dict nodes in the callback stack. + + :param str or int prefix: The prefix for keeping track of callbacks with + :param str key: The callback key + + """ + if key and key in self._stack[prefix] and not self._stack[prefix][key]: + del self._stack[prefix][key] + if prefix in self._stack and not self._stack[prefix]: + del self._stack[prefix] + + @staticmethod + def _dict_arguments_match(value, expectation): + """Checks an dict to see if it has attributes that meet the expectation. + + :param dict value: The dict to evaluate + :param dict expectation: The values to check against + :rtype: bool + + """ + LOGGER.debug('Comparing %r to %r', value, expectation) + for key in expectation: + if value.get(key) != expectation[key]: + LOGGER.debug('Values in dict do not match for %s', key) + return False + return True + + @staticmethod + def _obj_arguments_match(value, expectation): + """Checks an object to see if it has attributes that meet the + expectation. + + :param object value: The object to evaluate + :param dict expectation: The values to check against + :rtype: bool + + """ + for key in expectation: + if not hasattr(value, key): + LOGGER.debug('%r does not have required attribute: %s', + type(value), key) + return False + if getattr(value, key) != expectation[key]: + LOGGER.debug('Values in %s do not match for %s', type(value), + key) + return False + return True + + def _should_process_callback(self, callback_dict, caller, args): + """Returns True if the callback should be processed. 
+ + :param dict callback_dict: The callback configuration + :param object caller: Who is firing the event + :param list args: Any optional arguments + :rtype: bool + + """ + if not self._arguments_match(callback_dict, args): + LOGGER.debug('Arguments do not match for %r, %r', callback_dict, + args) + return False + return (callback_dict[self.ONLY_CALLER] is None or + (callback_dict[self.ONLY_CALLER] and + callback_dict[self.ONLY_CALLER] == caller)) + + def _use_one_shot_callback(self, prefix, key, callback_dict): + """Process the one-shot callback, decrementing the use counter and + removing it from the stack if it's now been fully used. + + :param str or int prefix: The prefix for keeping track of callbacks with + :param str key: The callback key + :param dict callback_dict: The callback dict to process + + """ + LOGGER.debug('Processing use of oneshot callback') + callback_dict[self.CALLS] -= 1 + LOGGER.debug('%i registered uses left', callback_dict[self.CALLS]) + + if callback_dict[self.CALLS] <= 0: + self.remove(prefix, key, callback_dict[self.CALLBACK], + callback_dict[self.ARGUMENTS]) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/channel.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/channel.py new file mode 100644 index 000000000..745e47bdd --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/channel.py @@ -0,0 +1,1436 @@ +"""The Channel class provides a wrapper for interacting with RabbitMQ +implementing the methods and behaviors for an AMQP Channel. + +""" + +import collections +import logging +import uuid + +import pika.frame as frame +import pika.exceptions as exceptions +import pika.spec as spec +from pika.utils import is_callable +from pika.compat import unicode_type, dictkeys, is_integer + + +LOGGER = logging.getLogger(__name__) + +MAX_CHANNELS = 65535 # per AMQP 0.9.1 spec. + + +class Channel(object): + """A Channel is the primary communication method for interacting with + RabbitMQ. It is recommended that you do not directly invoke + the creation of a channel object in your application code but rather + construct the a channel by calling the active connection's channel() + method. 
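+
+    For example (sketch; assumes an already-open connection):
+
+    ```
+    def on_channel_open(channel):
+        channel.basic_publish('', 'example-queue', 'hello')
+
+    connection.channel(on_channel_open)
+    ```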
+ + """ + + # Disable pyling messages concerning "method could be a function" + # pylint: disable=R0201 + + CLOSED = 0 + OPENING = 1 + OPEN = 2 + CLOSING = 3 # client-initiated close in progress + + _STATE_NAMES = { + CLOSED: 'CLOSED', + OPENING: 'OPENING', + OPEN: 'OPEN', + CLOSING: 'CLOSING' + } + + _ON_CHANNEL_CLEANUP_CB_KEY = '_on_channel_cleanup' + + def __init__(self, connection, channel_number, on_open_callback): + """Create a new instance of the Channel + + :param pika.connection.Connection connection: The connection + :param int channel_number: The channel number for this instance + :param callable on_open_callback: The callback to call on channel open + + """ + if not isinstance(channel_number, int): + raise exceptions.InvalidChannelNumber + self.channel_number = channel_number + self.callbacks = connection.callbacks + self.connection = connection + + # Initially, flow is assumed to be active + self.flow_active = True + + self._content_assembler = ContentFrameAssembler() + + self._blocked = collections.deque(list()) + self._blocking = None + self._has_on_flow_callback = False + self._cancelled = set() + self._consumers = dict() + self._consumers_with_noack = set() + self._on_flowok_callback = None + self._on_getok_callback = None + self._on_openok_callback = on_open_callback + self._state = self.CLOSED + + # We save the closing reason code and text to be passed to + # on-channel-close callback at closing of the channel. Channel.close + # stores the given reply_code/reply_text if the channel was in OPEN or + # OPENING states. An incoming Channel.Close AMQP method from broker will + # override this value. And a sudden loss of connection has the highest + # prececence to override it. + self._closing_code_and_text = (0, '') + + # opaque cookie value set by wrapper layer (e.g., BlockingConnection) + # via _set_cookie + self._cookie = None + + def __int__(self): + """Return the channel object as its channel number + + :rtype: int + + """ + return self.channel_number + + def __repr__(self): + return '<%s number=%s %s conn=%r>' % (self.__class__.__name__, + self.channel_number, + self._STATE_NAMES[self._state], + self.connection) + + def add_callback(self, callback, replies, one_shot=True): + """Pass in a callback handler and a list replies from the + RabbitMQ broker which you'd like the callback notified of. Callbacks + should allow for the frame parameter to be passed in. + + :param callable callback: The callback to call + :param list replies: The replies to get a callback for + :param bool one_shot: Only handle the first type callback + + """ + for reply in replies: + self.callbacks.add(self.channel_number, reply, callback, one_shot) + + def add_on_cancel_callback(self, callback): + """Pass a callback function that will be called when the basic_cancel + is sent by the server. The callback function should receive a frame + parameter. + + :param callable callback: The callback to call on Basic.Cancel from + broker + + """ + self.callbacks.add(self.channel_number, spec.Basic.Cancel, callback, + False) + + def add_on_close_callback(self, callback): + """Pass a callback function that will be called when the channel is + closed. The callback function will receive the channel, the + reply_code (int) and the reply_text (string) describing why the channel was + closed. + + If the channel is closed by broker via Channel.Close, the callback will + receive the reply_code/reply_text provided by the broker. 
+ + If channel closing is initiated by user (either directly of indirectly + by closing a connection containing the channel) and closing + concludes gracefully without Channel.Close from the broker and without + loss of connection, the callback will receive 0 as reply_code and empty + string as reply_text. + + If channel was closed due to loss of connection, the callback will + receive reply_code and reply_text representing the loss of connection. + + :param callable callback: The callback, having the signature: + callback(Channel, int reply_code, str reply_text) + + """ + self.callbacks.add(self.channel_number, '_on_channel_close', callback, + False, self) + + def add_on_flow_callback(self, callback): + """Pass a callback function that will be called when Channel.Flow is + called by the remote server. Note that newer versions of RabbitMQ + will not issue this but instead use TCP backpressure + + :param callable callback: The callback function + + """ + self._has_on_flow_callback = True + self.callbacks.add(self.channel_number, spec.Channel.Flow, callback, + False) + + def add_on_return_callback(self, callback): + """Pass a callback function that will be called when basic_publish as + sent a message that has been rejected and returned by the server. + + :param callable callback: The function to call, having the signature + callback(channel, method, properties, body) + where + channel: pika.Channel + method: pika.spec.Basic.Return + properties: pika.spec.BasicProperties + body: str, unicode, or bytes (python 3.x) + + """ + self.callbacks.add(self.channel_number, '_on_return', callback, False) + + def basic_ack(self, delivery_tag=0, multiple=False): + """Acknowledge one or more messages. When sent by the client, this + method acknowledges one or more messages delivered via the Deliver or + Get-Ok methods. When sent by server, this method acknowledges one or + more messages published with the Publish method on a channel in + confirm mode. The acknowledgement can be for a single message or a + set of messages up to and including a specific message. + + :param integer delivery_tag: int/long The server-assigned delivery tag + :param bool multiple: If set to True, the delivery tag is treated as + "up to and including", so that multiple messages + can be acknowledged with a single method. If set + to False, the delivery tag refers to a single + message. If the multiple field is 1, and the + delivery tag is zero, this indicates + acknowledgement of all outstanding messages. + """ + if not self.is_open: + raise exceptions.ChannelClosed() + return self._send_method(spec.Basic.Ack(delivery_tag, multiple)) + + def basic_cancel(self, callback=None, consumer_tag='', nowait=False): + """This method cancels a consumer. This does not affect already + delivered messages, but it does mean the server will not send any more + messages for that consumer. The client may receive an arbitrary number + of messages in between sending the cancel method and receiving the + cancel-ok reply. It may also be sent from the server to the client in + the event of the consumer being unexpectedly cancelled (i.e. cancelled + for any reason other than the server receiving the corresponding + basic.cancel from the client). This allows clients to be notified of + the loss of consumers due to events such as queue deletion. + + :param callable callback: Callback to call for a Basic.CancelOk + response; MUST be None when nowait=True. MUST be callable when + nowait=False. 
+ :param str consumer_tag: Identifier for the consumer + :param bool nowait: Do not expect a Basic.CancelOk response + + :raises ValueError: + + """ + self._validate_channel_and_callback(callback) + + if nowait: + if callback is not None: + raise ValueError( + 'Completion callback must be None when nowait=True') + else: + if callback is None: + raise ValueError( + 'Must have completion callback with nowait=False') + + if consumer_tag in self._cancelled: + # We check for cancelled first, because basic_cancel removes + # consumers closed with nowait from self._consumers + LOGGER.warning('basic_cancel - consumer is already cancelling: %s', + consumer_tag) + return + + if consumer_tag not in self._consumers: + # Could be cancelled by user or broker earlier + LOGGER.warning('basic_cancel - consumer not found: %s', + consumer_tag) + return + + LOGGER.debug('Cancelling consumer: %s (nowait=%s)', + consumer_tag, nowait) + + if nowait: + # This is our last opportunity while the channel is open to remove + # this consumer callback and help gc; unfortunately, this consumer's + # self._cancelled and self._consumers_with_noack (if any) entries + # will persist until the channel is closed. + del self._consumers[consumer_tag] + + if callback is not None: + if nowait: + raise ValueError('Cannot pass a callback if nowait is True') + self.callbacks.add(self.channel_number, spec.Basic.CancelOk, + callback) + + self._cancelled.add(consumer_tag) + + self._rpc(spec.Basic.Cancel(consumer_tag=consumer_tag, nowait=nowait), + self._on_cancelok if not nowait else None, + [(spec.Basic.CancelOk, {'consumer_tag': consumer_tag})] if + nowait is False else []) + + def basic_consume(self, consumer_callback, + queue='', + no_ack=False, + exclusive=False, + consumer_tag=None, + arguments=None): + """Sends the AMQP 0-9-1 command Basic.Consume to the broker and binds messages + for the consumer_tag to the consumer callback. If you do not pass in + a consumer_tag, one will be automatically generated for you. Returns + the consumer tag. 
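+
+        For example (sketch; the queue name is illustrative):
+
+        ```
+        def on_message(channel, method, properties, body):
+            channel.basic_ack(delivery_tag=method.delivery_tag)
+
+        consumer_tag = channel.basic_consume(on_message, queue='example')
+        ```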
+ + For more information on basic_consume, see: + Tutorial 2 at http://www.rabbitmq.com/getstarted.html + http://www.rabbitmq.com/confirms.html + http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.consume + + + :param callable consumer_callback: The function to call when consuming + with the signature consumer_callback(channel, method, properties, + body), where + channel: pika.Channel + method: pika.spec.Basic.Deliver + properties: pika.spec.BasicProperties + body: str, unicode, or bytes (python 3.x) + + :param queue: The queue to consume from + :type queue: str or unicode + :param bool no_ack: if set to True, automatic acknowledgement mode will be used + (see http://www.rabbitmq.com/confirms.html) + :param bool exclusive: Don't allow other consumers on the queue + :param consumer_tag: Specify your own consumer tag + :type consumer_tag: str or unicode + :param dict arguments: Custom key/value pair arguments for the consumer + :rtype: str + + """ + self._validate_channel_and_callback(consumer_callback) + + # If a consumer tag was not passed, create one + if not consumer_tag: + consumer_tag = self._generate_consumer_tag() + + if consumer_tag in self._consumers or consumer_tag in self._cancelled: + raise exceptions.DuplicateConsumerTag(consumer_tag) + + if no_ack: + self._consumers_with_noack.add(consumer_tag) + + self._consumers[consumer_tag] = consumer_callback + self._rpc(spec.Basic.Consume(queue=queue, + consumer_tag=consumer_tag, + no_ack=no_ack, + exclusive=exclusive, + arguments=arguments or dict()), + self._on_eventok, [(spec.Basic.ConsumeOk, + {'consumer_tag': consumer_tag})]) + + return consumer_tag + + def _generate_consumer_tag(self): + """Generate a consumer tag + + NOTE: this protected method may be called by derived classes + + :returns: consumer tag + :rtype: str + """ + return 'ctag%i.%s' % (self.channel_number, + uuid.uuid4().hex) + + def basic_get(self, callback=None, queue='', no_ack=False): + """Get a single message from the AMQP broker. If you want to + be notified of Basic.GetEmpty, use the Channel.add_callback method + adding your Basic.GetEmpty callback which should expect only one + parameter, frame. Due to implementation details, this cannot be called + a second time until the callback is executed. For more information on + basic_get and its parameters, see: + + http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.get + + :param callable callback: The callback to call with a message that has + the signature callback(channel, method, properties, body), where: + channel: pika.Channel + method: pika.spec.Basic.GetOk + properties: pika.spec.BasicProperties + body: str, unicode, or bytes (python 3.x) + :param queue: The queue to get a message from + :type queue: str or unicode + :param bool no_ack: Tell the broker to not expect a reply + + """ + self._validate_channel_and_callback(callback) + # TODO Is basic_get meaningful when callback is None? + if self._on_getok_callback is not None: + raise exceptions.DuplicateGetOkCallback() + self._on_getok_callback = callback + # TODO Strangely, not using _rpc for the synchronous Basic.Get. Would + # need to extend _rpc to handle Basic.GetOk method, header, and body + # frames (or similar) + self._send_method(spec.Basic.Get(queue=queue, no_ack=no_ack)) + + def basic_nack(self, delivery_tag=None, multiple=False, requeue=True): + """This method allows a client to reject one or more incoming messages. + It can be used to interrupt and cancel large incoming messages, or + return untreatable messages to their original queue. 
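+
+        For example, from within a consumer callback (illustrative):
+
+        ```
+        channel.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
+        ```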
+ + :param integer delivery-tag: int/long The server-assigned delivery tag + :param bool multiple: If set to True, the delivery tag is treated as + "up to and including", so that multiple messages + can be acknowledged with a single method. If set + to False, the delivery tag refers to a single + message. If the multiple field is 1, and the + delivery tag is zero, this indicates + acknowledgement of all outstanding messages. + :param bool requeue: If requeue is true, the server will attempt to + requeue the message. If requeue is false or the + requeue attempt fails the messages are discarded or + dead-lettered. + + """ + if not self.is_open: + raise exceptions.ChannelClosed() + return self._send_method(spec.Basic.Nack(delivery_tag, multiple, + requeue)) + + def basic_publish(self, exchange, routing_key, body, + properties=None, + mandatory=False, + immediate=False): + """Publish to the channel with the given exchange, routing key and body. + For more information on basic_publish and what the parameters do, see: + + http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish + + :param exchange: The exchange to publish to + :type exchange: str or unicode + :param routing_key: The routing key to bind on + :type routing_key: str or unicode + :param body: The message body + :type body: str or unicode + :param pika.spec.BasicProperties properties: Basic.properties + :param bool mandatory: The mandatory flag + :param bool immediate: The immediate flag + + """ + if not self.is_open: + raise exceptions.ChannelClosed() + if immediate: + LOGGER.warning('The immediate flag is deprecated in RabbitMQ') + if isinstance(body, unicode_type): + body = body.encode('utf-8') + properties = properties or spec.BasicProperties() + self._send_method(spec.Basic.Publish(exchange=exchange, + routing_key=routing_key, + mandatory=mandatory, + immediate=immediate), + (properties, body)) + + def basic_qos(self, + callback=None, + prefetch_size=0, + prefetch_count=0, + all_channels=False): + """Specify quality of service. This method requests a specific quality + of service. The QoS can be specified for the current channel or for all + channels on the connection. The client can request that messages be sent + in advance so that when the client finishes processing a message, the + following message is already held locally, rather than needing to be + sent down the channel. Prefetching gives a performance improvement. + + :param callable callback: The callback to call for Basic.QosOk response + :param int prefetch_size: This field specifies the prefetch window + size. The server will send a message in + advance if it is equal to or smaller in size + than the available prefetch size (and also + falls into other prefetch limits). May be set + to zero, meaning "no specific limit", + although other prefetch limits may still + apply. The prefetch-size is ignored if the + no-ack option is set. + :param int prefetch_count: Specifies a prefetch window in terms of whole + messages. This field may be used in + combination with the prefetch-size field; a + message will only be sent in advance if both + prefetch windows (and those at the channel + and connection level) allow it. The + prefetch-count is ignored if the no-ack + option is set. 
+ :param bool all_channels: Should the QoS apply to all channels + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Basic.Qos(prefetch_size, prefetch_count, + all_channels), + callback, [spec.Basic.QosOk]) + + def basic_reject(self, delivery_tag, requeue=True): + """Reject an incoming message. This method allows a client to reject a + message. It can be used to interrupt and cancel large incoming messages, + or return untreatable messages to their original queue. + + :param integer delivery-tag: int/long The server-assigned delivery tag + :param bool requeue: If requeue is true, the server will attempt to + requeue the message. If requeue is false or the + requeue attempt fails the messages are discarded or + dead-lettered. + :raises: TypeError + + """ + if not self.is_open: + raise exceptions.ChannelClosed() + if not is_integer(delivery_tag): + raise TypeError('delivery_tag must be an integer') + return self._send_method(spec.Basic.Reject(delivery_tag, requeue)) + + def basic_recover(self, callback=None, requeue=False): + """This method asks the server to redeliver all unacknowledged messages + on a specified channel. Zero or more messages may be redelivered. This + method replaces the asynchronous Recover. + + :param callable callback: Callback to call when receiving + Basic.RecoverOk + :param bool requeue: If False, the message will be redelivered to the + original recipient. If True, the server will + attempt to requeue the message, potentially then + delivering it to an alternative subscriber. + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Basic.Recover(requeue), callback, + [spec.Basic.RecoverOk]) + + def close(self, reply_code=0, reply_text="Normal shutdown"): + """Invoke a graceful shutdown of the channel with the AMQP Broker. + + If channel is OPENING, transition to CLOSING and suppress the incoming + Channel.OpenOk, if any. + + :param int reply_code: The reason code to send to broker + :param str reply_text: The reason text to send to broker + + :raises ChannelClosed: if channel is already closed + :raises ChannelAlreadyClosing: if channel is already closing + """ + if self.is_closed: + # Whoever is calling `close` might expect the on-channel-close-cb + # to be called, which won't happen when it's already closed + raise exceptions.ChannelClosed('Already closed: %s' % self) + + if self.is_closing: + # Whoever is calling `close` might expect their reply_code and + # reply_text to be sent to broker, which won't happen if we're + # already closing. + raise exceptions.ChannelAlreadyClosing('Already closing: %s' % self) + + # If channel is OPENING, we will transition it to CLOSING state, + # causing the _on_openok method to suppress the OPEN state transition + # and the on-channel-open-callback + + LOGGER.info('Closing channel (%s): %r on %s', + reply_code, reply_text, self) + + for consumer_tag in dictkeys(self._consumers): + if consumer_tag not in self._cancelled: + self.basic_cancel(consumer_tag=consumer_tag, nowait=True) + + # Change state after cancelling consumers to avoid ChannelClosed + # exception from basic_cancel + self._set_state(self.CLOSING) + + self._rpc(spec.Channel.Close(reply_code, reply_text, 0, 0), + self._on_closeok, [spec.Channel.CloseOk]) + + def confirm_delivery(self, callback=None, nowait=False): + """Turn on Confirm mode in the channel. 
Pass in a callback to be + notified by the Broker when a message has been confirmed as received or + rejected (Basic.Ack, Basic.Nack) from the broker to the publisher. + + For more information see: + http://www.rabbitmq.com/extensions.html#confirms + + :param callable callback: The callback for delivery confirmations that + has the following signature: callback(pika.frame.Method), where + method_frame contains either method `spec.Basic.Ack` or + `spec.Basic.Nack`. + :param bool nowait: Do not send a reply frame (Confirm.SelectOk) + + """ + self._validate_channel_and_callback(callback) + + # TODO confirm_deliver should require a callback; it's meaningless + # without a user callback to receieve Basic.Ack/Basic.Nack notifications + + if not (self.connection.publisher_confirms and + self.connection.basic_nack): + raise exceptions.MethodNotImplemented('Not Supported on Server') + + # Add the ack and nack callbacks + if callback is not None: + self.callbacks.add(self.channel_number, spec.Basic.Ack, callback, + False) + self.callbacks.add(self.channel_number, spec.Basic.Nack, callback, + False) + + # Send the RPC command + self._rpc(spec.Confirm.Select(nowait), + self._on_selectok if not nowait else None, + [spec.Confirm.SelectOk] if nowait is False else []) + + @property + def consumer_tags(self): + """Property method that returns a list of currently active consumers + + :rtype: list + + """ + return dictkeys(self._consumers) + + def exchange_bind(self, + callback=None, + destination=None, + source=None, + routing_key='', + nowait=False, + arguments=None): + """Bind an exchange to another exchange. + + :param callable callback: The callback to call on Exchange.BindOk; MUST + be None when nowait=True + :param destination: The destination exchange to bind + :type destination: str or unicode + :param source: The source exchange to bind to + :type source: str or unicode + :param routing_key: The routing key to bind on + :type routing_key: str or unicode + :param bool nowait: Do not wait for an Exchange.BindOk + :param dict arguments: Custom key/value pair arguments for the binding + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Exchange.Bind(0, destination, source, routing_key, + nowait, arguments or dict()), + callback, [spec.Exchange.BindOk] if nowait is False + else []) + + def exchange_declare(self, + callback=None, + exchange=None, + exchange_type='direct', + passive=False, + durable=False, + auto_delete=False, + internal=False, + nowait=False, + arguments=None): + """This method creates an exchange if it does not already exist, and if + the exchange exists, verifies that it is of the correct and expected + class. + + If passive set, the server will reply with Declare-Ok if the exchange + already exists with the same name, and raise an error if not and if the + exchange does not already exist, the server MUST raise a channel + exception with reply code 404 (not found). + + :param callable callback: Call this method on Exchange.DeclareOk; MUST + be None when nowait=True + :param exchange: The exchange name consists of a non-empty + :type exchange: str or unicode + sequence of these characters: letters, + digits, hyphen, underscore, period, or + colon. 
+ :param str exchange_type: The exchange type to use + :param bool passive: Perform a declare or just check to see if it exists + :param bool durable: Survive a reboot of RabbitMQ + :param bool auto_delete: Remove when no more queues are bound to it + :param bool internal: Can only be published to by other exchanges + :param bool nowait: Do not expect an Exchange.DeclareOk response + :param dict arguments: Custom key/value pair arguments for the exchange + + """ + self._validate_channel_and_callback(callback) + + return self._rpc(spec.Exchange.Declare(0, exchange, exchange_type, + passive, durable, auto_delete, + internal, nowait, + arguments or dict()), + callback, + [spec.Exchange.DeclareOk] if nowait is False else []) + + def exchange_delete(self, + callback=None, + exchange=None, + if_unused=False, + nowait=False): + """Delete the exchange. + + :param callable callback: The function to call on Exchange.DeleteOk; + MUST be None when nowait=True. + :param exchange: The exchange name + :type exchange: str or unicode + :param bool if_unused: only delete if the exchange is unused + :param bool nowait: Do not wait for an Exchange.DeleteOk + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Exchange.Delete(0, exchange, if_unused, nowait), + callback, [spec.Exchange.DeleteOk] if nowait is False + else []) + + def exchange_unbind(self, + callback=None, + destination=None, + source=None, + routing_key='', + nowait=False, + arguments=None): + """Unbind an exchange from another exchange. + + :param callable callback: The callback to call on Exchange.UnbindOk; + MUST be None when nowait=True. + :param destination: The destination exchange to unbind + :type destination: str or unicode + :param source: The source exchange to unbind from + :type source: str or unicode + :param routing_key: The routing key to unbind + :type routing_key: str or unicode + :param bool nowait: Do not wait for an Exchange.UnbindOk + :param dict arguments: Custom key/value pair arguments for the binding + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Exchange.Unbind(0, destination, source, + routing_key, nowait, arguments), + callback, + [spec.Exchange.UnbindOk] if nowait is False else []) + + def flow(self, callback, active): + """Turn Channel flow control off and on. Pass a callback to be notified + of the response from the server. active is a bool. Callback should + expect a bool in response indicating channel flow state. For more + information, please reference: + + http://www.rabbitmq.com/amqp-0-9-1-reference.html#channel.flow + + :param callable callback: The callback to call upon completion + :param bool active: Turn flow on or off + + """ + self._validate_channel_and_callback(callback) + self._on_flowok_callback = callback + self._rpc(spec.Channel.Flow(active), self._on_flowok, + [spec.Channel.FlowOk]) + + @property + def is_closed(self): + """Returns True if the channel is closed. + + :rtype: bool + + """ + return self._state == self.CLOSED + + @property + def is_closing(self): + """Returns True if client-initiated closing of the channel is in + progress. + + :rtype: bool + + """ + return self._state == self.CLOSING + + @property + def is_open(self): + """Returns True if the channel is open. 
+ + :rtype: bool + + """ + return self._state == self.OPEN + + def open(self): + """Open the channel""" + self._set_state(self.OPENING) + self._add_callbacks() + self._rpc(spec.Channel.Open(), self._on_openok, [spec.Channel.OpenOk]) + + def queue_bind(self, callback, queue, exchange, + routing_key=None, + nowait=False, + arguments=None): + """Bind the queue to the specified exchange + + :param callable callback: The callback to call on Queue.BindOk; + MUST be None when nowait=True. + :param queue: The queue to bind to the exchange + :type queue: str or unicode + :param exchange: The source exchange to bind to + :type exchange: str or unicode + :param routing_key: The routing key to bind on + :type routing_key: str or unicode + :param bool nowait: Do not wait for a Queue.BindOk + :param dict arguments: Custom key/value pair arguments for the binding + + """ + self._validate_channel_and_callback(callback) + replies = [spec.Queue.BindOk] if nowait is False else [] + if routing_key is None: + routing_key = queue + return self._rpc(spec.Queue.Bind(0, queue, exchange, routing_key, + nowait, arguments or dict()), + callback, replies) + + def queue_declare(self, callback, + queue='', + passive=False, + durable=False, + exclusive=False, + auto_delete=False, + nowait=False, + arguments=None): + """Declare queue, create if needed. This method creates or checks a + queue. When creating a new queue the client can specify various + properties that control the durability of the queue and its contents, + and the level of sharing for the queue. + + Leave the queue name empty for a auto-named queue in RabbitMQ + + :param callable callback: callback(pika.frame.Method) for method + Queue.DeclareOk; MUST be None when nowait=True. + :param queue: The queue name + :type queue: str or unicode + :param bool passive: Only check to see if the queue exists + :param bool durable: Survive reboots of the broker + :param bool exclusive: Only allow access by the current connection + :param bool auto_delete: Delete after consumer cancels or disconnects + :param bool nowait: Do not wait for a Queue.DeclareOk + :param dict arguments: Custom key/value arguments for the queue + + """ + if queue: + condition = (spec.Queue.DeclareOk, + {'queue': queue}) + else: + condition = spec.Queue.DeclareOk # pylint: disable=R0204 + replies = [condition] if nowait is False else [] + self._validate_channel_and_callback(callback) + return self._rpc(spec.Queue.Declare(0, queue, passive, durable, + exclusive, auto_delete, nowait, + arguments or dict()), + callback, replies) + + def queue_delete(self, + callback=None, + queue='', + if_unused=False, + if_empty=False, + nowait=False): + """Delete a queue from the broker. + + :param callable callback: The callback to call on Queue.DeleteOk; + MUST be None when nowait=True. + :param queue: The queue to delete + :type queue: str or unicode + :param bool if_unused: only delete if it's unused + :param bool if_empty: only delete if the queue is empty + :param bool nowait: Do not wait for a Queue.DeleteOk + + """ + replies = [spec.Queue.DeleteOk] if nowait is False else [] + self._validate_channel_and_callback(callback) + return self._rpc(spec.Queue.Delete(0, queue, if_unused, if_empty, + nowait), + callback, replies) + + def queue_purge(self, callback=None, queue='', nowait=False): + """Purge all of the messages from the specified queue + + :param callable callback: The callback to call on Queue.PurgeOk; + MUST be None when nowait=True. 
+ :param queue: The queue to purge + :type queue: str or unicode + :param bool nowait: Do not expect a Queue.PurgeOk response + + """ + replies = [spec.Queue.PurgeOk] if nowait is False else [] + self._validate_channel_and_callback(callback) + return self._rpc(spec.Queue.Purge(0, queue, nowait), callback, replies) + + def queue_unbind(self, + callback=None, + queue='', + exchange=None, + routing_key=None, + arguments=None): + """Unbind a queue from an exchange. + + :param callable callback: The callback to call on Queue.UnbindOk + :param queue: The queue to unbind from the exchange + :type queue: str or unicode + :param exchange: The source exchange to bind from + :type exchange: str or unicode + :param routing_key: The routing key to unbind + :type routing_key: str or unicode + :param dict arguments: Custom key/value pair arguments for the binding + + """ + self._validate_channel_and_callback(callback) + if routing_key is None: + routing_key = queue + return self._rpc(spec.Queue.Unbind(0, queue, exchange, routing_key, + arguments or dict()), + callback, [spec.Queue.UnbindOk]) + + def tx_commit(self, callback=None): + """Commit a transaction + + :param callable callback: The callback for delivery confirmations + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Tx.Commit(), callback, [spec.Tx.CommitOk]) + + def tx_rollback(self, callback=None): + """Rollback a transaction. + + :param callable callback: The callback for delivery confirmations + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Tx.Rollback(), callback, [spec.Tx.RollbackOk]) + + def tx_select(self, callback=None): + """Select standard transaction mode. This method sets the channel to use + standard transactions. The client must use this method at least once on + a channel before using the Commit or Rollback methods. + + :param callable callback: The callback for delivery confirmations + + """ + self._validate_channel_and_callback(callback) + return self._rpc(spec.Tx.Select(), callback, [spec.Tx.SelectOk]) + + # Internal methods + + def _add_callbacks(self): + """Callbacks that add the required behavior for a channel when + connecting and connected to a server. + + """ + # Add a callback for Basic.GetEmpty + self.callbacks.add(self.channel_number, spec.Basic.GetEmpty, + self._on_getempty, False) + + # Add a callback for Basic.Cancel + self.callbacks.add(self.channel_number, spec.Basic.Cancel, + self._on_cancel, False) + + # Deprecated in newer versions of RabbitMQ but still register for it + self.callbacks.add(self.channel_number, spec.Channel.Flow, + self._on_flow, False) + + # Add a callback for when the server closes our channel + self.callbacks.add(self.channel_number, spec.Channel.Close, + self._on_close, True) + + def _add_on_cleanup_callback(self, callback): + """For internal use only (e.g., Connection needs to remove closed + channels from its channel container). Pass a callback function that will + be called when the channel is being cleaned up after all channel-close + callbacks callbacks. 
+ + :param callable callback: The callback to call, having the + signature: callback(channel) + + """ + self.callbacks.add(self.channel_number, self._ON_CHANNEL_CLEANUP_CB_KEY, + callback, one_shot=True, only_caller=self) + + def _cleanup(self): + """Remove all consumers and any callbacks for the channel.""" + self.callbacks.process(self.channel_number, + self._ON_CHANNEL_CLEANUP_CB_KEY, self, + self) + self._consumers = dict() + self.callbacks.cleanup(str(self.channel_number)) + self._cookie = None + + def _cleanup_consumer_ref(self, consumer_tag): + """Remove any references to the consumer tag in internal structures + for consumer state. + + :param str consumer_tag: The consumer tag to cleanup + + """ + self._consumers_with_noack.discard(consumer_tag) + self._consumers.pop(consumer_tag, None) + self._cancelled.discard(consumer_tag) + + def _get_cookie(self): + """Used by the wrapper implementation (e.g., `BlockingChannel`) to + retrieve the cookie that it set via `_set_cookie` + + :returns: opaque cookie value that was set via `_set_cookie` + """ + return self._cookie + + def _handle_content_frame(self, frame_value): + """This is invoked by the connection when frames that are not registered + with the CallbackManager have been found. This should only be the case + when the frames are related to content delivery. + + The _content_assembler will be invoked which will return the fully + formed message in three parts when all of the body frames have been + received. + + :param pika.amqp_object.Frame frame_value: The frame to deliver + + """ + try: + response = self._content_assembler.process(frame_value) + except exceptions.UnexpectedFrameError: + self._on_unexpected_frame(frame_value) + return + + if response: + if isinstance(response[0].method, spec.Basic.Deliver): + self._on_deliver(*response) + elif isinstance(response[0].method, spec.Basic.GetOk): + self._on_getok(*response) + elif isinstance(response[0].method, spec.Basic.Return): + self._on_return(*response) + + def _on_cancel(self, method_frame): + """When the broker cancels a consumer, delete it from our internal + dictionary. 
+ + :param pika.frame.Method method_frame: The method frame received + + """ + if method_frame.method.consumer_tag in self._cancelled: + # User-initiated cancel is waiting for Cancel-ok + return + + self._cleanup_consumer_ref(method_frame.method.consumer_tag) + + def _on_cancelok(self, method_frame): + """Called in response to a frame from the Broker when the + client sends Basic.Cancel + + :param pika.frame.Method method_frame: The method frame received + + """ + self._cleanup_consumer_ref(method_frame.method.consumer_tag) + + def _on_close(self, method_frame): + """Handle the case where our channel has been closed for us + + :param pika.frame.Method method_frame: Method frame with Channel.Close + method + + """ + LOGGER.warning('Received remote Channel.Close (%s): %r on %s', + method_frame.method.reply_code, + method_frame.method.reply_text, + self) + + # AMQP 0.9.1 requires CloseOk response to Channel.Close; Note, we should + # not be called when connection is closed + self._send_method(spec.Channel.CloseOk()) + + if self.is_closing: + # Since we already sent Channel.Close, we need to wait for CloseOk + # before cleaning up to avoid a race condition whereby our channel + # number might get reused before our CloseOk arrives + + # Save the details to provide to user callback when CloseOk arrives + self._closing_code_and_text = (method_frame.method.reply_code, + method_frame.method.reply_text) + else: + self._set_state(self.CLOSED) + try: + self.callbacks.process(self.channel_number, '_on_channel_close', + self, self, + method_frame.method.reply_code, + method_frame.method.reply_text) + finally: + self._cleanup() + + def _on_close_meta(self, reply_code, reply_text): + """Handle meta-close request from Connection's cleanup logic after + sudden connection loss. We use this opportunity to transition to + CLOSED state, clean up the channel, and dispatch the on-channel-closed + callbacks. + + :param int reply_code: The reply code to pass to on-close callback + :param str reply_text: The reply text to pass to on-close callback + + """ + LOGGER.debug('Handling meta-close on %s', self) + + if not self.is_closed: + self._closing_code_and_text = reply_code, reply_text + + self._set_state(self.CLOSED) + + try: + self.callbacks.process(self.channel_number, '_on_channel_close', + self, self, + reply_code, + reply_text) + finally: + self._cleanup() + + def _on_closeok(self, method_frame): + """Invoked when RabbitMQ replies to a Channel.Close method + + :param pika.frame.Method method_frame: Method frame with Channel.CloseOk + method + + """ + LOGGER.info('Received %s on %s', method_frame.method, self) + + self._set_state(self.CLOSED) + + try: + self.callbacks.process(self.channel_number, '_on_channel_close', + self, self, + self._closing_code_and_text[0], + self._closing_code_and_text[1]) + finally: + self._cleanup() + + def _on_deliver(self, method_frame, header_frame, body): + """Cope with reentrancy. If a particular consumer is still active when + another delivery appears for it, queue the deliveries up until it + finally exits. 
+
+        :param pika.frame.Method method_frame: The method frame received
+        :param pika.frame.Header header_frame: The header frame received
+        :param body: The body received
+        :type body: str or unicode
+
+        """
+        consumer_tag = method_frame.method.consumer_tag
+
+        if consumer_tag in self._cancelled:
+            if self.is_open and consumer_tag not in self._consumers_with_noack:
+                self.basic_reject(method_frame.method.delivery_tag)
+            return
+
+        if consumer_tag not in self._consumers:
+            LOGGER.error('Unexpected delivery: %r', method_frame)
+            return
+
+        self._consumers[consumer_tag](self, method_frame.method,
+                                      header_frame.properties, body)
+
+    def _on_eventok(self, method_frame):
+        """Generic handler for "-ok" events that may have internal callbacks.
+        We keep a list of what we've yet to implement so that we don't silently
+        drain events that we don't support.
+
+        :param pika.frame.Method method_frame: The method frame received
+
+        """
+        LOGGER.debug('Discarding frame %r', method_frame)
+
+    def _on_flow(self, _method_frame_unused):
+        """Called if the server sends a Channel.Flow frame.
+
+        :param pika.frame.Method _method_frame_unused: The Channel.Flow frame
+
+        """
+        if self._has_on_flow_callback is False:
+            LOGGER.warning('Channel.Flow received from server')
+
+    def _on_flowok(self, method_frame):
+        """Called in response to us asking the server to toggle on Channel.Flow
+
+        :param pika.frame.Method method_frame: The method frame received
+
+        """
+        self.flow_active = method_frame.method.active
+        if self._on_flowok_callback:
+            self._on_flowok_callback(method_frame.method.active)
+            self._on_flowok_callback = None
+        else:
+            LOGGER.warning('Channel.FlowOk received with no active callbacks')
+
+    def _on_getempty(self, method_frame):
+        """When we receive an empty reply do nothing but log it
+
+        :param pika.frame.Method method_frame: The method frame received
+
+        """
+        LOGGER.debug('Received Basic.GetEmpty: %r', method_frame)
+        if self._on_getok_callback is not None:
+            self._on_getok_callback = None
+
+    def _on_getok(self, method_frame, header_frame, body):
+        """Called in reply to a Basic.Get when there is a message.
+
+        :param pika.frame.Method method_frame: The method frame received
+        :param pika.frame.Header header_frame: The header frame received
+        :param body: The body received
+        :type body: str or unicode
+
+        """
+        if self._on_getok_callback is not None:
+            callback = self._on_getok_callback
+            self._on_getok_callback = None
+            callback(self, method_frame.method, header_frame.properties, body)
+        else:
+            LOGGER.error('Basic.GetOk received with no active callback')
+
+    def _on_openok(self, method_frame):
+        """Called by our callback handler when we receive a Channel.OpenOk and
+        subsequently calls our _on_openok_callback which was passed into the
+        Channel constructor. The reason we do this is because we want to make
+        sure that the on_open_callback parameter passed into the Channel
+        constructor is not the first callback we make.
+
+        Suppress the state transition and callback if channel is already in
+        CLOSING state.
+
+        :param pika.frame.Method method_frame: Channel.OpenOk frame
+
+        """
+        # Suppress OpenOk if the user or Connection.Close started closing it
+        # before open completed.
+        if self.is_closing:
+            LOGGER.debug('Suppressing while in closing state: %s', method_frame)
+        else:
+            self._set_state(self.OPEN)
+            if self._on_openok_callback is not None:
+                self._on_openok_callback(self)
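+
+    # Sketch (illustrative): _on_return below dispatches to callbacks that
+    # applications register via add_on_return_callback; a mandatory publish
+    # that cannot be routed comes back through this path. Placeholder names:
+    #
+    #     def on_returned(channel, method, properties, body):
+    #         LOGGER.warning('Returned %s: %r', method.reply_text, body)
+    #
+    #     channel.add_on_return_callback(on_returned)
+    #     channel.basic_publish('events', 'no.such.key', 'hi', mandatory=True)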
+
+    def _on_return(self, method_frame, header_frame, body):
+        """Called if the server sends a Basic.Return frame.
+
+        :param pika.frame.Method method_frame: The Basic.Return frame
+        :param pika.frame.Header header_frame: The content header frame
+        :param body: The message body
+        :type body: str or unicode
+
+        """
+        if not self.callbacks.process(self.channel_number, '_on_return', self,
+                                      self,
+                                      method_frame.method,
+                                      header_frame.properties,
+                                      body):
+            LOGGER.warning('Basic.Return received from server (%r, %r)',
+                           method_frame.method, header_frame.properties)
+
+    def _on_selectok(self, method_frame):
+        """Called when the broker sends a Confirm.SelectOk frame
+
+        :param pika.frame.Method method_frame: The method frame received
+
+        """
+        LOGGER.debug("Confirm.SelectOk Received: %r", method_frame)
+
+    def _on_synchronous_complete(self, _method_frame_unused):
+        """This is called when a synchronous command is completed. It will undo
+        the blocking state and send all the frames that stacked up while we
+        were in the blocking state.
+
+        :param pika.frame.Method _method_frame_unused: The method frame received
+
+        """
+        LOGGER.debug('%i blocked frames', len(self._blocked))
+        self._blocking = None
+        while self._blocked and self._blocking is None:
+            self._rpc(*self._blocked.popleft())
+
+    def _rpc(self, method, callback=None, acceptable_replies=None):
+        """Make a synchronous channel RPC call for a synchronous method frame. If
+        the channel is already in the blocking state, then enqueue the request,
+        but don't send it at this time; it will be eventually sent by
+        `_on_synchronous_complete` after the prior blocking request receives a
+        response. If the channel is not in the blocking state and
+        `acceptable_replies` is not empty, transition the channel to the
+        blocking state and register for `_on_synchronous_complete` before
+        sending the request.
+
+        NOTE: A callback must be accompanied by non-empty acceptable_replies.
+
+        :param pika.amqp_object.Method method: The AMQP method to invoke
+        :param callable callback: The callback for the RPC response
+        :param acceptable_replies: A (possibly empty) sequence of
+            replies this RPC call expects or None
+        :type acceptable_replies: list or None
+
+        """
+        assert method.synchronous, (
+            'Only synchronous-capable methods may be used with _rpc: %r'
+            % (method,))
+
+        # Validate we got None or a list of acceptable_replies
+        if not isinstance(acceptable_replies, (type(None), list)):
+            raise TypeError('acceptable_replies should be list or None')
+
+        if callback is not None:
+            # Validate the callback is callable
+            if not is_callable(callback):
+                raise TypeError(
+                    'callback should be None or a callable')
+
+            # Make sure that callback is accompanied by acceptable replies
+            if not acceptable_replies:
+                raise ValueError(
+                    'Unexpected callback for asynchronous (nowait) operation.')
+
+        # Make sure the channel is not closed yet
+        if self.is_closed:
+            raise exceptions.ChannelClosed
+
+        # If the channel is blocking, add subsequent commands to our stack
+        if self._blocking:
+            LOGGER.debug('Already in blocking state, so enqueueing method %s; '
+                         'acceptable_replies=%r',
+                         method, acceptable_replies)
+            return self._blocked.append([method, callback, acceptable_replies])
+
+        # If acceptable replies are set, add callbacks
+        if acceptable_replies:
+            # Block until a response frame is received for synchronous frames
+            self._blocking = method.NAME
+            LOGGER.debug(
+                'Entering blocking state on frame %s; acceptable_replies=%r',
+                method, acceptable_replies)
+
+            for reply in acceptable_replies:
+                if isinstance(reply, tuple):
+                    reply, arguments = reply
+                else:
+                    arguments = None
+                LOGGER.debug('Adding on_synchronous_complete callback')
+                self.callbacks.add(self.channel_number, reply,
+                                   self._on_synchronous_complete,
+                                   arguments=arguments)
+                if callback is not None:
+                    LOGGER.debug('Adding passed-in callback')
+                    self.callbacks.add(self.channel_number, reply, callback,
+                                       arguments=arguments)
+
+        self._send_method(method)
+
+    def _send_method(self, method, content=None):
+        """Shortcut wrapper to send a method through our connection, passing in
+        the channel number
+
+        :param pika.amqp_object.Method method: The method to send
+        :param tuple content: If set, the method carries content; a tuple of
+            the content frame's properties and body.
+
+        """
+        # pylint: disable=W0212
+        self.connection._send_method(self.channel_number, method, content)
+
+    def _set_cookie(self, cookie):
+        """Used by wrapper layer (e.g., `BlockingConnection`) to link the
+        channel implementation back to the proxy. See `_get_cookie`.
+
+        :param cookie: an opaque value; typically a proxy channel implementation
+            instance (e.g., `BlockingChannel` instance)
+        """
+        self._cookie = cookie
+
+    def _set_state(self, connection_state):
+        """Set the channel connection state to the specified state value.
+
+        :param int connection_state: The connection_state value
+
+        """
+        self._state = connection_state
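+
+    # Illustration of the gating performed by _rpc above: while a synchronous
+    # reply is pending, further synchronous calls are parked on self._blocked
+    # and replayed later by _on_synchronous_complete. Issued back to back,
+    # the second declare below is enqueued until the first Queue.DeclareOk
+    # arrives (placeholder names):
+    #
+    #     channel.queue_declare(on_q1_ok, queue='q1')
+    #     channel.queue_declare(on_q2_ok, queue='q2')  # queued, sent later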
+
+    def _on_unexpected_frame(self, frame_value):
+        """Invoked when a frame is received that is not setup to be processed.
+
+        :param pika.frame.Frame frame_value: The frame received
+
+        """
+        LOGGER.error('Unexpected frame: %r', frame_value)
+
+    def _validate_channel_and_callback(self, callback):
+        """Verify that channel is open and callback is callable if not None
+
+        :raises ChannelClosed: if channel is closed
+        :raises ValueError: if callback is not None and is not callable
+        """
+        if not self.is_open:
+            raise exceptions.ChannelClosed()
+        if callback is not None and not is_callable(callback):
+            raise ValueError('callback must be a function or method')
+
+
+class ContentFrameAssembler(object):
+    """Handle content-related frames, building a message and returning it in
+    three parts upon receipt.
+
+    """
+
+    def __init__(self):
+        """Create a new instance of the content frame assembler.
+
+        """
+        self._method_frame = None
+        self._header_frame = None
+        self._seen_so_far = 0
+        self._body_fragments = list()
+
+    def process(self, frame_value):
+        """Invoked by the Channel object when passed frames that are not
+        setup in the rpc process and that don't have explicit reply types
+        defined. This includes Basic.Publish, Basic.GetOk and Basic.Return
+
+        :param Method|Header|Body frame_value: The frame to process
+
+        """
+        if (isinstance(frame_value, frame.Method) and
+                spec.has_content(frame_value.method.INDEX)):
+            self._method_frame = frame_value
+        elif isinstance(frame_value, frame.Header):
+            self._header_frame = frame_value
+            if frame_value.body_size == 0:
+                return self._finish()
+        elif isinstance(frame_value, frame.Body):
+            return self._handle_body_frame(frame_value)
+        else:
+            raise exceptions.UnexpectedFrameError(frame_value)
+
+    def _finish(self):
+        """Invoked when all of the message has been received
+
+        :rtype: tuple(pika.frame.Method, pika.frame.Header, str)
+
+        """
+        content = (self._method_frame, self._header_frame,
+                   b''.join(self._body_fragments))
+        self._reset()
+        return content
+
+    def _handle_body_frame(self, body_frame):
+        """Receive body frames and append them to the stack. When the body size
+        matches, call the finish method.
+ + :param Body body_frame: The body frame + :raises: pika.exceptions.BodyTooLongError + :rtype: tuple(pika.frame.Method, pika.frame.Header, str)|None + + """ + self._seen_so_far += len(body_frame.fragment) + self._body_fragments.append(body_frame.fragment) + if self._seen_so_far == self._header_frame.body_size: + return self._finish() + elif self._seen_so_far > self._header_frame.body_size: + raise exceptions.BodyTooLongError(self._seen_so_far, + self._header_frame.body_size) + return None + + def _reset(self): + """Reset the values for processing frames""" + self._method_frame = None + self._header_frame = None + self._seen_so_far = 0 + self._body_fragments = list() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/compat.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/compat.py new file mode 100644 index 000000000..ad14aaa88 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/compat.py @@ -0,0 +1,210 @@ +import errno +import os +import platform +import re +import socket +import sys as _sys + +PY2 = _sys.version_info < (3,) +PY3 = not PY2 +RE_NUM = re.compile(r'(\d+).+') + +if _sys.version_info[:2] < (3, 3): + SOCKET_ERROR = socket.error +else: + # socket.error was deprecated and replaced by OSError in python 3.3 + SOCKET_ERROR = OSError + +try: + SOL_TCP = socket.SOL_TCP +except AttributeError: + SOL_TCP = socket.IPPROTO_TCP + +if not PY2: + # these were moved around for Python 3 + from urllib.parse import (quote as url_quote, unquote as url_unquote, + urlencode) + + # Python 3 does not have basestring anymore; we include + # *only* the str here as this is used for textual data. + basestring = (str,) + + # for assertions that the data is either encoded or non-encoded text + str_or_bytes = (str, bytes) + + # xrange is gone, replace it with range + xrange = range + + # the unicode type is str + unicode_type = str + + def dictkeys(dct): + """ + Returns a list of keys of dictionary + + dict.keys returns a view that works like .keys in Python 2 + *except* any modifications in the dictionary will be visible + (and will cause errors if the view is being iterated over while + it is modified). + """ + + return list(dct.keys()) + + def dictvalues(dct): + """ + Returns a list of values of a dictionary + + dict.values returns a view that works like .values in Python 2 + *except* any modifications in the dictionary will be visible + (and will cause errors if the view is being iterated over while + it is modified). + """ + return list(dct.values()) + + def dict_iteritems(dct): + """ + Returns an iterator of items (key/value pairs) of a dictionary + + dict.items returns a view that works like .items in Python 2 + *except* any modifications in the dictionary will be visible + (and will cause errors if the view is being iterated over while + it is modified). + """ + return dct.items() + + def dict_itervalues(dct): + """ + :param dict dct: + :returns: an iterator of the values of a dictionary + """ + return dct.values() + + def byte(*args): + """ + This is the same as Python 2 `chr(n)` for bytes in Python 3 + + Returns a single byte `bytes` for the given int argument (we + optimize it a bit here by passing the positional argument tuple + directly to the bytes constructor. + """ + return bytes(args) + + class long(int): + """ + A marker class that signifies that the integer value should be + serialized as `l` instead of `I` + """ + + def __repr__(self): + return str(self) + 'L' + + def canonical_str(value): + """ + Return the canonical str value for the string. 
+ In both Python 3 and Python 2 this is str. + """ + + return str(value) + + def is_integer(value): + return isinstance(value, int) +else: + from urllib import quote as url_quote, unquote as url_unquote, urlencode + + basestring = basestring + str_or_bytes = basestring + xrange = xrange + unicode_type = unicode + dictkeys = dict.keys + dictvalues = dict.values + dict_iteritems = dict.iteritems + dict_itervalues = dict.itervalues + byte = chr + long = long + + def canonical_str(value): + """ + Returns the canonical string value of the given string. + In Python 2 this is the value unchanged if it is an str, otherwise + it is the unicode value encoded as UTF-8. + """ + + try: + return str(value) + except UnicodeEncodeError: + return str(value.encode('utf-8')) + + def is_integer(value): + return isinstance(value, (int, long)) + + +def as_bytes(value): + if not isinstance(value, bytes): + return value.encode('UTF-8') + return value + + +def to_digit(value): + if value.isdigit(): + return int(value) + match = RE_NUM.match(value) + return int(match.groups()[0]) if match else 0 + + +def get_linux_version(release_str): + ver_str = release_str.split('-')[0] + return tuple(map(to_digit, ver_str.split('.')[:3])) + + +HAVE_SIGNAL = os.name == 'posix' + +EINTR_IS_EXPOSED = _sys.version_info[:2] <= (3, 4) + +LINUX_VERSION = None +if platform.system() == 'Linux': + LINUX_VERSION = get_linux_version(platform.release()) + +_LOCALHOST = '127.0.0.1' +_LOCALHOST_V6 = '::1' + +def _nonblocking_socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): + """ + Returns a pair of sockets in the manner of socketpair with the additional + feature that they will be non-blocking. Prior to Python 3.5, socketpair + did not exist on Windows at all. + """ + if family == socket.AF_INET: + host = _LOCALHOST + elif family == socket.AF_INET6: + host = _LOCALHOST_V6 + else: + raise ValueError( + 'Only AF_INET and AF_INET6 socket address families ' + 'are supported') + if type != socket.SOCK_STREAM: + raise ValueError('Only SOCK_STREAM socket type is supported') + if proto != 0: + raise ValueError('Only protocol zero is supported') + + lsock = socket.socket(family, type, proto) + try: + lsock.bind((host, 0)) + lsock.listen(min(socket.SOMAXCONN, 128)) + # On IPv6, ignore flow_info and scope_id + addr, port = lsock.getsockname()[:2] + csock = socket.socket(family, type, proto) + try: + csock.connect((addr, port)) + ssock, _ = lsock.accept() + except Exception: + csock.close() + raise + finally: + lsock.close() + + # Make sockets non-blocking to prevent deadlocks + # See https://github.com/pika/pika/issues/917 + csock.setblocking(False) + ssock.setblocking(False) + + return ssock, csock diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/connection.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/connection.py new file mode 100644 index 000000000..4cc1e3272 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/connection.py @@ -0,0 +1,2356 @@ +"""Core connection objects""" +import ast +import sys +import collections +import copy +import logging +import math +import numbers +import os +import platform +import warnings +import ssl + +if sys.version_info > (3,): + import urllib.parse as urlparse # pylint: disable=E0611,F0401 +else: + import urlparse + +from pika import __version__ +from pika import callback +import pika.channel +from pika import credentials as pika_credentials +from pika import exceptions +from pika import frame +from pika import heartbeat +from pika import utils + +from pika import spec + +from pika.compat 
import (xrange, basestring, # pylint: disable=W0622 + url_unquote, dictkeys, dict_itervalues, + dict_iteritems) + + +BACKPRESSURE_WARNING = ("Pika: Write buffer exceeded warning threshold at " + "%i bytes and an estimated %i frames behind") +PRODUCT = "Pika Python Client Library" + +LOGGER = logging.getLogger(__name__) + + +class InternalCloseReasons(object): + """Internal reason codes passed to the user's on_close_callback when the + connection is terminated abruptly, without reply code/text from the broker. + + AMQP 0.9.1 specification cites IETF RFC 821 for reply codes. To avoid + conflict, the `InternalCloseReasons` namespace uses negative integers. These + are invalid for sending to the broker. + """ + SOCKET_ERROR = -1 + BLOCKED_CONNECTION_TIMEOUT = -2 + + +class Parameters(object): # pylint: disable=R0902 + """Base connection parameters class definition + + :param bool backpressure_detection: `DEFAULT_BACKPRESSURE_DETECTION` + :param float|None blocked_connection_timeout: + `DEFAULT_BLOCKED_CONNECTION_TIMEOUT` + :param int channel_max: `DEFAULT_CHANNEL_MAX` + :param int connection_attempts: `DEFAULT_CONNECTION_ATTEMPTS` + :param credentials: `DEFAULT_CREDENTIALS` + :param int frame_max: `DEFAULT_FRAME_MAX` + :param int heartbeat: `DEFAULT_HEARTBEAT_TIMEOUT` + :param str host: `DEFAULT_HOST` + :param str locale: `DEFAULT_LOCALE` + :param int port: `DEFAULT_PORT` + :param float retry_delay: `DEFAULT_RETRY_DELAY` + :param float socket_timeout: `DEFAULT_SOCKET_TIMEOUT` + :param bool ssl: `DEFAULT_SSL` + :param dict ssl_options: `DEFAULT_SSL_OPTIONS` + :param str virtual_host: `DEFAULT_VIRTUAL_HOST` + :param int tcp_options: `DEFAULT_TCP_OPTIONS` + """ + + # Declare slots to protect against accidental assignment of an invalid + # attribute + __slots__ = ( + '_backpressure_detection', + '_blocked_connection_timeout', + '_channel_max', + '_client_properties', + '_connection_attempts', + '_credentials', + '_frame_max', + '_heartbeat', + '_host', + '_locale', + '_port', + '_retry_delay', + '_socket_timeout', + '_ssl', + '_ssl_options', + '_virtual_host', + '_tcp_options' + ) + + DEFAULT_USERNAME = 'guest' + DEFAULT_PASSWORD = 'guest' + + DEFAULT_BACKPRESSURE_DETECTION = False + DEFAULT_BLOCKED_CONNECTION_TIMEOUT = None + DEFAULT_CHANNEL_MAX = pika.channel.MAX_CHANNELS + DEFAULT_CLIENT_PROPERTIES = None + DEFAULT_CREDENTIALS = pika_credentials.PlainCredentials(DEFAULT_USERNAME, + DEFAULT_PASSWORD) + DEFAULT_CONNECTION_ATTEMPTS = 1 + DEFAULT_FRAME_MAX = spec.FRAME_MAX_SIZE + DEFAULT_HEARTBEAT_TIMEOUT = None # None accepts server's proposal + DEFAULT_HOST = 'localhost' + DEFAULT_LOCALE = 'en_US' + DEFAULT_PORT = 5672 + DEFAULT_RETRY_DELAY = 2.0 + DEFAULT_SOCKET_TIMEOUT = 10.0 + DEFAULT_SSL = False + DEFAULT_SSL_OPTIONS = None + DEFAULT_SSL_PORT = 5671 + DEFAULT_VIRTUAL_HOST = '/' + DEFAULT_TCP_OPTIONS = None + + DEFAULT_HEARTBEAT_INTERVAL = DEFAULT_HEARTBEAT_TIMEOUT # DEPRECATED + + def __init__(self): + self._backpressure_detection = None + self.backpressure_detection = self.DEFAULT_BACKPRESSURE_DETECTION + + # If not None, blocked_connection_timeout is the timeout, in seconds, + # for the connection to remain blocked; if the timeout expires, the + # connection will be torn down, triggering the connection's + # on_close_callback + self._blocked_connection_timeout = None + self.blocked_connection_timeout = ( + self.DEFAULT_BLOCKED_CONNECTION_TIMEOUT) + + self._channel_max = None + self.channel_max = self.DEFAULT_CHANNEL_MAX + + self._client_properties = None + self.client_properties = 
self.DEFAULT_CLIENT_PROPERTIES
+
+        self._connection_attempts = None
+        self.connection_attempts = self.DEFAULT_CONNECTION_ATTEMPTS
+
+        self._credentials = None
+        self.credentials = self.DEFAULT_CREDENTIALS
+
+        self._frame_max = None
+        self.frame_max = self.DEFAULT_FRAME_MAX
+
+        self._heartbeat = None
+        self.heartbeat = self.DEFAULT_HEARTBEAT_TIMEOUT
+
+        self._host = None
+        self.host = self.DEFAULT_HOST
+
+        self._locale = None
+        self.locale = self.DEFAULT_LOCALE
+
+        self._port = None
+        self.port = self.DEFAULT_PORT
+
+        self._retry_delay = None
+        self.retry_delay = self.DEFAULT_RETRY_DELAY
+
+        self._socket_timeout = None
+        self.socket_timeout = self.DEFAULT_SOCKET_TIMEOUT
+
+        self._ssl = None
+        self.ssl = self.DEFAULT_SSL
+
+        self._ssl_options = None
+        self.ssl_options = self.DEFAULT_SSL_OPTIONS
+
+        self._virtual_host = None
+        self.virtual_host = self.DEFAULT_VIRTUAL_HOST
+
+        self._tcp_options = None
+        self.tcp_options = self.DEFAULT_TCP_OPTIONS
+
+    def __repr__(self):
+        """Represent the info about the instance.
+
+        :rtype: str
+
+        """
+        return ('<%s host=%s port=%s virtual_host=%s ssl=%s>' %
+                (self.__class__.__name__, self.host, self.port,
+                 self.virtual_host, self.ssl))
+
+    @property
+    def backpressure_detection(self):
+        """
+        :returns: boolean indicating whether backpressure detection is
+            enabled. Defaults to `DEFAULT_BACKPRESSURE_DETECTION`.
+
+        """
+        return self._backpressure_detection
+
+    @backpressure_detection.setter
+    def backpressure_detection(self, value):
+        """
+        :param bool value: boolean indicating whether to enable backpressure
+            detection
+
+        """
+        if not isinstance(value, bool):
+            raise TypeError('backpressure_detection must be a bool, '
+                            'but got %r' % (value,))
+        self._backpressure_detection = value
+
+    @property
+    def blocked_connection_timeout(self):
+        """
+        :returns: None or float blocked connection timeout. Defaults to
+            `DEFAULT_BLOCKED_CONNECTION_TIMEOUT`.
+
+        """
+        return self._blocked_connection_timeout
+
+    @blocked_connection_timeout.setter
+    def blocked_connection_timeout(self, value):
+        """
+        :param value: If not None, blocked_connection_timeout is the timeout, in
+            seconds, for the connection to remain blocked; if the timeout
+            expires, the connection will be torn down, triggering the
+            connection's on_close_callback
+
+        """
+        if value is not None:
+            if not isinstance(value, numbers.Real):
+                raise TypeError('blocked_connection_timeout must be a Real '
+                                'number, but got %r' % (value,))
+            if value < 0:
+                raise ValueError('blocked_connection_timeout must be >= 0, but '
+                                 'got %r' % (value,))
+        self._blocked_connection_timeout = value
+
+    @property
+    def channel_max(self):
+        """
+        :returns: max preferred number of channels. Defaults to
+            `DEFAULT_CHANNEL_MAX`.
+        :rtype: int
+
+        """
+        return self._channel_max
+
+    @channel_max.setter
+    def channel_max(self, value):
+        """
+        :param int value: max preferred number of channels, between 1 and
+            `channel.MAX_CHANNELS`, inclusive
+
+        """
+        if not isinstance(value, numbers.Integral):
+            raise TypeError('channel_max must be an int, but got %r' % (value,))
+        if value < 1 or value > pika.channel.MAX_CHANNELS:
+            raise ValueError('channel_max must be <= %i and > 0, but got %r' %
+                             (pika.channel.MAX_CHANNELS, value))
+        self._channel_max = value
+
+    @property
+    def client_properties(self):
+        """
+        :returns: None or dict of client properties used to override the fields
+            in the default client properties reported to RabbitMQ via
+            `Connection.StartOk` method. Defaults to
+            `DEFAULT_CLIENT_PROPERTIES`.
+
+        """
+        return self._client_properties
+
+    @client_properties.setter
+    def client_properties(self, value):
+        """
+        :param value: None or dict of client properties used to override the
+            fields in the default client properties reported to RabbitMQ via
+            `Connection.StartOk` method.
+        """
+        if not isinstance(value, (dict, type(None),)):
+            raise TypeError('client_properties must be dict or None, '
+                            'but got %r' % (value,))
+        # Copy the mutable object to avoid accidental side-effects
+        self._client_properties = copy.deepcopy(value)
+
+    @property
+    def connection_attempts(self):
+        """
+        :returns: number of socket connection attempts. Defaults to
+            `DEFAULT_CONNECTION_ATTEMPTS`.
+
+        """
+        return self._connection_attempts
+
+    @connection_attempts.setter
+    def connection_attempts(self, value):
+        """
+        :param int value: number of socket connection attempts of at least 1
+
+        """
+        if not isinstance(value, numbers.Integral):
+            raise TypeError('connection_attempts must be an int')
+        if value < 1:
+            raise ValueError('connection_attempts must be > 0, but got %r' %
+                             (value,))
+        self._connection_attempts = value
+
+    @property
+    def credentials(self):
+        """
+        :rtype: one of the classes from `pika.credentials.VALID_TYPES`. Defaults
+            to `DEFAULT_CREDENTIALS`.
+
+        """
+        return self._credentials
+
+    @credentials.setter
+    def credentials(self, value):
+        """
+        :param value: authentication credential object of one of the classes
+            from `pika.credentials.VALID_TYPES`
+
+        """
+        if not isinstance(value, tuple(pika_credentials.VALID_TYPES)):
+            raise TypeError('Credentials must be an object of type: %r, but '
+                            'got %r' % (pika_credentials.VALID_TYPES, value))
+        # Copy the mutable object to avoid accidental side-effects
+        self._credentials = copy.deepcopy(value)
+
+    @property
+    def frame_max(self):
+        """
+        :returns: desired maximum AMQP frame size to use. Defaults to
+            `DEFAULT_FRAME_MAX`.
+
+        """
+        return self._frame_max
+
+    @frame_max.setter
+    def frame_max(self, value):
+        """
+        :param int value: desired maximum AMQP frame size to use between
+            `spec.FRAME_MIN_SIZE` and `spec.FRAME_MAX_SIZE`, inclusive
+
+        """
+        if not isinstance(value, numbers.Integral):
+            raise TypeError('frame_max must be an int, but got %r' % (value,))
+        if value < spec.FRAME_MIN_SIZE:
+            raise ValueError('Min AMQP 0.9.1 Frame Size is %i, but got %r' %
+                             (spec.FRAME_MIN_SIZE, value,))
+        elif value > spec.FRAME_MAX_SIZE:
+            raise ValueError('Max AMQP 0.9.1 Frame Size is %i, but got %r' %
+                             (spec.FRAME_MAX_SIZE, value,))
+        self._frame_max = value
+
+    @property
+    def heartbeat(self):
+        """
+        :returns: AMQP connection heartbeat timeout value for negotiation during
+            connection tuning or callable which is invoked during connection tuning.
+            None to accept broker's value. 0 turns heartbeat off. Defaults to
+            `DEFAULT_HEARTBEAT_TIMEOUT`.
+        :rtype: integer, None or callable
+
+        """
+        return self._heartbeat
+
+    @heartbeat.setter
+    def heartbeat(self, value):
+        """
+        :param int|None|callable value: Controls AMQP heartbeat timeout negotiation
+            during connection tuning. An integer value always overrides the value
+            proposed by broker. Use 0 to deactivate heartbeats and None to always
+            accept the broker's proposal. If a callable is given, it will be called
+            with the connection instance and the heartbeat timeout proposed by broker
+            as its arguments. The callback should return a non-negative integer that
+            will be used to override the broker's proposal.
+        """
+        if value is not None:
+            if not isinstance(value, numbers.Integral) and not callable(value):
+                raise TypeError('heartbeat must be an int or a callable function, but got %r' %
+                                (value,))
+            if not callable(value) and value < 0:
+                raise ValueError('heartbeat must be >= 0, but got %r' % (value,))
+        self._heartbeat = value
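+
+    # Sketch (illustrative): since the setter above accepts a callable, the
+    # negotiated heartbeat can be computed at tune time, e.g. accepting the
+    # broker's proposal but capping it (placeholder function name):
+    #
+    #     def cap_heartbeat(connection, broker_proposal):
+    #         return min(broker_proposal, 60)
+    #
+    #     params = ConnectionParameters(heartbeat=cap_heartbeat)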
+
+    @property
+    def host(self):
+        """
+        :returns: hostname or ip address of broker. Defaults to `DEFAULT_HOST`.
+        :rtype: str
+
+        """
+        return self._host
+
+    @host.setter
+    def host(self, value):
+        """
+        :param str value: hostname or ip address of broker
+
+        """
+        if not isinstance(value, basestring):
+            raise TypeError('host must be a str or unicode str, but got %r' %
+                            (value,))
+        self._host = value
+
+    @property
+    def locale(self):
+        """
+        :returns: locale value to pass to broker; e.g., 'en_US'. Defaults to
+            `DEFAULT_LOCALE`.
+        :rtype: str
+
+        """
+        return self._locale
+
+    @locale.setter
+    def locale(self, value):
+        """
+        :param str value: locale value to pass to broker; e.g., "en_US"
+
+        """
+        if not isinstance(value, basestring):
+            raise TypeError('locale must be a str, but got %r' % (value,))
+        self._locale = value
+
+    @property
+    def port(self):
+        """
+        :returns: port number of broker's listening socket. Defaults to
+            `DEFAULT_PORT`.
+        :rtype: int
+
+        """
+        return self._port
+
+    @port.setter
+    def port(self, value):
+        """
+        :param int value: port number of broker's listening socket
+
+        """
+        try:
+            self._port = int(value)
+        except (TypeError, ValueError):
+            raise TypeError('port must be an int, but got %r' % (value,))
+
+    @property
+    def retry_delay(self):
+        """
+        :returns: interval between socket connection attempts; see also
+            `connection_attempts`. Defaults to `DEFAULT_RETRY_DELAY`.
+        :rtype: float
+
+        """
+        return self._retry_delay
+
+    @retry_delay.setter
+    def retry_delay(self, value):
+        """
+        :param float value: interval between socket connection attempts; see
+            also `connection_attempts`.
+
+        """
+        if not isinstance(value, numbers.Real):
+            raise TypeError('retry_delay must be a float or int, but got %r' %
+                            (value,))
+        self._retry_delay = value
+
+    @property
+    def socket_timeout(self):
+        """
+        :returns: socket timeout value. Defaults to `DEFAULT_SOCKET_TIMEOUT`.
+        :rtype: float
+
+        """
+        return self._socket_timeout
+
+    @socket_timeout.setter
+    def socket_timeout(self, value):
+        """
+        :param float value: socket timeout value; NOTE: this is mostly unused
+            now, owing to switchover to non-blocking socket setting after
+            initial socket connection establishment.
+
+        """
+        if value is not None:
+            if not isinstance(value, numbers.Real):
+                raise TypeError('socket_timeout must be a float or int, '
+                                'but got %r' % (value,))
+            if not value > 0:
+                raise ValueError('socket_timeout must be > 0, but got %r' %
+                                 (value,))
+        self._socket_timeout = value
+
+    @property
+    def ssl(self):
+        """
+        :returns: boolean indicating whether to connect via SSL. Defaults to
+            `DEFAULT_SSL`.
+
+        """
+        return self._ssl
+
+    @ssl.setter
+    def ssl(self, value):
+        """
+        :param bool value: boolean indicating whether to connect via SSL
+
+        """
+        if not isinstance(value, bool):
+            raise TypeError('ssl must be a bool, but got %r' % (value,))
+        self._ssl = value
+
+    @property
+    def ssl_options(self):
+        """
+        :returns: None or a dict of options to pass to `ssl.wrap_socket`.
+            Defaults to `DEFAULT_SSL_OPTIONS`.
+
+        """
+        return self._ssl_options
+
+    @ssl_options.setter
+    def ssl_options(self, value):
+        """
+        :param value: None, a dict of options to pass to `ssl.wrap_socket` or
+            an SSLOptions object for advanced setup.
+
+        """
+        if not isinstance(value, (dict, SSLOptions, type(None))):
+            raise TypeError(
+                'ssl_options must be a dict, None or an SSLOptions but got %r'
+                % (value, ))
+        # Copy the mutable object to avoid accidental side-effects
+        self._ssl_options = copy.deepcopy(value)
+
+
+    @property
+    def virtual_host(self):
+        """
+        :returns: rabbitmq virtual host name. Defaults to
+            `DEFAULT_VIRTUAL_HOST`.
+
+        """
+        return self._virtual_host
+
+    @virtual_host.setter
+    def virtual_host(self, value):
+        """
+        :param str value: rabbitmq virtual host name
+
+        """
+        if not isinstance(value, basestring):
+            raise TypeError('virtual_host must be a str, but got %r' % (value,))
+        self._virtual_host = value
+
+    @property
+    def tcp_options(self):
+        """
+        :returns: None or a dict of options to pass to the underlying socket
+        """
+        return self._tcp_options
+
+    @tcp_options.setter
+    def tcp_options(self, value):
+        """
+        :param value: None or a dict of options to pass to the underlying
+            socket. Currently supported are TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
+            and TCP_USER_TIMEOUT. Availability of these may depend on your platform.
+        """
+        if not isinstance(value, (dict, type(None))):
+            raise TypeError('tcp_options must be a dict or None, but got %r' %
+                            (value,))
+        self._tcp_options = value
+
+
+class ConnectionParameters(Parameters):
+    """Connection parameters object that is passed into the connection adapter
+    upon construction.
+
+    """
+
+    # Protect against accidental assignment of an invalid attribute
+    __slots__ = ()
+
+    class _DEFAULT(object):
+        """Designates default parameter value; internal use"""
+        pass
+
+    def __init__(self,  # pylint: disable=R0913,R0914,R0912
+                 host=_DEFAULT,
+                 port=_DEFAULT,
+                 virtual_host=_DEFAULT,
+                 credentials=_DEFAULT,
+                 channel_max=_DEFAULT,
+                 frame_max=_DEFAULT,
+                 heartbeat=_DEFAULT,
+                 ssl=_DEFAULT,
+                 ssl_options=_DEFAULT,
+                 connection_attempts=_DEFAULT,
+                 retry_delay=_DEFAULT,
+                 socket_timeout=_DEFAULT,
+                 locale=_DEFAULT,
+                 backpressure_detection=_DEFAULT,
+                 blocked_connection_timeout=_DEFAULT,
+                 client_properties=_DEFAULT,
+                 tcp_options=_DEFAULT,
+                 **kwargs):
+        """Create a new ConnectionParameters instance. See `Parameters` for
+        default values.
+
+        :param str host: Hostname or IP Address to connect to
+        :param int port: TCP port to connect to
+        :param str virtual_host: RabbitMQ virtual host to use
+        :param pika.credentials.Credentials credentials: auth credentials
+        :param int channel_max: Maximum number of channels to allow
+        :param int frame_max: The maximum byte size for an AMQP frame
+        :param int|None|callable heartbeat: Controls AMQP heartbeat timeout negotiation
+            during connection tuning. An integer value always overrides the value
+            proposed by broker. Use 0 to deactivate heartbeats and None to always
+            accept the broker's proposal. If a callable is given, it will be called
+            with the connection instance and the heartbeat timeout proposed by broker
+            as its arguments. The callback should return a non-negative integer that
+            will be used to override the broker's proposal.
+        :param bool ssl: Enable SSL
+        :param dict ssl_options: None or a dict of arguments to be passed to
+            ssl.wrap_socket
+        :param int connection_attempts: Maximum number of retry attempts
+        :param int|float retry_delay: Time to wait in seconds before the next
+            connection attempt
+        :param int|float socket_timeout: Use for high latency networks
+        :param str locale: Set the locale value
+        :param bool backpressure_detection: DEPRECATED in favor of
+            `Connection.Blocked` and `Connection.Unblocked`. See
+            `Connection.add_on_connection_blocked_callback`.
+        :param blocked_connection_timeout: If not None,
+            the value is a non-negative timeout, in seconds, for the
+            connection to remain blocked (triggered by Connection.Blocked from
+            broker); if the timeout expires before connection becomes unblocked,
+            the connection will be torn down, triggering the adapter-specific
+            mechanism for informing client app about the closed connection (
+            e.g., on_close_callback or ConnectionClosed exception) with
+            `reason_code` of `InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT`.
+        :type blocked_connection_timeout: None, int, float
+        :param client_properties: None or dict of client properties used to
+            override the fields in the default client properties reported to
+            RabbitMQ via `Connection.StartOk` method.
+        :param heartbeat_interval: DEPRECATED; use `heartbeat` instead, and
+            don't pass both
+        :param tcp_options: None or a dict of TCP options to set for socket
+        """
+        super(ConnectionParameters, self).__init__()
+
+        if backpressure_detection is not self._DEFAULT:
+            self.backpressure_detection = backpressure_detection
+
+        if blocked_connection_timeout is not self._DEFAULT:
+            self.blocked_connection_timeout = blocked_connection_timeout
+
+        if channel_max is not self._DEFAULT:
+            self.channel_max = channel_max
+
+        if client_properties is not self._DEFAULT:
+            self.client_properties = client_properties
+
+        if connection_attempts is not self._DEFAULT:
+            self.connection_attempts = connection_attempts
+
+        if credentials is not self._DEFAULT:
+            self.credentials = credentials
+
+        if frame_max is not self._DEFAULT:
+            self.frame_max = frame_max
+
+        if heartbeat is not self._DEFAULT:
+            self.heartbeat = heartbeat
+
+        try:
+            heartbeat_interval = kwargs.pop('heartbeat_interval')
+        except KeyError:
+            # Good, this one is deprecated
+            pass
+        else:
+            warnings.warn('heartbeat_interval is deprecated, use heartbeat',
+                          DeprecationWarning, stacklevel=2)
+            if heartbeat is not self._DEFAULT:
+                raise TypeError('heartbeat and deprecated heartbeat_interval '
+                                'are mutually-exclusive')
+            self.heartbeat = heartbeat_interval
+
+        if host is not self._DEFAULT:
+            self.host = host
+
+        if locale is not self._DEFAULT:
+            self.locale = locale
+
+        if retry_delay is not self._DEFAULT:
+            self.retry_delay = retry_delay
+
+        if socket_timeout is not self._DEFAULT:
+            self.socket_timeout = socket_timeout
+
+        if ssl is not self._DEFAULT:
+            self.ssl = ssl
+
+        if ssl_options is not self._DEFAULT:
+            self.ssl_options = ssl_options
+
+        # Set port after SSL status is known
+        if port is not self._DEFAULT:
+            self.port = port
+        elif ssl is not self._DEFAULT:
+            self.port = self.DEFAULT_SSL_PORT if self.ssl else self.DEFAULT_PORT
+
+        if virtual_host is not self._DEFAULT:
+            self.virtual_host = virtual_host
+
+        if tcp_options is not self._DEFAULT:
+            self.tcp_options = tcp_options
+
+        if kwargs:
+            raise TypeError('Unexpected kwargs: %r' % (kwargs,))
+
+
+class URLParameters(Parameters):
+    """Connect to RabbitMQ via an AMQP URL in the format::
+
+        
amqp://username:password@host:port/[?query-string] + + Ensure that the virtual host is URI encoded when specified. For example if + you are using the default "/" virtual host, the value should be `%2f`. + + See `Parameters` for default values. + + Valid query string values are: + + - backpressure_detection: + DEPRECATED in favor of + `Connection.Blocked` and `Connection.Unblocked`. See + `Connection.add_on_connection_blocked_callback`. + - channel_max: + Override the default maximum channel count value + - client_properties: + dict of client properties used to override the fields in the default + client properties reported to RabbitMQ via `Connection.StartOk` + method + - connection_attempts: + Specify how many times pika should try and reconnect before it gives up + - frame_max: + Override the default maximum frame size for communication + - heartbeat: + Desired connection heartbeat timeout for negotiation. If not present + the broker's value is accepted. 0 turns heartbeat off. + - locale: + Override the default `en_US` locale value + - ssl: + Toggle SSL, possible values are `t`, `f` + - ssl_options: + Arguments passed to :meth:`ssl.wrap_socket` + - retry_delay: + The number of seconds to sleep before attempting to connect on + connection failure. + - socket_timeout: + Override low level socket timeout value + - blocked_connection_timeout: + Set the timeout, in seconds, that the connection may remain blocked + (triggered by Connection.Blocked from broker); if the timeout + expires before connection becomes unblocked, the connection will be + torn down, triggering the connection's on_close_callback + - tcp_options: + Set the tcp options for the underlying socket. + + :param str url: The AMQP URL to connect to + + """ + + # Protect against accidental assignment of an invalid attribute + __slots__ = ('_all_url_query_values',) + + + # The name of the private function for parsing and setting a given URL query + # arg is constructed by catenating the query arg's name to this prefix + _SETTER_PREFIX = '_set_url_' + + def __init__(self, url): + """Create a new URLParameters instance. + + :param str url: The URL value + + """ + super(URLParameters, self).__init__() + + self._all_url_query_values = None + + # Handle the Protocol scheme + # + # Fix up scheme amqp(s) to http(s) so urlparse won't barf on python + # prior to 2.7. On Python 2.6.9, + # `urlparse('amqp://127.0.0.1/%2f?socket_timeout=1')` produces an + # incorrect path='/%2f?socket_timeout=1' + if url[0:4].lower() == 'amqp': + url = 'http' + url[4:] + + # TODO Is support for the alternative http(s) schemes intentional? 
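+        #
+        # With the scheme rewritten to http(s), urlparse on these older
+        # Pythons yields the expected hostname, port, username/password,
+        # path and query components; self.ssl is then derived below from
+        # whether the original scheme was amqps (https) or amqp (http).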
+ + parts = urlparse.urlparse(url) + + if parts.scheme == 'https': + self.ssl = True + elif parts.scheme == 'http': + self.ssl = False + elif parts.scheme: + raise ValueError('Unexpected URL scheme %r; supported scheme ' + 'values: amqp, amqps' % (parts.scheme,)) + + if parts.hostname is not None: + self.host = parts.hostname + + # Take care of port after SSL status is known + if parts.port is not None: + self.port = parts.port + else: + self.port = self.DEFAULT_SSL_PORT if self.ssl else self.DEFAULT_PORT + + if parts.username is not None: + self.credentials = pika_credentials.PlainCredentials(url_unquote(parts.username), + url_unquote(parts.password)) + + # Get the Virtual Host + if len(parts.path) > 1: + self.virtual_host = url_unquote(parts.path.split('/')[1]) + + # Handle query string values, validating and assigning them + self._all_url_query_values = urlparse.parse_qs(parts.query) + + for name, value in dict_iteritems(self._all_url_query_values): + try: + set_value = getattr(self, self._SETTER_PREFIX + name) + except AttributeError: + raise ValueError('Unknown URL parameter: %r' % (name,)) + + try: + (value,) = value + except ValueError: + raise ValueError('Expected exactly one value for URL parameter ' + '%s, but got %i values: %s' % ( + name, len(value), value)) + + set_value(value) + + def _set_url_backpressure_detection(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + backpressure_detection = {'t': True, 'f': False}[value] + except KeyError: + raise ValueError('Invalid backpressure_detection value: %r' % + (value,)) + self.backpressure_detection = backpressure_detection + + def _set_url_blocked_connection_timeout(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + blocked_connection_timeout = float(value) + except ValueError as exc: + raise ValueError('Invalid blocked_connection_timeout value %r: %r' % + (value, exc,)) + self.blocked_connection_timeout = blocked_connection_timeout + + def _set_url_channel_max(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + channel_max = int(value) + except ValueError as exc: + raise ValueError('Invalid channel_max value %r: %r' % (value, exc,)) + self.channel_max = channel_max + + def _set_url_client_properties(self, value): + """Deserialize and apply the corresponding query string arg""" + self.client_properties = ast.literal_eval(value) + + def _set_url_connection_attempts(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + connection_attempts = int(value) + except ValueError as exc: + raise ValueError('Invalid connection_attempts value %r: %r' % + (value, exc,)) + self.connection_attempts = connection_attempts + + def _set_url_frame_max(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + frame_max = int(value) + except ValueError as exc: + raise ValueError('Invalid frame_max value %r: %r' % (value, exc,)) + self.frame_max = frame_max + + def _set_url_heartbeat(self, value): + """Deserialize and apply the corresponding query string arg""" + if 'heartbeat_interval' in self._all_url_query_values: + raise ValueError('Deprecated URL parameter heartbeat_interval must ' + 'not be specified together with heartbeat') + + try: + heartbeat_timeout = int(value) + except ValueError as exc: + raise ValueError('Invalid heartbeat value %r: %r' % (value, exc,)) + self.heartbeat = heartbeat_timeout + + def _set_url_heartbeat_interval(self, value): + """Deserialize and apply the 
corresponding query string arg""" + warnings.warn('heartbeat_interval is deprecated, use heartbeat', + DeprecationWarning, stacklevel=2) + + if 'heartbeat' in self._all_url_query_values: + raise ValueError('Deprecated URL parameter heartbeat_interval must ' + 'not be specified together with heartbeat') + + try: + heartbeat_timeout = int(value) + except ValueError as exc: + raise ValueError('Invalid heartbeat_interval value %r: %r' % + (value, exc,)) + self.heartbeat = heartbeat_timeout + + def _set_url_locale(self, value): + """Deserialize and apply the corresponding query string arg""" + self.locale = value + + def _set_url_retry_delay(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + retry_delay = float(value) + except ValueError as exc: + raise ValueError('Invalid retry_delay value %r: %r' % (value, exc,)) + self.retry_delay = retry_delay + + def _set_url_socket_timeout(self, value): + """Deserialize and apply the corresponding query string arg""" + try: + socket_timeout = float(value) + except ValueError as exc: + raise ValueError('Invalid socket_timeout value %r: %r' % + (value, exc,)) + self.socket_timeout = socket_timeout + + def _set_url_ssl_options(self, value): + """Deserialize and apply the corresponding query string arg + + """ + opts = ast.literal_eval(value) + if opts is None: + if self.ssl_options is not None: + raise ValueError( + 'Specified ssl_options=None URL arg is inconsistent with ' + 'the specified https URL scheme.') + else: + self.ssl_options = pika.SSLOptions( + keyfile=opts.get('keyfile'), + key_password=opts.get('key_password') or opts.get('password'), + certfile=opts.get('certfile'), + verify_mode=opts.get('verify_mode') or ssl.CERT_NONE, + ssl_version=opts.get('ssl_version') or ssl.PROTOCOL_TLSv1, + cafile=opts.get('cafile'), + capath=opts.get('capath'), + cadata=opts.get('cadata'), + ciphers=opts.get('ciphers'), + server_hostname=opts.get('server_hostname')) + + def _set_url_tcp_options(self, value): + """Deserialize and apply the corresponding query string arg""" + self.tcp_options = ast.literal_eval(value) + +class SSLOptions(object): + """Class used to provide parameters for optional fine grained control of SSL + socket wrapping. 
+
+    :param string keyfile: The key file to pass to SSLContext.load_cert_chain
+    :param string key_password: The key password to be passed to
+        SSLContext.load_cert_chain
+    :param string certfile: The certificate file to be passed to
+        SSLContext.load_cert_chain
+    :param bool server_side: Passed to SSLContext.wrap_socket
+    :param verify_mode: Passed to SSLContext.wrap_socket
+    :param ssl_version: Passed to SSLContext init, defines the ssl
+        version to use
+    :param string cafile: The CA file passed to
+        SSLContext.load_verify_locations
+    :param string capath: The CA path passed to
+        SSLContext.load_verify_locations
+    :param string cadata: The CA data passed to
+        SSLContext.load_verify_locations
+    :param do_handshake_on_connect: Passed to SSLContext.wrap_socket
+    :param suppress_ragged_eofs: Passed to SSLContext.wrap_socket
+    :param ciphers: Passed to SSLContext.set_ciphers
+    :param server_hostname: Passed to SSLContext.wrap_socket, used to enable SNI
+    """
+
+    def __init__(self,
+                 keyfile=None,
+                 key_password=None,
+                 certfile=None,
+                 server_side=False,
+                 verify_mode=ssl.CERT_NONE,
+                 ssl_version=ssl.PROTOCOL_TLSv1,
+                 cafile=None,
+                 capath=None,
+                 cadata=None,
+                 do_handshake_on_connect=True,
+                 suppress_ragged_eofs=True,
+                 ciphers=None,
+                 server_hostname=None):
+        self.keyfile = keyfile
+        self.key_password = key_password
+        self.certfile = certfile
+        self.server_side = server_side
+        self.verify_mode = verify_mode
+        self.ssl_version = ssl_version
+        self.cafile = cafile
+        self.capath = capath
+        self.cadata = cadata
+        self.do_handshake_on_connect = do_handshake_on_connect
+        self.suppress_ragged_eofs = suppress_ragged_eofs
+        self.ciphers = ciphers
+        self.server_hostname = server_hostname
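+
+# Usage sketch (illustrative): wiring SSLOptions into connection parameters;
+# the file paths and hostname are placeholders:
+#
+#     ssl_options = SSLOptions(cafile='/etc/ssl/ca.pem',
+#                              certfile='/etc/ssl/client.pem',
+#                              keyfile='/etc/ssl/client.key',
+#                              verify_mode=ssl.CERT_REQUIRED,
+#                              server_hostname='broker.example.com')
+#     params = ConnectionParameters(host='broker.example.com', ssl=True,
+#                                   ssl_options=ssl_options)
+#
+# The same can be reached via URLParameters with an amqps URL, since the
+# https scheme mapping above turns on ssl and the default port 5671.
+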
+
+class Connection(object):
+    """This is the core class that implements communication with RabbitMQ. This
+    class should not be invoked directly but rather through the use of an
+    adapter such as SelectConnection or BlockingConnection.
+
+    :param pika.connection.Parameters parameters: Connection parameters
+    :param method on_open_callback: Called when the connection is opened
+    :param method on_open_error_callback: Called if the connection can't
+        be opened
+    :param method on_close_callback: Called when the connection is closed
+
+    """
+
+    # Disable pylint messages concerning "method could be a function"
+    # pylint: disable=R0201
+
+    ON_CONNECTION_BACKPRESSURE = '_on_connection_backpressure'
+    ON_CONNECTION_BLOCKED = '_on_connection_blocked'
+    ON_CONNECTION_CLOSED = '_on_connection_closed'
+    ON_CONNECTION_ERROR = '_on_connection_error'
+    ON_CONNECTION_OPEN = '_on_connection_open'
+    ON_CONNECTION_UNBLOCKED = '_on_connection_unblocked'
+    CONNECTION_CLOSED = 0
+    CONNECTION_INIT = 1
+    CONNECTION_PROTOCOL = 2
+    CONNECTION_START = 3
+    CONNECTION_TUNE = 4
+    CONNECTION_OPEN = 5
+    CONNECTION_CLOSING = 6  # client-initiated close in progress
+
+    _STATE_NAMES = {
+        CONNECTION_CLOSED: 'CLOSED',
+        CONNECTION_INIT: 'INIT',
+        CONNECTION_PROTOCOL: 'PROTOCOL',
+        CONNECTION_START: 'START',
+        CONNECTION_TUNE: 'TUNE',
+        CONNECTION_OPEN: 'OPEN',
+        CONNECTION_CLOSING: 'CLOSING'
+    }
+
+    def __init__(self,
+                 parameters=None,
+                 on_open_callback=None,
+                 on_open_error_callback=None,
+                 on_close_callback=None):
+        """Connection initialization expects an object that has implemented the
+        Parameters class and a callback function to notify when we have
+        successfully connected to the AMQP Broker.
+
+        Available Parameters classes are the ConnectionParameters class and
+        URLParameters class.
+
+        :param pika.connection.Parameters parameters: Connection parameters
+        :param method on_open_callback: Called when the connection is opened
+        :param method on_open_error_callback: Called if the connection can't
+            be established: on_open_error_callback(connection, str|exception)
+        :param method on_close_callback: Called when the connection is closed:
+            `on_close_callback(connection, reason_code, reason_text)`, where
+            `reason_code` is either an IETF RFC 821 reply code for AMQP-level
+            closures or a value from `pika.connection.InternalCloseReasons` for
+            internal causes, such as socket errors.
+
+        """
+        self.connection_state = self.CONNECTION_CLOSED
+
+        # Holds timer when the initial connect or reconnect is scheduled
+        self._connection_attempt_timer = None
+
+        # Used to hold timer if configured for Connection.Blocked timeout
+        self._blocked_conn_timer = None
+
+        self.heartbeat = None
+
+        # Set our configuration options
+        self.params = (copy.deepcopy(parameters) if parameters is not None else
+                       ConnectionParameters())
+
+        # Define our callback dictionary
+        self.callbacks = callback.CallbackManager()
+
+        # Attributes that will be properly initialized by _init_connection_state
+        # and/or during connection handshake.
+        self.server_capabilities = None
+        self.server_properties = None
+        self._body_max_length = None
+        self.known_hosts = None
+        self.closing = None
+        self._frame_buffer = None
+        self._channels = None
+        self._backpressure_multiplier = None
+        self.remaining_connection_attempts = None
+
+        self._init_connection_state()
+
+
+        # Add the on connection error callback
+        self.callbacks.add(0, self.ON_CONNECTION_ERROR,
+                           on_open_error_callback or self._on_connection_error,
+                           False)
+
+        # Add the on connection open callback
+        if on_open_callback:
+            self.add_on_open_callback(on_open_callback)
+
+        # Add the on connection close callback
+        if on_close_callback:
+            self.add_on_close_callback(on_close_callback)
+
+        self.connect()
+
+    def add_backpressure_callback(self, callback_method):
+        """Call method "callback" when pika believes backpressure is being
+        applied.
+
+        :param method callback_method: The method to call
+
+        """
+        self.callbacks.add(0, self.ON_CONNECTION_BACKPRESSURE, callback_method,
+                           False)
+
+    def add_on_close_callback(self, callback_method):
+        """Add a callback notification when the connection has closed. The
+        callback will be passed the connection, the reply_code (int) and the
+        reply_text (str), if sent by the remote server.
+
+        :param method callback_method: Callback to call on close
+
+        """
+        self.callbacks.add(0, self.ON_CONNECTION_CLOSED, callback_method, False)
+
+    def add_on_connection_blocked_callback(self, callback_method):
+        """Add a callback to be notified when RabbitMQ has sent a
+        ``Connection.Blocked`` frame indicating that RabbitMQ is low on
+        resources. Publishers can use this to voluntarily suspend publishing,
+        instead of relying on back pressure throttling. The callback
+        will be passed the ``Connection.Blocked`` method frame.
+
+        See also `ConnectionParameters.blocked_connection_timeout`.
+ + :param method callback_method: Callback to call on `Connection.Blocked`, + having the signature `callback_method(pika.frame.Method)`, where the + method frame's `method` member is of type + `pika.spec.Connection.Blocked` + + """ + self.callbacks.add(0, spec.Connection.Blocked, callback_method, False) + + def add_on_connection_unblocked_callback(self, callback_method): + """Add a callback to be notified when RabbitMQ has sent a + ``Connection.Unblocked`` frame letting publishers know it's ok + to start publishing again. The callback will be passed the + ``Connection.Unblocked`` method frame. + + :param method callback_method: Callback to call on + `Connection.Unblocked`, having the signature + `callback_method(pika.frame.Method)`, where the method frame's + `method` member is of type `pika.spec.Connection.Unblocked` + + """ + self.callbacks.add(0, spec.Connection.Unblocked, callback_method, False) + + def add_on_open_callback(self, callback_method): + """Add a callback notification when the connection has opened. + + :param method callback_method: Callback to call when open + + """ + self.callbacks.add(0, self.ON_CONNECTION_OPEN, callback_method, False) + + def add_on_open_error_callback(self, callback_method, remove_default=True): + """Add a callback notification when the connection can not be opened. + + The callback method should accept the connection object that could not + connect, and an optional error message. + + :param method callback_method: Callback to call when can't connect + :param bool remove_default: Remove default exception raising callback + + """ + if remove_default: + self.callbacks.remove(0, self.ON_CONNECTION_ERROR, + self._on_connection_error) + self.callbacks.add(0, self.ON_CONNECTION_ERROR, callback_method, False) + + def add_timeout(self, deadline, callback_method): + """Adapters should override to call the callback after the + specified number of seconds have elapsed, using a timer, or a + thread, or similar. + + :param int deadline: The number of seconds to wait to call callback + :param method callback_method: The callback method + + """ + raise NotImplementedError + + def channel(self, on_open_callback, channel_number=None): + """Create a new channel with the next available channel number or pass + in a channel number to use. Must be non-zero if you would like to + specify but it is recommended that you let Pika manage the channel + numbers. + + :param method on_open_callback: The callback when the channel is opened + :param int channel_number: The channel number to use, defaults to the + next available. + :rtype: pika.channel.Channel + + """ + if not self.is_open: + # TODO if state is OPENING, then ConnectionClosed might be wrong + raise exceptions.ConnectionClosed( + 'Channel allocation requires an open connection: %s' % self) + + if not channel_number: + channel_number = self._next_channel_number() + self._channels[channel_number] = self._create_channel(channel_number, + on_open_callback) + self._add_channel_callbacks(channel_number) + self._channels[channel_number].open() + return self._channels[channel_number] + + def close(self, reply_code=200, reply_text='Normal shutdown'): + """Disconnect from RabbitMQ. If there are any open channels, it will + attempt to close them prior to fully disconnecting. Channels which + have active consumers will attempt to send a Basic.Cancel to RabbitMQ + to cleanly stop the delivery of messages prior to closing the channel. 
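Continuing the sketch above, allocating a channel and then shutting down cleanly (assumes `conn` is the open connection from the earlier sketch):

    def on_channel_open(channel):
        print('channel %i open' % channel.channel_number)
        conn.close(reply_code=200, reply_text='Normal shutdown')

    conn.channel(on_channel_open)  # the channel number is allocated automatically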
+ + :param int reply_code: The code number for the close + :param str reply_text: The text reason for the close + + """ + if self.is_closing or self.is_closed: + LOGGER.warning('Suppressing close request on %s', self) + return + + # NOTE The connection is either in opening or open state + + # Initiate graceful closing of channels that are OPEN or OPENING + if self._channels: + self._close_channels(reply_code, reply_text) + + # Set our connection state + self._set_connection_state(self.CONNECTION_CLOSING) + LOGGER.info("Closing connection (%s): %s", reply_code, reply_text) + self.closing = reply_code, reply_text + + # If there are channels that haven't finished closing yet, then + # _on_close_ready will finally be called from _on_channel_cleanup once + # all channels have been closed + if not self._channels: + # We can initiate graceful closing of the connection right away, + # since no more channels remain + self._on_close_ready() + else: + LOGGER.info('Connection.close is waiting for ' + '%d channels to close: %s', len(self._channels), self) + + def connect(self): + """Invoke if trying to reconnect to a RabbitMQ server. Constructing the + Connection object should connect on its own. + + """ + assert self._connection_attempt_timer is None, ( + 'connect timer was already scheduled') + + assert self.is_closed, ( + 'connect expected CLOSED state, but got: {}'.format( + self._STATE_NAMES[self.connection_state])) + + self._set_connection_state(self.CONNECTION_INIT) + + # Schedule a timer callback to start the actual connection logic from + # event loop's context, thus avoiding error callbacks in the context of + # the caller, which could be the constructor. + self._connection_attempt_timer = self.add_timeout( + 0, + self._on_connect_timer) + + + def remove_timeout(self, timeout_id): + """Adapters should override: Remove a timeout + + :param str timeout_id: The timeout id to remove + + """ + raise NotImplementedError + + def set_backpressure_multiplier(self, value=10): + """Alter the backpressure multiplier value. We set this to 10 by default. + This value is used to raise warnings and trigger the backpressure + callback. + + :param int value: The multiplier value to set + + """ + self._backpressure_multiplier = value + + # + # Connection state properties + # + + @property + def is_closed(self): + """ + Returns a boolean reporting the current connection state. + """ + return self.connection_state == self.CONNECTION_CLOSED + + @property + def is_closing(self): + """ + Returns True if connection is in the process of closing due to + client-initiated `close` request, but closing is not yet complete. + """ + return self.connection_state == self.CONNECTION_CLOSING + + @property + def is_open(self): + """ + Returns a boolean reporting the current connection state. + """ + return self.connection_state == self.CONNECTION_OPEN + + # + # Properties that reflect server capabilities for the current connection + # + + @property + def basic_nack(self): + """Specifies if the server supports basic.nack on the active connection. + + :rtype: bool + + """ + return self.server_capabilities.get('basic.nack', False) + + @property + def consumer_cancel_notify(self): + """Specifies if the server supports consumer cancel notification on the + active connection. + + :rtype: bool + + """ + return self.server_capabilities.get('consumer_cancel_notify', False) + + @property + def exchange_exchange_bindings(self): + """Specifies if the active connection supports exchange to exchange + bindings. 
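These state and capability properties are read-only views; a small sketch of how calling code might consult them, again assuming `conn` from the earlier sketches:

    if conn.is_open:
        print('basic.nack supported:', conn.basic_nack)
        print('consumer cancel notify:', conn.consumer_cancel_notify)
    elif conn.is_closed:
        conn.connect()  # explicit reconnect; the constructor connects on its own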
+ + :rtype: bool + + """ + return self.server_capabilities.get('exchange_exchange_bindings', False) + + @property + def publisher_confirms(self): + """Specifies if the active connection can use publisher confirmations. + + :rtype: bool + + """ + return self.server_capabilities.get('publisher_confirms', False) + + # + # Internal methods for managing the communication process + # + + def _adapter_connect(self): + """Subclasses should override to set up the outbound socket connection. + + :raises: NotImplementedError + + """ + raise NotImplementedError + + def _adapter_disconnect(self): + """Subclasses should override this to cause the underlying transport + (socket) to close. + + :raises: NotImplementedError + + """ + raise NotImplementedError + + def _add_channel_callbacks(self, channel_number): + """Add the appropriate callbacks for the specified channel number. + + :param int channel_number: The channel number for the callbacks + + """ + # pylint: disable=W0212 + + # This permits us to garbage-collect our reference to the channel + # regardless of whether it was closed by client or broker, and do so + # after all channel-close callbacks. + self._channels[channel_number]._add_on_cleanup_callback( + self._on_channel_cleanup) + + def _add_connection_start_callback(self): + """Add a callback for when a Connection.Start frame is received from + the broker. + + """ + self.callbacks.add(0, spec.Connection.Start, self._on_connection_start) + + def _add_connection_tune_callback(self): + """Add a callback for when a Connection.Tune frame is received.""" + self.callbacks.add(0, spec.Connection.Tune, self._on_connection_tune) + + def _append_frame_buffer(self, value): + """Append the bytes to the frame buffer. + + :param str value: The bytes to append to the frame buffer + + """ + self._frame_buffer += value + + @property + def _buffer_size(self): + """Return the suggested buffer size from the connection state/tune or + the default if that is None. + + :rtype: int + + """ + return self.params.frame_max or spec.FRAME_MAX_SIZE + + def _check_for_protocol_mismatch(self, value): + """Invoked when starting a connection to make sure it's a supported + protocol. + + :param pika.frame.Method value: The frame to check + :raises: ProtocolVersionMismatch + + """ + if (value.method.version_major, + value.method.version_minor) != spec.PROTOCOL_VERSION[0:2]: + # TODO This should call _on_terminate for proper callbacks and + # cleanup + raise exceptions.ProtocolVersionMismatch(frame.ProtocolHeader(), + value) + + @property + def _client_properties(self): + """Return the client properties dictionary. + + :rtype: dict + + """ + properties = { + 'product': PRODUCT, + 'platform': 'Python %s' % platform.python_version(), + 'capabilities': { + 'authentication_failure_close': True, + 'basic.nack': True, + 'connection.blocked': True, + 'consumer_cancel_notify': True, + 'publisher_confirms': True + }, + 'information': 'See http://pika.rtfd.org', + 'version': __version__ + } + + if self.params.client_properties: + properties.update(self.params.client_properties) + + return properties + + def _close_channels(self, reply_code, reply_text): + """Initiate graceful closing of channels that are in OPEN or OPENING + states, passing reply_code and reply_text. 
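The `client_properties` connection parameter is merged over the defaults built by `_client_properties` above; a sketch, where the `connection_name` key is one RabbitMQ's management UI can display and the value is illustrative:

    params = pika.ConnectionParameters(
        host='localhost',
        client_properties={'connection_name': 'order-service'})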
+ + :param int reply_code: The code for why the channels are being closed + :param str reply_text: The text reason for why the channels are closing + + """ + assert self.is_open, str(self) + + for channel_number in dictkeys(self._channels): + chan = self._channels[channel_number] + if not (chan.is_closing or chan.is_closed): + chan.close(reply_code, reply_text) + + def _connect(self): + """Attempt to connect to RabbitMQ + + :rtype: bool + + """ + warnings.warn('This method is deprecated, use Connection.connect', + DeprecationWarning) + + def _create_channel(self, channel_number, on_open_callback): + """Create a new channel using the specified channel number and calling + back the method specified by on_open_callback + + :param int channel_number: The channel number to use + :param method on_open_callback: The callback when the channel is opened + + """ + LOGGER.debug('Creating channel %s', channel_number) + return pika.channel.Channel(self, channel_number, on_open_callback) + + def _create_heartbeat_checker(self): + """Create a heartbeat checker instance if there is a heartbeat interval + set. + + :rtype: pika.heartbeat.Heartbeat|None + + """ + if self.params.heartbeat is not None and self.params.heartbeat > 0: + LOGGER.debug('Creating a HeartbeatChecker: %r', + self.params.heartbeat) + return heartbeat.HeartbeatChecker(self, self.params.heartbeat) + + return None + + def _remove_heartbeat(self): + """Stop the heartbeat checker if it exists + + """ + if self.heartbeat: + self.heartbeat.stop() + self.heartbeat = None + + def _deliver_frame_to_channel(self, value): + """Deliver the frame to the channel specified in the frame. + + :param pika.frame.Method value: The frame to deliver + + """ + if not value.channel_number in self._channels: + # This should never happen and would constitute breach of the + # protocol + LOGGER.critical( + 'Received %s frame for unregistered channel %i on %s', + value.NAME, value.channel_number, self) + return + + # pylint: disable=W0212 + self._channels[value.channel_number]._handle_content_frame(value) + + def _detect_backpressure(self): + """Attempt to calculate if TCP backpressure is being applied due to + our outbound buffer being larger than the average frame size over + a window of frames. + + """ + avg_frame_size = self.bytes_sent / self.frames_sent + buffer_size = sum([len(f) for f in self.outbound_buffer]) + if buffer_size > (avg_frame_size * self._backpressure_multiplier): + LOGGER.warning(BACKPRESSURE_WARNING, buffer_size, + int(buffer_size / avg_frame_size)) + self.callbacks.process(0, self.ON_CONNECTION_BACKPRESSURE, self) + + def _ensure_closed(self): + """If the connection is not closed, close it.""" + if self.is_open: + self.close() + + def _flush_outbound(self): + """Adapters should override to flush the contents of outbound_buffer + out along the socket. + + :raises: NotImplementedError + + """ + raise NotImplementedError + + def _get_body_frame_max_length(self): + """Calculate the maximum amount of bytes that can be in a body frame. + + :rtype: int + + """ + return ( + self.params.frame_max - spec.FRAME_HEADER_SIZE - spec.FRAME_END_SIZE + ) + + def _get_credentials(self, method_frame): + """Get credentials for authentication. 
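A worked example of `_get_body_frame_max_length` under the AMQP defaults; the constants mirror the `pika.spec` values used above:

    import math

    FRAME_MAX = 131072                         # spec.FRAME_MAX_SIZE
    FRAME_HEADER_SIZE, FRAME_END_SIZE = 7, 1   # spec constants
    body_max = FRAME_MAX - FRAME_HEADER_SIZE - FRAME_END_SIZE  # 131064 bytes

    # A 1 MiB body therefore marshals into 9 Body frames:
    print(int(math.ceil(1048576.0 / body_max)))  # -> 9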
+ + :param pika.frame.MethodFrame method_frame: The Connection.Start frame + :rtype: tuple(str, str) + + """ + (auth_type, + response) = self.params.credentials.response_for(method_frame.method) + if not auth_type: + # TODO this should call _on_terminate for proper callbacks and + # cleanup instead + raise exceptions.AuthenticationError(self.params.credentials.TYPE) + self.params.credentials.erase_credentials() + return auth_type, response + + def _has_pending_callbacks(self, value): + """Return true if there are any callbacks pending for the specified + frame. + + :param pika.frame.Method value: The frame to check + :rtype: bool + + """ + return self.callbacks.pending(value.channel_number, value.method) + + def _init_connection_state(self): + """Initialize or reset all of the internal state variables for a given + connection. On disconnect or reconnect all of the state needs to + be wiped. + + """ + # Connection state + self._set_connection_state(self.CONNECTION_CLOSED) + + # Negotiated server properties + self.server_properties = None + + # Outbound buffer for buffering writes until we're able to send them + self.outbound_buffer = collections.deque([]) + + # Inbound buffer for decoding frames + self._frame_buffer = bytes() + + # Dict of open channels + self._channels = dict() + + # Remaining connection attempts + self.remaining_connection_attempts = self.params.connection_attempts + + # Data used for Heartbeat checking and back-pressure detection + self.bytes_sent = 0 + self.bytes_received = 0 + self.frames_sent = 0 + self.frames_received = 0 + self.heartbeat = None + + # Default back-pressure multiplier value + self._backpressure_multiplier = 10 + + # When closing, hold reason why + self.closing = 0, 'Not specified' + + # Our starting point once connected, first frame received + self._add_connection_start_callback() + + # Add a callback handler for the Broker telling us to disconnect. + # NOTE: As of RabbitMQ 3.6.0, RabbitMQ broker may send Connection.Close + # to signal error during connection setup (and wait a longish time + # before closing the TCP/IP stream). Earlier RabbitMQ versions + # simply closed the TCP/IP stream. + self.callbacks.add(0, spec.Connection.Close, self._on_connection_close) + + if self._connection_attempt_timer is not None: + # Connection attempt timer was active when teardown was initiated + self.remove_timeout(self._connection_attempt_timer) + self._connection_attempt_timer = None + + if self.params.blocked_connection_timeout is not None: + if self._blocked_conn_timer is not None: + # Blocked connection timer was active when teardown was + # initiated + self.remove_timeout(self._blocked_conn_timer) + self._blocked_conn_timer = None + + self.add_on_connection_blocked_callback( + self._on_connection_blocked) + self.add_on_connection_unblocked_callback( + self._on_connection_unblocked) + + def _is_method_frame(self, value): + """Returns true if the frame is a method frame. + + :param pika.frame.Frame value: The frame to evaluate + :rtype: bool + + """ + return isinstance(value, frame.Method) + + def _is_protocol_header_frame(self, value): + """Returns True if it's a protocol header frame. + + :rtype: bool + + """ + return isinstance(value, frame.ProtocolHeader) + + def _next_channel_number(self): + """Return the next available channel number or raise an exception. 
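The allocator reuses the smallest free number before growing; a self-contained illustration of the same rule:

    channels = {1: 'ch1', 2: 'ch2', 4: 'ch4'}   # channel 3 was closed earlier
    next_free = next(n for n in range(1, len(channels) + 2)
                     if n not in channels)
    assert next_free == 3   # the gap is reused before extending to 5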
+ + :rtype: int + + """ + limit = self.params.channel_max or pika.channel.MAX_CHANNELS + if len(self._channels) >= limit: + raise exceptions.NoFreeChannels() + + for num in xrange(1, len(self._channels) + 1): + if num not in self._channels: + return num + return len(self._channels) + 1 + + def _on_channel_cleanup(self, channel): + """Remove the channel from the dict of channels when Channel.CloseOk is + sent. If connection is closing and no more channels remain, proceed to + `_on_close_ready`. + + :param pika.channel.Channel channel: channel instance + + """ + try: + del self._channels[channel.channel_number] + LOGGER.debug('Removed channel %s', channel.channel_number) + except KeyError: + LOGGER.error('Channel %r not in channels', + channel.channel_number) + if self.is_closing: + if not self._channels: + # Initiate graceful closing of the connection + self._on_close_ready() + else: + # Once Connection enters CLOSING state, all remaining channels + # should also be in CLOSING state. Deviation from this would + # prevent Connection from completing its closing procedure. + channels_not_in_closing_state = [ + chan for chan in dict_itervalues(self._channels) + if not chan.is_closing] + if channels_not_in_closing_state: + LOGGER.critical( + 'Connection in CLOSING state has non-CLOSING ' + 'channels: %r', channels_not_in_closing_state) + + def _on_close_ready(self): + """Called when the Connection is in a state that it can close after + a close has been requested. This happens, for example, when all of the + channels are closed that were open when the close request was made. + + """ + if self.is_closed: + LOGGER.warning('_on_close_ready invoked when already closed') + return + + self._send_connection_close(self.closing[0], self.closing[1]) + + def _on_connected(self): + """Invoked when the socket is connected and it's time to start speaking + AMQP with the broker. + + """ + self._set_connection_state(self.CONNECTION_PROTOCOL) + + # Start the communication with the RabbitMQ Broker + self._send_frame(frame.ProtocolHeader()) + + def _on_blocked_connection_timeout(self): + """ Called when the "connection blocked timeout" expires. 
When this
+        happens, we tear down the connection.
+
+        """
+        self._blocked_conn_timer = None
+        self._on_terminate(InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT,
+                           'Blocked connection timeout expired')
+
+    def _on_connection_blocked(self, method_frame):
+        """Handle Connection.Blocked notification from RabbitMQ broker
+
+        :param pika.frame.Method method_frame: method frame having `method`
+            member of type `pika.spec.Connection.Blocked`
+        """
+        LOGGER.warning('Received %s from broker', method_frame)
+
+        if self._blocked_conn_timer is not None:
+            # RabbitMQ is not supposed to repeat Connection.Blocked, but it
+            # doesn't hurt to be careful
+            LOGGER.warning('_blocked_conn_timer %s already set when '
+                           '_on_connection_blocked is called',
+                           self._blocked_conn_timer)
+        else:
+            self._blocked_conn_timer = self.add_timeout(
+                self.params.blocked_connection_timeout,
+                self._on_blocked_connection_timeout)
+
+    def _on_connection_unblocked(self, method_frame):
+        """Handle Connection.Unblocked notification from RabbitMQ broker
+
+        :param pika.frame.Method method_frame: method frame having `method`
+            member of type `pika.spec.Connection.Unblocked`
+        """
+        LOGGER.info('Received %s from broker', method_frame)
+
+        if self._blocked_conn_timer is None:
+            # RabbitMQ is supposed to pair Connection.Blocked/Unblocked, but it
+            # doesn't hurt to be careful
+            LOGGER.warning('_blocked_conn_timer was not active when '
+                           '_on_connection_unblocked called')
+        else:
+            self.remove_timeout(self._blocked_conn_timer)
+            self._blocked_conn_timer = None
+
+    def _on_connection_close(self, method_frame):
+        """Called when the connection is closed remotely via Connection.Close
+        frame from broker.
+
+        :param pika.frame.Method method_frame: The Connection.Close frame
+
+        """
+        LOGGER.debug('_on_connection_close: frame=%s', method_frame)
+
+        self.closing = (method_frame.method.reply_code,
+                        method_frame.method.reply_text)
+
+        self._on_terminate(self.closing[0], self.closing[1])
+
+    def _on_connection_close_ok(self, method_frame):
+        """Called when Connection.CloseOk is received from remote.
+
+        :param pika.frame.Method method_frame: The Connection.CloseOk frame
+
+        """
+        LOGGER.debug('_on_connection_close_ok: frame=%s', method_frame)
+
+        self._on_terminate(self.closing[0], self.closing[1])
+
+    def _on_connection_error(self, _connection_unused, error_message=None):
+        """Default behavior when the connecting connection cannot connect.
+
+        :raises: exceptions.AMQPConnectionError
+
+        """
+        raise exceptions.AMQPConnectionError(error_message or
+                                             self.params.connection_attempts)
+
+    def _on_connection_open(self, method_frame):
+        """
+        This is called once we have tuned the connection with the server,
+        called Connection.Open on the server, and it has replied with
+        Connection.OpenOk.
+        """
+        # TODO _on_connection_open - what if user started closing it already?
+        # It shouldn't transition to OPEN if in closing state. Just log and skip
+        # the rest.
+
+        self.known_hosts = method_frame.method.known_hosts
+
+        # We're now connected at the AMQP level
+        self._set_connection_state(self.CONNECTION_OPEN)
+
+        # Call our initial callback that we're open
+        self.callbacks.process(0, self.ON_CONNECTION_OPEN, self, self)
+
+    def _on_connection_start(self, method_frame):
+        """This is called as a callback once we have received a Connection.Start
+        from the server.
+ + :param pika.frame.Method method_frame: The frame received + :raises: UnexpectedFrameError + + """ + self._set_connection_state(self.CONNECTION_START) + if self._is_protocol_header_frame(method_frame): + raise exceptions.UnexpectedFrameError + self._check_for_protocol_mismatch(method_frame) + self._set_server_information(method_frame) + self._add_connection_tune_callback() + self._send_connection_start_ok(*self._get_credentials(method_frame)) + + def _on_connect_timer(self): + """Callback for self._connection_attempt_timer: initiate connection + attempt in the context of the event loop + + """ + self._connection_attempt_timer = None + + error = self._adapter_connect() + if not error: + self._on_connected() + return + + self.remaining_connection_attempts -= 1 + LOGGER.warning('Could not connect, %i attempts left', + self.remaining_connection_attempts) + if self.remaining_connection_attempts > 0: + LOGGER.info('Retrying in %i seconds', self.params.retry_delay) + self._connection_attempt_timer = self.add_timeout( + self.params.retry_delay, + self._on_connect_timer) + else: + # TODO connect must not call failure callback from constructor. The + # current behavior is error-prone, because the user code may get a + # callback upon socket connection failure before user's other state + # may be sufficiently initialized. Constructors must either succeed + # or raise an exception. To be forward-compatible with failure + # reporting from fully non-blocking connection establishment, + # connect() should set INIT state and schedule a 0-second timer to + # continue the rest of the logic in a private method. The private + # method should use itself instead of connect() as the callback for + # scheduling retries. + + # TODO This should use _on_terminate for consistent behavior/cleanup + self.callbacks.process(0, self.ON_CONNECTION_ERROR, self, self, + error) + self.remaining_connection_attempts = self.params.connection_attempts + self._set_connection_state(self.CONNECTION_CLOSED) + + @staticmethod + def _negotiate_integer_value(client_value, server_value): + """Negotiates two values. If either of them is 0 or None, + returns the other one. If both are positive integers, returns the + smallest one. + + :param int client_value: The client value + :param int server_value: The server value + :rtype: int + + """ + if client_value == None: + client_value = 0 + if server_value == None: + server_value = 0 + + # this is consistent with how Java client and Bunny + # perform negotiation, see pika/pika#874 + if client_value == 0 or server_value == 0: + val = max(client_value, server_value) + else: + val = min(client_value, server_value) + + return val + + @staticmethod + def _tune_heartbeat_timeout(client_value, server_value): + """ Determine heartbeat timeout per AMQP 0-9-1 rules + + Per https://www.rabbitmq.com/resources/specs/amqp0-9-1.pdf, + + > Both peers negotiate the limits to the lowest agreed value as follows: + > - The server MUST tell the client what limits it proposes. + > - The client responds and **MAY reduce those limits** for its + connection + + If the client specifies a value, it always takes precedence. + + :param client_value: None to accept server_value; otherwise, an integral + number in seconds; 0 (zero) to disable heartbeat. + :param server_value: integral value of the heartbeat timeout proposed by + broker; 0 (zero) to disable heartbeat. 
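Worked examples of the `_negotiate_integer_value` rule above: 0 or None means "no limit", so the other side's value wins; otherwise the smaller value wins:

    def negotiate(client, server):
        # Mirrors Connection._negotiate_integer_value
        client, server = client or 0, server or 0
        return max(client, server) if 0 in (client, server) else min(client, server)

    assert negotiate(0, 2047) == 2047         # client imposes no channel limit
    assert negotiate(131072, 65536) == 65536  # smaller frame_max wins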
+
+        :returns: the value of the heartbeat timeout to use and return to broker
+        """
+        if client_value is None:
+            # Accept server's limit
+            timeout = server_value
+        else:
+            timeout = client_value
+
+        return timeout
+
+    def _on_connection_tune(self, method_frame):
+        """Once the Broker sends back a Connection.Tune, we will set our tuning
+        variables that have been returned to us and kick off the Heartbeat
+        monitor if required, send our TuneOk and then the Connection.Open RPC
+        call on channel 0.
+
+        :param pika.frame.Method method_frame: The frame received
+
+        """
+        self._set_connection_state(self.CONNECTION_TUNE)
+
+        # Get our max channels, frames and heartbeat interval
+        self.params.channel_max = Connection._negotiate_integer_value(
+            self.params.channel_max, method_frame.method.channel_max)
+        self.params.frame_max = Connection._negotiate_integer_value(
+            self.params.frame_max, method_frame.method.frame_max)
+
+        if callable(self.params.heartbeat):
+            ret_heartbeat = self.params.heartbeat(self, method_frame.method.heartbeat)
+            if ret_heartbeat is None or callable(ret_heartbeat):
+                # Enforce callback-specific restrictions on callback's return value
+                raise TypeError('heartbeat callback must not return None '
+                                'or callable, but got %r' % (ret_heartbeat,))
+
+            # Leave it to the heartbeat setter to deal with the rest of the
+            # validation
+            self.params.heartbeat = ret_heartbeat
+
+        # Negotiate heartbeat timeout
+        self.params.heartbeat = self._tune_heartbeat_timeout(
+            client_value=self.params.heartbeat,
+            server_value=method_frame.method.heartbeat)
+
+        # Calculate the maximum pieces for body frames
+        self._body_max_length = self._get_body_frame_max_length()
+
+        # Create a new heartbeat checker if needed
+        self.heartbeat = self._create_heartbeat_checker()
+
+        # Send the TuneOk response with what we've agreed upon
+        self._send_connection_tune_ok()
+
+        # Send the Connection.Open RPC call for the vhost
+        self._send_connection_open()
+
+    def _on_data_available(self, data_in):
+        """This is called by our Adapter, passing in the data from the socket.
+        As long as there is buffered data, try to decode and dispatch frames.
+
+        :param str data_in: The data that is available to read
+
+        """
+        self._append_frame_buffer(data_in)
+        while self._frame_buffer:
+            consumed_count, frame_value = self._read_frame()
+            if not frame_value:
+                return
+            self._trim_frame_buffer(consumed_count)
+            self._process_frame(frame_value)
+
+    def _on_terminate(self, reason_code, reason_text):
+        """Terminate the connection and notify registered ON_CONNECTION_ERROR
+        and/or ON_CONNECTION_CLOSED callbacks
+
+        :param integer reason_code: either IETF RFC 821 reply code for
+            AMQP-level closures or a value from `InternalCloseReasons` for
+            internal causes, such as socket errors
+        :param str reason_text: human-readable text message describing the error
+        """
+        LOGGER.info(
+            'Disconnected from RabbitMQ at %s:%i (%s): %s',
+            self.params.host, self.params.port, reason_code,
+            reason_text)
+
+        if not isinstance(reason_code, numbers.Integral):
+            raise TypeError('reason_code must be an integer, but got %r'
+                            % (reason_code,))
+
+        # Stop the heartbeat checker if it exists
+        self._remove_heartbeat()
+
+        # Remove connection management callbacks
+        # TODO This call was moved here verbatim from legacy code and the
+        # following doesn't seem to be right: `Connection.Open` here is
+        # unexpected, we don't appear to ever register it, and the broker
+        # shouldn't be sending `Connection.Open` to us, anyway.
+ self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start, + spec.Connection.Open]) + + if self.params.blocked_connection_timeout is not None: + self._remove_callbacks(0, [spec.Connection.Blocked, + spec.Connection.Unblocked]) + + # Close the socket + self._adapter_disconnect() + + # Determine whether this was an error during connection setup + connection_error = None + + if self.connection_state == self.CONNECTION_PROTOCOL: + LOGGER.error('Incompatible Protocol Versions') + connection_error = exceptions.IncompatibleProtocolError( + reason_code, + reason_text) + elif self.connection_state == self.CONNECTION_START: + LOGGER.error('Connection closed while authenticating indicating a ' + 'probable authentication error') + connection_error = exceptions.ProbableAuthenticationError( + reason_code, + reason_text) + elif self.connection_state == self.CONNECTION_TUNE: + LOGGER.error('Connection closed while tuning the connection ' + 'indicating a probable permission error when ' + 'accessing a virtual host') + connection_error = exceptions.ProbableAccessDeniedError( + reason_code, + reason_text) + elif self.connection_state not in [self.CONNECTION_OPEN, + self.CONNECTION_CLOSED, + self.CONNECTION_CLOSING]: + LOGGER.warning('Unexpected connection state on disconnect: %i', + self.connection_state) + + # Transition to closed state + self._set_connection_state(self.CONNECTION_CLOSED) + + # Inform our channel proxies + for channel in dictkeys(self._channels): + if channel not in self._channels: + continue + # pylint: disable=W0212 + self._channels[channel]._on_close_meta(reason_code, reason_text) + + # Inform interested parties + if connection_error is not None: + LOGGER.error('Connection setup failed due to %r', connection_error) + self.callbacks.process(0, + self.ON_CONNECTION_ERROR, + self, self, + connection_error) + + self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self, + reason_code, reason_text) + + # Reset connection properties + self._init_connection_state() + + def _process_callbacks(self, frame_value): + """Process the callbacks for the frame if the frame is a method frame + and if it has any callbacks pending. + + :param pika.frame.Method frame_value: The frame to process + :rtype: bool + + """ + if (self._is_method_frame(frame_value) and + self._has_pending_callbacks(frame_value)): + self.callbacks.process(frame_value.channel_number, # Prefix + frame_value.method, # Key + self, # Caller + frame_value) # Args + return True + return False + + def _process_frame(self, frame_value): + """Process an inbound frame from the socket. + + :param frame_value: The frame to process + :type frame_value: pika.frame.Frame | pika.frame.Method + + """ + # Will receive a frame type of -1 if protocol version mismatch + if frame_value.frame_type < 0: + return + + # Keep track of how many frames have been read + self.frames_received += 1 + + # Process any callbacks, if True, exit method + if self._process_callbacks(frame_value): + return + + # If a heartbeat is received, update the checker + if isinstance(frame_value, frame.Heartbeat): + if self.heartbeat: + self.heartbeat.received() + else: + LOGGER.warning('Received heartbeat frame without a heartbeat ' + 'checker') + + # If the frame has a channel number beyond the base channel, deliver it + elif frame_value.channel_number > 0: + self._deliver_frame_to_channel(frame_value) + + def _read_frame(self): + """Try and read from the frame buffer and decode a frame. 
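The read path is a classic buffer-consume loop: `_on_data_available` appends, `_read_frame` decodes, `_trim_frame_buffer` drops consumed bytes. A stripped-down toy version of that loop, with names of our own choosing:

    def consume(buffer, decode, handle):
        """decode() returns (bytes_consumed, frame_or_None); a partial
        frame stays buffered until more socket data arrives."""
        while buffer:
            consumed, frm = decode(buffer)
            if frm is None:
                break
            buffer = buffer[consumed:]
            handle(frm)
        return buffer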
+ + :rtype tuple: (int, pika.frame.Frame) + + """ + return frame.decode_frame(self._frame_buffer) + + def _remove_callback(self, channel_number, method_class): + """Remove the specified method_frame callback if it is set for the + specified channel number. + + :param int channel_number: The channel number to remove the callback on + :param pika.amqp_object.Method method_class: The method class for the + callback + + """ + self.callbacks.remove(str(channel_number), method_class) + + def _remove_callbacks(self, channel_number, method_classes): + """Remove the callbacks for the specified channel number and list of + method frames. + + :param int channel_number: The channel number to remove the callback on + :param sequence method_classes: The method classes (derived from + `pika.amqp_object.Method`) for the callbacks + + """ + for method_frame in method_classes: + self._remove_callback(channel_number, method_frame) + + def _rpc(self, channel_number, method, + callback_method=None, + acceptable_replies=None): + """Make an RPC call for the given callback, channel number and method. + acceptable_replies lists out what responses we'll process from the + server with the specified callback. + + :param int channel_number: The channel number for the RPC call + :param pika.amqp_object.Method method: The method frame to call + :param method callback_method: The callback for the RPC response + :param list acceptable_replies: The replies this RPC call expects + + """ + # Validate that acceptable_replies is a list or None + if acceptable_replies and not isinstance(acceptable_replies, list): + raise TypeError('acceptable_replies should be list or None') + + # Validate the callback is callable + if callback_method: + if not utils.is_callable(callback_method): + raise TypeError('callback should be None, function or method.') + + for reply in acceptable_replies: + self.callbacks.add(channel_number, reply, callback_method) + + # Send the rpc call to RabbitMQ + self._send_method(channel_number, method) + + def _send_connection_close(self, reply_code, reply_text): + """Send a Connection.Close method frame. + + :param int reply_code: The reason for the close + :param str reply_text: The text reason for the close + + """ + self._rpc(0, spec.Connection.Close(reply_code, reply_text, 0, 0), + self._on_connection_close_ok, [spec.Connection.CloseOk]) + + def _send_connection_open(self): + """Send a Connection.Open frame""" + self._rpc(0, spec.Connection.Open(self.params.virtual_host, + insist=True), + self._on_connection_open, [spec.Connection.OpenOk]) + + def _send_connection_start_ok(self, authentication_type, response): + """Send a Connection.StartOk frame + + :param str authentication_type: The auth type value + :param str response: The encoded value to send + + """ + self._send_method(0, + spec.Connection.StartOk(self._client_properties, + authentication_type, response, + self.params.locale)) + + def _send_connection_tune_ok(self): + """Send a Connection.TuneOk frame""" + self._send_method(0, spec.Connection.TuneOk(self.params.channel_max, + self.params.frame_max, + self.params.heartbeat)) + + def _send_frame(self, frame_value): + """This appends the fully generated frame to send to the broker to the + output buffer which will be then sent via the connection adapter. 
+ + :param frame_value: The frame to write + :type frame_value: pika.frame.Frame|pika.frame.ProtocolHeader + :raises: exceptions.ConnectionClosed + + """ + if self.is_closed: + LOGGER.error('Attempted to send frame when closed') + raise exceptions.ConnectionClosed + + marshaled_frame = frame_value.marshal() + self.bytes_sent += len(marshaled_frame) + self.frames_sent += 1 + self.outbound_buffer.append(marshaled_frame) + self._flush_outbound() + if self.params.backpressure_detection: + self._detect_backpressure() + + def _send_method(self, channel_number, method, content=None): + """Constructs a RPC method frame and then sends it to the broker. + + :param int channel_number: The channel number for the frame + :param pika.amqp_object.Method method: The method to send + :param tuple content: If set, is a content frame, is tuple of + properties and body. + + """ + if content: + self._send_message(channel_number, method, content) + else: + self._send_frame(frame.Method(channel_number, method)) + + def _send_message(self, channel_number, method_frame, content): + """Publish a message. + + :param int channel_number: The channel number for the frame + :param pika.object.Method method_frame: The method frame to send + :param tuple content: A content frame, which is tuple of properties and + body. + + """ + length = len(content[1]) + self._send_frame(frame.Method(channel_number, method_frame)) + self._send_frame(frame.Header(channel_number, length, content[0])) + + if content[1]: + chunks = int(math.ceil(float(length) / self._body_max_length)) + for chunk in xrange(0, chunks): + s = chunk * self._body_max_length + e = s + self._body_max_length + if e > length: + e = length + self._send_frame(frame.Body(channel_number, content[1][s:e])) + + def _set_connection_state(self, connection_state): + """Set the connection state. + + :param int connection_state: The connection state to set + + """ + self.connection_state = connection_state + + def _set_server_information(self, method_frame): + """Set the server properties and capabilities + + :param spec.connection.Start method_frame: The Connection.Start frame + + """ + self.server_properties = method_frame.method.server_properties + self.server_capabilities = self.server_properties.get('capabilities', + dict()) + if hasattr(self.server_properties, 'capabilities'): + del self.server_properties['capabilities'] + + def _trim_frame_buffer(self, byte_count): + """Trim the leading N bytes off the frame buffer and increment the + counter that keeps track of how many bytes have been read/used from the + socket. + + :param int byte_count: The number of bytes consumed + + """ + self._frame_buffer = self._frame_buffer[byte_count:] + self.bytes_received += byte_count diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/credentials.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/credentials.py new file mode 100644 index 000000000..09e0cae46 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/credentials.py @@ -0,0 +1,120 @@ +"""The credentials classes are used to encapsulate all authentication +information for the :class:`~pika.connection.ConnectionParameters` class. + +The :class:`~pika.credentials.PlainCredentials` class returns the properly +formatted username and password to the :class:`~pika.connection.Connection`. + +To authenticate with Pika, create a :class:`~pika.credentials.PlainCredentials` +object passing in the username and password and pass it as the credentials +argument value to the :class:`~pika.connection.ConnectionParameters` object. 
+ +If you are using :class:`~pika.connection.URLParameters` you do not need a +credentials object, one will automatically be created for you. + +If you are looking to implement SSL certificate style authentication, you would +extend the :class:`~pika.credentials.ExternalCredentials` class implementing +the required behavior. + +""" +from .compat import as_bytes +import logging + +LOGGER = logging.getLogger(__name__) + + +class PlainCredentials(object): + """A credentials object for the default authentication methodology with + RabbitMQ. + + If you do not pass in credentials to the ConnectionParameters object, it + will create credentials for 'guest' with the password of 'guest'. + + If you pass True to erase_on_connect the credentials will not be stored + in memory after the Connection attempt has been made. + + :param str username: The username to authenticate with + :param str password: The password to authenticate with + :param bool erase_on_connect: erase credentials on connect. + + """ + TYPE = 'PLAIN' + + def __init__(self, username, password, erase_on_connect=False): + """Create a new instance of PlainCredentials + + :param str username: The username to authenticate with + :param str password: The password to authenticate with + :param bool erase_on_connect: erase credentials on connect. + + """ + self.username = username + self.password = password + self.erase_on_connect = erase_on_connect + + def __eq__(self, other): + return (isinstance(other, PlainCredentials) and + other.username == self.username and + other.password == self.password and + other.erase_on_connect == self.erase_on_connect) + + def __ne__(self, other): + return not self == other + + def response_for(self, start): + """Validate that this type of authentication is supported + + :param spec.Connection.Start start: Connection.Start method + :rtype: tuple(str|None, str|None) + + """ + if as_bytes(PlainCredentials.TYPE) not in\ + as_bytes(start.mechanisms).split(): + return None, None + return (PlainCredentials.TYPE, + b'\0' + as_bytes(self.username) + + b'\0' + as_bytes(self.password)) + + def erase_credentials(self): + """Called by Connection when it no longer needs the credentials""" + if self.erase_on_connect: + LOGGER.info("Erasing stored credential values") + self.username = None + self.password = None + + +class ExternalCredentials(object): + """The ExternalCredentials class allows the connection to use EXTERNAL + authentication, generally with a client SSL certificate. 
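Typical construction looks like the sketch below; the username and password are illustrative, and with `erase_on_connect=True` the credentials are wiped once the handshake completes:

    import pika

    creds = pika.PlainCredentials('app_user', 's3cret', erase_on_connect=True)
    params = pika.ConnectionParameters(host='localhost', credentials=creds)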
+ + """ + TYPE = 'EXTERNAL' + + def __init__(self): + """Create a new instance of ExternalCredentials""" + self.erase_on_connect = False + + def __eq__(self, other): + return (isinstance(other, ExternalCredentials) and + other.erase_on_connect == self.erase_on_connect) + + def __ne__(self, other): + return not self == other + + def response_for(self, start): + """Validate that this type of authentication is supported + + :param spec.Connection.Start start: Connection.Start method + :rtype: tuple(str or None, str or None) + + """ + if as_bytes(ExternalCredentials.TYPE) not in\ + as_bytes(start.mechanisms).split(): + return None, None + return ExternalCredentials.TYPE, b'' + + def erase_credentials(self): + """Called by Connection when it no longer needs the credentials""" + LOGGER.debug('Not supported by this Credentials type') + +# Append custom credential types to this list for validation support +VALID_TYPES = [PlainCredentials, ExternalCredentials] diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/data.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/data.py new file mode 100644 index 000000000..b5bc5d92d --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/data.py @@ -0,0 +1,322 @@ +"""AMQP Table Encoding/Decoding""" +import struct +import decimal +import calendar +import warnings + +from datetime import datetime + +from pika import exceptions +from pika.compat import PY2, PY3 +from pika.compat import unicode_type, long, as_bytes + + +def encode_short_string(pieces, value): + """Encode a string value as short string and append it to pieces list + returning the size of the encoded value. + + :param list pieces: Already encoded values + :param value: String value to encode + :type value: str or unicode + :rtype: int + + """ + encoded_value = as_bytes(value) + length = len(encoded_value) + + # 4.2.5.3 + # Short strings, stored as an 8-bit unsigned integer length followed by zero + # or more octets of data. Short strings can carry up to 255 octets of UTF-8 + # data, but may not contain binary zero octets. + # ... + # 4.2.5.5 + # The server SHOULD validate field names and upon receiving an invalid field + # name, it SHOULD signal a connection exception with reply code 503 (syntax + # error). + # -> validate length (avoid truncated utf-8 / corrupted data), but skip null + # byte check. + if length > 255: + raise exceptions.ShortStringTooLong(encoded_value) + + pieces.append(struct.pack('B', length)) + pieces.append(encoded_value) + return 1 + length + + +if PY2: + def decode_short_string(encoded, offset): + """Decode a short string value from ``encoded`` data at ``offset``. + """ + length = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + # Purely for compatibility with original python2 code. No idea what + # and why this does. + value = encoded[offset:offset + length] + try: + value = bytes(value) + except UnicodeEncodeError: + pass + offset += length + return value, offset + +else: + def decode_short_string(encoded, offset): + """Decode a short string value from ``encoded`` data at ``offset``. + """ + length = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + value = encoded[offset:offset + length] + try: + value = value.decode('utf8') + except UnicodeDecodeError: + pass + offset += length + return value, offset + + +def encode_table(pieces, table): + """Encode a dict as an AMQP table appending the encded table to the + pieces list passed in. 
+ + :param list pieces: Already encoded frame pieces + :param dict table: The dict to encode + :rtype: int + + """ + table = table or {} + length_index = len(pieces) + pieces.append(None) # placeholder + tablesize = 0 + for (key, value) in table.items(): + tablesize += encode_short_string(pieces, key) + tablesize += encode_value(pieces, value) + + pieces[length_index] = struct.pack('>I', tablesize) + return tablesize + 4 + + +def encode_value(pieces, value): + """Encode the value passed in and append it to the pieces list returning + the the size of the encoded value. + + :param list pieces: Already encoded values + :param any value: The value to encode + :rtype: int + + """ + + if PY2: + if isinstance(value, basestring): + if isinstance(value, unicode_type): + value = value.encode('utf-8') + pieces.append(struct.pack('>cI', b'S', len(value))) + pieces.append(value) + return 5 + len(value) + else: + # support only str on Python 3 + if isinstance(value, str): + value = value.encode('utf-8') + pieces.append(struct.pack('>cI', b'S', len(value))) + pieces.append(value) + return 5 + len(value) + + if isinstance(value, bytes): + pieces.append(struct.pack('>cI', b'x', len(value))) + pieces.append(value) + return 5 + len(value) + + if isinstance(value, bool): + pieces.append(struct.pack('>cB', b't', int(value))) + return 2 + if isinstance(value, long): + pieces.append(struct.pack('>cq', b'l', value)) + return 9 + elif isinstance(value, int): + with warnings.catch_warnings(): + warnings.filterwarnings('error') + try: + p = struct.pack('>ci', b'I', value) + pieces.append(p) + return 5 + except (struct.error, DeprecationWarning): + p = struct.pack('>cq', b'l', long(value)) + pieces.append(p) + return 9 + elif isinstance(value, decimal.Decimal): + value = value.normalize() + if value.as_tuple().exponent < 0: + decimals = -value.as_tuple().exponent + raw = int(value * (decimal.Decimal(10) ** decimals)) + pieces.append(struct.pack('>cBi', b'D', decimals, raw)) + else: + # per spec, the "decimals" octet is unsigned (!) + pieces.append(struct.pack('>cBi', b'D', 0, int(value))) + return 6 + elif isinstance(value, datetime): + pieces.append(struct.pack('>cQ', b'T', + calendar.timegm(value.utctimetuple()))) + return 9 + elif isinstance(value, dict): + pieces.append(struct.pack('>c', b'F')) + return 1 + encode_table(pieces, value) + elif isinstance(value, list): + p = [] + for v in value: + encode_value(p, v) + piece = b''.join(p) + pieces.append(struct.pack('>cI', b'A', len(piece))) + pieces.append(piece) + return 5 + len(piece) + elif value is None: + pieces.append(struct.pack('>c', b'V')) + return 1 + else: + raise exceptions.UnsupportedAMQPFieldException(pieces, value) + + +def decode_table(encoded, offset): + """Decode the AMQP table passed in from the encoded value returning the + decoded result and the number of bytes read plus the offset. + + :param str encoded: The binary encoded data to decode + :param int offset: The starting byte offset + :rtype: tuple + + """ + result = {} + tablesize = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + limit = offset + tablesize + while offset < limit: + key, offset = decode_short_string(encoded, offset) + value, offset = decode_value(encoded, offset) + result[key] = value + return result, offset + + +def decode_value(encoded, offset): + """Decode the value passed in returning the decoded value and the number + of bytes read in addition to the starting offset. 
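A round-trip sketch using the helpers above (assumes they are imported from `pika.data`):

    from pika.data import encode_table, decode_table

    pieces = []
    size = encode_table(pieces, {'retries': 3, 'region': 'eu'})
    blob = b''.join(pieces)
    assert size == len(blob)   # reported size includes the 4-byte length prefix

    table, offset = decode_table(blob, 0)
    assert table == {'retries': 3, 'region': 'eu'} and offset == len(blob)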
+ + :param str encoded: The binary encoded data to decode + :param int offset: The starting byte offset + :rtype: tuple + :raises: pika.exceptions.InvalidFieldTypeException + + """ + # slice to get bytes in Python 3 and str in Python 2 + kind = encoded[offset:offset + 1] + offset += 1 + + # Bool + if kind == b't': + value = struct.unpack_from('>B', encoded, offset)[0] + value = bool(value) + offset += 1 + + # Short-Short Int + elif kind == b'b': + value = struct.unpack_from('>B', encoded, offset)[0] + offset += 1 + + # Short-Short Unsigned Int + elif kind == b'B': + value = struct.unpack_from('>b', encoded, offset)[0] + offset += 1 + + # Short Int + elif kind == b'U': + value = struct.unpack_from('>h', encoded, offset)[0] + offset += 2 + + # Short Unsigned Int + elif kind == b'u': + value = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + + # Long Int + elif kind == b'I': + value = struct.unpack_from('>i', encoded, offset)[0] + offset += 4 + + # Long Unsigned Int + elif kind == b'i': + value = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + + # Long-Long Int + elif kind == b'L': + value = long(struct.unpack_from('>q', encoded, offset)[0]) + offset += 8 + + # Long-Long Unsigned Int + elif kind == b'l': + value = long(struct.unpack_from('>Q', encoded, offset)[0]) + offset += 8 + + # Float + elif kind == b'f': + value = long(struct.unpack_from('>f', encoded, offset)[0]) + offset += 4 + + # Double + elif kind == b'd': + value = long(struct.unpack_from('>d', encoded, offset)[0]) + offset += 8 + + # Decimal + elif kind == b'D': + decimals = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + raw = struct.unpack_from('>i', encoded, offset)[0] + offset += 4 + value = decimal.Decimal(raw) * (decimal.Decimal(10) ** -decimals) + + # Short String + elif kind == b's': + value, offset = decode_short_string(encoded, offset) + + # Long String + elif kind == b'S': + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + value = encoded[offset:offset + length] + try: + value = value.decode('utf8') + except UnicodeDecodeError: + pass + offset += length + + elif kind == b'x': + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + value = encoded[offset:offset + length] + offset += length + + # Field Array + elif kind == b'A': + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + offset_end = offset + length + value = [] + while offset < offset_end: + v, offset = decode_value(encoded, offset) + value.append(v) + + # Timestamp + elif kind == b'T': + value = datetime.utcfromtimestamp(struct.unpack_from('>Q', encoded, + offset)[0]) + offset += 8 + + # Field Table + elif kind == b'F': + (value, offset) = decode_table(encoded, offset) + + # Null / Void + elif kind == b'V': + value = None + else: + raise exceptions.InvalidFieldTypeException(kind) + + return value, offset diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/exceptions.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/exceptions.py new file mode 100644 index 000000000..7daef48a4 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/exceptions.py @@ -0,0 +1,257 @@ +"""Pika specific exceptions""" + + +class AMQPError(Exception): + + def __repr__(self): + return 'An unspecified AMQP error has occurred' + + +class AMQPConnectionError(AMQPError): + + def __repr__(self): + if len(self.args) == 1: + if self.args[0] == 1: + return ('No connection could be opened after 1 ' + 'connection attempt') + elif isinstance(self.args[0], int): + return ('No connection could be opened after %s 
' + 'connection attempts' % self.args[0]) + else: + return 'No connection could be opened: %s' % self.args[0] + elif len(self.args) == 2: + return '%s: %s' % (self.args[0], self.args[1]) + + +class IncompatibleProtocolError(AMQPConnectionError): + + def __repr__(self): + return ('The protocol returned by the server is not supported: %s' % + (self.args,)) + + +class AuthenticationError(AMQPConnectionError): + + def __repr__(self): + return ('Server and client could not negotiate use of the %s ' + 'authentication mechanism' % self.args[0]) + + +class ProbableAuthenticationError(AMQPConnectionError): + + def __repr__(self): + return ('Client was disconnected at a connection stage indicating a ' + 'probable authentication error: %s' % (self.args,)) + + +class ProbableAccessDeniedError(AMQPConnectionError): + + def __repr__(self): + return ('Client was disconnected at a connection stage indicating a ' + 'probable denial of access to the specified virtual host: %s' % + (self.args,)) + + +class NoFreeChannels(AMQPConnectionError): + + def __repr__(self): + return 'The connection has run out of free channels' + + +class ConnectionClosed(AMQPConnectionError): + + def __repr__(self): + if len(self.args) == 2: + return 'The AMQP connection was closed (%s) %s' % (self.args[0], + self.args[1]) + else: + return 'The AMQP connection was closed: %s' % (self.args,) + + +class AMQPChannelError(AMQPError): + + def __repr__(self): + return 'An unspecified AMQP channel error has occurred' + + +class ChannelClosed(AMQPChannelError): + + def __repr__(self): + if len(self.args) == 2: + return 'The channel was closed (%s) %s' % (self.args[0], + self.args[1]) + else: + return 'The channel was closed: %s' % (self.args,) + + +class ChannelAlreadyClosing(AMQPChannelError): + """Raised when `Channel.close` is called while channel is already closing""" + pass + + +class DuplicateConsumerTag(AMQPChannelError): + + def __repr__(self): + return ('The consumer tag specified already exists for this ' + 'channel: %s' % self.args[0]) + + +class ConsumerCancelled(AMQPChannelError): + + def __repr__(self): + return 'Server cancelled consumer' + + +class UnroutableError(AMQPChannelError): + """Exception containing one or more unroutable messages returned by broker + via Basic.Return. + + Used by BlockingChannel. + + In publisher-acknowledgements mode, this is raised upon receipt of Basic.Ack + from broker; in the event of Basic.Nack from broker, `NackError` is raised + instead + """ + + def __init__(self, messages): + """ + :param messages: sequence of returned unroutable messages + :type messages: sequence of `blocking_connection.ReturnedMessage` + objects + """ + super(UnroutableError, self).__init__( + "%s unroutable message(s) returned" % (len(messages))) + + self.messages = messages + + def __repr__(self): + return '%s: %i unroutable messages returned by broker' % ( + self.__class__.__name__, len(self.messages)) + + +class NackError(AMQPChannelError): + """This exception is raised when a message published in + publisher-acknowledgements mode is Nack'ed by the broker. + + Used by BlockingChannel. 
+ """ + + def __init__(self, messages): + """ + :param messages: sequence of returned unroutable messages + :type messages: sequence of `blocking_connection.ReturnedMessage` + objects + """ + super(NackError, self).__init__( + "%s message(s) NACKed" % (len(messages))) + + self.messages = messages + + def __repr__(self): + return '%s: %i unroutable messages returned by broker' % ( + self.__class__.__name__, len(self.messages)) + + +class InvalidChannelNumber(AMQPError): + + def __repr__(self): + return 'An invalid channel number has been specified: %s' % self.args[0] + + +class ProtocolSyntaxError(AMQPError): + + def __repr__(self): + return 'An unspecified protocol syntax error occurred' + + +class UnexpectedFrameError(ProtocolSyntaxError): + + def __repr__(self): + return 'Received a frame out of sequence: %r' % self.args[0] + + +class ProtocolVersionMismatch(ProtocolSyntaxError): + + def __repr__(self): + return 'Protocol versions did not match: %r vs %r' % (self.args[0], + self.args[1]) + + +class BodyTooLongError(ProtocolSyntaxError): + + def __repr__(self): + return ('Received too many bytes for a message delivery: ' + 'Received %i, expected %i' % (self.args[0], self.args[1])) + + +class InvalidFrameError(ProtocolSyntaxError): + + def __repr__(self): + return 'Invalid frame received: %r' % self.args[0] + + +class InvalidFieldTypeException(ProtocolSyntaxError): + + def __repr__(self): + return 'Unsupported field kind %s' % self.args[0] + + +class UnsupportedAMQPFieldException(ProtocolSyntaxError): + + def __repr__(self): + return 'Unsupported field kind %s' % type(self.args[1]) + + +class UnspportedAMQPFieldException(UnsupportedAMQPFieldException): + """Deprecated version of UnsupportedAMQPFieldException""" + + +class MethodNotImplemented(AMQPError): + pass + + +class ChannelError(Exception): + + def __repr__(self): + return 'An unspecified error occurred with the Channel' + + +class InvalidMinimumFrameSize(ProtocolSyntaxError): + """ DEPRECATED; pika.connection.Parameters.frame_max property setter now + raises the standard `ValueError` exception when the value is out of bounds. + """ + + def __repr__(self): + return 'AMQP Minimum Frame Size is 4096 Bytes' + + +class InvalidMaximumFrameSize(ProtocolSyntaxError): + """ DEPRECATED; pika.connection.Parameters.frame_max property setter now + raises the standard `ValueError` exception when the value is out of bounds. + """ + + def __repr__(self): + return 'AMQP Maximum Frame Size is 131072 Bytes' + + +class RecursionError(Exception): + """The requested operation would result in unsupported recursion or + reentrancy. 
+ + Used by BlockingConnection/BlockingChannel + + """ + + +class ShortStringTooLong(AMQPError): + + def __repr__(self): + return ('AMQP Short String can contain up to 255 bytes: ' + '%.300s' % self.args[0]) + + +class DuplicateGetOkCallback(ChannelError): + + def __repr__(self): + return ('basic_get can only be called again after the callback for the' + 'previous basic_get is executed') diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/frame.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/frame.py new file mode 100644 index 000000000..9a07ec36a --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/frame.py @@ -0,0 +1,265 @@ +"""Frame objects that do the frame demarshaling and marshaling.""" +import logging +import struct + +from pika import amqp_object +from pika import exceptions +from pika import spec +from pika.compat import byte + + +LOGGER = logging.getLogger(__name__) + + +class Frame(amqp_object.AMQPObject): + """Base Frame object mapping. Defines a behavior for all child classes for + assignment of core attributes and implementation of the a core _marshal + method which child classes use to create the binary AMQP frame. + + """ + NAME = 'Frame' + + def __init__(self, frame_type, channel_number): + """Create a new instance of a frame + + :param int frame_type: The frame type + :param int channel_number: The channel number for the frame + + """ + self.frame_type = frame_type + self.channel_number = channel_number + + def _marshal(self, pieces): + """Create the full AMQP wire protocol frame data representation + + :rtype: bytes + + """ + payload = b''.join(pieces) + return struct.pack('>BHI', self.frame_type, self.channel_number, + len(payload)) + payload + byte(spec.FRAME_END) + + def marshal(self): + """To be ended by child classes + + :raises NotImplementedError + + """ + raise NotImplementedError + + +class Method(Frame): + """Base Method frame object mapping. AMQP method frames are mapped on top + of this class for creating or accessing their data and attributes. + + """ + NAME = 'METHOD' + + def __init__(self, channel_number, method): + """Create a new instance of a frame + + :param int channel_number: The frame type + :param pika.Spec.Class.Method method: The AMQP Class.Method + + """ + Frame.__init__(self, spec.FRAME_METHOD, channel_number) + self.method = method + + def marshal(self): + """Return the AMQP binary encoded value of the frame + + :rtype: str + + """ + pieces = self.method.encode() + pieces.insert(0, struct.pack('>I', self.method.INDEX)) + return self._marshal(pieces) + + +class Header(Frame): + """Header frame object mapping. AMQP content header frames are mapped + on top of this class for creating or accessing their data and attributes. + + """ + NAME = 'Header' + + def __init__(self, channel_number, body_size, props): + """Create a new instance of a AMQP ContentHeader object + + :param int channel_number: The channel number for the frame + :param int body_size: The number of bytes for the body + :param pika.spec.BasicProperties props: Basic.Properties object + + """ + Frame.__init__(self, spec.FRAME_HEADER, channel_number) + self.body_size = body_size + self.properties = props + + def marshal(self): + """Return the AMQP binary encoded value of the frame + + :rtype: str + + """ + pieces = self.properties.encode() + pieces.insert(0, struct.pack('>HxxQ', self.properties.INDEX, + self.body_size)) + return self._marshal(pieces) + + +class Body(Frame): + """Body frame object mapping class. 
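A byte-level sketch of the generic layout produced by Frame._marshal above: frame type (1 octet), channel number (2 octets), payload size (4 octets), the payload itself, then the FRAME_END octet. The heartbeat frame (type 8, channel 0, empty payload) is the smallest concrete instance; the two constants are copied locally for illustration.

import struct

FRAME_HEARTBEAT = 8  # spec.FRAME_HEARTBEAT
FRAME_END = 206      # spec.FRAME_END (0xCE)

payload = b''  # heartbeats carry no payload
raw = (struct.pack('>BHI', FRAME_HEARTBEAT, 0, len(payload))
       + payload + struct.pack('B', FRAME_END))
assert raw == b'\x08\x00\x00\x00\x00\x00\x00\xce'  # 8 bytes on the wire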
AMQP content body frames are mapped on + to this base class for getting/setting of attributes/data. + + """ + NAME = 'Body' + + def __init__(self, channel_number, fragment): + """ + Parameters: + + - channel_number: int + - fragment: unicode or str + """ + Frame.__init__(self, spec.FRAME_BODY, channel_number) + self.fragment = fragment + + def marshal(self): + """Return the AMQP binary encoded value of the frame + + :rtype: str + + """ + return self._marshal([self.fragment]) + + +class Heartbeat(Frame): + """Heartbeat frame object mapping class. AMQP Heartbeat frames are mapped + on to this class for a common access structure to the attributes/data + values. + + """ + NAME = 'Heartbeat' + + def __init__(self): + """Create a new instance of the Heartbeat frame""" + Frame.__init__(self, spec.FRAME_HEARTBEAT, 0) + + def marshal(self): + """Return the AMQP binary encoded value of the frame + + :rtype: str + + """ + return self._marshal(list()) + + +class ProtocolHeader(amqp_object.AMQPObject): + """AMQP Protocol header frame class which provides a pythonic interface + for creating AMQP Protocol headers + + """ + NAME = 'ProtocolHeader' + + def __init__(self, major=None, minor=None, revision=None): + """Construct a Protocol Header frame object for the specified AMQP + version + + :param int major: Major version number + :param int minor: Minor version number + :param int revision: Revision + + """ + self.frame_type = -1 + self.major = major or spec.PROTOCOL_VERSION[0] + self.minor = minor or spec.PROTOCOL_VERSION[1] + self.revision = revision or spec.PROTOCOL_VERSION[2] + + def marshal(self): + """Return the full AMQP wire protocol frame data representation of the + ProtocolHeader frame + + :rtype: str + + """ + return b'AMQP' + struct.pack('BBBB', 0, self.major, self.minor, + self.revision) + + +def decode_frame(data_in): + """Receives raw socket data and attempts to turn it into a frame. 
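A short round-trip sketch for this function, using the two frames defined above that take no arguments (pure frame-level code, no broker required):

from pika import frame

# a heartbeat marshals to 8 bytes and decodes back to a Heartbeat instance
raw = frame.Heartbeat().marshal()  # b'\x08\x00\x00\x00\x00\x00\x00\xce'
consumed, decoded = frame.decode_frame(raw)
assert consumed == 8 and isinstance(decoded, frame.Heartbeat)

# the 8-byte protocol header is recognized as a special case
hdr = frame.ProtocolHeader().marshal()  # b'AMQP\x00\x00\t\x01' for 0-9-1
consumed, decoded = frame.decode_frame(hdr)
assert consumed == 8 and isinstance(decoded, frame.ProtocolHeader)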
+ Returns bytes used to make the frame and the frame + + :param str data_in: The raw data stream + :rtype: tuple(bytes consumed, frame) + :raises: pika.exceptions.InvalidFrameError + + """ + # Look to see if it's a protocol header frame + try: + if data_in[0:4] == b'AMQP': + major, minor, revision = struct.unpack_from('BBB', data_in, 5) + return 8, ProtocolHeader(major, minor, revision) + except (IndexError, struct.error): + return 0, None + + # Get the Frame Type, Channel Number and Frame Size + try: + (frame_type, channel_number, + frame_size) = struct.unpack('>BHL', data_in[0:7]) + except struct.error: + return 0, None + + # Get the frame data + frame_end = spec.FRAME_HEADER_SIZE + frame_size + spec.FRAME_END_SIZE + + # We don't have all of the frame yet + if frame_end > len(data_in): + return 0, None + + # The Frame termination chr is wrong + if data_in[frame_end - 1:frame_end] != byte(spec.FRAME_END): + raise exceptions.InvalidFrameError("Invalid FRAME_END marker") + + # Get the raw frame data + frame_data = data_in[spec.FRAME_HEADER_SIZE:frame_end - 1] + + if frame_type == spec.FRAME_METHOD: + + # Get the Method ID from the frame data + method_id = struct.unpack_from('>I', frame_data)[0] + + # Get a Method object for this method_id + method = spec.methods[method_id]() + + # Decode the content + method.decode(frame_data, 4) + + # Return the amount of data consumed and the Method object + return frame_end, Method(channel_number, method) + + elif frame_type == spec.FRAME_HEADER: + + # Return the header class and body size + class_id, weight, body_size = struct.unpack_from('>HHQ', frame_data) + + # Get the Properties type + properties = spec.props[class_id]() + + # Decode the properties + out = properties.decode(frame_data[12:]) + + # Return a Header frame + return frame_end, Header(channel_number, body_size, properties) + + elif frame_type == spec.FRAME_BODY: + + # Return the amount of data consumed and the Body frame w/ data + return frame_end, Body(channel_number, frame_data) + + elif frame_type == spec.FRAME_HEARTBEAT: + + # Return the amount of data and a Heartbeat frame + return frame_end, Heartbeat() + + raise exceptions.InvalidFrameError("Unknown frame type: %i" % frame_type) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/heartbeat.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/heartbeat.py new file mode 100644 index 000000000..cb9a1db05 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/heartbeat.py @@ -0,0 +1,214 @@ +"""Handle AMQP Heartbeats""" +import logging + +from pika import frame + +LOGGER = logging.getLogger(__name__) + + +class HeartbeatChecker(object): + """Sends heartbeats to the broker. The provided timeout is used to + determine if the connection is stale - no received heartbeats or + other activity will close the connection. See the parameter list for more + details. + + """ + _CONNECTION_FORCED = 320 + _STALE_CONNECTION = "No activity or too many missed heartbeats in the last %i seconds" + + def __init__(self, connection, timeout): + """Create an object that will check for activity on the provided + connection as well as receive heartbeat frames from the broker. The + timeout parameter defines a window within which this activity must + happen. If not, the connection is considered dead and closed. + + The value passed for timeout is also used to calculate an interval + at which a heartbeat frame is sent to the broker. The interval is + equal to the timeout value divided by two. 
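In concrete numbers, assuming a negotiated heartbeat timeout of 60 seconds (the 5-second margin on the connectivity check is motivated in the comments just below):

timeout = 60
send_interval = float(timeout) / 2  # a heartbeat frame is sent every 30s
check_interval = timeout + 5        # connectivity is checked every 65s
assert (send_interval, check_interval) == (30.0, 65)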
+ + :param pika.connection.Connection: Connection object + :param int timeout: Connection idle timeout. If no activity occurs on the + connection nor heartbeat frames received during the + timeout window the connection will be closed. The + interval used to send heartbeats is calculated from + this value by dividing it by two. + + """ + if timeout < 1: + raise ValueError('timeout must >= 0, but got %r' % (timeout,)) + + self._connection = connection + + # Note: see the following documents: + # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout + # https://github.com/pika/pika/pull/1072 + # https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion + # There is a certain amount of confusion around how client developers + # interpret the spec. The spec talks about 2 missed heartbeats as a + # *timeout*, plus that any activity on the connection counts for a + # heartbeat. This is to avoid edge cases and not to depend on network + # latency. + self._timeout = timeout + + self._send_interval = float(timeout) / 2 + + # Note: Pika will calculate the heartbeat / connectivity check interval + # by adding 5 seconds to the negotiated timeout to leave a bit of room + # for broker heartbeats that may be right at the edge of the timeout + # window. This is different behavior from the RabbitMQ Java client and + # the spec that suggests a check interval equivalent to two times the + # heartbeat timeout value. But, one advantage of adding a small amount + # is that bad connections will be detected faster. + # https://github.com/pika/pika/pull/1072#issuecomment-397850795 + # https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780 + # https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192 + self._check_interval = timeout + 5 + + LOGGER.debug('timeout: %f send_interval: %f check_interval: %f', + self._timeout, + self._send_interval, + self._check_interval) + + # Initialize counters + self._bytes_received = 0 + self._bytes_sent = 0 + self._heartbeat_frames_received = 0 + self._heartbeat_frames_sent = 0 + self._idle_byte_intervals = 0 + + self._send_timer = None + self._check_timer = None + self._start_send_timer() + self._start_check_timer() + + @property + def bytes_received_on_connection(self): + """Return the number of bytes received by the connection bytes object. + + :rtype int + + """ + return self._connection.bytes_received + + @property + def connection_is_idle(self): + """Returns true if the byte count hasn't changed in enough intervals + to trip the max idle threshold. + + """ + return self._idle_byte_intervals > 0 + + def received(self): + """Called when a heartbeat is received""" + LOGGER.debug('Received heartbeat frame') + self._heartbeat_frames_received += 1 + + def _send_heartbeat(self): + """Invoked by a timer to send a heartbeat when we need to. + + """ + LOGGER.debug('Sending heartbeat frame') + self._send_heartbeat_frame() + self._start_send_timer() + + def _check_heartbeat(self): + """Invoked by a timer to check for broker heartbeats. Checks to see + if we've missed any heartbeats and disconnect our connection if it's + been idle too long. 
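The rule is deliberately simple, as this condensed mirror of _check_heartbeat and connection_is_idle shows: any bytes received during a check window reset the idle counter, and a single fully idle window marks the connection stale.

def is_stale(idle_byte_intervals, received_data):
    # condensed sketch of the real logic, which tracks the byte counters
    if received_data:
        idle_byte_intervals = 0
    else:
        idle_byte_intervals += 1
    return idle_byte_intervals > 0  # connection_is_idle

assert is_stale(0, received_data=True) is False   # activity keeps it alive
assert is_stale(0, received_data=False) is True   # one idle window trips it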
+ + """ + if self._has_received_data: + self._idle_byte_intervals = 0 + else: + # Connection has not received any data, increment the counter + self._idle_byte_intervals += 1 + + LOGGER.debug('Received %i heartbeat frames, sent %i, ' + 'idle intervals %i', + self._heartbeat_frames_received, + self._heartbeat_frames_sent, + self._idle_byte_intervals) + + if self.connection_is_idle: + self._close_connection() + return + + self._start_check_timer() + + def stop(self): + """Stop the heartbeat checker""" + if self._send_timer: + LOGGER.debug('Removing timer for next heartbeat send interval') + self._connection.remove_timeout(self._send_timer) # pylint: disable=W0212 + self._send_timer = None + if self._check_timer: + LOGGER.debug('Removing timer for next heartbeat check interval') + self._connection.remove_timeout(self._check_timer) # pylint: disable=W0212 + self._check_timer = None + + def _close_connection(self): + """Close the connection with the AMQP Connection-Forced value.""" + LOGGER.info('Connection is idle, %i stale byte intervals', + self._idle_byte_intervals) + text = HeartbeatChecker._STALE_CONNECTION % self._timeout + + # NOTE: this won't achieve the perceived effect of sending + # Connection.Close to broker, because the frame will only get buffered + # in memory before the next statement terminates the connection. + self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text) + + self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED, # pylint: disable=W0212 + text) + + @property + def _has_received_data(self): + """Returns True if the connection has received data. + + :rtype: bool + + """ + return self._bytes_received != self.bytes_received_on_connection + + @staticmethod + def _new_heartbeat_frame(): + """Return a new heartbeat frame. + + :rtype pika.frame.Heartbeat + + """ + return frame.Heartbeat() + + def _send_heartbeat_frame(self): + """Send a heartbeat frame on the connection. + + """ + LOGGER.debug('Sending heartbeat frame') + self._connection._send_frame( # pylint: disable=W0212 + self._new_heartbeat_frame()) + self._heartbeat_frames_sent += 1 + + def _start_send_timer(self): + """Start a new heartbeat send timer.""" + self._send_timer = self._connection.add_timeout( # pylint: disable=W0212 + self._send_interval, + self._send_heartbeat) + + def _start_check_timer(self): + """Start a new heartbeat check timer.""" + # Note: update counters now to get current values + # at the start of the timeout window. Values will be + # checked against the connection's byte count at the + # end of the window + self._update_counters() + + self._check_timer = self._connection.add_timeout( # pylint: disable=W0212 + self._check_interval, + self._check_heartbeat) + + def _update_counters(self): + """Update the internal counters for bytes sent and received and the + number of frames received + + """ + self._bytes_sent = self._connection.bytes_sent + self._bytes_received = self._connection.bytes_received diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/spec.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/spec.py new file mode 100644 index 000000000..f1fe225dc --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/spec.py @@ -0,0 +1,2319 @@ +""" +AMQP Specification +================== +This module implements the constants and classes that comprise AMQP protocol +level constructs. It should rarely be directly referenced outside of Pika's +own internal use. + +.. note:: Auto-generated code by codegen.py, do not edit directly. 
Pull +requests to this file without accompanying ``utils/codegen.py`` changes will be +rejected. + +""" + +import struct +from pika import amqp_object +from pika import data +from pika.compat import str_or_bytes, unicode_type + +# Python 3 support for str object +str = bytes + +PROTOCOL_VERSION = (0, 9, 1) +PORT = 5672 + +ACCESS_REFUSED = 403 +CHANNEL_ERROR = 504 +COMMAND_INVALID = 503 +CONNECTION_FORCED = 320 +CONTENT_TOO_LARGE = 311 +FRAME_BODY = 3 +FRAME_END = 206 +FRAME_END_SIZE = 1 +FRAME_ERROR = 501 +FRAME_HEADER = 2 +FRAME_HEADER_SIZE = 7 +FRAME_HEARTBEAT = 8 +FRAME_MAX_SIZE = 131072 +FRAME_METHOD = 1 +FRAME_MIN_SIZE = 4096 +INTERNAL_ERROR = 541 +INVALID_PATH = 402 +NOT_ALLOWED = 530 +NOT_FOUND = 404 +NOT_IMPLEMENTED = 540 +NO_CONSUMERS = 313 +NO_ROUTE = 312 +PERSISTENT_DELIVERY_MODE = 2 +PRECONDITION_FAILED = 406 +REPLY_SUCCESS = 200 +RESOURCE_ERROR = 506 +RESOURCE_LOCKED = 405 +SYNTAX_ERROR = 502 +TRANSIENT_DELIVERY_MODE = 1 +UNEXPECTED_FRAME = 505 + + +class Connection(amqp_object.Class): + + INDEX = 0x000A # 10 + NAME = 'Connection' + + class Start(amqp_object.Method): + + INDEX = 0x000A000A # 10, 10; 655370 + NAME = 'Connection.Start' + + def __init__(self, version_major=0, version_minor=9, server_properties=None, mechanisms='PLAIN', locales='en_US'): + self.version_major = version_major + self.version_minor = version_minor + self.server_properties = server_properties + self.mechanisms = mechanisms + self.locales = locales + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.version_major = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.version_minor = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + (self.server_properties, offset) = data.decode_table(encoded, offset) + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.mechanisms = encoded[offset:offset + length] + try: + self.mechanisms = str(self.mechanisms) + except UnicodeEncodeError: + pass + offset += length + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.locales = encoded[offset:offset + length] + try: + self.locales = str(self.locales) + except UnicodeEncodeError: + pass + offset += length + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('B', self.version_major)) + pieces.append(struct.pack('B', self.version_minor)) + data.encode_table(pieces, self.server_properties) + assert isinstance(self.mechanisms, str_or_bytes),\ + 'A non-string value was supplied for self.mechanisms' + value = self.mechanisms.encode('utf-8') if isinstance(self.mechanisms, unicode_type) else self.mechanisms + pieces.append(struct.pack('>I', len(value))) + pieces.append(value) + assert isinstance(self.locales, str_or_bytes),\ + 'A non-string value was supplied for self.locales' + value = self.locales.encode('utf-8') if isinstance(self.locales, unicode_type) else self.locales + pieces.append(struct.pack('>I', len(value))) + pieces.append(value) + return pieces + + class StartOk(amqp_object.Method): + + INDEX = 0x000A000B # 10, 11; 655371 + NAME = 'Connection.StartOk' + + def __init__(self, client_properties=None, mechanism='PLAIN', response=None, locale='en_US'): + self.client_properties = client_properties + self.mechanism = mechanism + self.response = response + self.locale = locale + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + (self.client_properties, offset) = data.decode_table(encoded, offset) + self.mechanism, offset = 
data.decode_short_string(encoded, offset) + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.response = encoded[offset:offset + length] + try: + self.response = str(self.response) + except UnicodeEncodeError: + pass + offset += length + self.locale, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + data.encode_table(pieces, self.client_properties) + assert isinstance(self.mechanism, str_or_bytes),\ + 'A non-string value was supplied for self.mechanism' + data.encode_short_string(pieces, self.mechanism) + assert isinstance(self.response, str_or_bytes),\ + 'A non-string value was supplied for self.response' + value = self.response.encode('utf-8') if isinstance(self.response, unicode_type) else self.response + pieces.append(struct.pack('>I', len(value))) + pieces.append(value) + assert isinstance(self.locale, str_or_bytes),\ + 'A non-string value was supplied for self.locale' + data.encode_short_string(pieces, self.locale) + return pieces + + class Secure(amqp_object.Method): + + INDEX = 0x000A0014 # 10, 20; 655380 + NAME = 'Connection.Secure' + + def __init__(self, challenge=None): + self.challenge = challenge + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.challenge = encoded[offset:offset + length] + try: + self.challenge = str(self.challenge) + except UnicodeEncodeError: + pass + offset += length + return self + + def encode(self): + pieces = list() + assert isinstance(self.challenge, str_or_bytes),\ + 'A non-string value was supplied for self.challenge' + value = self.challenge.encode('utf-8') if isinstance(self.challenge, unicode_type) else self.challenge + pieces.append(struct.pack('>I', len(value))) + pieces.append(value) + return pieces + + class SecureOk(amqp_object.Method): + + INDEX = 0x000A0015 # 10, 21; 655381 + NAME = 'Connection.SecureOk' + + def __init__(self, response=None): + self.response = response + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.response = encoded[offset:offset + length] + try: + self.response = str(self.response) + except UnicodeEncodeError: + pass + offset += length + return self + + def encode(self): + pieces = list() + assert isinstance(self.response, str_or_bytes),\ + 'A non-string value was supplied for self.response' + value = self.response.encode('utf-8') if isinstance(self.response, unicode_type) else self.response + pieces.append(struct.pack('>I', len(value))) + pieces.append(value) + return pieces + + class Tune(amqp_object.Method): + + INDEX = 0x000A001E # 10, 30; 655390 + NAME = 'Connection.Tune' + + def __init__(self, channel_max=0, frame_max=0, heartbeat=0): + self.channel_max = channel_max + self.frame_max = frame_max + self.heartbeat = heartbeat + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.channel_max = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.frame_max = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.heartbeat = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.channel_max)) + pieces.append(struct.pack('>I', self.frame_max)) + pieces.append(struct.pack('>H', self.heartbeat)) + return pieces + + class 
TuneOk(amqp_object.Method): + + INDEX = 0x000A001F # 10, 31; 655391 + NAME = 'Connection.TuneOk' + + def __init__(self, channel_max=0, frame_max=0, heartbeat=0): + self.channel_max = channel_max + self.frame_max = frame_max + self.heartbeat = heartbeat + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.channel_max = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.frame_max = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.heartbeat = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.channel_max)) + pieces.append(struct.pack('>I', self.frame_max)) + pieces.append(struct.pack('>H', self.heartbeat)) + return pieces + + class Open(amqp_object.Method): + + INDEX = 0x000A0028 # 10, 40; 655400 + NAME = 'Connection.Open' + + def __init__(self, virtual_host='/', capabilities='', insist=False): + self.virtual_host = virtual_host + self.capabilities = capabilities + self.insist = insist + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.virtual_host, offset = data.decode_short_string(encoded, offset) + self.capabilities, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.insist = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + assert isinstance(self.virtual_host, str_or_bytes),\ + 'A non-string value was supplied for self.virtual_host' + data.encode_short_string(pieces, self.virtual_host) + assert isinstance(self.capabilities, str_or_bytes),\ + 'A non-string value was supplied for self.capabilities' + data.encode_short_string(pieces, self.capabilities) + bit_buffer = 0 + if self.insist: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class OpenOk(amqp_object.Method): + + INDEX = 0x000A0029 # 10, 41; 655401 + NAME = 'Connection.OpenOk' + + def __init__(self, known_hosts=''): + self.known_hosts = known_hosts + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.known_hosts, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + assert isinstance(self.known_hosts, str_or_bytes),\ + 'A non-string value was supplied for self.known_hosts' + data.encode_short_string(pieces, self.known_hosts) + return pieces + + class Close(amqp_object.Method): + + INDEX = 0x000A0032 # 10, 50; 655410 + NAME = 'Connection.Close' + + def __init__(self, reply_code=None, reply_text='', class_id=None, method_id=None): + self.reply_code = reply_code + self.reply_text = reply_text + self.class_id = class_id + self.method_id = method_id + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.reply_code = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.reply_text, offset = data.decode_short_string(encoded, offset) + self.class_id = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.method_id = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.reply_code)) + assert isinstance(self.reply_text, str_or_bytes),\ + 'A non-string value was supplied for self.reply_text' + data.encode_short_string(pieces, self.reply_text) + pieces.append(struct.pack('>H', 
self.class_id)) + pieces.append(struct.pack('>H', self.method_id)) + return pieces + + class CloseOk(amqp_object.Method): + + INDEX = 0x000A0033 # 10, 51; 655411 + NAME = 'Connection.CloseOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Blocked(amqp_object.Method): + + INDEX = 0x000A003C # 10, 60; 655420 + NAME = 'Connection.Blocked' + + def __init__(self, reason=''): + self.reason = reason + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.reason, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + assert isinstance(self.reason, str_or_bytes),\ + 'A non-string value was supplied for self.reason' + data.encode_short_string(pieces, self.reason) + return pieces + + class Unblocked(amqp_object.Method): + + INDEX = 0x000A003D # 10, 61; 655421 + NAME = 'Connection.Unblocked' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + +class Channel(amqp_object.Class): + + INDEX = 0x0014 # 20 + NAME = 'Channel' + + class Open(amqp_object.Method): + + INDEX = 0x0014000A # 20, 10; 1310730 + NAME = 'Channel.Open' + + def __init__(self, out_of_band=''): + self.out_of_band = out_of_band + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.out_of_band, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + assert isinstance(self.out_of_band, str_or_bytes),\ + 'A non-string value was supplied for self.out_of_band' + data.encode_short_string(pieces, self.out_of_band) + return pieces + + class OpenOk(amqp_object.Method): + + INDEX = 0x0014000B # 20, 11; 1310731 + NAME = 'Channel.OpenOk' + + def __init__(self, channel_id=''): + self.channel_id = channel_id + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + length = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.channel_id = encoded[offset:offset + length] + try: + self.channel_id = str(self.channel_id) + except UnicodeEncodeError: + pass + offset += length + return self + + def encode(self): + pieces = list() + assert isinstance(self.channel_id, str_or_bytes),\ + 'A non-string value was supplied for self.channel_id' + value = self.channel_id.encode('utf-8') if isinstance(self.channel_id, unicode_type) else self.channel_id + pieces.append(struct.pack('>I', len(value))) + pieces.append(value) + return pieces + + class Flow(amqp_object.Method): + + INDEX = 0x00140014 # 20, 20; 1310740 + NAME = 'Channel.Flow' + + def __init__(self, active=None): + self.active = active + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.active = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + bit_buffer = 0 + if self.active: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class FlowOk(amqp_object.Method): + + INDEX = 0x00140015 # 20, 21; 1310741 + NAME = 'Channel.FlowOk' + + def __init__(self, active=None): + self.active = active + + @property + def synchronous(self): + return False + + def decode(self, 
encoded, offset=0): + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.active = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + bit_buffer = 0 + if self.active: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class Close(amqp_object.Method): + + INDEX = 0x00140028 # 20, 40; 1310760 + NAME = 'Channel.Close' + + def __init__(self, reply_code=None, reply_text='', class_id=None, method_id=None): + self.reply_code = reply_code + self.reply_text = reply_text + self.class_id = class_id + self.method_id = method_id + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.reply_code = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.reply_text, offset = data.decode_short_string(encoded, offset) + self.class_id = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.method_id = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.reply_code)) + assert isinstance(self.reply_text, str_or_bytes),\ + 'A non-string value was supplied for self.reply_text' + data.encode_short_string(pieces, self.reply_text) + pieces.append(struct.pack('>H', self.class_id)) + pieces.append(struct.pack('>H', self.method_id)) + return pieces + + class CloseOk(amqp_object.Method): + + INDEX = 0x00140029 # 20, 41; 1310761 + NAME = 'Channel.CloseOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + +class Access(amqp_object.Class): + + INDEX = 0x001E # 30 + NAME = 'Access' + + class Request(amqp_object.Method): + + INDEX = 0x001E000A # 30, 10; 1966090 + NAME = 'Access.Request' + + def __init__(self, realm='/data', exclusive=False, passive=True, active=True, write=True, read=True): + self.realm = realm + self.exclusive = exclusive + self.passive = passive + self.active = active + self.write = write + self.read = read + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.realm, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.exclusive = (bit_buffer & (1 << 0)) != 0 + self.passive = (bit_buffer & (1 << 1)) != 0 + self.active = (bit_buffer & (1 << 2)) != 0 + self.write = (bit_buffer & (1 << 3)) != 0 + self.read = (bit_buffer & (1 << 4)) != 0 + return self + + def encode(self): + pieces = list() + assert isinstance(self.realm, str_or_bytes),\ + 'A non-string value was supplied for self.realm' + data.encode_short_string(pieces, self.realm) + bit_buffer = 0 + if self.exclusive: + bit_buffer = bit_buffer | (1 << 0) + if self.passive: + bit_buffer = bit_buffer | (1 << 1) + if self.active: + bit_buffer = bit_buffer | (1 << 2) + if self.write: + bit_buffer = bit_buffer | (1 << 3) + if self.read: + bit_buffer = bit_buffer | (1 << 4) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class RequestOk(amqp_object.Method): + + INDEX = 0x001E000B # 30, 11; 1966091 + NAME = 'Access.RequestOk' + + def __init__(self, ticket=1): + self.ticket = ticket + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + return self + + def encode(self): + pieces = list() + 
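+ # the granted access ticket is encoded as a big-endian unsigned short ('>H')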
pieces.append(struct.pack('>H', self.ticket)) + return pieces + + +class Exchange(amqp_object.Class): + + INDEX = 0x0028 # 40 + NAME = 'Exchange' + + class Declare(amqp_object.Method): + + INDEX = 0x0028000A # 40, 10; 2621450 + NAME = 'Exchange.Declare' + + def __init__(self, ticket=0, exchange=None, type='direct', passive=False, durable=False, auto_delete=False, internal=False, nowait=False, arguments={}): + self.ticket = ticket + self.exchange = exchange + self.type = type + self.passive = passive + self.durable = durable + self.auto_delete = auto_delete + self.internal = internal + self.nowait = nowait + self.arguments = arguments + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.exchange, offset = data.decode_short_string(encoded, offset) + self.type, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.passive = (bit_buffer & (1 << 0)) != 0 + self.durable = (bit_buffer & (1 << 1)) != 0 + self.auto_delete = (bit_buffer & (1 << 2)) != 0 + self.internal = (bit_buffer & (1 << 3)) != 0 + self.nowait = (bit_buffer & (1 << 4)) != 0 + (self.arguments, offset) = data.decode_table(encoded, offset) + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + assert isinstance(self.type, str_or_bytes),\ + 'A non-string value was supplied for self.type' + data.encode_short_string(pieces, self.type) + bit_buffer = 0 + if self.passive: + bit_buffer = bit_buffer | (1 << 0) + if self.durable: + bit_buffer = bit_buffer | (1 << 1) + if self.auto_delete: + bit_buffer = bit_buffer | (1 << 2) + if self.internal: + bit_buffer = bit_buffer | (1 << 3) + if self.nowait: + bit_buffer = bit_buffer | (1 << 4) + pieces.append(struct.pack('B', bit_buffer)) + data.encode_table(pieces, self.arguments) + return pieces + + class DeclareOk(amqp_object.Method): + + INDEX = 0x0028000B # 40, 11; 2621451 + NAME = 'Exchange.DeclareOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Delete(amqp_object.Method): + + INDEX = 0x00280014 # 40, 20; 2621460 + NAME = 'Exchange.Delete' + + def __init__(self, ticket=0, exchange=None, if_unused=False, nowait=False): + self.ticket = ticket + self.exchange = exchange + self.if_unused = if_unused + self.nowait = nowait + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.exchange, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.if_unused = (bit_buffer & (1 << 0)) != 0 + self.nowait = (bit_buffer & (1 << 1)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + bit_buffer = 0 + if self.if_unused: + bit_buffer = bit_buffer | (1 << 0) + if self.nowait: + bit_buffer = bit_buffer | (1 << 1) + pieces.append(struct.pack('B', bit_buffer)) + 
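+ # if_unused (bit 0) and nowait (bit 1) were packed into the flag octet above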
return pieces + + class DeleteOk(amqp_object.Method): + + INDEX = 0x00280015 # 40, 21; 2621461 + NAME = 'Exchange.DeleteOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Bind(amqp_object.Method): + + INDEX = 0x0028001E # 40, 30; 2621470 + NAME = 'Exchange.Bind' + + def __init__(self, ticket=0, destination=None, source=None, routing_key='', nowait=False, arguments={}): + self.ticket = ticket + self.destination = destination + self.source = source + self.routing_key = routing_key + self.nowait = nowait + self.arguments = arguments + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.destination, offset = data.decode_short_string(encoded, offset) + self.source, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.nowait = (bit_buffer & (1 << 0)) != 0 + (self.arguments, offset) = data.decode_table(encoded, offset) + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.destination, str_or_bytes),\ + 'A non-string value was supplied for self.destination' + data.encode_short_string(pieces, self.destination) + assert isinstance(self.source, str_or_bytes),\ + 'A non-string value was supplied for self.source' + data.encode_short_string(pieces, self.source) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + bit_buffer = 0 + if self.nowait: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + data.encode_table(pieces, self.arguments) + return pieces + + class BindOk(amqp_object.Method): + + INDEX = 0x0028001F # 40, 31; 2621471 + NAME = 'Exchange.BindOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Unbind(amqp_object.Method): + + INDEX = 0x00280028 # 40, 40; 2621480 + NAME = 'Exchange.Unbind' + + def __init__(self, ticket=0, destination=None, source=None, routing_key='', nowait=False, arguments={}): + self.ticket = ticket + self.destination = destination + self.source = source + self.routing_key = routing_key + self.nowait = nowait + self.arguments = arguments + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.destination, offset = data.decode_short_string(encoded, offset) + self.source, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.nowait = (bit_buffer & (1 << 0)) != 0 + (self.arguments, offset) = data.decode_table(encoded, offset) + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.destination, str_or_bytes),\ + 'A non-string value was supplied for self.destination' + data.encode_short_string(pieces, self.destination) + assert isinstance(self.source, 
str_or_bytes),\ + 'A non-string value was supplied for self.source' + data.encode_short_string(pieces, self.source) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + bit_buffer = 0 + if self.nowait: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + data.encode_table(pieces, self.arguments) + return pieces + + class UnbindOk(amqp_object.Method): + + INDEX = 0x00280033 # 40, 51; 2621491 + NAME = 'Exchange.UnbindOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + +class Queue(amqp_object.Class): + + INDEX = 0x0032 # 50 + NAME = 'Queue' + + class Declare(amqp_object.Method): + + INDEX = 0x0032000A # 50, 10; 3276810 + NAME = 'Queue.Declare' + + def __init__(self, ticket=0, queue='', passive=False, durable=False, exclusive=False, auto_delete=False, nowait=False, arguments={}): + self.ticket = ticket + self.queue = queue + self.passive = passive + self.durable = durable + self.exclusive = exclusive + self.auto_delete = auto_delete + self.nowait = nowait + self.arguments = arguments + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.queue, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.passive = (bit_buffer & (1 << 0)) != 0 + self.durable = (bit_buffer & (1 << 1)) != 0 + self.exclusive = (bit_buffer & (1 << 2)) != 0 + self.auto_delete = (bit_buffer & (1 << 3)) != 0 + self.nowait = (bit_buffer & (1 << 4)) != 0 + (self.arguments, offset) = data.decode_table(encoded, offset) + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + bit_buffer = 0 + if self.passive: + bit_buffer = bit_buffer | (1 << 0) + if self.durable: + bit_buffer = bit_buffer | (1 << 1) + if self.exclusive: + bit_buffer = bit_buffer | (1 << 2) + if self.auto_delete: + bit_buffer = bit_buffer | (1 << 3) + if self.nowait: + bit_buffer = bit_buffer | (1 << 4) + pieces.append(struct.pack('B', bit_buffer)) + data.encode_table(pieces, self.arguments) + return pieces + + class DeclareOk(amqp_object.Method): + + INDEX = 0x0032000B # 50, 11; 3276811 + NAME = 'Queue.DeclareOk' + + def __init__(self, queue=None, message_count=None, consumer_count=None): + self.queue = queue + self.message_count = message_count + self.consumer_count = consumer_count + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.queue, offset = data.decode_short_string(encoded, offset) + self.message_count = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.consumer_count = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + return self + + def encode(self): + pieces = list() + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + pieces.append(struct.pack('>I', self.message_count)) + pieces.append(struct.pack('>I', self.consumer_count)) + return pieces + + class Bind(amqp_object.Method): + + INDEX = 0x00320014 # 50, 
20; 3276820 + NAME = 'Queue.Bind' + + def __init__(self, ticket=0, queue='', exchange=None, routing_key='', nowait=False, arguments={}): + self.ticket = ticket + self.queue = queue + self.exchange = exchange + self.routing_key = routing_key + self.nowait = nowait + self.arguments = arguments + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.queue, offset = data.decode_short_string(encoded, offset) + self.exchange, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.nowait = (bit_buffer & (1 << 0)) != 0 + (self.arguments, offset) = data.decode_table(encoded, offset) + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + bit_buffer = 0 + if self.nowait: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + data.encode_table(pieces, self.arguments) + return pieces + + class BindOk(amqp_object.Method): + + INDEX = 0x00320015 # 50, 21; 3276821 + NAME = 'Queue.BindOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Purge(amqp_object.Method): + + INDEX = 0x0032001E # 50, 30; 3276830 + NAME = 'Queue.Purge' + + def __init__(self, ticket=0, queue='', nowait=False): + self.ticket = ticket + self.queue = queue + self.nowait = nowait + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.queue, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.nowait = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + bit_buffer = 0 + if self.nowait: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class PurgeOk(amqp_object.Method): + + INDEX = 0x0032001F # 50, 31; 3276831 + NAME = 'Queue.PurgeOk' + + def __init__(self, message_count=None): + self.message_count = message_count + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.message_count = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>I', self.message_count)) + return pieces + + class Delete(amqp_object.Method): + + INDEX = 0x00320028 # 50, 40; 3276840 + NAME = 'Queue.Delete' + + def __init__(self, ticket=0, queue='', if_unused=False, if_empty=False, nowait=False): + self.ticket = 
ticket + self.queue = queue + self.if_unused = if_unused + self.if_empty = if_empty + self.nowait = nowait + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.queue, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.if_unused = (bit_buffer & (1 << 0)) != 0 + self.if_empty = (bit_buffer & (1 << 1)) != 0 + self.nowait = (bit_buffer & (1 << 2)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + bit_buffer = 0 + if self.if_unused: + bit_buffer = bit_buffer | (1 << 0) + if self.if_empty: + bit_buffer = bit_buffer | (1 << 1) + if self.nowait: + bit_buffer = bit_buffer | (1 << 2) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class DeleteOk(amqp_object.Method): + + INDEX = 0x00320029 # 50, 41; 3276841 + NAME = 'Queue.DeleteOk' + + def __init__(self, message_count=None): + self.message_count = message_count + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.message_count = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>I', self.message_count)) + return pieces + + class Unbind(amqp_object.Method): + + INDEX = 0x00320032 # 50, 50; 3276850 + NAME = 'Queue.Unbind' + + def __init__(self, ticket=0, queue='', exchange=None, routing_key='', arguments={}): + self.ticket = ticket + self.queue = queue + self.exchange = exchange + self.routing_key = routing_key + self.arguments = arguments + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.queue, offset = data.decode_short_string(encoded, offset) + self.exchange, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + (self.arguments, offset) = data.decode_table(encoded, offset) + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + data.encode_table(pieces, self.arguments) + return pieces + + class UnbindOk(amqp_object.Method): + + INDEX = 0x00320033 # 50, 51; 3276851 + NAME = 'Queue.UnbindOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + +class Basic(amqp_object.Class): + + INDEX = 0x003C # 60 + NAME = 'Basic' + + class Qos(amqp_object.Method): + + INDEX = 0x003C000A # 60, 10; 3932170 + NAME = 'Basic.Qos' + + def __init__(self, prefetch_size=0, prefetch_count=0, global_=False): + self.prefetch_size = prefetch_size + self.prefetch_count = 
prefetch_count + self.global_ = global_ + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.prefetch_size = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + self.prefetch_count = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.global_ = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>I', self.prefetch_size)) + pieces.append(struct.pack('>H', self.prefetch_count)) + bit_buffer = 0 + if self.global_: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class QosOk(amqp_object.Method): + + INDEX = 0x003C000B # 60, 11; 3932171 + NAME = 'Basic.QosOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Consume(amqp_object.Method): + + INDEX = 0x003C0014 # 60, 20; 3932180 + NAME = 'Basic.Consume' + + def __init__(self, ticket=0, queue='', consumer_tag='', no_local=False, no_ack=False, exclusive=False, nowait=False, arguments={}): + self.ticket = ticket + self.queue = queue + self.consumer_tag = consumer_tag + self.no_local = no_local + self.no_ack = no_ack + self.exclusive = exclusive + self.nowait = nowait + self.arguments = arguments + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.queue, offset = data.decode_short_string(encoded, offset) + self.consumer_tag, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.no_local = (bit_buffer & (1 << 0)) != 0 + self.no_ack = (bit_buffer & (1 << 1)) != 0 + self.exclusive = (bit_buffer & (1 << 2)) != 0 + self.nowait = (bit_buffer & (1 << 3)) != 0 + (self.arguments, offset) = data.decode_table(encoded, offset) + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + assert isinstance(self.consumer_tag, str_or_bytes),\ + 'A non-string value was supplied for self.consumer_tag' + data.encode_short_string(pieces, self.consumer_tag) + bit_buffer = 0 + if self.no_local: + bit_buffer = bit_buffer | (1 << 0) + if self.no_ack: + bit_buffer = bit_buffer | (1 << 1) + if self.exclusive: + bit_buffer = bit_buffer | (1 << 2) + if self.nowait: + bit_buffer = bit_buffer | (1 << 3) + pieces.append(struct.pack('B', bit_buffer)) + data.encode_table(pieces, self.arguments) + return pieces + + class ConsumeOk(amqp_object.Method): + + INDEX = 0x003C0015 # 60, 21; 3932181 + NAME = 'Basic.ConsumeOk' + + def __init__(self, consumer_tag=None): + self.consumer_tag = consumer_tag + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.consumer_tag, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + assert isinstance(self.consumer_tag, str_or_bytes),\ + 'A non-string value was supplied for self.consumer_tag' + data.encode_short_string(pieces, self.consumer_tag) + return pieces + + class Cancel(amqp_object.Method): + + INDEX = 0x003C001E # 60, 30; 3932190 + NAME = 'Basic.Cancel' + + 
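+ # Note on the bit_buffer pattern used by encode()/decode() throughout this
+ # module: consecutive boolean arguments share a single octet, one flag per
+ # bit. For example, Basic.Consume above with no_local=False, no_ack=True,
+ # exclusive=False, nowait=True packs (1 << 1) | (1 << 3) == 0b1010 == 0x0A.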
def __init__(self, consumer_tag=None, nowait=False): + self.consumer_tag = consumer_tag + self.nowait = nowait + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.consumer_tag, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.nowait = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + assert isinstance(self.consumer_tag, str_or_bytes),\ + 'A non-string value was supplied for self.consumer_tag' + data.encode_short_string(pieces, self.consumer_tag) + bit_buffer = 0 + if self.nowait: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class CancelOk(amqp_object.Method): + + INDEX = 0x003C001F # 60, 31; 3932191 + NAME = 'Basic.CancelOk' + + def __init__(self, consumer_tag=None): + self.consumer_tag = consumer_tag + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.consumer_tag, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + assert isinstance(self.consumer_tag, str_or_bytes),\ + 'A non-string value was supplied for self.consumer_tag' + data.encode_short_string(pieces, self.consumer_tag) + return pieces + + class Publish(amqp_object.Method): + + INDEX = 0x003C0028 # 60, 40; 3932200 + NAME = 'Basic.Publish' + + def __init__(self, ticket=0, exchange='', routing_key='', mandatory=False, immediate=False): + self.ticket = ticket + self.exchange = exchange + self.routing_key = routing_key + self.mandatory = mandatory + self.immediate = immediate + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.exchange, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.mandatory = (bit_buffer & (1 << 0)) != 0 + self.immediate = (bit_buffer & (1 << 1)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + bit_buffer = 0 + if self.mandatory: + bit_buffer = bit_buffer | (1 << 0) + if self.immediate: + bit_buffer = bit_buffer | (1 << 1) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class Return(amqp_object.Method): + + INDEX = 0x003C0032 # 60, 50; 3932210 + NAME = 'Basic.Return' + + def __init__(self, reply_code=None, reply_text='', exchange=None, routing_key=None): + self.reply_code = reply_code + self.reply_text = reply_text + self.exchange = exchange + self.routing_key = routing_key + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.reply_code = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.reply_text, offset = data.decode_short_string(encoded, offset) + self.exchange, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + 
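+ # reply_code (e.g. NO_ROUTE, 312) goes out as a big-endian unsigned short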
pieces.append(struct.pack('>H', self.reply_code)) + assert isinstance(self.reply_text, str_or_bytes),\ + 'A non-string value was supplied for self.reply_text' + data.encode_short_string(pieces, self.reply_text) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + return pieces + + class Deliver(amqp_object.Method): + + INDEX = 0x003C003C # 60, 60; 3932220 + NAME = 'Basic.Deliver' + + def __init__(self, consumer_tag=None, delivery_tag=None, redelivered=False, exchange=None, routing_key=None): + self.consumer_tag = consumer_tag + self.delivery_tag = delivery_tag + self.redelivered = redelivered + self.exchange = exchange + self.routing_key = routing_key + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.consumer_tag, offset = data.decode_short_string(encoded, offset) + self.delivery_tag = struct.unpack_from('>Q', encoded, offset)[0] + offset += 8 + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.redelivered = (bit_buffer & (1 << 0)) != 0 + self.exchange, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + assert isinstance(self.consumer_tag, str_or_bytes),\ + 'A non-string value was supplied for self.consumer_tag' + data.encode_short_string(pieces, self.consumer_tag) + pieces.append(struct.pack('>Q', self.delivery_tag)) + bit_buffer = 0 + if self.redelivered: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + return pieces + + class Get(amqp_object.Method): + + INDEX = 0x003C0046 # 60, 70; 3932230 + NAME = 'Basic.Get' + + def __init__(self, ticket=0, queue='', no_ack=False): + self.ticket = ticket + self.queue = queue + self.no_ack = no_ack + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + self.ticket = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + self.queue, offset = data.decode_short_string(encoded, offset) + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.no_ack = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>H', self.ticket)) + assert isinstance(self.queue, str_or_bytes),\ + 'A non-string value was supplied for self.queue' + data.encode_short_string(pieces, self.queue) + bit_buffer = 0 + if self.no_ack: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class GetOk(amqp_object.Method): + + INDEX = 0x003C0047 # 60, 71; 3932231 + NAME = 'Basic.GetOk' + + def __init__(self, delivery_tag=None, redelivered=False, exchange=None, routing_key=None, message_count=None): + self.delivery_tag = delivery_tag + self.redelivered = redelivered + self.exchange = exchange + self.routing_key = routing_key + self.message_count = message_count + + @property + def synchronous(self): + 
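For the Get/GetOk/GetEmpty triple above, the user-facing counterpart in the blocking adapter is basic_get. A sketch under the pika 0.13 API; the broker host and queue name are placeholders:

    import pika

    connection = pika.BlockingConnection(
        pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='my-queue')

    # basic_get polls once: a GetOk yields (method, properties, body),
    # while a GetEmpty comes back as (None, None, None).
    method, properties, body = channel.basic_get('my-queue', no_ack=True)
    if method is not None:
        print(method.message_count, body)
    connection.close()
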
return False + + def decode(self, encoded, offset=0): + self.delivery_tag = struct.unpack_from('>Q', encoded, offset)[0] + offset += 8 + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.redelivered = (bit_buffer & (1 << 0)) != 0 + self.exchange, offset = data.decode_short_string(encoded, offset) + self.routing_key, offset = data.decode_short_string(encoded, offset) + self.message_count = struct.unpack_from('>I', encoded, offset)[0] + offset += 4 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>Q', self.delivery_tag)) + bit_buffer = 0 + if self.redelivered: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + assert isinstance(self.exchange, str_or_bytes),\ + 'A non-string value was supplied for self.exchange' + data.encode_short_string(pieces, self.exchange) + assert isinstance(self.routing_key, str_or_bytes),\ + 'A non-string value was supplied for self.routing_key' + data.encode_short_string(pieces, self.routing_key) + pieces.append(struct.pack('>I', self.message_count)) + return pieces + + class GetEmpty(amqp_object.Method): + + INDEX = 0x003C0048 # 60, 72; 3932232 + NAME = 'Basic.GetEmpty' + + def __init__(self, cluster_id=''): + self.cluster_id = cluster_id + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.cluster_id, offset = data.decode_short_string(encoded, offset) + return self + + def encode(self): + pieces = list() + assert isinstance(self.cluster_id, str_or_bytes),\ + 'A non-string value was supplied for self.cluster_id' + data.encode_short_string(pieces, self.cluster_id) + return pieces + + class Ack(amqp_object.Method): + + INDEX = 0x003C0050 # 60, 80; 3932240 + NAME = 'Basic.Ack' + + def __init__(self, delivery_tag=0, multiple=False): + self.delivery_tag = delivery_tag + self.multiple = multiple + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.delivery_tag = struct.unpack_from('>Q', encoded, offset)[0] + offset += 8 + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.multiple = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>Q', self.delivery_tag)) + bit_buffer = 0 + if self.multiple: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class Reject(amqp_object.Method): + + INDEX = 0x003C005A # 60, 90; 3932250 + NAME = 'Basic.Reject' + + def __init__(self, delivery_tag=None, requeue=True): + self.delivery_tag = delivery_tag + self.requeue = requeue + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.delivery_tag = struct.unpack_from('>Q', encoded, offset)[0] + offset += 8 + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.requeue = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>Q', self.delivery_tag)) + bit_buffer = 0 + if self.requeue: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class RecoverAsync(amqp_object.Method): + + INDEX = 0x003C0064 # 60, 100; 3932260 + NAME = 'Basic.RecoverAsync' + + def __init__(self, requeue=False): + self.requeue = requeue + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.requeue = 
(bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + bit_buffer = 0 + if self.requeue: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class Recover(amqp_object.Method): + + INDEX = 0x003C006E # 60, 110; 3932270 + NAME = 'Basic.Recover' + + def __init__(self, requeue=False): + self.requeue = requeue + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.requeue = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + bit_buffer = 0 + if self.requeue: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class RecoverOk(amqp_object.Method): + + INDEX = 0x003C006F # 60, 111; 3932271 + NAME = 'Basic.RecoverOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Nack(amqp_object.Method): + + INDEX = 0x003C0078 # 60, 120; 3932280 + NAME = 'Basic.Nack' + + def __init__(self, delivery_tag=0, multiple=False, requeue=True): + self.delivery_tag = delivery_tag + self.multiple = multiple + self.requeue = requeue + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + self.delivery_tag = struct.unpack_from('>Q', encoded, offset)[0] + offset += 8 + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.multiple = (bit_buffer & (1 << 0)) != 0 + self.requeue = (bit_buffer & (1 << 1)) != 0 + return self + + def encode(self): + pieces = list() + pieces.append(struct.pack('>Q', self.delivery_tag)) + bit_buffer = 0 + if self.multiple: + bit_buffer = bit_buffer | (1 << 0) + if self.requeue: + bit_buffer = bit_buffer | (1 << 1) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + +class Tx(amqp_object.Class): + + INDEX = 0x005A # 90 + NAME = 'Tx' + + class Select(amqp_object.Method): + + INDEX = 0x005A000A # 90, 10; 5898250 + NAME = 'Tx.Select' + + def __init__(self): + pass + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class SelectOk(amqp_object.Method): + + INDEX = 0x005A000B # 90, 11; 5898251 + NAME = 'Tx.SelectOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Commit(amqp_object.Method): + + INDEX = 0x005A0014 # 90, 20; 5898260 + NAME = 'Tx.Commit' + + def __init__(self): + pass + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class CommitOk(amqp_object.Method): + + INDEX = 0x005A0015 # 90, 21; 5898261 + NAME = 'Tx.CommitOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + class Rollback(amqp_object.Method): + + INDEX = 0x005A001E # 90, 30; 5898270 + NAME = 'Tx.Rollback' + + def __init__(self): + pass + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return 
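Each method's INDEX above packs the AMQP class id into the high 16 bits and the method id into the low 16 bits, which is why Basic.Nack's 0x003C0078 is annotated as (60, 120). A quick illustration of the scheme:

    def method_index(class_id, method_id):
        # High half-word is the class, low half-word is the method.
        return (class_id << 16) | method_id

    def split_index(index):
        return index >> 16, index & 0xFFFF

    assert method_index(60, 120) == 0x003C0078   # Basic.Nack
    assert split_index(0x005A000A) == (90, 10)   # Tx.Select
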
pieces + + class RollbackOk(amqp_object.Method): + + INDEX = 0x005A001F # 90, 31; 5898271 + NAME = 'Tx.RollbackOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + +class Confirm(amqp_object.Class): + + INDEX = 0x0055 # 85 + NAME = 'Confirm' + + class Select(amqp_object.Method): + + INDEX = 0x0055000A # 85, 10; 5570570 + NAME = 'Confirm.Select' + + def __init__(self, nowait=False): + self.nowait = nowait + + @property + def synchronous(self): + return True + + def decode(self, encoded, offset=0): + bit_buffer = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + self.nowait = (bit_buffer & (1 << 0)) != 0 + return self + + def encode(self): + pieces = list() + bit_buffer = 0 + if self.nowait: + bit_buffer = bit_buffer | (1 << 0) + pieces.append(struct.pack('B', bit_buffer)) + return pieces + + class SelectOk(amqp_object.Method): + + INDEX = 0x0055000B # 85, 11; 5570571 + NAME = 'Confirm.SelectOk' + + def __init__(self): + pass + + @property + def synchronous(self): + return False + + def decode(self, encoded, offset=0): + return self + + def encode(self): + pieces = list() + return pieces + + +class BasicProperties(amqp_object.Properties): + + CLASS = Basic + INDEX = 0x003C # 60 + NAME = 'BasicProperties' + + FLAG_CONTENT_TYPE = (1 << 15) + FLAG_CONTENT_ENCODING = (1 << 14) + FLAG_HEADERS = (1 << 13) + FLAG_DELIVERY_MODE = (1 << 12) + FLAG_PRIORITY = (1 << 11) + FLAG_CORRELATION_ID = (1 << 10) + FLAG_REPLY_TO = (1 << 9) + FLAG_EXPIRATION = (1 << 8) + FLAG_MESSAGE_ID = (1 << 7) + FLAG_TIMESTAMP = (1 << 6) + FLAG_TYPE = (1 << 5) + FLAG_USER_ID = (1 << 4) + FLAG_APP_ID = (1 << 3) + FLAG_CLUSTER_ID = (1 << 2) + + def __init__(self, content_type=None, content_encoding=None, headers=None, delivery_mode=None, priority=None, correlation_id=None, reply_to=None, expiration=None, message_id=None, timestamp=None, type=None, user_id=None, app_id=None, cluster_id=None): + self.content_type = content_type + self.content_encoding = content_encoding + self.headers = headers + self.delivery_mode = delivery_mode + self.priority = priority + self.correlation_id = correlation_id + self.reply_to = reply_to + self.expiration = expiration + self.message_id = message_id + self.timestamp = timestamp + self.type = type + self.user_id = user_id + self.app_id = app_id + self.cluster_id = cluster_id + + def decode(self, encoded, offset=0): + flags = 0 + flagword_index = 0 + while True: + partial_flags = struct.unpack_from('>H', encoded, offset)[0] + offset += 2 + flags = flags | (partial_flags << (flagword_index * 16)) + if not (partial_flags & 1): + break + flagword_index += 1 + if flags & BasicProperties.FLAG_CONTENT_TYPE: + self.content_type, offset = data.decode_short_string(encoded, offset) + else: + self.content_type = None + if flags & BasicProperties.FLAG_CONTENT_ENCODING: + self.content_encoding, offset = data.decode_short_string(encoded, offset) + else: + self.content_encoding = None + if flags & BasicProperties.FLAG_HEADERS: + (self.headers, offset) = data.decode_table(encoded, offset) + else: + self.headers = None + if flags & BasicProperties.FLAG_DELIVERY_MODE: + self.delivery_mode = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + else: + self.delivery_mode = None + if flags & BasicProperties.FLAG_PRIORITY: + self.priority = struct.unpack_from('B', encoded, offset)[0] + offset += 1 + else: + self.priority = None + if flags & 
BasicProperties.FLAG_CORRELATION_ID: + self.correlation_id, offset = data.decode_short_string(encoded, offset) + else: + self.correlation_id = None + if flags & BasicProperties.FLAG_REPLY_TO: + self.reply_to, offset = data.decode_short_string(encoded, offset) + else: + self.reply_to = None + if flags & BasicProperties.FLAG_EXPIRATION: + self.expiration, offset = data.decode_short_string(encoded, offset) + else: + self.expiration = None + if flags & BasicProperties.FLAG_MESSAGE_ID: + self.message_id, offset = data.decode_short_string(encoded, offset) + else: + self.message_id = None + if flags & BasicProperties.FLAG_TIMESTAMP: + self.timestamp = struct.unpack_from('>Q', encoded, offset)[0] + offset += 8 + else: + self.timestamp = None + if flags & BasicProperties.FLAG_TYPE: + self.type, offset = data.decode_short_string(encoded, offset) + else: + self.type = None + if flags & BasicProperties.FLAG_USER_ID: + self.user_id, offset = data.decode_short_string(encoded, offset) + else: + self.user_id = None + if flags & BasicProperties.FLAG_APP_ID: + self.app_id, offset = data.decode_short_string(encoded, offset) + else: + self.app_id = None + if flags & BasicProperties.FLAG_CLUSTER_ID: + self.cluster_id, offset = data.decode_short_string(encoded, offset) + else: + self.cluster_id = None + return self + + def encode(self): + pieces = list() + flags = 0 + if self.content_type is not None: + flags = flags | BasicProperties.FLAG_CONTENT_TYPE + assert isinstance(self.content_type, str_or_bytes),\ + 'A non-string value was supplied for self.content_type' + data.encode_short_string(pieces, self.content_type) + if self.content_encoding is not None: + flags = flags | BasicProperties.FLAG_CONTENT_ENCODING + assert isinstance(self.content_encoding, str_or_bytes),\ + 'A non-string value was supplied for self.content_encoding' + data.encode_short_string(pieces, self.content_encoding) + if self.headers is not None: + flags = flags | BasicProperties.FLAG_HEADERS + data.encode_table(pieces, self.headers) + if self.delivery_mode is not None: + flags = flags | BasicProperties.FLAG_DELIVERY_MODE + pieces.append(struct.pack('B', self.delivery_mode)) + if self.priority is not None: + flags = flags | BasicProperties.FLAG_PRIORITY + pieces.append(struct.pack('B', self.priority)) + if self.correlation_id is not None: + flags = flags | BasicProperties.FLAG_CORRELATION_ID + assert isinstance(self.correlation_id, str_or_bytes),\ + 'A non-string value was supplied for self.correlation_id' + data.encode_short_string(pieces, self.correlation_id) + if self.reply_to is not None: + flags = flags | BasicProperties.FLAG_REPLY_TO + assert isinstance(self.reply_to, str_or_bytes),\ + 'A non-string value was supplied for self.reply_to' + data.encode_short_string(pieces, self.reply_to) + if self.expiration is not None: + flags = flags | BasicProperties.FLAG_EXPIRATION + assert isinstance(self.expiration, str_or_bytes),\ + 'A non-string value was supplied for self.expiration' + data.encode_short_string(pieces, self.expiration) + if self.message_id is not None: + flags = flags | BasicProperties.FLAG_MESSAGE_ID + assert isinstance(self.message_id, str_or_bytes),\ + 'A non-string value was supplied for self.message_id' + data.encode_short_string(pieces, self.message_id) + if self.timestamp is not None: + flags = flags | BasicProperties.FLAG_TIMESTAMP + pieces.append(struct.pack('>Q', self.timestamp)) + if self.type is not None: + flags = flags | BasicProperties.FLAG_TYPE + assert isinstance(self.type, str_or_bytes),\ + 'A non-string value 
was supplied for self.type' + data.encode_short_string(pieces, self.type) + if self.user_id is not None: + flags = flags | BasicProperties.FLAG_USER_ID + assert isinstance(self.user_id, str_or_bytes),\ + 'A non-string value was supplied for self.user_id' + data.encode_short_string(pieces, self.user_id) + if self.app_id is not None: + flags = flags | BasicProperties.FLAG_APP_ID + assert isinstance(self.app_id, str_or_bytes),\ + 'A non-string value was supplied for self.app_id' + data.encode_short_string(pieces, self.app_id) + if self.cluster_id is not None: + flags = flags | BasicProperties.FLAG_CLUSTER_ID + assert isinstance(self.cluster_id, str_or_bytes),\ + 'A non-string value was supplied for self.cluster_id' + data.encode_short_string(pieces, self.cluster_id) + flag_pieces = list() + while True: + remainder = flags >> 16 + partial_flags = flags & 0xFFFE + if remainder != 0: + partial_flags |= 1 + flag_pieces.append(struct.pack('>H', partial_flags)) + flags = remainder + if not flags: + break + return flag_pieces + pieces + +methods = { + 0x000A000A: Connection.Start, + 0x000A000B: Connection.StartOk, + 0x000A0014: Connection.Secure, + 0x000A0015: Connection.SecureOk, + 0x000A001E: Connection.Tune, + 0x000A001F: Connection.TuneOk, + 0x000A0028: Connection.Open, + 0x000A0029: Connection.OpenOk, + 0x000A0032: Connection.Close, + 0x000A0033: Connection.CloseOk, + 0x000A003C: Connection.Blocked, + 0x000A003D: Connection.Unblocked, + 0x0014000A: Channel.Open, + 0x0014000B: Channel.OpenOk, + 0x00140014: Channel.Flow, + 0x00140015: Channel.FlowOk, + 0x00140028: Channel.Close, + 0x00140029: Channel.CloseOk, + 0x001E000A: Access.Request, + 0x001E000B: Access.RequestOk, + 0x0028000A: Exchange.Declare, + 0x0028000B: Exchange.DeclareOk, + 0x00280014: Exchange.Delete, + 0x00280015: Exchange.DeleteOk, + 0x0028001E: Exchange.Bind, + 0x0028001F: Exchange.BindOk, + 0x00280028: Exchange.Unbind, + 0x00280033: Exchange.UnbindOk, + 0x0032000A: Queue.Declare, + 0x0032000B: Queue.DeclareOk, + 0x00320014: Queue.Bind, + 0x00320015: Queue.BindOk, + 0x0032001E: Queue.Purge, + 0x0032001F: Queue.PurgeOk, + 0x00320028: Queue.Delete, + 0x00320029: Queue.DeleteOk, + 0x00320032: Queue.Unbind, + 0x00320033: Queue.UnbindOk, + 0x003C000A: Basic.Qos, + 0x003C000B: Basic.QosOk, + 0x003C0014: Basic.Consume, + 0x003C0015: Basic.ConsumeOk, + 0x003C001E: Basic.Cancel, + 0x003C001F: Basic.CancelOk, + 0x003C0028: Basic.Publish, + 0x003C0032: Basic.Return, + 0x003C003C: Basic.Deliver, + 0x003C0046: Basic.Get, + 0x003C0047: Basic.GetOk, + 0x003C0048: Basic.GetEmpty, + 0x003C0050: Basic.Ack, + 0x003C005A: Basic.Reject, + 0x003C0064: Basic.RecoverAsync, + 0x003C006E: Basic.Recover, + 0x003C006F: Basic.RecoverOk, + 0x003C0078: Basic.Nack, + 0x005A000A: Tx.Select, + 0x005A000B: Tx.SelectOk, + 0x005A0014: Tx.Commit, + 0x005A0015: Tx.CommitOk, + 0x005A001E: Tx.Rollback, + 0x005A001F: Tx.RollbackOk, + 0x0055000A: Confirm.Select, + 0x0055000B: Confirm.SelectOk +} + +props = { + 0x003C: BasicProperties +} + + +def has_content(methodNumber): + return methodNumber in ( + Basic.Publish.INDEX, + Basic.Return.INDEX, + Basic.Deliver.INDEX, + Basic.GetOk.INDEX, + ) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/tcp_socket_opts.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/tcp_socket_opts.py new file mode 100644 index 000000000..73d2062b1 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/tcp_socket_opts.py @@ -0,0 +1,43 @@ +import logging +import socket +import pika.compat + +LOGGER = logging.getLogger(__name__) + +_SUPPORTED_TCP_OPTIONS = 
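Two mechanisms above are worth exercising together: BasicProperties encodes an open-ended sequence of 16-bit flag words whose low bit marks a continuation, and the methods table maps a packed index back to its generated class. A round-trip sketch, assuming this module is importable as pika.spec:

    import struct
    import pika.spec as spec

    # Property flags survive an encode/decode round trip.
    props = spec.BasicProperties(content_type='text/plain', delivery_mode=2)
    decoded = spec.BasicProperties().decode(b''.join(props.encode()))
    assert decoded.content_type == 'text/plain'
    assert decoded.delivery_mode == 2

    # A METHOD frame payload begins with the packed index; the dispatch
    # table resolves it to the generated class.
    index = struct.unpack_from('>I', struct.pack('>I', 0x003C0028))[0]
    assert spec.methods[index] is spec.Basic.Publish
    assert spec.has_content(index)
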
{} + +try: + _SUPPORTED_TCP_OPTIONS['TCP_USER_TIMEOUT'] = socket.TCP_USER_TIMEOUT +except AttributeError: + if pika.compat.LINUX_VERSION and pika.compat.LINUX_VERSION >= (2, 6, 37): + # NB: this is not the timeout value, but the number corresponding + # to the constant in tcp.h + # https://github.com/torvalds/linux/blob/master/include/uapi/linux/tcp.h# + # #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ + _SUPPORTED_TCP_OPTIONS['TCP_USER_TIMEOUT'] = 18 + +try: + _SUPPORTED_TCP_OPTIONS['TCP_KEEPIDLE'] = socket.TCP_KEEPIDLE + _SUPPORTED_TCP_OPTIONS['TCP_KEEPCNT'] = socket.TCP_KEEPCNT + _SUPPORTED_TCP_OPTIONS['TCP_KEEPINTVL'] = socket.TCP_KEEPINTVL +except AttributeError: + pass + + +def socket_requires_keepalive(tcp_options): + return 'TCP_KEEPIDLE' in tcp_options or 'TCP_KEEPCNT' in tcp_options or 'TCP_KEEPINTVL' in tcp_options + + +def set_sock_opts(tcp_options, sock): + if not tcp_options: + return + + if socket_requires_keepalive(tcp_options): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + + for key, value in tcp_options.items(): + option = _SUPPORTED_TCP_OPTIONS.get(key) + if option: + sock.setsockopt(pika.compat.SOL_TCP, option, value) + else: + LOGGER.warning('Unsupported TCP option %s:%s', key, value) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pika/utils.py b/NodeRed/NodeRedFiles/pika-0.13.1/pika/utils.py new file mode 100644 index 000000000..57f93b0a6 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pika/utils.py @@ -0,0 +1,16 @@ +""" +Non-module specific functions shared by modules in the pika package + +""" +import collections + + +def is_callable(handle): + """Returns a bool value if the handle passed in is a callable + method/function + + :param any handle: The object to check + :rtype: bool + + """ + return isinstance(handle, collections.Callable) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/pylintrc b/NodeRed/NodeRedFiles/pika-0.13.1/pylintrc new file mode 100644 index 000000000..4f96c7920 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/pylintrc @@ -0,0 +1,391 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Profiled execution. +profile=no + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Pickle collected data for later comparisons. +persistent=no + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Deprecated. It was used to include message's id in output. Use --msg-template +# instead. +#include-ids=no + +# Deprecated. It was used to include symbolic ids of messages in output. Use +# --msg-template instead. +#symbols=no + +# Use multiple processes to speed up Pylint. +#jobs=1 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +#unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +#extension-pkg-whitelist= + +# Allow optimization of some AST trees. This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. 
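A hypothetical caller of the helper above: the keys are the option names recognized on Linux, and anything unsupported on the running platform is logged and skipped rather than raised.

    import socket
    from pika.tcp_socket_opts import set_sock_opts

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # TCP_USER_TIMEOUT is in milliseconds; requesting any TCP_KEEP*
    # option also enables SO_KEEPALIVE first.
    set_sock_opts({'TCP_USER_TIMEOUT': 5000, 'TCP_KEEPIDLE': 60}, sock)
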
Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. +#optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time. See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=R1705 + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (RP0004). +comment=no + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +msg-template={msg_id}, {line:3d}:{column:2d} - {msg} ({symbol}) +#msg-template= + + +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# List of builtins function names that should not be used, separated by a comma +bad-functions=map,filter,input + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,fd,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,40}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,40}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,40}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,40}$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,40}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,40}|(__.*__))$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,40}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,40}$ + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=__.*__ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=100 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. 
+ignore-imports=no + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis +ignored-modules= + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +ignored-classes=SQLObject + +# When zope mode is activated, add a predefined set of Zope acquired attributes +# to generated-members. +zope=no + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. Python regular +# expressions are accepted. +generated-members=REQUEST,acl_users,aq_parent + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_|_$|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + + +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. +ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=10 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=20 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). 
+max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=20 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=0 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=40 + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/setup.cfg b/NodeRed/NodeRedFiles/pika-0.13.1/setup.cfg new file mode 100644 index 000000000..3bac5afc9 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/setup.cfg @@ -0,0 +1,16 @@ +[bdist_wheel] +universal = 1 + +[nosetests] +cover-branches = 1 +cover-erase = 1 +cover-html = 1 +cover-html-dir = build/coverage +cover-package = pika +cover-tests = 1 +logging-level = DEBUG +stop = 1 +tests=tests/unit,tests/acceptance +verbosity = 3 +with-coverage = 1 +detailed-errors = 1 diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/setup.py b/NodeRed/NodeRedFiles/pika-0.13.1/setup.py new file mode 100644 index 000000000..e64f5997d --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/setup.py @@ -0,0 +1,51 @@ +import setuptools +import os + +# Conditionally include additional modules for docs +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' +requirements = list() +if on_rtd: + requirements.append('tornado') + requirements.append('twisted') + +long_description = ('Pika is a pure-Python implementation of the AMQP 0-9-1 ' + 'protocol that tries to stay fairly independent of the ' + 'underlying network support library. Pika was developed ' + 'primarily for use with RabbitMQ, but should also work ' + 'with other AMQP 0-9-1 brokers.') + +setuptools.setup( + name='pika', + version='0.13.1', + description='Pika Python AMQP Client Library', + long_description=open('README.rst').read(), + maintainer='Gavin M. 
Roy', + maintainer_email='gavinmroy@gmail.com', + url='https://pika.readthedocs.io', + packages=setuptools.find_packages(include=['pika', 'pika.*']), + license='BSD', + install_requires=requirements, + package_data={'': ['LICENSE', 'README.rst']}, + extras_require={'tornado': ['tornado'], + 'twisted': ['twisted']}, + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: BSD License', + 'Natural Language :: English', 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: Implementation :: CPython', + 'Programming Language :: Python :: Implementation :: Jython', + 'Programming Language :: Python :: Implementation :: PyPy', + 'Topic :: Communications', 'Topic :: Internet', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: System :: Networking' + ], + zip_safe=True) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/test-requirements.txt b/NodeRed/NodeRedFiles/pika-0.13.1/test-requirements.txt new file mode 100644 index 000000000..ced2cc220 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/test-requirements.txt @@ -0,0 +1,6 @@ +coverage +codecov +mock +nose +tornado +twisted diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/ca_certificate.pem b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/ca_certificate.pem new file mode 100644 index 000000000..a59971792 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/ca_certificate.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDAjCCAeqgAwIBAgIJAIwXYB8fddi4MA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV +BAMMF1RMU0dlblNlbGZTaWduZWR0Um9vdENBMQ0wCwYDVQQHDAQkJCQkMB4XDTE4 +MDIyNzIyMjkyMFoXDTI4MDIyNTIyMjkyMFowMTEgMB4GA1UEAwwXVExTR2VuU2Vs +ZlNpZ25lZHRSb290Q0ExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQDMHFlBCaj/wr9AToBRiEx4N8b+lLYyTgp2BA4j/+WyNXyQ +BlGOpqBiJ2XDyzykoQGeNDMao8JjhMOZVuZ/I9LxwRNDOuA5aTAOknuPf/M6VigW +OawsyZblcHW8X2QnX9UJi+MCDaoSFGDRfXwkXFkfEvkYYfXl7lrEdWXg0LsiphPJ +RflqJMhfM1R/b+NvJ6OJs7rM15J1V1AkQAqKWygM79FQzVOUb1tFVZfy/OrIqd4Z +3dDfqF1JqnLoGIoLWxXeheqiRgDYzM6hegDefWZtKmBFR2ZIL3ZRYP4A9Ftx1gan +Gi6u8J05sjDOHyrzg/pZbrK/8Nqc3QRlSxhbwG+hAgMBAAGjHTAbMAwGA1UdEwQF +MAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQAMx1i0VogTZPYA +OJh/MdUSVNpI+DGZlId6ZPxglWDOQa53+DtjHlB/ur77RLnT89w3/HV2+xC+0yXP +W+MxRxbYe++Ja7mcByHHjEXMNT8GvffXrCSTT7hAUoQVlVX3VwCNLgjSY3NA6jw+ +Xhd/aOGDD8TmgNFdL2PbOiwbiiMP2nRmb11pxwvPUmGu4o2fn5biy8v47eq3zkvv +Wc64R0mrelVcEn2noFZBU9yfK4aCDD5kMG463UYZpj5/v20tPO44hYEc48MDMXOt +8jCaYaoh4P9zm1TK4syqY/b1fcCW44ri/HrZMfAoUGNktlbkBpnusMW7zwDg7ygG +gBBQadQA +-----END CERTIFICATE----- diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/ca_key.pem b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/ca_key.pem new file mode 100644 index 000000000..8fbc4e144 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/ca_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDMHFlBCaj/wr9A +ToBRiEx4N8b+lLYyTgp2BA4j/+WyNXyQBlGOpqBiJ2XDyzykoQGeNDMao8JjhMOZ +VuZ/I9LxwRNDOuA5aTAOknuPf/M6VigWOawsyZblcHW8X2QnX9UJi+MCDaoSFGDR +fXwkXFkfEvkYYfXl7lrEdWXg0LsiphPJRflqJMhfM1R/b+NvJ6OJs7rM15J1V1Ak 
+QAqKWygM79FQzVOUb1tFVZfy/OrIqd4Z3dDfqF1JqnLoGIoLWxXeheqiRgDYzM6h +egDefWZtKmBFR2ZIL3ZRYP4A9Ftx1ganGi6u8J05sjDOHyrzg/pZbrK/8Nqc3QRl +SxhbwG+hAgMBAAECggEBAKpjO+r8GnUvaKJRJfNqGZP91AOvtwz1cuwj5H4O3/vN +hKdibvDrjlWB2AUDgyNXONeNcd/aNqsHKJW4IoCeMjkYWXE1E/s5ISH5DGa1hCD9 +zUcIa/+TZYExSUewRhZMfLYUbbPIHmIrWstmupxL7yXum7zMAg8o0+LOsfZjqOR5 +SmbKja6nk+ywzTyVOSX4qJbfbxc+qswno1B7vGXgBXOjXR1jgfAqMjI3yaE5qyKy +GpGnHdYow5st8Yikjg91OoDjSaLPtm3+clkYpUl/oTh8g8wTFRrphrJjfWk/io/3 +IkNUpKuI/jRr3Br4Fx8tOfDC9pX4+nYcFjcc7Z5b/cECgYEA+LxFRWqI3RJQQTzE +8MQNbk8QdBKHtxziKUX8t3Skbe9s2/Fo4O5sQSfQ/+UtGlQP2LbJVJDiPYAEP2zu +ncmbXVmHgEWd9PcosN3li28Vk7d5Z5RGhb0BNyzslnUOF06xV84pDWRBKqTfk/iz +jpGwG70oz5K06jF8rJ81mwJni4kCgYEA0hJuUqw0NYNJAm/ALd64ffnHIkqjBsxi +pJruqGUf0DLROcVqv9SYDUmS23DHE4p7H1NXHwneTaBcdOUgTIdcjqJv3Nv+IDDi +unnaVAsBvtJ4YiCdXvNNMog2Ptx1Zcdy93fRmKHBNO349QGvhp4RPPMuoTCAmLXN +gm3qdJG9RVkCgYB671pxV5kzYmTGCYnw0YUt2ufv5mSrTRw+d/fSmFTYfPhZsHQ8 +j7pzbYFvqE5lb9yxKI5TPJSE/uuaiXVaCRW/yosdC8tpEmtLFzDnfEy2yHy5g+Bj +IyohohOAvXPscxlVo3BIzR8kO37BPK9KLJlU7GXqScEe6ryK+Nleto6EuQKBgFnH +qMDKehG4xzY0XnT8L+sfGh0sutoH9cyVrpPnjB4l6cd/+Ox9RnK/U/VXEK3oTFCK +BLzuMcUJWRpYZmJuo38OKzLADq7hVbUOqOGsRpWwS+TcPYW8A+0py8032TCjeh4L +ZleOIg2l+vVzP/oPihx7bh1TplIduPQaV850DukZAoGAOHvComT/TeGIHwTp1KyD +7w8uXfRmoo5nk+x8GnfbTi0+1XG79LA4061gtvvQkDPlkdZjUwJcz6bhXQVMlBwo +1ILIAj5mm89G0n5wbmshui6yCEs82fpYpg8UhXIq8/LmvdsJZjcJ7ySQxpuNqxCi +Q/L5/WmjSY+1bvvLkkUOF7k= +-----END PRIVATE KEY----- diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/client_certificate.pem b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/client_certificate.pem new file mode 100644 index 000000000..43dd98c70 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/client_certificate.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDADCCAeigAwIBAgIBAjANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xODAyMjcyMjI5 +MjBaFw0yODAyMjUyMjI5MjBaMCUxEjAQBgNVBAMMCWxvY2FsaG9zdDEPMA0GA1UE +CgwGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4wBF9bFL +nRZlRdUWQ9M6R2lxwJvIfL4O0UT3opqc95RGaPuqJjWgc3kFkFmsJMI7cSgiDcDY +XKYNp/Z/l1BOYrwwbuNX2V8V2SFbP5ksUU75jxwKJV8/bNnHSMfOrZ0wlq2jIrEA +hvLC38ujrNZsSzWusjhESKy8rNgAqq+Xyfy/R+MIVrHfD6wjxyGDGctj0z6bYDai +LPR5AVgzGHNUTLWSsyS9KoeuOXFOWht7+iEj/eNbU8bA29oOnVyb52o5OSx/E/Nx +qV7z0tv+LL8LTs4iXo4DwfrYB6BRwM2Qwzma4tmpZG2uvwwq/xIStMSfUIIOIUeZ +WLUs2V8d0mUslQIDAQABoy8wLTAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV +HSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAmvYPZjAFUvknD4Cm +AkTu7ZAbsXkMYgFn74wxIr9gXbRswWJDsPmVYDHDtFbynBmLTVOciqS2P3ccnbOQ +sy8lLQeZ4YNcTcTs8GONyo3G6SDUTTF19dB/uIUbi9mzxfufhFg/UcGMSy6bAW/J +uEQi9RrfwlJAHkce39ZHxu8/xsHDiiEFQLK907kXLzZixQ0bonjbejgKQGiCX5oB +/JQiaEmREmwyENFSmEyNEprzorkquzB6HoqAIxQxNQ+4xuEmy2wr+jecK9X4cGU9 +uPdPlF+9GSY8qIomxvTBOuXOCyY+ULl3zZZhRjjh6c4Nj8YlUyBOoQW4XOM5R8DM +I8gRgA== +-----END CERTIFICATE----- diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/client_key.pem b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/client_key.pem new file mode 100644 index 000000000..b95461488 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA4wBF9bFLnRZlRdUWQ9M6R2lxwJvIfL4O0UT3opqc95RGaPuq +JjWgc3kFkFmsJMI7cSgiDcDYXKYNp/Z/l1BOYrwwbuNX2V8V2SFbP5ksUU75jxwK +JV8/bNnHSMfOrZ0wlq2jIrEAhvLC38ujrNZsSzWusjhESKy8rNgAqq+Xyfy/R+MI +VrHfD6wjxyGDGctj0z6bYDaiLPR5AVgzGHNUTLWSsyS9KoeuOXFOWht7+iEj/eNb +U8bA29oOnVyb52o5OSx/E/NxqV7z0tv+LL8LTs4iXo4DwfrYB6BRwM2Qwzma4tmp 
+ZG2uvwwq/xIStMSfUIIOIUeZWLUs2V8d0mUslQIDAQABAoIBAQDcr81T+VPLmpRG +ec01j0tfVdHzMQCO1a9OIECn4qyrvYleUxyuHQCqgoO4PJWw/uwPLLc+q7ctC3rH +Skvs7XPnZZGonHkxqNFy0I0HnYCKY78XNGlgv+LHjknCJg52lxU/x/uLpI0gpS5z +qGStiaCq1bvLJAyuotCjMTQkvKVTBgHm70lC3ZNt4I8OaocyfGF4BJqbUxyjXFKz +sYxgAXssAdMPqKsSH5du2ii4YyAZvzYO+RJ4OByPaZZA3OfsKddsnGDFPqfw3+Aa +8QBCFbf/uGUepOL0P4rWDp7XhHDwCJS/kiP3qTUZ37dh660arIeXCN4qcI6ZUjMo +nJp8tjvBAoGBAP9huBC79WWid9CYskCO9m+zIk4RTe07FNQW46Kxpva9lULqmSj+ +MpB47kSvjCNnVYdn0FOxi3ipsTeM7R0+fZz1LM5vdwRWc7pZ5U3HvrB2zBCaftSL +gWlvxup02eXR2TrPwNzKd1iR4GoPlLXc8FxMSZLHzcxFZYWh42RAKV8NAoGBAOOM +9ubZVSW8GdviZobFalNG7HhAn06U3LPrN71vxFdE3tKePLxNI/RNDbUHo/ji5muV +Hx45uvIFT36BtH74DgiORHjLYdTaOF+TVUJEHEmPkBGPj4kdLu9MRAEYba7lBvbw +rvv1NI49GEUB1NhzNoYaHdyw98afgq3mO7C59+GpAoGAKykUtp9NhfPDVBm6ZnbC +53Xa4l7CUMmfZ4jGyARGN4Uq6LhFUkxDt274tdsFUCZyqgO9jad/7tCfBshmen7J +M/GrtOfZsX366Q+wVUjgzWoGavfxX3KEWJFviMhe2xxwHiRmb+o36VweFU60z1eb +t72c4ZWrDk7cLY2CpLrDQzECgYEAuQDr4GZEfwh+sbJRjzNELe7U5TQNEZOgGLud +tuv+4sEAtJaoZKQHHmHjShKrMvgvRLqZ8TYYkxrUNdN4j15X8obQjrE0mhzNho8/ +2z+LDBenl09z8JOEjFQiWg2lZ3OXhP/MFNeYYiiz6a8CgPSzGLZ1Hu/Wk2sukPpF +/3pDWfkCgYBIpMV122EYgKqfk7yayJUDez69X+/JmUR4mBtkI9CZoYM8xrft7Coi +VzsjRc+2yS21ZFRuB4g4xO2ViEVLqM8/g2tFDuuUkGb+vdBsolx3j64xYqoBs20T +0JjnFaxl2MCpn2y44bJ9ht134q5/Mm762rPyNuqdm4R7rhAi7g/7+w== +-----END RSA PRIVATE KEY----- diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/server_certificate.pem b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/server_certificate.pem new file mode 100644 index 000000000..55b0703be --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/server_certificate.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDADCCAeigAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH +ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xODAyMjcyMjI5 +MjBaFw0yODAyMjUyMjI5MjBaMCUxEjAQBgNVBAMMCWxvY2FsaG9zdDEPMA0GA1UE +CgwGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA14xcFsbL +bhw2qunzyhzBXQs5xxi+DQKqK9CjVVEmBrUVR2qwzS2P3IFdkVzegRa9jZgsgVSI +TrbsCgF2xkTy0Bsz5sbasaFVQQHDI8PjZ82vmK5MAXyUzR3HQ4iBzyNIpmSNhKHK +qWQuXhqN/D1wgkl08/B0xlx+jaFDIy+hdbuLpB5+NXgHbKWqYq1jrKomnZucKbm7 +dahXvbomshF6fe6xWNU4CxCaC7+snym3Zug6kkXSgI5NEYQzh93RjTXbV8m4q8Az +8nwWZ3vWcIdtcYURHZ+wP/DdGQmObItZjVrTmQ6OI9750MCtGfDXv+IX/Iipcp7W +0OYkNPPPJCowMwIDAQABoy8wLTAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV +HSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAcqduPpka+3O29X3U +BGKAdKGJjSoqHrn+K7QbQ9A/BbCJC9htZ8cUtmVgRm0Aw82x94zMREG+YF7jQhXM +qSoYVxLbqPWxMCRx2FmQihByHvGusCKfu2yBAHo6rBfFWitc2KHVQs2Tyku65bsa +3WnyRkRBW92UBdzM6aEjFeqzzDQaBezHT8DkKjj7NFsxI+lKuopInJWs1SckMmcT +yWat5d7hDTiNwObjaD9vsKH4/k11ftksBmmxvd+F/AIL1f4leU5hBGcfmoHyjyBU +FQlbOimj/YsGHB09ClH1/avqBqoHEVMzK61o7aZstDKSs7RIX2Iip1erhgC8YN8C +dkFM6g== +-----END CERTIFICATE----- diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/server_key.pem b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/server_key.pem new file mode 100644 index 000000000..b65df5094 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/certs/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA14xcFsbLbhw2qunzyhzBXQs5xxi+DQKqK9CjVVEmBrUVR2qw +zS2P3IFdkVzegRa9jZgsgVSITrbsCgF2xkTy0Bsz5sbasaFVQQHDI8PjZ82vmK5M +AXyUzR3HQ4iBzyNIpmSNhKHKqWQuXhqN/D1wgkl08/B0xlx+jaFDIy+hdbuLpB5+ +NXgHbKWqYq1jrKomnZucKbm7dahXvbomshF6fe6xWNU4CxCaC7+snym3Zug6kkXS +gI5NEYQzh93RjTXbV8m4q8Az8nwWZ3vWcIdtcYURHZ+wP/DdGQmObItZjVrTmQ6O +I9750MCtGfDXv+IX/Iipcp7W0OYkNPPPJCowMwIDAQABAoIBAEgT1Ht9UGtqndE7 
+WF1ejj5p0ZFFMdAx8uuh4exWrWuSxCHUiHij4rZuv/Yq4vCxxQMDPuakeMwxmO6j +nK8iQTvbzNg/6MN7WoHZcAKWr4V3n19j8WCR40RPz+FCVX4Jo3KYr0YJwDYynNq0 +8IPHm8bFf4lPAA2QwnTj1wp+E/TE+/48c+p5YiVpZZSv9LNvErUtWSXO7atbKA0+ +IlmfipdzC+u+8NW9a9CX5uM5UB05TLydsbnpwCsxptuPJqIuOO3oI6ByL6QJsqU1 +mk7/s5Vofb4JJna1xgo1NigsQBJf2NQCDDi5C0D6tnBPnZGBPp1QWbUp6xnzSEd8 +BMqxXQkCgYEA8RMCAX7Ol6akHSxZZDnDSyxHAdgERhwsSLBBqAUjn4iOEaP+KIjY +qElqT+vJSiHls1O0vKySotNH0UbOg6CmogRSat7CoOCzS0J+XLjqmPh+4VGdTU7q +/UKV60YzJA/LZBdZF/dNByN5lKEQVMxQRHglwvRM/kK5NXvyFONCWvcCgYEA5OTE +2c3hlRMq7XSAoNj3NarrazexeCxTcvuwdDYzHMWsp4YUpnvrUacAhoTbzYfPv9Gh +ujb0bubZRAUcOMzPk3V+UXk8g2ziZy/f2FqLx6Uq7Fp9YwZfuzplScUpVAfA24zv +qvcowaBA1FVvSMC45kJo3o5B9LuQuZAbOZvMKaUCgYEApyq8GFdbYNOgN86aCiL3 +5nfEoWWuyQMePiVi0eUGZ8jkYx0pz+fc/Q0zmEnzYeGRi8F+sdqlMB18ToVgDOxo +wC5pDEx9/9rw2T45q4havUqLiSj0ADi6QHZcyTH7ooUFT9nU4QaOtmWGGGd7kKHB +A0mhPcf0X9fa4FibbJqOV88CgYEAjQ01CYtH0hfnwkCi63wIizfyrzW41XdrTVYw +nMyxnq9qACouG5INp1mkh0DkOrnQmkJSyXAIHTeA99u2UoJUAGjNGOP/GHZG5pOn ++6mArdzooJH65sUMxVHtDRLErxXAEQu+vbplkTxx9udXFpw81Rhji5JarrfPLarS +PCP4IkkCgYBDHG+sNpdPridbD9paTFg53sG0zu0Yv66vubFJvWJSxMNHsTJsyaZv +R5K+VTL4+l/4WkbJRk8pR4SWtETCsPSXxUgnoJ99GtDSxX2J+LmzkiyR5N8k14st +6rtIxC259xAEYYAsu0pXly33pMlZUr2aIGykQOSnMIhwdE1PvWkSmg== +-----END RSA PRIVATE KEY----- diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/rabbitmq.conf.in b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/rabbitmq.conf.in new file mode 100644 index 000000000..fb6e64afb --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/rabbitmq.conf.in @@ -0,0 +1,13 @@ +listeners.tcp.default = 5672 +listeners.ssl.default = 5671 +num_acceptors.tcp = 10 +num_acceptors.ssl = 10 +reverse_dns_lookups = false +loopback_users.guest = true +ssl_options.verify = verify_peer +ssl_options.fail_if_no_peer_cert = true +ssl_options.cacertfile = PIKA_DIR/testdata/certs/ca_certificate.pem +ssl_options.certfile = PIKA_DIR/testdata/certs/server_certificate.pem +ssl_options.keyfile = PIKA_DIR/testdata/certs/server_key.pem +log.console = false +log.console.level = debug diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/wait-epmd.ps1 b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/wait-epmd.ps1 new file mode 100644 index 000000000..8971914c8 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/wait-epmd.ps1 @@ -0,0 +1,21 @@ +$running = $false +[int]$count = 1 + +$epmd = [System.IO.Path]::Combine($env:ERLANG_HOME, $env:erlang_erts_version, "bin", "epmd.exe") + +Do { + $running = & $epmd -names | Select-String -CaseSensitive -SimpleMatch -Quiet -Pattern 'name rabbit at port 25672' + if ($running -eq $true) { + Write-Host '[INFO] epmd reports that RabbitMQ is at port 25672' + break + } + + if ($count -gt 120) { + throw '[ERROR] too many tries waiting for epmd to report RabbitMQ on port 25672' + } + + Write-Host "[INFO] epmd NOT reporting yet that RabbitMQ is at port 25672, count: $count" + $count = $count + 1 + Start-Sleep -Seconds 5 + +} While ($true) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/testdata/wait-rabbitmq.ps1 b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/wait-rabbitmq.ps1 new file mode 100644 index 000000000..1f602c307 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/testdata/wait-rabbitmq.ps1 @@ -0,0 +1,21 @@ +[int]$count = 1 + +Do { + $proc_id = (Get-Process -Name erl).Id + if (-Not ($proc_id -is [array])) { + & "C:\Program Files\RabbitMQ Server\rabbitmq_server-$env:rabbitmq_version\sbin\rabbitmqctl.bat" wait -t 300000 -P $proc_id + if ($LASTEXITCODE -ne 0) { + throw "[ERROR] rabbitmqctl wait returned error: $LASTEXITCODE" + 
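A client-side counterpart to the broker settings above, sketched against the pika 0.13 API (paths are the testdata defaults, host and port as configured): because verify_peer and fail_if_no_peer_cert are set, the client must present the testdata client certificate pair on port 5671.

    import ssl
    import pika

    # pika 0.13 passes the ssl_options dict through to ssl.wrap_socket().
    params = pika.ConnectionParameters(
        host='localhost',
        port=5671,
        ssl=True,
        ssl_options={
            'ca_certs': 'testdata/certs/ca_certificate.pem',
            'certfile': 'testdata/certs/client_certificate.pem',
            'keyfile': 'testdata/certs/client_key.pem',
            'cert_reqs': ssl.CERT_REQUIRED,
        })
    connection = pika.BlockingConnection(params)
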
} + break + } + + if ($count -gt 120) { + throw '[ERROR] too many tries waiting for just one erl process to be running' + } + + Write-Host '[INFO] multiple erl instances running still' + $count = $count + 1 + Start-Sleep -Seconds 5 + +} While ($true) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/async_adapter_tests.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/async_adapter_tests.py new file mode 100644 index 000000000..b9946e10b --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/async_adapter_tests.py @@ -0,0 +1,587 @@ +# Suppress pylint messages concerning missing class and method docstrings +# pylint: disable=C0111 + +# Suppress pylint warning about attribute defined outside __init__ +# pylint: disable=W0201 + +# Suppress pylint warning about access to protected member +# pylint: disable=W0212 + +# Suppress pylint warning about unused argument +# pylint: disable=W0613 + +import functools +import threading +import time +import uuid + +from pika import spec +from pika.compat import as_bytes +import pika.connection +import pika.frame +import pika.spec + +from async_test_base import (AsyncTestCase, BoundQueueTestCase, AsyncAdapters) + + +class TestA_Connect(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Connect, open channel and disconnect" + + def begin(self, channel): + self.stop() + + +class TestConfirmSelect(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Receive confirmation of Confirm.Select" + + def begin(self, channel): + channel._on_selectok = self.on_complete + channel.confirm_delivery() + + def on_complete(self, frame): + self.assertIsInstance(frame.method, spec.Confirm.SelectOk) + self.stop() + + +class TestBlockingNonBlockingBlockingRPCWontStall(AsyncTestCase, AsyncAdapters): + DESCRIPTION = ("Verify that a sequence of blocking, non-blocking, blocking " + "RPC requests won't stall") + + def begin(self, channel): + # Queue declaration params table: queue name, nowait value + self._expected_queue_params = ( + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False), + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, True), + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False) + ) + + self._declared_queue_names = [] + + for queue, nowait in self._expected_queue_params: + channel.queue_declare(callback=self._queue_declare_ok_cb + if not nowait else None, + queue=queue, + auto_delete=True, + nowait=nowait, + arguments={'x-expires': self.TIMEOUT * 1000}) + + def _queue_declare_ok_cb(self, declare_ok_frame): + self._declared_queue_names.append(declare_ok_frame.method.queue) + + if len(self._declared_queue_names) == 2: + # Initiate check for creation of queue declared with nowait=True + self.channel.queue_declare(callback=self._queue_declare_ok_cb, + queue=self._expected_queue_params[1][0], + passive=True, + nowait=False) + elif len(self._declared_queue_names) == 3: + self.assertSequenceEqual( + sorted(self._declared_queue_names), + sorted(item[0] for item in self._expected_queue_params)) + self.stop() + + +class TestConsumeCancel(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Consume and cancel" + + def begin(self, channel): + self.queue_name = self.__class__.__name__ + ':' + uuid.uuid1().hex + channel.queue_declare(self.on_queue_declared, queue=self.queue_name) + + def on_queue_declared(self, frame): + for i in range(0, 100): + msg_body = '{}:{}:{}'.format(self.__class__.__name__, i, + time.time()) + self.channel.basic_publish('', self.queue_name, msg_body) + self.ctag = 
self.channel.basic_consume(self.on_message, + queue=self.queue_name, + no_ack=True) + + def on_message(self, _channel, _frame, _header, body): + self.channel.basic_cancel(self.on_cancel, self.ctag) + + def on_cancel(self, _frame): + self.channel.queue_delete(self.on_deleted, self.queue_name) + + def on_deleted(self, _frame): + self.stop() + + +class TestExchangeDeclareAndDelete(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Create and delete and exchange" + + X_TYPE = 'direct' + + def begin(self, channel): + self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex + channel.exchange_declare(self.on_exchange_declared, self.name, + exchange_type=self.X_TYPE, + passive=False, + durable=False, + auto_delete=True) + + def on_exchange_declared(self, frame): + self.assertIsInstance(frame.method, spec.Exchange.DeclareOk) + self.channel.exchange_delete(self.on_exchange_delete, self.name) + + def on_exchange_delete(self, frame): + self.assertIsInstance(frame.method, spec.Exchange.DeleteOk) + self.stop() + + +class TestExchangeRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "should close chan: re-declared exchange w/ diff params" + + X_TYPE1 = 'direct' + X_TYPE2 = 'topic' + + def begin(self, channel): + self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex + self.channel.add_on_close_callback(self.on_channel_closed) + channel.exchange_declare(self.on_exchange_declared, self.name, + exchange_type=self.X_TYPE1, + passive=False, + durable=False, + auto_delete=True) + + def on_cleanup_channel(self, channel): + channel.exchange_delete(None, self.name, nowait=True) + self.stop() + + def on_channel_closed(self, channel, reply_code, reply_text): + self.connection.channel(self.on_cleanup_channel) + + def on_exchange_declared(self, frame): + self.channel.exchange_declare(self.on_bad_result, self.name, + exchange_type=self.X_TYPE2, + passive=False, + durable=False, + auto_delete=True) + + def on_bad_result(self, frame): + self.channel.exchange_delete(None, self.name, nowait=True) + raise AssertionError("Should not have received a Queue.DeclareOk") + + +class TestQueueDeclareAndDelete(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Create and delete a queue" + + def begin(self, channel): + channel.queue_declare(self.on_queue_declared, + passive=False, + durable=False, + exclusive=True, + auto_delete=False, + nowait=False, + arguments={'x-expires': self.TIMEOUT * 1000}) + + def on_queue_declared(self, frame): + self.assertIsInstance(frame.method, spec.Queue.DeclareOk) + self.channel.queue_delete(self.on_queue_delete, frame.method.queue) + + def on_queue_delete(self, frame): + self.assertIsInstance(frame.method, spec.Queue.DeleteOk) + self.stop() + + + +class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Create and delete a named queue" + + def begin(self, channel): + self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex + channel.queue_declare(self.on_queue_declared, self._q_name, + passive=False, + durable=False, + exclusive=True, + auto_delete=True, + nowait=False, + arguments={'x-expires': self.TIMEOUT * 1000}) + + def on_queue_declared(self, frame): + self.assertIsInstance(frame.method, spec.Queue.DeclareOk) + # Frame's method's queue is encoded (impl detail) + self.assertEqual(frame.method.queue, self._q_name) + self.channel.queue_delete(self.on_queue_delete, frame.method.queue) + + def on_queue_delete(self, frame): + self.assertIsInstance(frame.method, spec.Queue.DeleteOk) + self.stop() + + + +class 
TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Should close chan: re-declared queue w/ diff params" + + def begin(self, channel): + self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex + self.channel.add_on_close_callback(self.on_channel_closed) + channel.queue_declare(self.on_queue_declared, self._q_name, + passive=False, + durable=False, + exclusive=True, + auto_delete=True, + nowait=False, + arguments={'x-expires': self.TIMEOUT * 1000}) + + def on_channel_closed(self, channel, reply_code, reply_text): + self.stop() + + def on_queue_declared(self, frame): + self.channel.queue_declare(self.on_bad_result, self._q_name, + passive=False, + durable=True, + exclusive=False, + auto_delete=True, + nowait=False, + arguments={'x-expires': self.TIMEOUT * 1000}) + + def on_bad_result(self, frame): + self.channel.queue_delete(None, self._q_name, nowait=True) + raise AssertionError("Should not have received a Queue.DeclareOk") + + + +class TestTX1_Select(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Receive confirmation of Tx.Select" + + def begin(self, channel): + channel.tx_select(self.on_complete) + + def on_complete(self, frame): + self.assertIsInstance(frame.method, spec.Tx.SelectOk) + self.stop() + + + +class TestTX2_Commit(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Start a transaction, and commit it" + + def begin(self, channel): + channel.tx_select(self.on_selectok) + + def on_selectok(self, frame): + self.assertIsInstance(frame.method, spec.Tx.SelectOk) + self.channel.tx_commit(self.on_commitok) + + def on_commitok(self, frame): + self.assertIsInstance(frame.method, spec.Tx.CommitOk) + self.stop() + + +class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Close the channel: commit without a TX" + + def begin(self, channel): + self.channel.add_on_close_callback(self.on_channel_closed) + self.channel.tx_commit(self.on_commitok) + + def on_channel_closed(self, channel, reply_code, reply_text): + self.stop() + + def on_selectok(self, frame): + self.assertIsInstance(frame.method, spec.Tx.SelectOk) + + @staticmethod + def on_commitok(frame): + raise AssertionError("Should not have received a Tx.CommitOk") + + +class TestTX3_Rollback(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Start a transaction, then rollback" + + def begin(self, channel): + channel.tx_select(self.on_selectok) + + def on_selectok(self, frame): + self.assertIsInstance(frame.method, spec.Tx.SelectOk) + self.channel.tx_rollback(self.on_rollbackok) + + def on_rollbackok(self, frame): + self.assertIsInstance(frame.method, spec.Tx.RollbackOk) + self.stop() + + + +class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Close the channel: rollback without a TX" + + def begin(self, channel): + self.channel.add_on_close_callback(self.on_channel_closed) + self.channel.tx_rollback(self.on_commitok) + + def on_channel_closed(self, channel, reply_code, reply_text): + self.stop() + + @staticmethod + def on_commitok(frame): + raise AssertionError("Should not have received a Tx.RollbackOk") + + +class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Publish a message and consume it" + + def on_ready(self, frame): + self.ctag = self.channel.basic_consume(self.on_message, self.queue) + self.msg_body = "%s: %i" % (self.__class__.__name__, time.time()) + 
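+        # The consumer is registered via basic_consume() above before the
+        # publish below, so the broker can deliver the message straight to
+        # this test's on_message callback.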
self.channel.basic_publish(self.exchange, self.routing_key, + self.msg_body) + + def on_cancelled(self, frame): + self.assertIsInstance(frame.method, spec.Basic.CancelOk) + self.stop() + + def on_message(self, channel, method, header, body): + self.assertIsInstance(method, spec.Basic.Deliver) + self.assertEqual(body, as_bytes(self.msg_body)) + self.channel.basic_ack(method.delivery_tag) + self.channel.basic_cancel(self.on_cancelled, self.ctag) + + + +class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Publish a big message and consume it" + + @staticmethod + def _get_msg_body(): + return '\n'.join(["%s" % i for i in range(0, 2097152)]) + + def on_ready(self, frame): + self.ctag = self.channel.basic_consume(self.on_message, self.queue) + self.msg_body = self._get_msg_body() + self.channel.basic_publish(self.exchange, self.routing_key, + self.msg_body) + + def on_cancelled(self, frame): + self.assertIsInstance(frame.method, spec.Basic.CancelOk) + self.stop() + + def on_message(self, channel, method, header, body): + self.assertIsInstance(method, spec.Basic.Deliver) + self.assertEqual(body, as_bytes(self.msg_body)) + self.channel.basic_ack(method.delivery_tag) + self.channel.basic_cancel(self.on_cancelled, self.ctag) + + +class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Publish a message and get it" + + def on_ready(self, frame): + self.msg_body = "%s: %i" % (self.__class__.__name__, time.time()) + self.channel.basic_publish(self.exchange, self.routing_key, + self.msg_body) + self.channel.basic_get(self.on_get, self.queue) + + def on_get(self, channel, method, header, body): + self.assertIsInstance(method, spec.Basic.GetOk) + self.assertEqual(body, as_bytes(self.msg_body)) + self.channel.basic_ack(method.delivery_tag) + self.stop() + + +class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Verify that access denied invokes on open error callback" + + def start(self, *args, **kwargs): + self.parameters.virtual_host = str(uuid.uuid4()) + self.error_captured = False + super(TestZ_AccessDenied, self).start(*args, **kwargs) + self.assertTrue(self.error_captured) + + def on_open_error(self, connection, error): + self.error_captured = True + self.stop() + + def on_open(self, connection): + super(TestZ_AccessDenied, self).on_open(connection) + self.stop() + + +class TestBlockedConnectionTimesOut(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Verify that blocked connection terminates on timeout" + + def start(self, *args, **kwargs): + self.parameters.blocked_connection_timeout = 0.001 + self.on_closed_pair = None + super(TestBlockedConnectionTimesOut, self).start(*args, **kwargs) + self.assertEqual( + self.on_closed_pair, + (pika.connection.InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT, + 'Blocked connection timeout expired')) + + def begin(self, channel): + + # Simulate Connection.Blocked + channel.connection._on_connection_blocked(pika.frame.Method( + 0, + pika.spec.Connection.Blocked('Testing blocked connection timeout'))) + + def on_closed(self, connection, reply_code, reply_text): + """called when the connection has finished closing""" + self.on_closed_pair = (reply_code, reply_text) + super(TestBlockedConnectionTimesOut, self).on_closed(connection, + reply_code, + reply_text) + + +class TestBlockedConnectionUnblocks(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Verify that blocked-unblocked 
connection closes normally" + + def start(self, *args, **kwargs): + self.parameters.blocked_connection_timeout = 0.001 + self.on_closed_pair = None + super(TestBlockedConnectionUnblocks, self).start(*args, **kwargs) + self.assertEqual( + self.on_closed_pair, + (200, 'Normal shutdown')) + + def begin(self, channel): + + # Simulate Connection.Blocked + channel.connection._on_connection_blocked(pika.frame.Method( + 0, + pika.spec.Connection.Blocked( + 'Testing blocked connection unblocks'))) + + # Simulate Connection.Unblocked + channel.connection._on_connection_unblocked(pika.frame.Method( + 0, + pika.spec.Connection.Unblocked())) + + # Schedule shutdown after blocked connection timeout would expire + channel.connection.add_timeout(0.005, self.on_cleanup_timer) + + def on_cleanup_timer(self): + self.stop() + + def on_closed(self, connection, reply_code, reply_text): + """called when the connection has finished closing""" + self.on_closed_pair = (reply_code, reply_text) + super(TestBlockedConnectionUnblocks, self).on_closed(connection, + reply_code, + reply_text) + + +class TestAddCallbackThreadsafeRequestBeforeIOLoopStarts(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Test add_callback_threadsafe request before ioloop starts." + + def _run_ioloop(self, *args, **kwargs): # pylint: disable=W0221 + """We intercept this method from AsyncTestCase in order to call + add_callback_threadsafe before AsyncTestCase starts the ioloop. + + """ + self.my_start_time = time.time() + # Request a callback from our current (ioloop's) thread + self.connection.add_callback_threadsafe(self.on_requested_callback) + + return super( + TestAddCallbackThreadsafeRequestBeforeIOLoopStarts, self)._run_ioloop( + *args, **kwargs) + + def start(self, *args, **kwargs): # pylint: disable=W0221 + self.loop_thread_ident = threading.current_thread().ident + self.my_start_time = None + self.got_callback = False + super(TestAddCallbackThreadsafeRequestBeforeIOLoopStarts, self).start(*args, **kwargs) + self.assertTrue(self.got_callback) + + def begin(self, channel): + self.stop() + + def on_requested_callback(self): + self.assertEqual(threading.current_thread().ident, + self.loop_thread_ident) + self.assertLess(time.time() - self.my_start_time, 0.25) + self.got_callback = True + + +class TestAddCallbackThreadsafeFromIOLoopThread(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Test add_callback_threadsafe request from same thread." + + def start(self, *args, **kwargs): + self.loop_thread_ident = threading.current_thread().ident + self.my_start_time = None + self.got_callback = False + super(TestAddCallbackThreadsafeFromIOLoopThread, self).start(*args, **kwargs) + self.assertTrue(self.got_callback) + + def begin(self, channel): + self.my_start_time = time.time() + # Request a callback from our current (ioloop's) thread + channel.connection.add_callback_threadsafe(self.on_requested_callback) + + def on_requested_callback(self): + self.assertEqual(threading.current_thread().ident, + self.loop_thread_ident) + self.assertLess(time.time() - self.my_start_time, 0.25) + self.got_callback = True + self.stop() + + +class TestAddCallbackThreadsafeFromAnotherThread(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Test add_callback_threadsafe request from another thread." 
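+    # begin() below issues add_callback_threadsafe() from a threading.Timer
+    # thread, so the request genuinely originates off the ioloop thread;
+    # on_requested_callback() then asserts, by comparing thread idents, that
+    # the callback ran back on the ioloop thread.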
+ + def start(self, *args, **kwargs): + self.loop_thread_ident = threading.current_thread().ident + self.my_start_time = None + self.got_callback = False + super(TestAddCallbackThreadsafeFromAnotherThread, self).start(*args, **kwargs) + self.assertTrue(self.got_callback) + + def begin(self, channel): + self.my_start_time = time.time() + # Request a callback from ioloop while executing in another thread + timer = threading.Timer( + 0, + lambda: channel.connection.add_callback_threadsafe( + self.on_requested_callback)) + self.addCleanup(timer.cancel) + timer.start() + + def on_requested_callback(self): + self.assertEqual(threading.current_thread().ident, + self.loop_thread_ident) + self.assertLess(time.time() - self.my_start_time, 0.25) + self.got_callback = True + self.stop() + + +class TestIOLoopStopBeforeIOLoopStarts(AsyncTestCase, AsyncAdapters): + DESCRIPTION = "Test ioloop.stop() before ioloop starts causes ioloop to exit quickly." + + def _run_ioloop(self, *args, **kwargs): # pylint: disable=W0221 + """We intercept this method from AsyncTestCase in order to call + ioloop.stop() before AsyncTestCase starts the ioloop. + """ + # Request ioloop to stop before it starts + self.my_start_time = time.time() + self.stop_ioloop_only() + + return super( + TestIOLoopStopBeforeIOLoopStarts, self)._run_ioloop(*args, **kwargs) + + def start(self, *args, **kwargs): # pylint: disable=W0221 + self.loop_thread_ident = threading.current_thread().ident + self.my_start_time = None + super(TestIOLoopStopBeforeIOLoopStarts, self).start(*args, **kwargs) + self.assertLess(time.time() - self.my_start_time, 0.25) + + def begin(self, channel): + pass + + +class TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Test viability of multiple timeouts with same deadline and callback" + + def begin(self, channel): + timer1 = channel.connection.add_timeout(0, self.on_my_timer) + timer2 = channel.connection.add_timeout(0, self.on_my_timer) + + self.assertIsNot(timer1, timer2) + + channel.connection.remove_timeout(timer1) + + # Wait for timer2 to fire + + def on_my_timer(self): + self.stop() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/async_test_base.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/async_test_base.py new file mode 100644 index 000000000..bbe8ec63f --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/async_test_base.py @@ -0,0 +1,247 @@ +# Suppress pylint warnings concerning attribute defined outside __init__ +# pylint: disable=W0201 + +# Suppress pylint messages concerning missing docstrings +# pylint: disable=C0111 + +import datetime +import os +import select +import ssl +import sys +import logging +import unittest +import uuid + +try: + from unittest import mock +except ImportError: + import mock + + +import pika +from pika import adapters +from pika.adapters import select_connection + + +def enable_tls(): + if 'PIKA_TEST_TLS' in os.environ and \ + os.environ['PIKA_TEST_TLS'].lower() == 'true': + return True + return False + + +class AsyncTestCase(unittest.TestCase): + DESCRIPTION = "" + ADAPTER = None + TIMEOUT = 15 + + def setUp(self): + self.logger = logging.getLogger(self.__class__.__name__) + if enable_tls(): + self.logger.info('testing using TLS/SSL connection to port 5671') + url = 
'amqps://localhost:5671/%2F?ssl_options=%7B%27ca_certs%27%3A%27testdata%2Fcerts%2Fca_certificate.pem%27%2C%27keyfile%27%3A%27testdata%2Fcerts%2Fclient_key.pem%27%2C%27certfile%27%3A%27testdata%2Fcerts%2Fclient_certificate.pem%27%7D' + self.parameters = pika.URLParameters(url) + else: + self.parameters = pika.ConnectionParameters(host='localhost', port=5672) + self._timed_out = False + super(AsyncTestCase, self).setUp() + + def tearDown(self): + self._stop() + + def shortDescription(self): + method_desc = super(AsyncTestCase, self).shortDescription() + if self.DESCRIPTION: + return "%s (%s)" % (self.DESCRIPTION, method_desc) + else: + return method_desc + + def begin(self, channel): # pylint: disable=R0201,W0613 + """Extend to start the actual tests on the channel""" + self.fail("AsyncTestCase.begin_test not extended") + + def start(self, adapter, ioloop_factory): + self.logger.info('start at %s', datetime.datetime.utcnow()) + self.adapter = adapter or self.ADAPTER + + self.connection = self.adapter(self.parameters, + self.on_open, + self.on_open_error, + self.on_closed, + custom_ioloop=ioloop_factory()) + try: + self.timeout = self.connection.add_timeout(self.TIMEOUT, + self.on_timeout) + self._run_ioloop() + self.assertFalse(self._timed_out) + finally: + self.connection.ioloop.close() + self.connection = None + + def stop_ioloop_only(self): + """Request stopping of the connection's ioloop to end the test without + closing the connection + """ + self._safe_remove_test_timeout() + self.connection.ioloop.stop() + + def stop(self): + """close the connection and stop the ioloop""" + self.logger.info("Stopping test") + self._safe_remove_test_timeout() + self.connection.close() # NOTE: on_closed() will stop the ioloop + + def _run_ioloop(self): + """Some tests need to subclass this in order to bootstrap their test + logic after we instantiate the connection and assign it to + `self.connection`, but before we run the ioloop + """ + self.connection.ioloop.start() + + def _safe_remove_test_timeout(self): + if hasattr(self, 'timeout') and self.timeout is not None: + self.logger.info("Removing timeout") + if hasattr(self, 'connection') and self.connection is not None: + self.connection.remove_timeout(self.timeout) + self.timeout = None + + def _stop(self): + self._safe_remove_test_timeout() + if hasattr(self, 'connection') and self.connection is not None: + self.logger.info("Stopping ioloop") + self.connection.ioloop.stop() + + def on_closed(self, connection, reply_code, reply_text): + """called when the connection has finished closing""" + self.logger.info('on_closed: %r %r %r', connection, + reply_code, reply_text) + self._stop() + + def on_open(self, connection): + self.logger.debug('on_open: %r', connection) + self.channel = connection.channel(self.begin) + + def on_open_error(self, connection, error): + self.logger.error('on_open_error: %r %r', connection, error) + connection.ioloop.stop() + raise AssertionError('Error connecting to RabbitMQ') + + def on_timeout(self): + """called when stuck waiting for connection to close""" + self.logger.error('%s timed out; on_timeout called at %s', + self, datetime.datetime.utcnow()) + self.timeout = None # the dispatcher should have removed it + self._timed_out = True + # initiate cleanup + self.stop() + + +class BoundQueueTestCase(AsyncTestCase): + + def start(self, adapter, ioloop_factory): + # PY3 compat encoding + self.exchange = 'e-' + self.__class__.__name__ + ':' + uuid.uuid1().hex + self.queue = 'q-' + self.__class__.__name__ + ':' + 
uuid.uuid1().hex
+        self.routing_key = self.__class__.__name__
+        super(BoundQueueTestCase, self).start(adapter, ioloop_factory)
+
+    def begin(self, channel):
+        self.channel.exchange_declare(self.on_exchange_declared, self.exchange,
+                                      exchange_type='direct',
+                                      passive=False,
+                                      durable=False,
+                                      auto_delete=True)
+
+    def on_exchange_declared(self, frame):  # pylint: disable=W0613
+        self.channel.queue_declare(self.on_queue_declared, self.queue,
+                                   passive=False,
+                                   durable=False,
+                                   exclusive=True,
+                                   auto_delete=True,
+                                   nowait=False,
+                                   arguments={'x-expires': self.TIMEOUT * 1000})
+
+    def on_queue_declared(self, frame):  # pylint: disable=W0613
+        self.channel.queue_bind(self.on_ready, self.queue, self.exchange,
+                                self.routing_key)
+
+    def on_ready(self, frame):
+        raise NotImplementedError
+
+
+#
+# In order to write test cases that will be tested using all the Async
+# Adapters, write a class that inherits both from one of the TestCase classes
+# above and from the AsyncAdapters class below. This allows you to avoid
+# duplicating the test methods for each adapter in each test class.
+#
+
+class AsyncAdapters(object):
+
+    def start(self, adapter_class, ioloop_factory):
+        """
+
+        :param adapter_class: pika connection adapter class to test.
+        :param ioloop_factory: to be called without args to instantiate a
+            non-shared ioloop to be passed as the `custom_ioloop` arg to the
+            `adapter_class` constructor. This is needed because some of the
+            adapters default to using a singleton ioloop, which results in
+            test errors after prior tests close the ioloop to release
+            resources and eliminate ResourceWarning warnings concerning
+            unclosed sockets from our adapters.
+        :return:
+        """
+        raise NotImplementedError
+
+    def select_default_test(self):
+        """SelectConnection:DefaultPoller"""
+        with mock.patch.multiple(select_connection, SELECT_TYPE=None):
+            self.start(adapters.SelectConnection, select_connection.IOLoop)
+
+    def select_select_test(self):
+        """SelectConnection:select"""
+
+        with mock.patch.multiple(select_connection, SELECT_TYPE='select'):
+            self.start(adapters.SelectConnection, select_connection.IOLoop)
+
+    @unittest.skipIf(
+        not hasattr(select, 'poll') or
+        not hasattr(select.poll(), 'modify'), "poll not supported")  # pylint: disable=E1101
+    def select_poll_test(self):
+        """SelectConnection:poll"""
+
+        with mock.patch.multiple(select_connection, SELECT_TYPE='poll'):
+            self.start(adapters.SelectConnection, select_connection.IOLoop)
+
+    @unittest.skipIf(not hasattr(select, 'epoll'), "epoll not supported")
+    def select_epoll_test(self):
+        """SelectConnection:epoll"""
+
+        with mock.patch.multiple(select_connection, SELECT_TYPE='epoll'):
+            self.start(adapters.SelectConnection, select_connection.IOLoop)
+
+    @unittest.skipIf(not hasattr(select, 'kqueue'), "kqueue not supported")
+    def select_kqueue_test(self):
+        """SelectConnection:kqueue"""
+
+        with mock.patch.multiple(select_connection, SELECT_TYPE='kqueue'):
+            self.start(adapters.SelectConnection, select_connection.IOLoop)
+
+    def tornado_test(self):
+        """TornadoConnection"""
+        ioloop_factory = None
+        if adapters.tornado_connection.TornadoConnection is not None:
+            import tornado.ioloop
+            ioloop_factory = tornado.ioloop.IOLoop
+        self.start(adapters.tornado_connection.TornadoConnection, ioloop_factory)
+
+    @unittest.skipIf(sys.version_info < (3, 4), "Asyncio available for Python 3.4+")
+    def asyncio_test(self):
+        """AsyncioConnection"""
+        ioloop_factory = None
+        if adapters.asyncio_connection.AsyncioConnection is not None:
+            import asyncio
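+            # Use a fresh event loop per test: reusing asyncio's default
+            # loop across tests would fail once an earlier test closes it
+            # during cleanup (see the start() docstring above).
+            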
ioloop_factory = asyncio.new_event_loop + + self.start(adapters.asyncio_connection.AsyncioConnection, ioloop_factory) diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/blocking_adapter_test.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/blocking_adapter_test.py new file mode 100644 index 000000000..749e812ce --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/blocking_adapter_test.py @@ -0,0 +1,2791 @@ +"""blocking adapter test""" +from datetime import datetime +import functools +import logging +import socket +import threading +import time +import unittest +import uuid + +from forward_server import ForwardServer +from test_utils import retry_assertion + +import pika +from pika.adapters import blocking_connection +from pika.compat import as_bytes +import pika.connection +import pika.exceptions + +# Disable warning about access to protected member +# pylint: disable=W0212 + +# Disable warning Attribute defined outside __init__ +# pylint: disable=W0201 + +# Disable warning Missing docstring +# pylint: disable=C0111 + +# Disable warning Too many public methods +# pylint: disable=R0904 + +# Disable warning Invalid variable name +# pylint: disable=C0103 + + +LOGGER = logging.getLogger(__name__) + +PARAMS_URL_TEMPLATE = ( + 'amqp://guest:guest@127.0.0.1:%(port)s/%%2f?socket_timeout=1') +DEFAULT_URL = PARAMS_URL_TEMPLATE % {'port': 5672} +DEFAULT_PARAMS = pika.URLParameters(DEFAULT_URL) +DEFAULT_TIMEOUT = 15 + + + +def setUpModule(): + logging.basicConfig(level=logging.DEBUG) + + +class BlockingTestCaseBase(unittest.TestCase): + + TIMEOUT = DEFAULT_TIMEOUT + + def _connect(self, + url=DEFAULT_URL, + connection_class=pika.BlockingConnection, + impl_class=None): + parameters = pika.URLParameters(url) + connection = connection_class(parameters, _impl_class=impl_class) + self.addCleanup(lambda: connection.close() + if connection.is_open else None) + + connection._impl.add_timeout( + self.TIMEOUT, # pylint: disable=E1101 + self._on_test_timeout) + + return connection + + def _on_test_timeout(self): + """Called when test times out""" + LOGGER.info('%s TIMED OUT (%s)', datetime.utcnow(), self) + self.fail('Test timed out') + + @retry_assertion(TIMEOUT/2) + def _assert_exact_message_count_with_retries(self, + channel, + queue, + expected_count): + frame = channel.queue_declare(queue, passive=True) + self.assertEqual(frame.method.message_count, expected_count) + + +class TestCreateAndCloseConnection(BlockingTestCaseBase): + + def test(self): + """BlockingConnection: Create and close connection""" + connection = self._connect() + self.assertIsInstance(connection, pika.BlockingConnection) + self.assertTrue(connection.is_open) + self.assertFalse(connection.is_closed) + self.assertFalse(connection.is_closing) + + connection.close() + self.assertTrue(connection.is_closed) + self.assertFalse(connection.is_open) + self.assertFalse(connection.is_closing) + + +class TestMultiCloseConnection(BlockingTestCaseBase): + + def test(self): + """BlockingConnection: Close connection twice""" + connection = self._connect() + self.assertIsInstance(connection, pika.BlockingConnection) + self.assertTrue(connection.is_open) + self.assertFalse(connection.is_closed) + self.assertFalse(connection.is_closing) + + connection.close() + self.assertTrue(connection.is_closed) + self.assertFalse(connection.is_open) + self.assertFalse(connection.is_closing) + + # Second close call shouldn't crash + connection.close() + + +class TestConnectionContextManagerClosesConnection(BlockingTestCaseBase): + def 
test(self): + """BlockingConnection: connection context manager closes connection""" + with self._connect() as connection: + self.assertIsInstance(connection, pika.BlockingConnection) + self.assertTrue(connection.is_open) + + self.assertTrue(connection.is_closed) + + +class TestConnectionContextManagerClosesConnectionAndPassesOriginalException(BlockingTestCaseBase): + def test(self): + """BlockingConnection: connection context manager closes connection and passes original exception""" # pylint: disable=C0301 + class MyException(Exception): + pass + + with self.assertRaises(MyException): + with self._connect() as connection: + self.assertTrue(connection.is_open) + + raise MyException() + + self.assertTrue(connection.is_closed) + + +class TestConnectionContextManagerClosesConnectionAndPassesSystemException(BlockingTestCaseBase): + def test(self): + """BlockingConnection: connection context manager closes connection and passes system exception""" # pylint: disable=C0301 + with self.assertRaises(SystemExit): + with self._connect() as connection: + self.assertTrue(connection.is_open) + raise SystemExit() + + self.assertTrue(connection.is_closed) + + +class TestLostConnectionResultsInIsClosedConnectionAndChannel(BlockingTestCaseBase): + def test(self): + connection = self._connect() + channel = connection.channel() + + # Simulate the server dropping the socket connection + connection._impl.socket.shutdown(socket.SHUT_RDWR) + + with self.assertRaises(pika.exceptions.ConnectionClosed): + # Changing QoS should result in ConnectionClosed + channel.basic_qos() + + # Now check is_open/is_closed on channel and connection + self.assertFalse(channel.is_open) + self.assertTrue(channel.is_closed) + self.assertFalse(connection.is_open) + self.assertTrue(connection.is_closed) + + +class TestInvalidExchangeTypeRaisesConnectionClosed(BlockingTestCaseBase): + def test(self): + """BlockingConnection: ConnectionClosed raised when creating exchange with invalid type""" # pylint: disable=C0301 + # This test exploits behavior specific to RabbitMQ whereby the broker + # closes the connection if an attempt is made to declare an exchange + # with an invalid exchange type + connection = self._connect() + ch = connection.channel() + + exg_name = ("TestInvalidExchangeTypeRaisesConnectionClosed_" + + uuid.uuid1().hex) + + with self.assertRaises(pika.exceptions.ConnectionClosed) as ex_cm: + # Attempt to create an exchange with invalid exchange type + ch.exchange_declare(exg_name, exchange_type='ZZwwInvalid') + + self.assertEqual(ex_cm.exception.args[0], 503) + + +class TestCreateAndCloseConnectionWithChannelAndConsumer(BlockingTestCaseBase): + + def test(self): + """BlockingConnection: Create and close connection with channel and consumer""" # pylint: disable=C0301 + connection = self._connect() + + ch = connection.channel() + + q_name = ( + 'TestCreateAndCloseConnectionWithChannelAndConsumer_q' + + uuid.uuid1().hex) + + body1 = 'a' * 1024 + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Publish the message to the queue by way of default exchange + ch.publish(exchange='', routing_key=q_name, body=body1) + + # Create a non-ackable consumer + ch.basic_consume(lambda *x: None, q_name, no_ack=True, + exclusive=False, arguments=None) + + connection.close() + self.assertTrue(connection.is_closed) + self.assertFalse(connection.is_open) + self.assertFalse(connection.is_closing) + + self.assertFalse(connection._impl._channels) + + 
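+        # Closing the connection must also tear down the channel's consumer
+        # bookkeeping, on both the BlockingChannel and its underlying impl.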
self.assertFalse(ch._consumer_infos) + self.assertFalse(ch._impl._consumers) + + +class TestSuddenBrokerDisconnectBeforeChannel(BlockingTestCaseBase): + + def test(self): + """BlockingConnection resets properly on TCP/IP drop during channel() + """ + with ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) as fwd: + + self.connection = self._connect( + PARAMS_URL_TEMPLATE % {"port": fwd.server_address[1]}) + + # Once outside the context, the connection is broken + + # BlockingConnection should raise ConnectionClosed + with self.assertRaises(pika.exceptions.ConnectionClosed): + self.connection.channel() + + self.assertTrue(self.connection.is_closed) + self.assertFalse(self.connection.is_open) + self.assertIsNone(self.connection._impl.socket) + + +class TestNoAccessToFileDescriptorAfterConnectionClosed(BlockingTestCaseBase): + + def test(self): + """BlockingConnection no access file descriptor after ConnectionClosed + """ + with ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) as fwd: + + self.connection = self._connect( + PARAMS_URL_TEMPLATE % {"port": fwd.server_address[1]}) + + # Once outside the context, the connection is broken + + # BlockingConnection should raise ConnectionClosed + with self.assertRaises(pika.exceptions.ConnectionClosed): + self.connection.channel() + + self.assertTrue(self.connection.is_closed) + self.assertFalse(self.connection.is_open) + self.assertIsNone(self.connection._impl.socket) + + # Attempt to operate on the connection once again after ConnectionClosed + self.assertIsNone(self.connection._impl.socket) + with self.assertRaises(pika.exceptions.ConnectionClosed): + self.connection.channel() + + +class TestConnectWithDownedBroker(BlockingTestCaseBase): + + def test(self): + """ BlockingConnection to downed broker results in AMQPConnectionError + + """ + # Reserve a port for use in connect + sock = socket.socket() + self.addCleanup(sock.close) + + sock.bind(("127.0.0.1", 0)) + + port = sock.getsockname()[1] + + sock.close() + + with self.assertRaises(pika.exceptions.AMQPConnectionError): + self.connection = self._connect( + PARAMS_URL_TEMPLATE % {"port": port}) + + +class TestDisconnectDuringConnectionStart(BlockingTestCaseBase): + + def test(self): + """ BlockingConnection TCP/IP connection loss in CONNECTION_START + """ + fwd = ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) + + fwd.start() + self.addCleanup(lambda: fwd.stop() if fwd.running else None) + + class MySelectConnection(pika.SelectConnection): + assert hasattr(pika.SelectConnection, '_on_connection_start') + + def _on_connection_start(self, *args, **kwargs): + fwd.stop() + return super(MySelectConnection, self)._on_connection_start( + *args, **kwargs) + + with self.assertRaises(pika.exceptions.ProbableAuthenticationError): + self._connect( + PARAMS_URL_TEMPLATE % {"port": fwd.server_address[1]}, + impl_class=MySelectConnection) + + +class TestDisconnectDuringConnectionTune(BlockingTestCaseBase): + + def test(self): + """ BlockingConnection TCP/IP connection loss in CONNECTION_TUNE + """ + fwd = ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) + fwd.start() + self.addCleanup(lambda: fwd.stop() if fwd.running else None) + + class MySelectConnection(pika.SelectConnection): + assert hasattr(pika.SelectConnection, '_on_connection_tune') + + def _on_connection_tune(self, *args, **kwargs): + fwd.stop() + 
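+                # The forwarder is stopped, so delegating Connection.Tune to
+                # the base class continues the handshake on a dead socket.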
return super(MySelectConnection, self)._on_connection_tune( + *args, **kwargs) + + with self.assertRaises(pika.exceptions.ProbableAccessDeniedError): + self._connect( + PARAMS_URL_TEMPLATE % {"port": fwd.server_address[1]}, + impl_class=MySelectConnection) + + +class TestDisconnectDuringConnectionProtocol(BlockingTestCaseBase): + + def test(self): + """ BlockingConnection TCP/IP connection loss in CONNECTION_PROTOCOL + """ + fwd = ForwardServer( + remote_addr=(DEFAULT_PARAMS.host, DEFAULT_PARAMS.port), + local_linger_args=(1, 0)) + + fwd.start() + self.addCleanup(lambda: fwd.stop() if fwd.running else None) + + class MySelectConnection(pika.SelectConnection): + assert hasattr(pika.SelectConnection, '_on_connected') + + def _on_connected(self, *args, **kwargs): + fwd.stop() + return super(MySelectConnection, self)._on_connected( + *args, **kwargs) + + with self.assertRaises(pika.exceptions.IncompatibleProtocolError): + self._connect(PARAMS_URL_TEMPLATE % {"port": fwd.server_address[1]}, + impl_class=MySelectConnection) + + +class TestProcessDataEvents(BlockingTestCaseBase): + + def test(self): + """BlockingConnection.process_data_events""" + connection = self._connect() + + # Try with time_limit=0 + start_time = time.time() + connection.process_data_events(time_limit=0) + elapsed = time.time() - start_time + self.assertLess(elapsed, 0.25) + + # Try with time_limit=0.005 + start_time = time.time() + connection.process_data_events(time_limit=0.005) + elapsed = time.time() - start_time + self.assertGreaterEqual(elapsed, 0.005) + self.assertLess(elapsed, 0.25) + + +class TestConnectionRegisterForBlockAndUnblock(BlockingTestCaseBase): + + def test(self): + """BlockingConnection register for Connection.Blocked/Unblocked""" + connection = self._connect() + + # NOTE: I haven't figured out yet how to coerce RabbitMQ to emit + # Connection.Block and Connection.Unblock from the test, so we'll + # just call the registration functions for now, to make sure that + # registration doesn't crash + + connection.add_on_connection_blocked_callback(lambda frame: None) + + blocked_buffer = [] + evt = blocking_connection._ConnectionBlockedEvt( + lambda f: blocked_buffer.append("blocked"), + pika.frame.Method(1, pika.spec.Connection.Blocked('reason'))) + repr(evt) + evt.dispatch() + self.assertEqual(blocked_buffer, ["blocked"]) + + unblocked_buffer = [] + connection.add_on_connection_unblocked_callback(lambda frame: None) + evt = blocking_connection._ConnectionUnblockedEvt( + lambda f: unblocked_buffer.append("unblocked"), + pika.frame.Method(1, pika.spec.Connection.Unblocked())) + repr(evt) + evt.dispatch() + self.assertEqual(unblocked_buffer, ["unblocked"]) + + +class TestBlockedConnectionTimeout(BlockingTestCaseBase): + + def test(self): + """BlockingConnection Connection.Blocked timeout """ + url = DEFAULT_URL + '&blocked_connection_timeout=0.001' + conn = self._connect(url=url) + + # NOTE: I haven't figured out yet how to coerce RabbitMQ to emit + # Connection.Block and Connection.Unblock from the test, so we'll + # simulate it for now + + # Simulate Connection.Blocked + conn._impl._on_connection_blocked(pika.frame.Method( + 0, + pika.spec.Connection.Blocked('TestBlockedConnectionTimeout'))) + + # Wait for connection teardown + with self.assertRaises(pika.exceptions.ConnectionClosed) as excCtx: + while True: + conn.process_data_events(time_limit=1) + + self.assertEqual( + excCtx.exception.args, + (pika.connection.InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT, + 'Blocked connection timeout expired')) + + 
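+# A minimal, illustrative sketch (not part of the pika test suite) of the
+# pattern that the TestAddCallbackThreadsafe* cases below exercise: a foreign
+# thread must not call into a BlockingConnection directly, so it hands work
+# to the connection's owning thread via add_callback_threadsafe(). The helper
+# name and the Event-based handshake are assumptions for illustration only.
+def _example_callback_from_worker_thread(connection):
+    """Illustrative sketch: marshal a callback onto the connection's thread."""
+    done = threading.Event()
+
+    def worker():
+        # Runs on a foreign thread; instead of touching the connection's
+        # channels here, ask the owning thread to run done.set() for us.
+        connection.add_callback_threadsafe(done.set)
+
+    threading.Thread(target=worker).start()
+
+    # The callback only fires while the owning thread services I/O events.
+    while not done.is_set():
+        connection.process_data_events(time_limit=0.1)
+
+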
+class TestAddCallbackThreadsafeFromSameThread(BlockingTestCaseBase): + + def test(self): + """BlockingConnection.add_callback_threadsafe from same thread""" + connection = self._connect() + + # Test timer completion + start_time = time.time() + rx_callback = [] + connection.add_callback_threadsafe( + lambda: rx_callback.append(time.time())) + while not rx_callback: + connection.process_data_events(time_limit=None) + + self.assertEqual(len(rx_callback), 1) + elapsed = time.time() - start_time + self.assertLess(elapsed, 0.25) + + +class TestAddCallbackThreadsafeFromAnotherThread(BlockingTestCaseBase): + + def test(self): + """BlockingConnection.add_callback_threadsafe from another thread""" + connection = self._connect() + + # Test timer completion + start_time = time.time() + rx_callback = [] + timer = threading.Timer( + 0, + functools.partial(connection.add_callback_threadsafe, + lambda: rx_callback.append(time.time()))) + self.addCleanup(timer.cancel) + timer.start() + while not rx_callback: + connection.process_data_events(time_limit=None) + + self.assertEqual(len(rx_callback), 1) + elapsed = time.time() - start_time + self.assertLess(elapsed, 0.25) + + +class TestAddTimeoutRemoveTimeout(BlockingTestCaseBase): + + def test(self): + """BlockingConnection.add_timeout and remove_timeout""" + connection = self._connect() + + # Test timer completion + start_time = time.time() + rx_callback = [] + timer_id = connection.add_timeout( + 0.005, + lambda: rx_callback.append(time.time())) + while not rx_callback: + connection.process_data_events(time_limit=None) + + self.assertEqual(len(rx_callback), 1) + elapsed = time.time() - start_time + self.assertLess(elapsed, 0.25) + + # Test removing triggered timeout + connection.remove_timeout(timer_id) + + + # Test aborted timer + rx_callback = [] + timer_id = connection.add_timeout( + 0.001, + lambda: rx_callback.append(time.time())) + connection.remove_timeout(timer_id) + connection.process_data_events(time_limit=0.1) + self.assertFalse(rx_callback) + + # Make sure _TimerEvt repr doesn't crash + evt = blocking_connection._TimerEvt(lambda: None) + repr(evt) + + +class TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback(BlockingTestCaseBase): + + def test(self): + """BlockingConnection viability of multiple timeouts with same deadline and callback""" + connection = self._connect() + + rx_callback = [] + + def callback(): + rx_callback.append(1) + + timer1 = connection.add_timeout(0, callback) + timer2 = connection.add_timeout(0, callback) + + self.assertIsNot(timer1, timer2) + + connection.remove_timeout(timer1) + + # Wait for second timer to fire + start_wait_time = time.time() + while not rx_callback and time.time() - start_wait_time < 0.25: + connection.process_data_events(time_limit=0.001) + + self.assertListEqual(rx_callback, [1]) + + +class TestRemoveTimeoutFromTimeoutCallback(BlockingTestCaseBase): + + def test(self): + """BlockingConnection.remove_timeout from timeout callback""" + connection = self._connect() + + # Test timer completion + timer_id1 = connection.add_timeout(5, lambda: 0/0) + + rx_timer2 = [] + def on_timer2(): + connection.remove_timeout(timer_id1) + connection.remove_timeout(timer_id2) + rx_timer2.append(1) + + timer_id2 = connection.add_timeout(0, on_timer2) + + while not rx_timer2: + connection.process_data_events(time_limit=None) + + self.assertIsNone(timer_id1.callback) + self.assertFalse(connection._ready_events) + + +class TestSleep(BlockingTestCaseBase): + + def test(self): + """BlockingConnection.sleep""" + 
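+        # Unlike time.sleep(), BlockingConnection.sleep() keeps servicing the
+        # connection's I/O while waiting, so heartbeats are not starved.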
connection = self._connect() + + # Try with duration=0 + start_time = time.time() + connection.sleep(duration=0) + elapsed = time.time() - start_time + self.assertLess(elapsed, 0.25) + + # Try with duration=0.005 + start_time = time.time() + connection.sleep(duration=0.005) + elapsed = time.time() - start_time + self.assertGreaterEqual(elapsed, 0.005) + self.assertLess(elapsed, 0.25) + + +class TestConnectionProperties(BlockingTestCaseBase): + + def test(self): + """Test BlockingConnection properties""" + connection = self._connect() + + self.assertTrue(connection.is_open) + self.assertFalse(connection.is_closing) + self.assertFalse(connection.is_closed) + + self.assertTrue(connection.basic_nack_supported) + self.assertTrue(connection.consumer_cancel_notify_supported) + self.assertTrue(connection.exchange_exchange_bindings_supported) + self.assertTrue(connection.publisher_confirms_supported) + + connection.close() + self.assertFalse(connection.is_open) + self.assertFalse(connection.is_closing) + self.assertTrue(connection.is_closed) + + + +class TestCreateAndCloseChannel(BlockingTestCaseBase): + + def test(self): + """BlockingChannel: Create and close channel""" + connection = self._connect() + + ch = connection.channel() + self.assertIsInstance(ch, blocking_connection.BlockingChannel) + self.assertTrue(ch.is_open) + self.assertFalse(ch.is_closed) + self.assertFalse(ch.is_closing) + self.assertIs(ch.connection, connection) + + ch.close() + self.assertTrue(ch.is_closed) + self.assertFalse(ch.is_open) + self.assertFalse(ch.is_closing) + + +class TestExchangeDeclareAndDelete(BlockingTestCaseBase): + + def test(self): + """BlockingChannel: Test exchange_declare and exchange_delete""" + connection = self._connect() + + ch = connection.channel() + + name = "TestExchangeDeclareAndDelete_" + uuid.uuid1().hex + + # Declare a new exchange + frame = ch.exchange_declare(name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, name) + + self.assertIsInstance(frame.method, pika.spec.Exchange.DeclareOk) + + # Check if it exists by declaring it passively + frame = ch.exchange_declare(name, passive=True) + self.assertIsInstance(frame.method, pika.spec.Exchange.DeclareOk) + + # Delete the exchange + frame = ch.exchange_delete(name) + self.assertIsInstance(frame.method, pika.spec.Exchange.DeleteOk) + + # Verify that it's been deleted + with self.assertRaises(pika.exceptions.ChannelClosed) as cm: + ch.exchange_declare(name, passive=True) + + self.assertEqual(cm.exception.args[0], 404) + + +class TestExchangeBindAndUnbind(BlockingTestCaseBase): + + def test(self): + """BlockingChannel: Test exchange_bind and exchange_unbind""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestExchangeBindAndUnbind_q' + uuid.uuid1().hex + src_exg_name = 'TestExchangeBindAndUnbind_src_exg_' + uuid.uuid1().hex + dest_exg_name = 'TestExchangeBindAndUnbind_dest_exg_' + uuid.uuid1().hex + routing_key = 'TestExchangeBindAndUnbind' + + # Place channel in publisher-acknowledgments mode so that we may test + # whether the queue is reachable by publishing with mandatory=True + res = ch.confirm_delivery() + self.assertIsNone(res) + + # Declare both exchanges + ch.exchange_declare(src_exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, src_exg_name) + ch.exchange_declare(dest_exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, dest_exg_name) + + # Declare a new queue + ch.queue_declare(q_name, 
auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Bind the queue to the destination exchange + ch.queue_bind(q_name, exchange=dest_exg_name, routing_key=routing_key) + + + # Verify that the queue is unreachable without exchange-exchange binding + with self.assertRaises(pika.exceptions.UnroutableError): + ch.publish(src_exg_name, routing_key, body='', mandatory=True) + + # Bind the exchanges + frame = ch.exchange_bind(destination=dest_exg_name, source=src_exg_name, + routing_key=routing_key) + self.assertIsInstance(frame.method, pika.spec.Exchange.BindOk) + + # Publish a message via the source exchange + ch.publish(src_exg_name, routing_key, body='TestExchangeBindAndUnbind', + mandatory=True) + + # Check that the queue now has one message + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=1) + + # Unbind the exchanges + frame = ch.exchange_unbind(destination=dest_exg_name, + source=src_exg_name, + routing_key=routing_key) + self.assertIsInstance(frame.method, pika.spec.Exchange.UnbindOk) + + # Verify that the queue is now unreachable via the source exchange + with self.assertRaises(pika.exceptions.UnroutableError): + ch.publish(src_exg_name, routing_key, body='', mandatory=True) + + +class TestQueueDeclareAndDelete(BlockingTestCaseBase): + + def test(self): + """BlockingChannel: Test queue_declare and queue_delete""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestQueueDeclareAndDelete_' + uuid.uuid1().hex + + # Declare a new queue + frame = ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + self.assertIsInstance(frame.method, pika.spec.Queue.DeclareOk) + + # Check if it exists by declaring it passively + frame = ch.queue_declare(q_name, passive=True) + self.assertIsInstance(frame.method, pika.spec.Queue.DeclareOk) + + # Delete the queue + frame = ch.queue_delete(q_name) + self.assertIsInstance(frame.method, pika.spec.Queue.DeleteOk) + + # Verify that it's been deleted + with self.assertRaises(pika.exceptions.ChannelClosed) as cm: + ch.queue_declare(q_name, passive=True) + + self.assertEqual(cm.exception.args[0], 404) + + +class TestPassiveQueueDeclareOfUnknownQueueRaisesChannelClosed( + BlockingTestCaseBase): + def test(self): + """BlockingChannel: ChannelClosed raised when passive-declaring unknown queue""" # pylint: disable=C0301 + connection = self._connect() + ch = connection.channel() + + q_name = ("TestPassiveQueueDeclareOfUnknownQueueRaisesChannelClosed_q_" + + uuid.uuid1().hex) + + with self.assertRaises(pika.exceptions.ChannelClosed) as ex_cm: + ch.queue_declare(q_name, passive=True) + + self.assertEqual(ex_cm.exception.args[0], 404) + + +class TestQueueBindAndUnbindAndPurge(BlockingTestCaseBase): + + def test(self): + """BlockingChannel: Test queue_bind and queue_unbind""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestQueueBindAndUnbindAndPurge_q' + uuid.uuid1().hex + exg_name = 'TestQueueBindAndUnbindAndPurge_exg_' + uuid.uuid1().hex + routing_key = 'TestQueueBindAndUnbindAndPurge' + + # Place channel in publisher-acknowledgments mode so that we may test + # whether the queue is reachable by publishing with mandatory=True + res = ch.confirm_delivery() + self.assertIsNone(res) + + # Declare a new exchange + ch.exchange_declare(exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, exg_name) + + # Declare a new queue + 
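+        # (auto_delete lets the broker clean the queue up on its own; the
+        # addCleanup below also deletes it from a fresh channel in case the
+        # test fails early)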
ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Bind the queue to the exchange using routing key + frame = ch.queue_bind(q_name, exchange=exg_name, + routing_key=routing_key) + self.assertIsInstance(frame.method, pika.spec.Queue.BindOk) + + # Check that the queue is empty + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.message_count, 0) + + # Deposit a message in the queue + ch.publish(exg_name, routing_key, body='TestQueueBindAndUnbindAndPurge', + mandatory=True) + + # Check that the queue now has one message + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.message_count, 1) + + # Unbind the queue + frame = ch.queue_unbind(queue=q_name, exchange=exg_name, + routing_key=routing_key) + self.assertIsInstance(frame.method, pika.spec.Queue.UnbindOk) + + # Verify that the queue is now unreachable via that binding + with self.assertRaises(pika.exceptions.UnroutableError): + ch.publish(exg_name, routing_key, + body='TestQueueBindAndUnbindAndPurge-2', + mandatory=True) + + # Purge the queue and verify that 1 message was purged + frame = ch.queue_purge(q_name) + self.assertIsInstance(frame.method, pika.spec.Queue.PurgeOk) + self.assertEqual(frame.method.message_count, 1) + + # Verify that the queue is now empty + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.message_count, 0) + + +class TestBasicGet(BlockingTestCaseBase): + + def tearDown(self): + LOGGER.info('%s TEARING DOWN (%s)', datetime.utcnow(), self) + + def test(self): + """BlockingChannel.basic_get""" + LOGGER.info('%s STARTED (%s)', datetime.utcnow(), self) + + connection = self._connect() + LOGGER.info('%s CONNECTED (%s)', datetime.utcnow(), self) + + ch = connection.channel() + LOGGER.info('%s CREATED CHANNEL (%s)', datetime.utcnow(), self) + + q_name = 'TestBasicGet_q' + uuid.uuid1().hex + + # Place channel in publisher-acknowledgments mode so that the message + # may be delivered synchronously to the queue by publishing it with + # mandatory=True + ch.confirm_delivery() + LOGGER.info('%s ENABLED PUB-ACKS (%s)', datetime.utcnow(), self) + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + LOGGER.info('%s DECLARED QUEUE (%s)', datetime.utcnow(), self) + + # Verify result of getting a message from an empty queue + msg = ch.basic_get(q_name, no_ack=False) + self.assertTupleEqual(msg, (None, None, None)) + LOGGER.info('%s GOT FROM EMPTY QUEUE (%s)', datetime.utcnow(), self) + + body = 'TestBasicGet' + # Deposit a message in the queue via default exchange + ch.publish(exchange='', routing_key=q_name, + body=body, + mandatory=True) + LOGGER.info('%s PUBLISHED (%s)', datetime.utcnow(), self) + + # Get the message + (method, properties, body) = ch.basic_get(q_name, no_ack=False) + LOGGER.info('%s GOT FROM NON-EMPTY QUEUE (%s)', datetime.utcnow(), self) + self.assertIsInstance(method, pika.spec.Basic.GetOk) + self.assertEqual(method.delivery_tag, 1) + self.assertFalse(method.redelivered) + self.assertEqual(method.exchange, '') + self.assertEqual(method.routing_key, q_name) + self.assertEqual(method.message_count, 0) + + self.assertIsInstance(properties, pika.BasicProperties) + self.assertIsNone(properties.headers) + self.assertEqual(body, as_bytes(body)) + + # Ack it + ch.basic_ack(delivery_tag=method.delivery_tag) + LOGGER.info('%s ACKED (%s)', datetime.utcnow(), self) + + # Verify that the queue 
is now empty + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + +class TestBasicReject(BlockingTestCaseBase): + + def test(self): + """BlockingChannel.basic_reject""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestBasicReject_q' + uuid.uuid1().hex + + # Place channel in publisher-acknowledgments mode so that the message + # may be delivered synchronously to the queue by publishing it with + # mandatory=True + ch.confirm_delivery() + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Deposit two messages in the queue via default exchange + ch.publish(exchange='', routing_key=q_name, + body='TestBasicReject1', + mandatory=True) + ch.publish(exchange='', routing_key=q_name, + body='TestBasicReject2', + mandatory=True) + + # Get the messages + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, as_bytes('TestBasicReject1')) + + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, as_bytes('TestBasicReject2')) + + # Nack the second message + ch.basic_reject(rx_method.delivery_tag, requeue=True) + + # Verify that exactly one message is present in the queue, namely the + # second one + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=1) + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, as_bytes('TestBasicReject2')) + + +class TestBasicRejectNoRequeue(BlockingTestCaseBase): + + def test(self): + """BlockingChannel.basic_reject with requeue=False""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestBasicRejectNoRequeue_q' + uuid.uuid1().hex + + # Place channel in publisher-acknowledgments mode so that the message + # may be delivered synchronously to the queue by publishing it with + # mandatory=True + ch.confirm_delivery() + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Deposit two messages in the queue via default exchange + ch.publish(exchange='', routing_key=q_name, + body='TestBasicRejectNoRequeue1', + mandatory=True) + ch.publish(exchange='', routing_key=q_name, + body='TestBasicRejectNoRequeue2', + mandatory=True) + + # Get the messages + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, + as_bytes('TestBasicRejectNoRequeue1')) + + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, + as_bytes('TestBasicRejectNoRequeue2')) + + # Nack the second message + ch.basic_reject(rx_method.delivery_tag, requeue=False) + + # Verify that no messages are present in the queue + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + +class TestBasicNack(BlockingTestCaseBase): + + def test(self): + """BlockingChannel.basic_nack single message""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestBasicNack_q' + uuid.uuid1().hex + + # Place channel in publisher-acknowledgments mode so that the message + # may be delivered synchronously to the queue by publishing it with + # mandatory=True + ch.confirm_delivery() + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Deposit two messages in the queue via default exchange + 
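+        # (exchange='' is the AMQP default exchange, which routes a message
+        # directly to the queue whose name matches the routing key)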
ch.publish(exchange='', routing_key=q_name, + body='TestBasicNack1', + mandatory=True) + ch.publish(exchange='', routing_key=q_name, + body='TestBasicNack2', + mandatory=True) + + # Get the messages + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, as_bytes('TestBasicNack1')) + + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, as_bytes('TestBasicNack2')) + + # Nack the second message + ch.basic_nack(rx_method.delivery_tag, multiple=False, requeue=True) + + # Verify that exactly one message is present in the queue, namely the + # second one + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=1) + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, as_bytes('TestBasicNack2')) + + +class TestBasicNackNoRequeue(BlockingTestCaseBase): + + def test(self): + """BlockingChannel.basic_nack with requeue=False""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestBasicNackNoRequeue_q' + uuid.uuid1().hex + + # Place channel in publisher-acknowledgments mode so that the message + # may be delivered synchronously to the queue by publishing it with + # mandatory=True + ch.confirm_delivery() + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Deposit two messages in the queue via default exchange + ch.publish(exchange='', routing_key=q_name, + body='TestBasicNackNoRequeue1', + mandatory=True) + ch.publish(exchange='', routing_key=q_name, + body='TestBasicNackNoRequeue2', + mandatory=True) + + # Get the messages + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, + as_bytes('TestBasicNackNoRequeue1')) + + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, + as_bytes('TestBasicNackNoRequeue2')) + + # Nack the second message + ch.basic_nack(rx_method.delivery_tag, requeue=False) + + # Verify that no messages are present in the queue + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + +class TestBasicNackMultiple(BlockingTestCaseBase): + + def test(self): + """BlockingChannel.basic_nack multiple messages""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestBasicNackMultiple_q' + uuid.uuid1().hex + + # Place channel in publisher-acknowledgments mode so that the message + # may be delivered synchronously to the queue by publishing it with + # mandatory=True + ch.confirm_delivery() + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Deposit two messages in the queue via default exchange + ch.publish(exchange='', routing_key=q_name, + body='TestBasicNackMultiple1', + mandatory=True) + ch.publish(exchange='', routing_key=q_name, + body='TestBasicNackMultiple2', + mandatory=True) + + # Get the messages + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, + as_bytes('TestBasicNackMultiple1')) + + (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False) + self.assertEqual(rx_body, + as_bytes('TestBasicNackMultiple2')) + + # Nack both messages via the "multiple" option + ch.basic_nack(rx_method.delivery_tag, multiple=True, requeue=True) + + # Verify that both messages are present in the queue + self._assert_exact_message_count_with_retries(channel=ch, + 
queue=q_name,
+                                                      expected_count=2)
+        (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False)
+        self.assertEqual(rx_body,
+                         as_bytes('TestBasicNackMultiple1'))
+        (rx_method, _, rx_body) = ch.basic_get(q_name, no_ack=False)
+        self.assertEqual(rx_body,
+                         as_bytes('TestBasicNackMultiple2'))
+
+
+class TestBasicRecoverWithRequeue(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel.basic_recover with requeue=True.
+
+        NOTE: the requeue=False option is not supported by RabbitMQ broker as
+        of this writing (using RabbitMQ 3.5.1)
+        """
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = (
+            'TestBasicRecoverWithRequeue_q' + uuid.uuid1().hex)
+
+        # Place channel in publisher-acknowledgments mode so that the message
+        # may be delivered synchronously to the queue by publishing it with
+        # mandatory=True
+        ch.confirm_delivery()
+
+        # Declare a new queue
+        ch.queue_declare(q_name, auto_delete=True)
+        self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+        # Deposit two messages in the queue via default exchange
+        ch.publish(exchange='', routing_key=q_name,
+                   body='TestBasicRecoverWithRequeue1',
+                   mandatory=True)
+        ch.publish(exchange='', routing_key=q_name,
+                   body='TestBasicRecoverWithRequeue2',
+                   mandatory=True)
+
+        rx_messages = []
+        num_messages = 0
+        for msg in ch.consume(q_name, no_ack=False):
+            num_messages += 1
+
+            if num_messages == 2:
+                ch.basic_recover(requeue=True)
+
+            if num_messages > 2:
+                rx_messages.append(msg)
+
+            if num_messages == 4:
+                break
+        else:
+            self.fail('consumer aborted prematurely')
+
+        # Get the messages
+        (_, _, rx_body) = rx_messages[0]
+        self.assertEqual(rx_body,
+                         as_bytes('TestBasicRecoverWithRequeue1'))
+
+        (_, _, rx_body) = rx_messages[1]
+        self.assertEqual(rx_body,
+                         as_bytes('TestBasicRecoverWithRequeue2'))
+
+
+class TestTxCommit(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel.tx_commit"""
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = 'TestTxCommit_q' + uuid.uuid1().hex
+
+        # Declare a new queue
+        ch.queue_declare(q_name, auto_delete=True)
+        self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+        # Select standard transaction mode
+        frame = ch.tx_select()
+        self.assertIsInstance(frame.method, pika.spec.Tx.SelectOk)
+
+        # Deposit a message in the queue via default exchange
+        ch.publish(exchange='', routing_key=q_name,
+                   body='TestTxCommit1',
+                   mandatory=True)
+
+        # Verify that queue is still empty
+        frame = ch.queue_declare(q_name, passive=True)
+        self.assertEqual(frame.method.message_count, 0)
+
+        # Commit the transaction
+        ch.tx_commit()
+
+        # Verify that the queue has the expected message
+        frame = ch.queue_declare(q_name, passive=True)
+        self.assertEqual(frame.method.message_count, 1)
+
+        (_, _, rx_body) = ch.basic_get(q_name, no_ack=False)
+        self.assertEqual(rx_body, as_bytes('TestTxCommit1'))
+
+
+class TestTxRollback(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel.tx_rollback"""
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = 'TestTxRollback_q' + uuid.uuid1().hex
+
+        # Declare a new queue
+        ch.queue_declare(q_name, auto_delete=True)
+        self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+        # Select standard transaction mode
+        frame = ch.tx_select()
+        self.assertIsInstance(frame.method, pika.spec.Tx.SelectOk)
+
+        # Deposit a message in the queue via default exchange
+        ch.publish(exchange='', routing_key=q_name,
+                   body='TestTxRollback1',
+                   mandatory=True)
+
+        # Verify that queue is still empty
+        frame = 
+        self.assertEqual(frame.method.message_count, 0)
+
+        # Roll back the transaction
+        ch.tx_rollback()
+
+        # Verify that the queue continues to be empty
+        frame = ch.queue_declare(q_name, passive=True)
+        self.assertEqual(frame.method.message_count, 0)
+
+
+class TestBasicConsumeFromUnknownQueueRaisesChannelClosed(BlockingTestCaseBase):
+    def test(self):
+        """ChannelClosed raised when consuming from unknown queue"""
+        connection = self._connect()
+        ch = connection.channel()
+
+        q_name = ("TestBasicConsumeFromUnknownQueueRaisesChannelClosed_q_" +
+                  uuid.uuid1().hex)
+
+        with self.assertRaises(pika.exceptions.ChannelClosed) as ex_cm:
+            ch.basic_consume(lambda *args: None, q_name)
+
+        self.assertEqual(ex_cm.exception.args[0], 404)
+
+
+class TestPublishAndBasicPublishWithPubacksUnroutable(BlockingTestCaseBase):
+
+    def test(self):  # pylint: disable=R0914
+        """BlockingChannel.publish and basic_publish unroutable message with pubacks"""  # pylint: disable=C0301
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        exg_name = ('TestPublishAndBasicPublishUnroutable_exg_' +
+                    uuid.uuid1().hex)
+        routing_key = 'TestPublishAndBasicPublishUnroutable'
+
+        # Place channel in publisher-acknowledgments mode so that publishing
+        # with mandatory=True will be synchronous
+        res = ch.confirm_delivery()
+        self.assertIsNone(res)
+
+        # Declare a new exchange
+        ch.exchange_declare(exg_name, exchange_type='direct')
+        self.addCleanup(connection.channel().exchange_delete, exg_name)
+
+        # Verify unroutable message handling using basic_publish
+        res = ch.basic_publish(exg_name, routing_key=routing_key, body='',
+                               mandatory=True)
+        self.assertEqual(res, False)
+
+        # Verify unroutable message handling using publish
+        msg2_headers = dict(
+            test_name='TestPublishAndBasicPublishWithPubacksUnroutable')
+        msg2_properties = pika.spec.BasicProperties(headers=msg2_headers)
+        with self.assertRaises(pika.exceptions.UnroutableError) as cm:
+            ch.publish(exg_name, routing_key=routing_key, body='',
+                       properties=msg2_properties, mandatory=True)
+        (msg,) = cm.exception.messages
+        self.assertIsInstance(msg, blocking_connection.ReturnedMessage)
+        self.assertIsInstance(msg.method, pika.spec.Basic.Return)
+        self.assertEqual(msg.method.reply_code, 312)
+        self.assertEqual(msg.method.exchange, exg_name)
+        self.assertEqual(msg.method.routing_key, routing_key)
+        self.assertIsInstance(msg.properties, pika.BasicProperties)
+        self.assertEqual(msg.properties.headers, msg2_headers)
+        self.assertEqual(msg.body, as_bytes(''))
+
+
+class TestConfirmDeliveryAfterUnroutableMessage(BlockingTestCaseBase):
+
+    def test(self):  # pylint: disable=R0914
+        """BlockingChannel.confirm_delivery following unroutable message"""
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        exg_name = ('TestConfirmDeliveryAfterUnroutableMessage_exg_' +
+                    uuid.uuid1().hex)
+        routing_key = 'TestConfirmDeliveryAfterUnroutableMessage'
+
+        # Declare a new exchange
+        ch.exchange_declare(exg_name, exchange_type='direct')
+        self.addCleanup(connection.channel().exchange_delete, exg_name)
+
+        # Register on-return callback
+        returned_messages = []
+        ch.add_on_return_callback(lambda *args: returned_messages.append(args))
+
+        # Emit unroutable message without pubacks
+        res = ch.basic_publish(exg_name, routing_key=routing_key, body='',
+                               mandatory=True)
+        self.assertEqual(res, True)
+
+        # Select delivery confirmations
+        ch.confirm_delivery()
+
+        # Verify that unroutable message is in pending events
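+        # (Sketch of the mechanism this test exercises: the broker answers
+        # the unroutable mandatory publish with Basic.Return, which
+        # BlockingConnection buffers as a _ReturnedMessageEvt on the channel
+        # until process_data_events() dispatches it to the on-return callback
+        # registered above.)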
+        self.assertEqual(len(ch._pending_events), 1)
+        self.assertIsInstance(ch._pending_events[0],
+                              blocking_connection._ReturnedMessageEvt)
+        # Verify that repr of _ReturnedMessageEvt instance does not crash
+        repr(ch._pending_events[0])
+
+        # Dispatch events
+        connection.process_data_events()
+
+        self.assertEqual(len(ch._pending_events), 0)
+
+        # Verify that unroutable message was dispatched
+        ((channel, method, properties, body,),) = returned_messages
+        self.assertIs(channel, ch)
+        self.assertIsInstance(method, pika.spec.Basic.Return)
+        self.assertEqual(method.reply_code, 312)
+        self.assertEqual(method.exchange, exg_name)
+        self.assertEqual(method.routing_key, routing_key)
+        self.assertIsInstance(properties, pika.BasicProperties)
+        self.assertEqual(body, as_bytes(''))
+
+
+class TestUnroutableMessagesReturnedInNonPubackMode(BlockingTestCaseBase):
+
+    def test(self):  # pylint: disable=R0914
+        """BlockingChannel: unroutable messages are returned in non-puback mode"""  # pylint: disable=C0301
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        exg_name = (
+            'TestUnroutableMessageReturnedInNonPubackMode_exg_' +
+            uuid.uuid1().hex)
+        routing_key = 'TestUnroutableMessageReturnedInNonPubackMode'
+
+        # Declare a new exchange
+        ch.exchange_declare(exg_name, exchange_type='direct')
+        self.addCleanup(connection.channel().exchange_delete, exg_name)
+
+        # Register on-return callback
+        returned_messages = []
+        ch.add_on_return_callback(
+            lambda *args: returned_messages.append(args))
+
+        # Emit unroutable messages without pubacks
+        ch.publish(exg_name, routing_key=routing_key, body='msg1',
+                   mandatory=True)
+
+        ch.publish(exg_name, routing_key=routing_key, body='msg2',
+                   mandatory=True)
+
+        # Process I/O until both Basic.Return messages are dispatched
+        while len(returned_messages) < 2:
+            connection.process_data_events()
+
+        self.assertEqual(len(returned_messages), 2)
+
+        self.assertEqual(len(ch._pending_events), 0)
+
+        # Verify returned messages
+        (channel, method, properties, body,) = returned_messages[0]
+        self.assertIs(channel, ch)
+        self.assertIsInstance(method, pika.spec.Basic.Return)
+        self.assertEqual(method.reply_code, 312)
+        self.assertEqual(method.exchange, exg_name)
+        self.assertEqual(method.routing_key, routing_key)
+        self.assertIsInstance(properties, pika.BasicProperties)
+        self.assertEqual(body, as_bytes('msg1'))
+
+        (channel, method, properties, body,) = returned_messages[1]
+        self.assertIs(channel, ch)
+        self.assertIsInstance(method, pika.spec.Basic.Return)
+        self.assertEqual(method.reply_code, 312)
+        self.assertEqual(method.exchange, exg_name)
+        self.assertEqual(method.routing_key, routing_key)
+        self.assertIsInstance(properties, pika.BasicProperties)
+        self.assertEqual(body, as_bytes('msg2'))
+
+
+class TestUnroutableMessageReturnedInPubackMode(BlockingTestCaseBase):
+
+    def test(self):  # pylint: disable=R0914
+        """BlockingChannel: unroutable messages are returned in puback mode"""
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        exg_name = (
+            'TestUnroutableMessageReturnedInPubackMode_exg_' +
+            uuid.uuid1().hex)
+        routing_key = 'TestUnroutableMessageReturnedInPubackMode'
+
+        # Declare a new exchange
+        ch.exchange_declare(exg_name, exchange_type='direct')
+        self.addCleanup(connection.channel().exchange_delete, exg_name)
+
+        # Select delivery confirmations
+        ch.confirm_delivery()
+
+        # Register on-return callback
+        returned_messages = []
+        ch.add_on_return_callback(
+            lambda *args: returned_messages.append(args))
+
+        # Emit unroutable messages with pubacks
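+        # (Note: with delivery confirmations enabled, basic_publish with
+        # mandatory=True is expected to return False for an unroutable
+        # message, because the broker sends Basic.Return before confirming
+        # the publish; the assertions below rely on this behavior.)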
+        res = ch.basic_publish(exg_name, routing_key=routing_key, body='msg1',
+                               mandatory=True)
+        self.assertEqual(res, False)
+
+        res = ch.basic_publish(exg_name, routing_key=routing_key, body='msg2',
+                               mandatory=True)
+        self.assertEqual(res, False)
+
+        # Verify that unroutable messages are already in pending events
+        self.assertEqual(len(ch._pending_events), 2)
+        self.assertIsInstance(ch._pending_events[0],
+                              blocking_connection._ReturnedMessageEvt)
+        self.assertIsInstance(ch._pending_events[1],
+                              blocking_connection._ReturnedMessageEvt)
+        # Verify that repr of _ReturnedMessageEvt instance does not crash
+        repr(ch._pending_events[0])
+        repr(ch._pending_events[1])
+
+        # Dispatch events
+        connection.process_data_events()
+
+        self.assertEqual(len(ch._pending_events), 0)
+
+        # Verify returned messages
+        (channel, method, properties, body,) = returned_messages[0]
+        self.assertIs(channel, ch)
+        self.assertIsInstance(method, pika.spec.Basic.Return)
+        self.assertEqual(method.reply_code, 312)
+        self.assertEqual(method.exchange, exg_name)
+        self.assertEqual(method.routing_key, routing_key)
+        self.assertIsInstance(properties, pika.BasicProperties)
+        self.assertEqual(body, as_bytes('msg1'))
+
+        (channel, method, properties, body,) = returned_messages[1]
+        self.assertIs(channel, ch)
+        self.assertIsInstance(method, pika.spec.Basic.Return)
+        self.assertEqual(method.reply_code, 312)
+        self.assertEqual(method.exchange, exg_name)
+        self.assertEqual(method.routing_key, routing_key)
+        self.assertIsInstance(properties, pika.BasicProperties)
+        self.assertEqual(body, as_bytes('msg2'))
+
+
+class TestBasicPublishDeliveredWhenPendingUnroutable(BlockingTestCaseBase):
+
+    def test(self):  # pylint: disable=R0914
+        """BlockingChannel.basic_publish msg delivered despite pending unroutable message"""  # pylint: disable=C0301
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = ('TestBasicPublishDeliveredWhenPendingUnroutable_q' +
+                  uuid.uuid1().hex)
+        exg_name = ('TestBasicPublishDeliveredWhenPendingUnroutable_exg_' +
+                    uuid.uuid1().hex)
+        routing_key = 'TestBasicPublishDeliveredWhenPendingUnroutable'
+
+        # Declare a new exchange
+        ch.exchange_declare(exg_name, exchange_type='direct')
+        self.addCleanup(connection.channel().exchange_delete, exg_name)
+
+        # Declare a new queue
+        ch.queue_declare(q_name, auto_delete=True)
+        self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+        # Bind the queue to the exchange using routing key
+        ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key)
+
+        # Attempt to send an unroutable message via basic_publish
+        res = ch.basic_publish(exg_name, routing_key='',
+                               body='unroutable-message',
+                               mandatory=True)
+        self.assertEqual(res, True)
+
+        # Flush connection to force Basic.Return
+        connection.channel().close()
+
+        # Deposit a routable message in the queue
+        res = ch.basic_publish(exg_name, routing_key=routing_key,
+                               body='routable-message',
+                               mandatory=True)
+        self.assertEqual(res, True)
+
+        # Wait for the queue to get the routable message
+        self._assert_exact_message_count_with_retries(channel=ch,
+                                                      queue=q_name,
+                                                      expected_count=1)
+
+        msg = ch.basic_get(q_name)
+
+        # Check the first message
+        self.assertIsInstance(msg, tuple)
+        rx_method, rx_properties, rx_body = msg
+        self.assertIsInstance(rx_method, pika.spec.Basic.GetOk)
+        self.assertEqual(rx_method.delivery_tag, 1)
+        self.assertFalse(rx_method.redelivered)
+        self.assertEqual(rx_method.exchange, exg_name)
+        self.assertEqual(rx_method.routing_key, routing_key)
+
self.assertIsInstance(rx_properties, pika.BasicProperties) + self.assertEqual(rx_body, as_bytes('routable-message')) + + # There shouldn't be any more events now + self.assertFalse(ch._pending_events) + + # Ack the message + ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) + + # Verify that the queue is now empty + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + +class TestPublishAndConsumeWithPubacksAndQosOfOne(BlockingTestCaseBase): + + def test(self): # pylint: disable=R0914,R0915 + """BlockingChannel.basic_publish, publish, basic_consume, QoS, \ + Basic.Cancel from broker + """ + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestPublishAndConsumeAndQos_q' + uuid.uuid1().hex + exg_name = 'TestPublishAndConsumeAndQos_exg_' + uuid.uuid1().hex + routing_key = 'TestPublishAndConsumeAndQos' + + # Place channel in publisher-acknowledgments mode so that publishing + # with mandatory=True will be synchronous + res = ch.confirm_delivery() + self.assertIsNone(res) + + # Declare a new exchange + ch.exchange_declare(exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, exg_name) + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Bind the queue to the exchange using routing key + ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key) + + # Deposit a message in the queue via basic_publish + msg1_headers = dict( + test_name='TestPublishAndConsumeWithPubacksAndQosOfOne') + msg1_properties = pika.spec.BasicProperties(headers=msg1_headers) + res = ch.basic_publish(exg_name, routing_key=routing_key, + body='via-basic_publish', + properties=msg1_properties, + mandatory=True) + self.assertEqual(res, True) + + # Deposit another message in the queue via publish + ch.publish(exg_name, routing_key, body='via-publish', + mandatory=True) + + # Check that the queue now has two messages + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.message_count, 2) + + # Configure QoS for one message + ch.basic_qos(prefetch_size=0, prefetch_count=1, all_channels=False) + + # Create a consumer + rx_messages = [] + consumer_tag = ch.basic_consume( + lambda *args: rx_messages.append(args), + q_name, + no_ack=False, + exclusive=False, + arguments=None) + + # Wait for first message to arrive + while not rx_messages: + connection.process_data_events(time_limit=None) + + self.assertEqual(len(rx_messages), 1) + + # Check the first message + msg = rx_messages[0] + self.assertIsInstance(msg, tuple) + rx_ch, rx_method, rx_properties, rx_body = msg + self.assertIs(rx_ch, ch) + self.assertIsInstance(rx_method, pika.spec.Basic.Deliver) + self.assertEqual(rx_method.consumer_tag, consumer_tag) + self.assertEqual(rx_method.delivery_tag, 1) + self.assertFalse(rx_method.redelivered) + self.assertEqual(rx_method.exchange, exg_name) + self.assertEqual(rx_method.routing_key, routing_key) + + self.assertIsInstance(rx_properties, pika.BasicProperties) + self.assertEqual(rx_properties.headers, msg1_headers) + self.assertEqual(rx_body, as_bytes('via-basic_publish')) + + # There shouldn't be any more events now + self.assertFalse(ch._pending_events) + + # Ack the message so that the next one can arrive (we configured QoS + # with prefetch_count=1) + ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) + + # Get the second message + while len(rx_messages) < 2: + 
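+            # (time_limit=None blocks until at least one event has been
+            # dispatched, so this loop waits on I/O instead of busy-polling)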
connection.process_data_events(time_limit=None) + + self.assertEqual(len(rx_messages), 2) + + msg = rx_messages[1] + self.assertIsInstance(msg, tuple) + rx_ch, rx_method, rx_properties, rx_body = msg + self.assertIs(rx_ch, ch) + self.assertIsInstance(rx_method, pika.spec.Basic.Deliver) + self.assertEqual(rx_method.consumer_tag, consumer_tag) + self.assertEqual(rx_method.delivery_tag, 2) + self.assertFalse(rx_method.redelivered) + self.assertEqual(rx_method.exchange, exg_name) + self.assertEqual(rx_method.routing_key, routing_key) + + self.assertIsInstance(rx_properties, pika.BasicProperties) + self.assertEqual(rx_body, as_bytes('via-publish')) + + # There shouldn't be any more events now + self.assertFalse(ch._pending_events) + + ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) + + # Verify that the queue is now empty + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + # Attempt to consume again with a short timeout + connection.process_data_events(time_limit=0.005) + self.assertEqual(len(rx_messages), 2) + + # Delete the queue and wait for consumer cancellation + rx_cancellations = [] + ch.add_on_cancel_callback(rx_cancellations.append) + ch.queue_delete(q_name) + ch.start_consuming() + + self.assertEqual(len(rx_cancellations), 1) + frame, = rx_cancellations + self.assertEqual(frame.method.consumer_tag, consumer_tag) + + +class TestBasicConsumeWithAckFromAnotherThread(BlockingTestCaseBase): + + def test(self): # pylint: disable=R0914,R0915 + """BlockingChannel.basic_consume with ack from another thread and \ + requesting basic_ack via add_callback_threadsafe + """ + # This test simulates processing of a message on another thread and + # then requesting an ACK to be dispatched on the connection's thread + # via BlockingConnection.add_callback_threadsafe + + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestBasicConsumeWithAckFromAnotherThread_q' + uuid.uuid1().hex + exg_name = ('TestBasicConsumeWithAckFromAnotherThread_exg' + + uuid.uuid1().hex) + routing_key = 'TestBasicConsumeWithAckFromAnotherThread' + + # Place channel in publisher-acknowledgments mode so that publishing + # with mandatory=True will be synchronous (for convenience) + res = ch.confirm_delivery() + self.assertIsNone(res) + + # Declare a new exchange + ch.exchange_declare(exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, exg_name) + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Bind the queue to the exchange using routing key + ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key) + + # Publish 2 messages with mandatory=True for synchronous processing + ch.publish(exg_name, routing_key, body='msg1', mandatory=True) + ch.publish(exg_name, routing_key, body='last-msg', mandatory=True) + + # Configure QoS for one message so that the 2nd message will be + # delivered only after the 1st one is ACKed + ch.basic_qos(prefetch_size=0, prefetch_count=1, all_channels=False) + + # Create a consumer + rx_messages = [] + def ackAndEnqueueMessageViaAnotherThread(rx_ch, + rx_method, + rx_properties, # pylint: disable=W0613 + rx_body): + LOGGER.debug( + '%s: Got message body=%r; delivery-tag=%r', + datetime.now(), rx_body, rx_method.delivery_tag) + + # Request ACK dispatch via add_callback_threadsafe from other + # thread; if last message, cancel consumer so that start_consuming + # can return + + def 
processOnConnectionThread(): + LOGGER.debug('%s: ACKing message body=%r; delivery-tag=%r', + datetime.now(), + rx_body, + rx_method.delivery_tag) + ch.basic_ack(delivery_tag=rx_method.delivery_tag, + multiple=False) + rx_messages.append(rx_body) + + # NOTE on python3, `b'last-msg' != 'last-msg'` + if rx_body == b'last-msg': + LOGGER.debug('%s: Canceling consumer consumer-tag=%r', + datetime.now(), + rx_method.consumer_tag) + rx_ch.basic_cancel(rx_method.consumer_tag) + + # Spawn a thread to initiate ACKing + timer = threading.Timer(0, + lambda: connection.add_callback_threadsafe( + processOnConnectionThread)) + self.addCleanup(timer.cancel) + timer.start() + + consumer_tag = ch.basic_consume( + ackAndEnqueueMessageViaAnotherThread, + q_name, + no_ack=False, + exclusive=False, + arguments=None) + + # Wait for both messages + LOGGER.debug('%s: calling start_consuming(); consumer tag=%r', + datetime.now(), + consumer_tag) + ch.start_consuming() + LOGGER.debug('%s: Returned from start_consuming(); consumer tag=%r', + datetime.now(), + consumer_tag) + + self.assertEqual(len(rx_messages), 2) + self.assertEqual(rx_messages[0], b'msg1') + self.assertEqual(rx_messages[1], b'last-msg') + + +class TestConsumeGeneratorWithAckFromAnotherThread(BlockingTestCaseBase): + + def test(self): # pylint: disable=R0914,R0915 + """BlockingChannel.consume and requesting basic_ack from another \ + thread via add_callback_threadsafe + """ + connection = self._connect() + + ch = connection.channel() + + q_name = ('TestConsumeGeneratorWithAckFromAnotherThread_q' + + uuid.uuid1().hex) + exg_name = ('TestConsumeGeneratorWithAckFromAnotherThread_exg' + + uuid.uuid1().hex) + routing_key = 'TestConsumeGeneratorWithAckFromAnotherThread' + + # Place channel in publisher-acknowledgments mode so that publishing + # with mandatory=True will be synchronous (for convenience) + res = ch.confirm_delivery() + self.assertIsNone(res) + + # Declare a new exchange + ch.exchange_declare(exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, exg_name) + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Bind the queue to the exchange using routing key + ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key) + + # Publish 2 messages with mandatory=True for synchronous processing + ch.publish(exg_name, routing_key, body='msg1', mandatory=True) + ch.publish(exg_name, routing_key, body='last-msg', mandatory=True) + + # Configure QoS for one message so that the 2nd message will be + # delivered only after the 1st one is ACKed + ch.basic_qos(prefetch_size=0, prefetch_count=1, all_channels=False) + + # Create a consumer + rx_messages = [] + def ackAndEnqueueMessageViaAnotherThread(rx_ch, + rx_method, + rx_properties, # pylint: disable=W0613 + rx_body): + LOGGER.debug( + '%s: Got message body=%r; delivery-tag=%r', + datetime.now(), rx_body, rx_method.delivery_tag) + + # Request ACK dispatch via add_callback_threadsafe from other + # thread; if last message, cancel consumer so that consumer + # generator completes + + def processOnConnectionThread(): + LOGGER.debug('%s: ACKing message body=%r; delivery-tag=%r', + datetime.now(), + rx_body, + rx_method.delivery_tag) + ch.basic_ack(delivery_tag=rx_method.delivery_tag, + multiple=False) + rx_messages.append(rx_body) + + # NOTE on python3, `b'last-msg' != 'last-msg'` + if rx_body == b'last-msg': + LOGGER.debug('%s: Canceling consumer consumer-tag=%r', + 
datetime.now(), + rx_method.consumer_tag) + # NOTE Need to use cancel() for the consumer generator + # instead of basic_cancel() + rx_ch.cancel() + + # Spawn a thread to initiate ACKing + timer = threading.Timer(0, + lambda: connection.add_callback_threadsafe( + processOnConnectionThread)) + self.addCleanup(timer.cancel) + timer.start() + + for method, properties, body in ch.consume(q_name, no_ack=False): + ackAndEnqueueMessageViaAnotherThread(rx_ch=ch, + rx_method=method, + rx_properties=properties, + rx_body=body) + + self.assertEqual(len(rx_messages), 2) + self.assertEqual(rx_messages[0], b'msg1') + self.assertEqual(rx_messages[1], b'last-msg') + + +class TestTwoBasicConsumersOnSameChannel(BlockingTestCaseBase): + + def test(self): # pylint: disable=R0914 + """BlockingChannel: two basic_consume consumers on same channel + """ + connection = self._connect() + + ch = connection.channel() + + exg_name = 'TestPublishAndConsumeAndQos_exg_' + uuid.uuid1().hex + q1_name = 'TestTwoBasicConsumersOnSameChannel_q1' + uuid.uuid1().hex + q2_name = 'TestTwoBasicConsumersOnSameChannel_q2' + uuid.uuid1().hex + q1_routing_key = 'TestTwoBasicConsumersOnSameChannel1' + q2_routing_key = 'TestTwoBasicConsumersOnSameChannel2' + + # Place channel in publisher-acknowledgments mode so that publishing + # with mandatory=True will be synchronous + ch.confirm_delivery() + + # Declare a new exchange + ch.exchange_declare(exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, exg_name) + + # Declare the two new queues and bind them to the exchange + ch.queue_declare(q1_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q1_name) + ch.queue_bind(q1_name, exchange=exg_name, routing_key=q1_routing_key) + + ch.queue_declare(q2_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q2_name) + ch.queue_bind(q2_name, exchange=exg_name, routing_key=q2_routing_key) + + # Deposit messages in the queues + q1_tx_message_bodies = ['q1_message+%s' % (i,) + for i in pika.compat.xrange(100)] + for message_body in q1_tx_message_bodies: + ch.publish(exg_name, q1_routing_key, body=message_body, + mandatory=True) + + q2_tx_message_bodies = ['q2_message+%s' % (i,) + for i in pika.compat.xrange(150)] + for message_body in q2_tx_message_bodies: + ch.publish(exg_name, q2_routing_key, body=message_body, + mandatory=True) + + # Create the consumers + q1_rx_messages = [] + q1_consumer_tag = ch.basic_consume( + lambda *args: q1_rx_messages.append(args), + q1_name, + no_ack=False, + exclusive=False, + arguments=None) + + q2_rx_messages = [] + q2_consumer_tag = ch.basic_consume( + lambda *args: q2_rx_messages.append(args), + q2_name, + no_ack=False, + exclusive=False, + arguments=None) + + # Wait for all messages to be delivered + while (len(q1_rx_messages) < len(q1_tx_message_bodies) or + len(q2_rx_messages) < len(q2_tx_message_bodies)): + connection.process_data_events(time_limit=None) + + self.assertEqual(len(q2_rx_messages), len(q2_tx_message_bodies)) + + # Verify the messages + def validate_messages(rx_messages, + routing_key, + consumer_tag, + tx_message_bodies): + self.assertEqual(len(rx_messages), len(tx_message_bodies)) + + for msg, expected_body in zip(rx_messages, tx_message_bodies): + self.assertIsInstance(msg, tuple) + rx_ch, rx_method, rx_properties, rx_body = msg + self.assertIs(rx_ch, ch) + self.assertIsInstance(rx_method, pika.spec.Basic.Deliver) + self.assertEqual(rx_method.consumer_tag, consumer_tag) + 
self.assertFalse(rx_method.redelivered) + self.assertEqual(rx_method.exchange, exg_name) + self.assertEqual(rx_method.routing_key, routing_key) + + self.assertIsInstance(rx_properties, pika.BasicProperties) + self.assertEqual(rx_body, as_bytes(expected_body)) + + # Validate q1 consumed messages + validate_messages(rx_messages=q1_rx_messages, + routing_key=q1_routing_key, + consumer_tag=q1_consumer_tag, + tx_message_bodies=q1_tx_message_bodies) + + # Validate q2 consumed messages + validate_messages(rx_messages=q2_rx_messages, + routing_key=q2_routing_key, + consumer_tag=q2_consumer_tag, + tx_message_bodies=q2_tx_message_bodies) + + # There shouldn't be any more events now + self.assertFalse(ch._pending_events) + + +class TestBasicCancelPurgesPendingConsumerCancellationEvt(BlockingTestCaseBase): + + def test(self): + """BlockingChannel.basic_cancel purges pending _ConsumerCancellationEvt""" # pylint: disable=C0301 + connection = self._connect() + + ch = connection.channel() + + q_name = ('TestBasicCancelPurgesPendingConsumerCancellationEvt_q' + + uuid.uuid1().hex) + + ch.queue_declare(q_name) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + ch.publish('', routing_key=q_name, body='via-publish', mandatory=True) + + # Create a consumer + rx_messages = [] + consumer_tag = ch.basic_consume( + lambda *args: rx_messages.append(args), + q_name, + no_ack=False, + exclusive=False, + arguments=None) + + # Wait for the published message to arrive, but don't consume it + while not ch._pending_events: + # Issue synchronous command that forces processing of incoming I/O + connection.channel().close() + + self.assertEqual(len(ch._pending_events), 1) + self.assertIsInstance(ch._pending_events[0], + blocking_connection._ConsumerDeliveryEvt) + + # Delete the queue and wait for broker-initiated consumer cancellation + ch.queue_delete(q_name) + while len(ch._pending_events) < 2: + # Issue synchronous command that forces processing of incoming I/O + connection.channel().close() + + self.assertEqual(len(ch._pending_events), 2) + self.assertIsInstance(ch._pending_events[1], + blocking_connection._ConsumerCancellationEvt) + + # Issue consumer cancellation and verify that the pending + # _ConsumerCancellationEvt instance was removed + messages = ch.basic_cancel(consumer_tag) + self.assertEqual(messages, []) + + self.assertEqual(len(ch._pending_events), 0) + + +class TestBasicPublishWithoutPubacks(BlockingTestCaseBase): + + def test(self): # pylint: disable=R0914,R0915 + """BlockingChannel.basic_publish without pubacks""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestBasicPublishWithoutPubacks_q' + uuid.uuid1().hex + exg_name = 'TestBasicPublishWithoutPubacks_exg_' + uuid.uuid1().hex + routing_key = 'TestBasicPublishWithoutPubacks' + + # Declare a new exchange + ch.exchange_declare(exg_name, exchange_type='direct') + self.addCleanup(connection.channel().exchange_delete, exg_name) + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Bind the queue to the exchange using routing key + ch.queue_bind(q_name, exchange=exg_name, routing_key=routing_key) + + # Deposit a message in the queue via basic_publish and mandatory=True + msg1_headers = dict( + test_name='TestBasicPublishWithoutPubacks') + msg1_properties = pika.spec.BasicProperties(headers=msg1_headers) + res = ch.basic_publish(exg_name, routing_key=routing_key, + body='via-basic_publish_mandatory=True', + 
+                               properties=msg1_properties,
+                               mandatory=True)
+        self.assertEqual(res, True)
+
+        # Deposit a message in the queue via basic_publish and mandatory=False
+        res = ch.basic_publish(exg_name, routing_key=routing_key,
+                               body='via-basic_publish_mandatory=False',
+                               mandatory=False)
+        self.assertEqual(res, True)
+
+        # Wait for the messages to arrive in queue
+        self._assert_exact_message_count_with_retries(channel=ch,
+                                                      queue=q_name,
+                                                      expected_count=2)
+
+        # Create a consumer
+        rx_messages = []
+        consumer_tag = ch.basic_consume(
+            lambda *args: rx_messages.append(args),
+            q_name,
+            no_ack=False,
+            exclusive=False,
+            arguments=None)
+
+        # Wait for first message to arrive
+        while not rx_messages:
+            connection.process_data_events(time_limit=None)
+
+        self.assertGreaterEqual(len(rx_messages), 1)
+
+        # Check the first message
+        msg = rx_messages[0]
+        self.assertIsInstance(msg, tuple)
+        rx_ch, rx_method, rx_properties, rx_body = msg
+        self.assertIs(rx_ch, ch)
+        self.assertIsInstance(rx_method, pika.spec.Basic.Deliver)
+        self.assertEqual(rx_method.consumer_tag, consumer_tag)
+        self.assertEqual(rx_method.delivery_tag, 1)
+        self.assertFalse(rx_method.redelivered)
+        self.assertEqual(rx_method.exchange, exg_name)
+        self.assertEqual(rx_method.routing_key, routing_key)
+
+        self.assertIsInstance(rx_properties, pika.BasicProperties)
+        self.assertEqual(rx_properties.headers, msg1_headers)
+        self.assertEqual(rx_body, as_bytes('via-basic_publish_mandatory=True'))
+
+        # There shouldn't be any more events now
+        self.assertFalse(ch._pending_events)
+
+        # Ack the first message (note: unlike the QoS test above, this test
+        # does not configure prefetch, so the ack is not needed to unblock
+        # delivery of the second message)
+        ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False)
+
+        # Get the second message
+        while len(rx_messages) < 2:
+            connection.process_data_events(time_limit=None)
+
+        self.assertEqual(len(rx_messages), 2)
+
+        msg = rx_messages[1]
+        self.assertIsInstance(msg, tuple)
+        rx_ch, rx_method, rx_properties, rx_body = msg
+        self.assertIs(rx_ch, ch)
+        self.assertIsInstance(rx_method, pika.spec.Basic.Deliver)
+        self.assertEqual(rx_method.consumer_tag, consumer_tag)
+        self.assertEqual(rx_method.delivery_tag, 2)
+        self.assertFalse(rx_method.redelivered)
+        self.assertEqual(rx_method.exchange, exg_name)
+        self.assertEqual(rx_method.routing_key, routing_key)
+
+        self.assertIsInstance(rx_properties, pika.BasicProperties)
+        self.assertEqual(rx_body, as_bytes('via-basic_publish_mandatory=False'))
+
+        # There shouldn't be any more events now
+        self.assertFalse(ch._pending_events)
+
+        ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False)
+
+        # Verify that the queue is now empty
+        self._assert_exact_message_count_with_retries(channel=ch,
+                                                      queue=q_name,
+                                                      expected_count=0)
+
+        # Attempt to consume again with a short timeout
+        connection.process_data_events(time_limit=0.005)
+        self.assertEqual(len(rx_messages), 2)
+
+
+class TestPublishFromBasicConsumeCallback(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel.basic_publish from basic_consume callback
+        """
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        src_q_name = (
+            'TestPublishFromBasicConsumeCallback_src_q' + uuid.uuid1().hex)
+        dest_q_name = (
+            'TestPublishFromBasicConsumeCallback_dest_q' + uuid.uuid1().hex)
+
+        # Place channel in publisher-acknowledgments mode so that publishing
+        # with mandatory=True will be synchronous
+        ch.confirm_delivery()
+
+        # Declare source and destination queues
+        ch.queue_declare(src_q_name, auto_delete=True)
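+        # (The cleanups below intentionally use channels from fresh
+        # connections, since this channel may already be closed by the time
+        # cleanup runs -- an assumption based on the pattern used throughout
+        # this file.)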
+        self.addCleanup(self._connect().channel().queue_delete, src_q_name)
+        ch.queue_declare(dest_q_name, auto_delete=True)
+        self.addCleanup(self._connect().channel().queue_delete, dest_q_name)
+
+        # Deposit a message in the source queue
+        ch.publish('',
+                   routing_key=src_q_name,
+                   body='via-publish',
+                   mandatory=True)
+
+        # Create a consumer
+        def on_consume(channel, method, props, body):
+            channel.publish(
+                '', routing_key=dest_q_name, body=body,
+                properties=props, mandatory=True)
+            channel.basic_ack(method.delivery_tag)
+
+        ch.basic_consume(on_consume,
+                         src_q_name,
+                         no_ack=False,
+                         exclusive=False,
+                         arguments=None)
+
+        # Consume from destination queue
+        for _, _, rx_body in ch.consume(dest_q_name, no_ack=True):
+            self.assertEqual(rx_body, as_bytes('via-publish'))
+            break
+        else:
+            self.fail('failed to consume a message from destination q')
+
+
+class TestStopConsumingFromBasicConsumeCallback(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel.stop_consuming from basic_consume callback
+        """
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = (
+            'TestStopConsumingFromBasicConsumeCallback_q' + uuid.uuid1().hex)
+
+        # Place channel in publisher-acknowledgments mode so that publishing
+        # with mandatory=True will be synchronous
+        ch.confirm_delivery()
+
+        # Declare the queue
+        ch.queue_declare(q_name, auto_delete=False)
+        self.addCleanup(connection.channel().queue_delete, q_name)
+
+        # Deposit two messages in the queue
+        ch.publish('',
+                   routing_key=q_name,
+                   body='via-publish1',
+                   mandatory=True)
+
+        ch.publish('',
+                   routing_key=q_name,
+                   body='via-publish2',
+                   mandatory=True)
+
+        # Create a consumer
+        def on_consume(channel, method, props, body):  # pylint: disable=W0613
+            channel.stop_consuming()
+            channel.basic_ack(method.delivery_tag)
+
+        ch.basic_consume(on_consume,
+                         q_name,
+                         no_ack=False,
+                         exclusive=False,
+                         arguments=None)
+
+        ch.start_consuming()
+
+        ch.close()
+
+        ch = connection.channel()
+
+        # Verify that only the second message is present in the queue
+        _, _, rx_body = ch.basic_get(q_name)
+        self.assertEqual(rx_body, as_bytes('via-publish2'))
+
+        msg = ch.basic_get(q_name)
+        self.assertTupleEqual(msg, (None, None, None))
+
+
+class TestCloseChannelFromBasicConsumeCallback(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel.close from basic_consume callback
+        """
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = (
+            'TestCloseChannelFromBasicConsumeCallback_q' + uuid.uuid1().hex)
+
+        # Place channel in publisher-acknowledgments mode so that publishing
+        # with mandatory=True will be synchronous
+        ch.confirm_delivery()
+
+        # Declare the queue
+        ch.queue_declare(q_name, auto_delete=False)
+        self.addCleanup(connection.channel().queue_delete, q_name)
+
+        # Deposit two messages in the queue
+        ch.publish('',
+                   routing_key=q_name,
+                   body='via-publish1',
+                   mandatory=True)
+
+        ch.publish('',
+                   routing_key=q_name,
+                   body='via-publish2',
+                   mandatory=True)
+
+        # Create a consumer
+        def on_consume(channel, method, props, body):  # pylint: disable=W0613
+            channel.close()
+
+        ch.basic_consume(on_consume,
+                         q_name,
+                         no_ack=False,
+                         exclusive=False,
+                         arguments=None)
+
+        ch.start_consuming()
+
+        self.assertTrue(ch.is_closed)
+
+        # Verify that both messages are present in the queue
+        ch = connection.channel()
+        _, _, rx_body = ch.basic_get(q_name)
+        self.assertEqual(rx_body, as_bytes('via-publish1'))
+        _, _, rx_body = ch.basic_get(q_name)
+        self.assertEqual(rx_body, as_bytes('via-publish2'))
+
+
+class 
TestCloseConnectionFromBasicConsumeCallback(BlockingTestCaseBase): + + def test(self): + """BlockingConnection.close from basic_consume callback + """ + connection = self._connect() + + ch = connection.channel() + + q_name = ( + 'TestCloseConnectionFromBasicConsumeCallback_q' + uuid.uuid1().hex) + + # Place channel in publisher-acknowledgments mode so that publishing + # with mandatory=True will be synchronous + ch.confirm_delivery() + + # Declare the queue + ch.queue_declare(q_name, auto_delete=False) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Deposit two messages in the queue + ch.publish('', + routing_key=q_name, + body='via-publish1', + mandatory=True) + + ch.publish('', + routing_key=q_name, + body='via-publish2', + mandatory=True) + + # Create a consumer + def on_consume(channel, method, props, body): # pylint: disable=W0613 + connection.close() + + ch.basic_consume(on_consume, + q_name, + no_ack=False, + exclusive=False, + arguments=None) + + ch.start_consuming() + + self.assertTrue(ch.is_closed) + self.assertTrue(connection.is_closed) + + + # Verify that both messages are present in the queue + ch = self._connect().channel() + _, _, rx_body = ch.basic_get(q_name) + self.assertEqual(rx_body, as_bytes('via-publish1')) + _, _, rx_body = ch.basic_get(q_name) + self.assertEqual(rx_body, as_bytes('via-publish2')) + + +class TestNonPubAckPublishAndConsumeHugeMessage(BlockingTestCaseBase): + + def test(self): + """BlockingChannel.publish/consume huge message""" + connection = self._connect() + + ch = connection.channel() + + q_name = 'TestPublishAndConsumeHugeMessage_q' + uuid.uuid1().hex + body = 'a' * 1000000 + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=False) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Publish a message to the queue by way of default exchange + ch.publish(exchange='', routing_key=q_name, body=body) + LOGGER.info('Published message body size=%s', len(body)) + + # Consume the message + for rx_method, rx_props, rx_body in ch.consume(q_name, no_ack=False, + exclusive=False, + arguments=None): + self.assertIsInstance(rx_method, pika.spec.Basic.Deliver) + self.assertEqual(rx_method.delivery_tag, 1) + self.assertFalse(rx_method.redelivered) + self.assertEqual(rx_method.exchange, '') + self.assertEqual(rx_method.routing_key, q_name) + + self.assertIsInstance(rx_props, pika.BasicProperties) + self.assertEqual(rx_body, as_bytes(body)) + + # Ack the message + ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) + + break + + # There shouldn't be any more events now + self.assertFalse(ch._queue_consumer_generator.pending_events) + + # Verify that the queue is now empty + ch.close() + ch = connection.channel() + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + +class TestNonPubackPublishAndConsumeManyMessages(BlockingTestCaseBase): + + def test(self): + """BlockingChannel non-pub-ack publish/consume many messages""" + connection = self._connect() + + ch = connection.channel() + + q_name = ('TestNonPubackPublishAndConsumeManyMessages_q' + + uuid.uuid1().hex) + body = 'b' * 1024 + + num_messages_to_publish = 500 + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=False) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + for _ in pika.compat.xrange(num_messages_to_publish): + # Publish a message to the queue by way of default exchange + ch.publish(exchange='', routing_key=q_name, body=body) + + # Consume the messages 
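+        # (The consume() generator below yields a (method, properties, body)
+        # tuple per delivery; each message is acked explicitly and the loop
+        # exits once all published messages have been seen.)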
+ num_consumed = 0 + for rx_method, rx_props, rx_body in ch.consume(q_name, + no_ack=False, + exclusive=False, + arguments=None): + num_consumed += 1 + self.assertIsInstance(rx_method, pika.spec.Basic.Deliver) + self.assertEqual(rx_method.delivery_tag, num_consumed) + self.assertFalse(rx_method.redelivered) + self.assertEqual(rx_method.exchange, '') + self.assertEqual(rx_method.routing_key, q_name) + + self.assertIsInstance(rx_props, pika.BasicProperties) + self.assertEqual(rx_body, as_bytes(body)) + + # Ack the message + ch.basic_ack(delivery_tag=rx_method.delivery_tag, multiple=False) + + if num_consumed >= num_messages_to_publish: + break + + # There shouldn't be any more events now + self.assertFalse(ch._queue_consumer_generator.pending_events) + + ch.close() + + self.assertIsNone(ch._queue_consumer_generator) + + # Verify that the queue is now empty + ch = connection.channel() + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + +class TestBasicCancelWithNonAckableConsumer(BlockingTestCaseBase): + + def test(self): + """BlockingChannel user cancels non-ackable consumer via basic_cancel""" + connection = self._connect() + + ch = connection.channel() + + q_name = ( + 'TestBasicCancelWithNonAckableConsumer_q' + uuid.uuid1().hex) + + body1 = 'a' * 1024 + body2 = 'b' * 2048 + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=False) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Publish two messages to the queue by way of default exchange + ch.publish(exchange='', routing_key=q_name, body=body1) + ch.publish(exchange='', routing_key=q_name, body=body2) + + # Wait for queue to contain both messages + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) + + # Create a non-ackable consumer + consumer_tag = ch.basic_consume(lambda *x: None, q_name, no_ack=True, + exclusive=False, arguments=None) + + # Wait for all messages to be sent by broker to client + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=0) + + # Cancel the consumer + messages = ch.basic_cancel(consumer_tag) + + # Both messages should have been on their way when we cancelled + self.assertEqual(len(messages), 2) + + _, _, rx_body1 = messages[0] + self.assertEqual(rx_body1, as_bytes(body1)) + + _, _, rx_body2 = messages[1] + self.assertEqual(rx_body2, as_bytes(body2)) + + ch.close() + + ch = connection.channel() + + # Verify that the queue is now empty + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.message_count, 0) + + +class TestBasicCancelWithAckableConsumer(BlockingTestCaseBase): + + def test(self): + """BlockingChannel user cancels ackable consumer via basic_cancel""" + connection = self._connect() + + ch = connection.channel() + + q_name = ( + 'TestBasicCancelWithAckableConsumer_q' + uuid.uuid1().hex) + + body1 = 'a' * 1024 + body2 = 'b' * 2048 + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=False) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Publish two messages to the queue by way of default exchange + ch.publish(exchange='', routing_key=q_name, body=body1) + ch.publish(exchange='', routing_key=q_name, body=body2) + + # Wait for queue to contain both messages + self._assert_exact_message_count_with_retries(channel=ch, + queue=q_name, + expected_count=2) + + # Create an ackable consumer + consumer_tag = ch.basic_consume(lambda *x: None, q_name, no_ack=False, + exclusive=False, 
+                                        arguments=None)
+
+        # Wait for all messages to be sent by broker to client
+        self._assert_exact_message_count_with_retries(channel=ch,
+                                                      queue=q_name,
+                                                      expected_count=0)
+
+        # Cancel the consumer
+        messages = ch.basic_cancel(consumer_tag)
+
+        # basic_cancel on an ackable consumer returns no messages; the broker
+        # requeues the unacked deliveries instead
+        self.assertEqual(len(messages), 0)
+
+        ch.close()
+
+        ch = connection.channel()
+
+        # Verify that canceling the ackable consumer restored both messages
+        self._assert_exact_message_count_with_retries(channel=ch,
+                                                      queue=q_name,
+                                                      expected_count=2)
+
+
+class TestUnackedMessageAutoRestoredToQueueOnChannelClose(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel unacked message restored to q on channel close"""
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = ('TestUnackedMessageAutoRestoredToQueueOnChannelClose_q' +
+                  uuid.uuid1().hex)
+
+        body1 = 'a' * 1024
+        body2 = 'b' * 2048
+
+        # Declare a new queue
+        ch.queue_declare(q_name, auto_delete=False)
+        self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+        # Publish two messages to the queue by way of default exchange
+        ch.publish(exchange='', routing_key=q_name, body=body1)
+        ch.publish(exchange='', routing_key=q_name, body=body2)
+
+        # Consume the events, but don't ack
+        rx_messages = []
+        ch.basic_consume(lambda *args: rx_messages.append(args),
+                         q_name,
+                         no_ack=False,
+                         exclusive=False,
+                         arguments=None)
+        while len(rx_messages) != 2:
+            connection.process_data_events(time_limit=None)
+
+        self.assertEqual(rx_messages[0][1].delivery_tag, 1)
+        self.assertEqual(rx_messages[1][1].delivery_tag, 2)
+
+        # Verify no more ready messages in queue
+        frame = ch.queue_declare(q_name, passive=True)
+        self.assertEqual(frame.method.message_count, 0)
+
+        # Closing channel should restore messages back to queue
+        ch.close()
+
+        # Verify that there are two messages in q now
+        ch = connection.channel()
+
+        self._assert_exact_message_count_with_retries(channel=ch,
+                                                      queue=q_name,
+                                                      expected_count=2)
+
+
+class TestNoAckMessageNotRestoredToQueueOnChannelClose(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel no-ack message not restored to q on channel close"""
+        connection = self._connect()
+
+        ch = connection.channel()
+
+        q_name = ('TestNoAckMessageNotRestoredToQueueOnChannelClose_q' +
+                  uuid.uuid1().hex)
+
+        body1 = 'a' * 1024
+        body2 = 'b' * 2048
+
+        # Declare a new queue
+        ch.queue_declare(q_name, auto_delete=False)
+        self.addCleanup(self._connect().channel().queue_delete, q_name)
+
+        # Publish two messages to the queue by way of default exchange
+        ch.publish(exchange='', routing_key=q_name, body=body1)
+        ch.publish(exchange='', routing_key=q_name, body=body2)
+
+        # Consume, but don't ack
+        num_messages = 0
+        for rx_method, _, _ in ch.consume(q_name, no_ack=True, exclusive=False):
+            num_messages += 1
+
+            self.assertEqual(rx_method.delivery_tag, num_messages)
+
+            if num_messages == 2:
+                break
+        else:
+            self.fail('expected 2 messages, but consumed %i' % (num_messages,))
+
+        # Verify no more ready messages in queue
+        frame = ch.queue_declare(q_name, passive=True)
+        self.assertEqual(frame.method.message_count, 0)
+
+        # Closing channel should not restore no-ack messages back to queue
+        ch.close()
+
+        # Verify that there are no messages in q now
+        ch = connection.channel()
+        frame = ch.queue_declare(q_name, passive=True)
+        self.assertEqual(frame.method.message_count, 0)
+
+
+class TestConsumeInactivityTimeout(BlockingTestCaseBase):
+
+    def test(self):
+        """BlockingChannel consume returns 3-tuple 
on inactivity timeout """ + connection = self._connect() + + ch = connection.channel() + + q_name = ('TestConsumeInactivityTimeout_q' + + uuid.uuid1().hex) + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=True) + + # Consume, but don't ack + for msg in ch.consume(q_name, inactivity_timeout=0.1): + a, b, c = msg + self.assertIsNone(a) + self.assertIsNone(b) + self.assertIsNone(c) + break + else: + self.fail('expected (None, None, None), but got %s' % msg) + + ch.close() + + +class TestChannelFlow(BlockingTestCaseBase): + + def test(self): + """BlockingChannel Channel.Flow activate and deactivate """ + connection = self._connect() + + ch = connection.channel() + + q_name = ('TestChannelFlow_q' + uuid.uuid1().hex) + + # Declare a new queue + ch.queue_declare(q_name, auto_delete=False) + self.addCleanup(self._connect().channel().queue_delete, q_name) + + # Verify zero active consumers on the queue + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.consumer_count, 0) + + # Create consumer + ch.basic_consume(lambda *args: None, q_name) + + # Verify one active consumer on the queue now + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.consumer_count, 1) + + # Activate flow from default state (active by default) + active = ch.flow(True) + self.assertEqual(active, True) + + # Verify still one active consumer on the queue now + frame = ch.queue_declare(q_name, passive=True) + self.assertEqual(frame.method.consumer_count, 1) + + # active=False is not supported by RabbitMQ per + # https://www.rabbitmq.com/specification.html: + # "active=false is not supported by the server. Limiting prefetch with + # basic.qos provides much better control" +## # Deactivate flow +## active = ch.flow(False) +## self.assertEqual(active, False) +## +## # Verify zero active consumers on the queue now +## frame = ch.queue_declare(q_name, passive=True) +## self.assertEqual(frame.method.consumer_count, 0) +## +## # Re-activate flow +## active = ch.flow(True) +## self.assertEqual(active, True) +## +## # Verify one active consumers on the queue once again +## frame = ch.queue_declare(q_name, passive=True) +## self.assertEqual(frame.method.consumer_count, 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/enforce_one_basicget_test.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/enforce_one_basicget_test.py new file mode 100644 index 000000000..8a65ac3d7 --- /dev/null +++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/enforce_one_basicget_test.py @@ -0,0 +1,29 @@ +import unittest + +from mock import MagicMock +from pika.frame import Method, Header +from pika.exceptions import DuplicateGetOkCallback +from pika.channel import Channel +from pika.connection import Connection + + +class OnlyOneBasicGetTestCase(unittest.TestCase): + def setUp(self): + self.channel = Channel(MagicMock(Connection)(), 0, None) + self.channel._state = Channel.OPEN + self.callback = MagicMock() + + def test_two_basic_get_with_callback(self): + self.channel.basic_get(self.callback) + self.channel._on_getok(MagicMock(Method)(), MagicMock(Header)(), '') + self.channel.basic_get(self.callback) + self.channel._on_getok(MagicMock(Method)(), MagicMock(Header)(), '') + self.assertEqual(self.callback.call_count, 2) + + def test_two_basic_get_without_callback(self): + self.channel.basic_get(self.callback) + with self.assertRaises(DuplicateGetOkCallback): + self.channel.basic_get(self.callback) + +if __name__ == 
+'__main__':
+    unittest.main()
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/forward_server.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/forward_server.py
new file mode 100644
index 000000000..a2ea48261
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/forward_server.py
@@ -0,0 +1,526 @@
+"""TCP/IP forwarding/echo service for testing."""
+
+from __future__ import print_function
+
+import array
+from datetime import datetime
+import errno
+from functools import partial
+import logging
+import multiprocessing
+import os
+import signal  # needed for os.kill in ForwardServer.stop()
+import socket
+import struct
+import sys
+import threading
+import traceback
+
+import pika.compat
+
+if pika.compat.PY3:
+
+    def buffer(object, offset, size):  # pylint: disable=W0622
+        """array etc. have the buffer protocol"""
+        return object[offset:offset + size]
+
+
+try:
+    import SocketServer
+except ImportError:
+    import socketserver as SocketServer  # pylint: disable=F0401
+
+
+def _trace(fmt, *args):
+    """Format and output the text to stderr"""
+    print((fmt % args) + "\n", end="", file=sys.stderr)
+
+
+class ForwardServer(object):  # pylint: disable=R0902
+    """ Implement a TCP/IP forwarding/echo service for testing. Listens for
+    an incoming TCP/IP connection, accepts it, then connects to the given
+    remote address and forwards data back and forth between the two
+    endpoints.
+
+    This is similar to a subset of `netcat` functionality, but without a
+    dependency on any specific flavor of netcat.
+
+    Connection forwarding example: forward a local connection to the default
+    rabbitmq addr, connect to rabbit via the forwarder, then disconnect the
+    forwarder and attempt another pika operation to see what happens
+
+        with ForwardServer(("localhost", 5672)) as fwd:
+            params = pika.ConnectionParameters(
+                host=fwd.server_address[0],
+                port=fwd.server_address[1])
+            conn = pika.BlockingConnection(params)
+
+        # Once outside the context, the forwarder is disconnected
+
+        # Let's see what happens in pika with a disconnected server
+        channel = conn.channel()
+
+    Echo server example:
+
+        def produce(sock):
+            sock.sendall("12345")
+            sock.shutdown(socket.SHUT_WR)
+
+        with ForwardServer(None) as fwd:
+            sock = socket.socket()
+            sock.connect(fwd.server_address)
+
+            worker = threading.Thread(target=produce,
+                                      args=[sock])
+            worker.start()
+
+            data = sock.makefile().read()
+            assert data == "12345", data
+
+            worker.join()
+
+    """
+    # Amount of time, in seconds, we're willing to wait for the subprocess
+    _SUBPROC_TIMEOUT = 10
+
+    def __init__(
+            self,  # pylint: disable=R0913
+            remote_addr,
+            remote_addr_family=socket.AF_INET,
+            remote_socket_type=socket.SOCK_STREAM,
+            server_addr=("127.0.0.1", 0),
+            server_addr_family=socket.AF_INET,
+            server_socket_type=socket.SOCK_STREAM,
+            local_linger_args=None):
+        """
+        :param tuple remote_addr: remote server's IP address, whose structure
+            depends on remote_addr_family; pair (host-or-ip-addr, port-number).
+            Pass None to have ForwardServer behave as echo server.
+        :param remote_addr_family: socket.AF_INET (the default),
+            socket.AF_INET6 or socket.AF_UNIX.
+        :param remote_socket_type: only socket.SOCK_STREAM is supported at
+            this time
+        :param server_addr: optional address for binding this server's
+            listening socket; the format depends on server_addr_family;
+            defaults to ("127.0.0.1", 0)
+        :param server_addr_family: Address family for this server's listening
+            socket; socket.AF_INET (the default), socket.AF_INET6 or
+            socket.AF_UNIX; defaults to socket.AF_INET
+        :param server_socket_type: only socket.SOCK_STREAM is supported at
+            this time
+        :param tuple local_linger_args: SO_LINGER socket option override for
+            the local connection sockets, to be configured after connection is
+            accepted. None for default, which is to not change the SO_LINGER
+            option. Otherwise, it's a two-tuple, where the first element is
+            the `l_onoff` switch, and the second element is the `l_linger`
+            value, in seconds
+        """
+        self._logger = logging.getLogger(__name__)
+
+        self._remote_addr = remote_addr
+        self._remote_addr_family = remote_addr_family
+        assert remote_socket_type == socket.SOCK_STREAM, remote_socket_type
+        self._remote_socket_type = remote_socket_type
+
+        assert server_addr is not None
+        self._server_addr = server_addr
+
+        assert server_addr_family is not None
+        self._server_addr_family = server_addr_family
+
+        assert server_socket_type == socket.SOCK_STREAM, server_socket_type
+        self._server_socket_type = server_socket_type
+
+        self._local_linger_args = local_linger_args
+
+        self._subproc = None
+
+    @property
+    def running(self):
+        """Property: True if ForwardServer is active"""
+        return self._subproc is not None
+
+    @property
+    def server_address_family(self):
+        """Property: Get listening socket's address family
+
+        NOTE: undefined before server starts and after it shuts down
+        """
+        assert self._server_addr_family is not None, "Not in context"
+
+        return self._server_addr_family
+
+    @property
+    def server_address(self):
+        """ Property: Get listening socket's address; the returned value
+        depends on the listening socket's address family
+
+        NOTE: undefined before server starts and after it shuts down
+        """
+        assert self._server_addr is not None, "Not in context"
+
+        return self._server_addr
+
+    def __enter__(self):
+        """ Context manager entry. Starts the forwarding server
+
+        :returns: self
+        """
+        return self.start()
+
+    def __exit__(self, *args):
+        """ Context manager exit; stops the forwarding server
+        """
+        self.stop()
+
+    def start(self):
+        """ Start the server
+
+        NOTE: The context manager is the recommended way to use
+        ForwardServer. start()/stop() are alternatives to the context manager
+        use case and are mutually exclusive with it.
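+
+        Example (a minimal sketch; the broker address is a placeholder and
+        must point at a reachable server):
+
+            fwd = ForwardServer(("localhost", 5672)).start()
+            try:
+                pass  # connect through fwd.server_address here
+            finally:
+                fwd.stop()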
+
+        :returns: self
+        """
+        queue = multiprocessing.Queue()
+
+        self._subproc = multiprocessing.Process(
+            target=_run_server,
+            kwargs=dict(
+                local_addr=self._server_addr,
+                local_addr_family=self._server_addr_family,
+                local_socket_type=self._server_socket_type,
+                local_linger_args=self._local_linger_args,
+                remote_addr=self._remote_addr,
+                remote_addr_family=self._remote_addr_family,
+                remote_socket_type=self._remote_socket_type,
+                queue=queue))
+        self._subproc.daemon = True
+        self._subproc.start()
+
+        try:
+            # Get server socket info from subprocess
+            self._server_addr_family, self._server_addr = queue.get(
+                block=True, timeout=self._SUBPROC_TIMEOUT)
+            queue.close()
+        except Exception:  # pylint: disable=W0703
+            try:
+                self._logger.exception(
+                    "Failed while waiting for local socket info")
+                # Preserve primary exception and traceback
+                raise
+            finally:
+                # Clean up
+                try:
+                    self.stop()
+                except Exception:  # pylint: disable=W0703
+                    # Suppress secondary exception in favor of the primary
+                    self._logger.exception(
+                        "Emergency subprocess shutdown failed")
+
+        return self
+
+    def stop(self):
+        """Stop the server
+
+        NOTE: The context manager is the recommended way to use
+        ForwardServer. start()/stop() are alternatives to the context manager
+        use case and are mutually exclusive with it.
+        """
+        self._logger.info("ForwardServer STOPPING")
+
+        try:
+            self._subproc.terminate()
+            self._subproc.join(timeout=self._SUBPROC_TIMEOUT)
+            if self._subproc.is_alive():
+                self._logger.error(
+                    "ForwardServer failed to terminate, killing it")
+                # os.kill requires an explicit signal argument
+                os.kill(self._subproc.pid, signal.SIGKILL)
+                self._subproc.join(timeout=self._SUBPROC_TIMEOUT)
+                assert not self._subproc.is_alive(), self._subproc
+
+            # Log subprocess's exit code; NOTE: a negative exit code (usually
+            # -15) is normal on POSIX systems - it means the subprocess was
+            # terminated by the corresponding signal (SIGTERM)
+            exit_code = self._subproc.exitcode
+            self._logger.info("ForwardServer terminated with exitcode=%s",
+                              exit_code)
+        finally:
+            self._subproc = None
+
+
+def _run_server(
+        local_addr,
+        local_addr_family,
+        local_socket_type,  # pylint: disable=R0913
+        local_linger_args,
+        remote_addr,
+        remote_addr_family,
+        remote_socket_type,
+        queue):
+    """ Run the server; executed in the subprocess
+
+    :param local_addr: listening address
+    :param local_addr_family: listening address family; one of socket.AF_*
+    :param local_socket_type: listening socket type; typically
+        socket.SOCK_STREAM
+    :param tuple local_linger_args: SO_LINGER socket option override for the
+        local connection sockets, to be configured after connection is
+        accepted. Pass None to not change SO_LINGER. Otherwise, it's a
+        two-tuple, where the first element is the `l_onoff` switch, and the
+        second element is the `l_linger` value in seconds
+    :param remote_addr: address of the target server. Pass None to have
+        ForwardServer behave as echo server
+    :param remote_addr_family: address family for connecting to target server;
+        one of socket.AF_*
+    :param remote_socket_type: socket type for connecting to target server;
+        typically socket.SOCK_STREAM
+    :param multiprocessing.Queue queue: queue for depositing the forwarding
+        server's actual listening socket address family and bound address. The
+        parent process waits for this.
+ """ + + # NOTE: We define _ThreadedTCPServer class as a closure in order to + # override some of its class members dynamically + # NOTE: we add `object` to the base classes because `_ThreadedTCPServer` + # isn't derived from `object`, which prevents `super` from working properly + class _ThreadedTCPServer(SocketServer.ThreadingMixIn, + SocketServer.TCPServer, object): + """Threaded streaming server for forwarding""" + + # Override TCPServer's class members + address_family = local_addr_family + socket_type = local_socket_type + allow_reuse_address = True + + def __init__(self): + + handler_class_factory = partial( + _TCPHandler, + local_linger_args=local_linger_args, + remote_addr=remote_addr, + remote_addr_family=remote_addr_family, + remote_socket_type=remote_socket_type) + + super(_ThreadedTCPServer, self).__init__( + local_addr, handler_class_factory, bind_and_activate=True) + + server = _ThreadedTCPServer() + + # Send server socket info back to parent process + queue.put([server.socket.family, server.server_address]) + + queue.close() + + server.serve_forever() + + +# NOTE: we add `object` to the base classes because `StreamRequestHandler` isn't +# derived from `object`, which prevents `super` from working properly +class _TCPHandler(SocketServer.StreamRequestHandler, object): + """TCP/IP session handler instantiated by TCPServer upon incoming + connection. Implements forwarding/echo of the incoming connection. + """ + + _SOCK_RX_BUF_SIZE = 16 * 1024 + + def __init__( + self, # pylint: disable=R0913 + request, + client_address, + server, + local_linger_args, + remote_addr, + remote_addr_family, + remote_socket_type): + """ + :param request: for super + :param client_address: for super + "paarm server: for super + :param tuple local_linger_args: SO_LINGER sockoverride for the local + connection sockets, to be configured after connection is accepted. + Pass None to not change SO_LINGER. Otherwise, its a two-tuple, where + the first element is the `l_onoff` switch, and the second element is + the `l_linger` value in seconds + :param remote_addr: address of the target server. Pass None to have + ForwardServer behave as echo server. 
+ :param remote_addr_family: address family for connecting to target + server; one of socket.AF_* + :param remote_socket_type: socket type for connecting to target server; + typically socket.SOCK_STREAM + :param **kwargs: kwargs for super class + """ + self._local_linger_args = local_linger_args + self._remote_addr = remote_addr + self._remote_addr_family = remote_addr_family + self._remote_socket_type = remote_socket_type + + super(_TCPHandler, self).__init__( + request=request, client_address=client_address, server=server) + + def handle(self): # pylint: disable=R0912 + """Connect to remote and forward data between local and remote""" + local_sock = self.connection + + if self._local_linger_args is not None: + # Set SO_LINGER socket options on local socket + l_onoff, l_linger = self._local_linger_args + local_sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, + struct.pack('ii', l_onoff, l_linger)) + + if self._remote_addr is not None: + # Forwarding set-up + remote_dest_sock = remote_src_sock = socket.socket( + family=self._remote_addr_family, + type=self._remote_socket_type, + proto=socket.IPPROTO_IP) + remote_dest_sock.connect(self._remote_addr) + _trace("%s _TCPHandler connected to remote %s", datetime.utcnow(), + remote_dest_sock.getpeername()) + else: + # Echo set-up + remote_dest_sock, remote_src_sock = \ + pika.compat._nonblocking_socketpair() + + try: + local_forwarder = threading.Thread( + target=self._forward, args=( + local_sock, + remote_dest_sock, + )) + local_forwarder.setDaemon(True) + local_forwarder.start() + + try: + self._forward(remote_src_sock, local_sock) + finally: + # Wait for local forwarder thread to exit + local_forwarder.join() + finally: + try: + try: + _safe_shutdown_socket(remote_dest_sock, socket.SHUT_RDWR) + finally: + if remote_src_sock is not remote_dest_sock: + _safe_shutdown_socket(remote_src_sock, socket.SHUT_RDWR) + finally: + remote_dest_sock.close() + if remote_src_sock is not remote_dest_sock: + remote_src_sock.close() + + def _forward(self, src_sock, dest_sock): # pylint: disable=R0912 + """Forward from src_sock to dest_sock""" + src_peername = src_sock.getpeername() + + _trace("%s forwarding from %s to %s", datetime.utcnow(), src_peername, + dest_sock.getpeername()) + try: + # NOTE: python 2.6 doesn't support bytearray with recv_into, so + # we use array.array instead; this is only okay as long as the + # array instance isn't shared across threads. 
See + # http://bugs.python.org/issue7827 and + # groups.google.com/forum/#!topic/comp.lang.python/M6Pqr-KUjQw + rx_buf = array.array("B", [0] * self._SOCK_RX_BUF_SIZE) + + while True: + try: + nbytes = src_sock.recv_into(rx_buf) + except pika.compat.SOCKET_ERROR as exc: + if exc.errno == errno.EINTR: + continue + elif exc.errno == errno.ECONNRESET: + # Source peer forcibly closed connection + _trace("%s errno.ECONNRESET from %s", datetime.utcnow(), + src_peername) + break + else: + _trace("%s Unexpected errno=%s from %s\n%s", + datetime.utcnow(), exc.errno, src_peername, + "".join(traceback.format_stack())) + raise + + if not nbytes: + # Source input EOF + _trace("%s EOF on %s", datetime.utcnow(), src_peername) + break + + try: + dest_sock.sendall(buffer(rx_buf, 0, nbytes)) + except pika.compat.SOCKET_ERROR as exc: + if exc.errno == errno.EPIPE: + # Destination peer closed its end of the connection + _trace("%s Destination peer %s closed its end of " + "the connection: errno.EPIPE", datetime.utcnow(), + dest_sock.getpeername()) + break + elif exc.errno == errno.ECONNRESET: + # Destination peer forcibly closed connection + _trace("%s Destination peer %s forcibly closed " + "connection: errno.ECONNRESET", + datetime.utcnow(), dest_sock.getpeername()) + break + else: + _trace("%s Unexpected errno=%s in sendall to %s\n%s", + datetime.utcnow(), exc.errno, + dest_sock.getpeername(), "".join( + traceback.format_stack())) + raise + except: + _trace("forward failed\n%s", "".join(traceback.format_exc())) + raise + finally: + _trace("%s done forwarding from %s", datetime.utcnow(), + src_peername) + try: + # Let source peer know we're done receiving + _safe_shutdown_socket(src_sock, socket.SHUT_RD) + finally: + # Let destination peer know we're done sending + _safe_shutdown_socket(dest_sock, socket.SHUT_WR) + + +def echo(port=0): + """ This function implements a simple echo server for testing the + Forwarder class. + + :param int port: port number on which to listen + + We run this function and it prints out the listening socket binding. + Then, we run Forwarder and point it at this echo "server". + Then, we run telnet and point it at forwarder and see if whatever we + type gets echoed back to us. 
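+
+ Illustrative invocation (module name and port are assumptions)::
+
+ $ python -c "from forward_server import echo; echo(9999)"
+ # then, from a second terminal, telnet to the printed address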
+
+ This function exits after the remote end connects and then closes the
+ connection
+ """
+ lsock = socket.socket()
+ lsock.bind(("", port))
+ lsock.listen(1)
+ _trace("Listening on sockname=%s", lsock.getsockname())
+
+ sock, remote_addr = lsock.accept()
+ try:
+ _trace("Connection from peer=%s", remote_addr)
+ while True:
+ try:
+ data = sock.recv(4 * 1024) # pylint: disable=E1101
+ except pika.compat.SOCKET_ERROR as exc:
+ if exc.errno == errno.EINTR:
+ continue
+ else:
+ raise
+
+ if not data:
+ break
+
+ sock.sendall(data) # pylint: disable=E1101
+ finally:
+ try:
+ _safe_shutdown_socket(sock, socket.SHUT_RDWR)
+ finally:
+ sock.close()
+
+
+def _safe_shutdown_socket(sock, how=socket.SHUT_RDWR):
+ """ Shutdown a socket, suppressing ENOTCONN
+ """
+ try:
+ sock.shutdown(how)
+ except pika.compat.SOCKET_ERROR as exc:
+ if exc.errno != errno.ENOTCONN:
+ raise
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/test_utils.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/test_utils.py
new file mode 100644
index 000000000..adf5934cc
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/acceptance/test_utils.py
@@ -0,0 +1,73 @@
+"""Acceptance test utils"""
+
+import functools
+import logging
+import time
+import traceback
+
+
+def retry_assertion(timeout_sec, retry_interval_sec=0.1):
+ """Creates a decorator that retries the decorated function or
+ method upon `AssertionError` (and only `AssertionError`), sleeping
+ `retry_interval_sec` between attempts, without exceeding the overall
+ `timeout_sec` budget.
+
+ :param float timeout_sec: overall timeout in seconds
+ :param float retry_interval_sec: amount of time to sleep
+ between retries in seconds.
+
+ :returns: decorator that implements the following behavior
+
+ 1. This decorator guarantees to call the decorated function or method at
+ least once.
+ 2. It passes through all exceptions besides `AssertionError`, preserving the
+ original exception and its traceback.
+ 3. If no exception, it returns the return value from the decorated function/method.
+ 4. It sleeps `time.sleep(retry_interval_sec)` between retries.
+ 5. It checks for expiry of the overall timeout before sleeping.
+ 6. If the overall timeout is exceeded, it re-raises the latest `AssertionError`,
+ preserving its original traceback
+ """
+
+ def retry_assertion_decorator(func):
+ """Decorator"""
+
+ @functools.wraps(func)
+ def retry_assertion_wrap(*args, **kwargs):
+ """The wrapper"""
+
+ num_attempts = 0
+ start_time = time.time()
+
+ while True:
+ num_attempts += 1
+
+ try:
+ result = func(*args, **kwargs)
+ except AssertionError:
+
+ now = time.time()
+ # Compensate for time adjustment
+ if now < start_time:
+ start_time = now
+
+ if (now - start_time) > timeout_sec:
+ logging.exception(
+ 'Exceeded retry timeout of %s sec in %s attempts '
+ 'with func %r. 
Caller\'s stack:\n%s',
+ timeout_sec, num_attempts, func,
+ ''.join(traceback.format_stack()))
+ raise
+
+ logging.debug('Attempt %s failed; retrying %r in %s sec.',
+ num_attempts, func, retry_interval_sec)
+
+ time.sleep(retry_interval_sec)
+ else:
+ logging.debug('%r succeeded at attempt %s',
+ func, num_attempts)
+ return result
+
+ return retry_assertion_wrap
+
+ return retry_assertion_decorator
+
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/unit/amqp_object_tests.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/unit/amqp_object_tests.py
new file mode 100644
index 000000000..998a95b83
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/unit/amqp_object_tests.py
@@ -0,0 +1,62 @@
+import unittest
+
+from pika import amqp_object
+
+
+class AMQPObjectTests(unittest.TestCase):
+ def test_base_name(self):
+ self.assertEqual(amqp_object.AMQPObject().NAME, 'AMQPObject')
+
+ def test_repr_no_items(self):
+ obj = amqp_object.AMQPObject()
+ self.assertEqual(repr(obj), '<AMQPObject>')
+
+ def test_repr_items(self):
+ obj = amqp_object.AMQPObject()
+ setattr(obj, 'foo', 'bar')
+ setattr(obj, 'baz', 'qux')
+ self.assertEqual(repr(obj), "<AMQPObject(['baz=qux', 'foo=bar'])>")
+
+
+class ClassTests(unittest.TestCase):
+ def test_base_name(self):
+ self.assertEqual(amqp_object.Class().NAME, 'Unextended Class')
+
+
+class MethodTests(unittest.TestCase):
+ def test_base_name(self):
+ self.assertEqual(amqp_object.Method().NAME, 'Unextended Method')
+
+ def test_set_content_body(self):
+ properties = amqp_object.Properties()
+ body = 'This is a test'
+ obj = amqp_object.Method()
+ obj._set_content(properties, body)
+ self.assertEqual(obj._body, body)
+
+ def test_set_content_properties(self):
+ properties = amqp_object.Properties()
+ body = 'This is a test'
+ obj = amqp_object.Method()
+ obj._set_content(properties, body)
+ self.assertEqual(obj._properties, properties)
+
+ def test_get_body(self):
+ properties = amqp_object.Properties()
+ body = 'This is a test'
+ obj = amqp_object.Method()
+ obj._set_content(properties, body)
+ self.assertEqual(obj.get_body(), body)
+
+ def test_get_properties(self):
+ properties = amqp_object.Properties()
+ body = 'This is a test'
+ obj = amqp_object.Method()
+ obj._set_content(properties, body)
+ self.assertEqual(obj.get_properties(), properties)
+
+
+class PropertiesTests(unittest.TestCase):
+ def test_base_name(self):
+ self.assertEqual(amqp_object.Properties().NAME,
+ 'Unextended Properties')
diff --git a/NodeRed/NodeRedFiles/pika-0.13.1/tests/unit/base_connection_tests.py b/NodeRed/NodeRedFiles/pika-0.13.1/tests/unit/base_connection_tests.py
new file mode 100644
index 000000000..b010d729f
--- /dev/null
+++ b/NodeRed/NodeRedFiles/pika-0.13.1/tests/unit/base_connection_tests.py
@@ -0,0 +1,85 @@
+"""
+Tests for pika.base_connection.BaseConnection
+
+"""
+
+import socket
+import unittest
+
+import mock
+
+import pika
+import sys
+import ssl
+
+import pika.tcp_socket_opts
+from pika.adapters import base_connection
+
+
+# If this is missing, set it manually. We need it to test tcp opt setting.
+try:
+ socket.TCP_KEEPIDLE
+except AttributeError:
+ socket.TCP_KEEPIDLE = 4
+
+
+class BaseConnectionTests(unittest.TestCase):
+ def setUp(self):
+ with mock.patch('pika.connection.Connection.connect'):
+ self.connection = base_connection.BaseConnection()
+ self.connection._set_connection_state(
+ base_connection.BaseConnection.CONNECTION_OPEN)
+
+ def test_repr(self):
+ text = repr(self.connection)
+ self.assertTrue(text.startswith('I', encoded, offset)[0]")
+ print(prefix + "offset += 4")
+ print(prefix + "%s = encoded[offset:offset + length]" % cLvalue)
+ print(prefix + "try:")
+ print(prefix + " %s = str(%s)" % (cLvalue, cLvalue))
+ print(prefix + "except UnicodeEncodeError:")
+ print(prefix + " pass")
+ print(prefix + "offset += length")
+ elif type == 'octet':
+ print(prefix + "%s = struct.unpack_from('B', encoded, offset)[0]" %
+ cLvalue)
+ print(prefix + "offset += 1")
+ elif type == 'short':
+ print(prefix + "%s = struct.unpack_from('>H', encoded, offset)[0]" %
+ cLvalue)
+ print(prefix + "offset += 2")
+ elif type == 'long':
+ print(prefix + "%s = struct.unpack_from('>I', encoded, offset)[0]" %
+ cLvalue)
+ print(prefix + "offset += 4")
+ elif type == 'longlong':
+ print(prefix + "%s = struct.unpack_from('>Q', encoded, offset)[0]" %
+ cLvalue)
+ print(prefix + "offset += 8")
+ elif type == 'timestamp':
+ print(prefix + "%s = struct.unpack_from('>Q', encoded, offset)[0]" %
+ cLvalue)
+ print(prefix + "offset += 8")
+ elif type == 'bit':
+ raise Exception("Can't decode bit in genSingleDecode")
+ elif type == 'table':
+ print(prefix + "(%s, offset) = data.decode_table(encoded, offset)" %
+ cLvalue)
+ else:
+ raise Exception("Illegal domain in genSingleDecode", type)
+
+ def genSingleEncode(prefix, cValue, unresolved_domain):
+ type = spec.resolveDomain(unresolved_domain)
+ if type == 'shortstr':
+ print(prefix +
+ "assert isinstance(%s, str_or_bytes),\\\n%s 'A non-string value was supplied for %s'"
+ % (cValue, prefix, cValue))
+ print(prefix + "data.encode_short_string(pieces, %s)" % cValue)
+ elif type == 'longstr':
+ print(prefix +
+ "assert isinstance(%s, str_or_bytes),\\\n%s 'A non-string value was supplied for %s'"
+ % (cValue, prefix, cValue))
+ print(
+ prefix +
+ "value = %s.encode('utf-8') if isinstance(%s, unicode_type) else %s"
+ % (cValue, cValue, cValue))
+ print(prefix + "pieces.append(struct.pack('>I', len(value)))")
+ print(prefix + "pieces.append(value)")
+ elif type == 'octet':
+ print(prefix + "pieces.append(struct.pack('B', %s))" % cValue)
+ elif type == 'short':
+ print(prefix + "pieces.append(struct.pack('>H', %s))" % cValue)
+ elif type == 'long':
+ print(prefix + "pieces.append(struct.pack('>I', %s))" % cValue)
+ elif type == 'longlong':
+ print(prefix + "pieces.append(struct.pack('>Q', %s))" % cValue)
+ elif type == 'timestamp':
+ print(prefix + "pieces.append(struct.pack('>Q', %s))" % cValue)
+ elif type == 'bit':
+ raise Exception("Can't encode bit in genSingleEncode")
+ elif type == 'table':
+ print(prefix + "data.encode_table(pieces, %s)" % cValue)
+ else:
+ raise Exception("Illegal domain in genSingleEncode", type)
+
+ def genDecodeMethodFields(m):
+ print(" def decode(self, encoded, offset=0):")
+ bitindex = None
+ for f in m.arguments:
+ if spec.resolveDomain(f.domain) == 'bit':
+ if bitindex is None:
+ bitindex = 0
+ if bitindex >= 8:
+ bitindex = 0
+ if not bitindex:
+ print(
+ " bit_buffer = struct.unpack_from('B', encoded, offset)[0]")
+ print(" offset += 1")
+ print(" self.%s = (bit_buffer & (1 << %d)) != 0" %
+
(pyize(f.name), bitindex)) + bitindex += 1 + else: + bitindex = None + genSingleDecode(" ", "self.%s" % (pyize(f.name),), + f.domain) + print(" return self") + print('') + + def genDecodeProperties(c): + print(" def decode(self, encoded, offset=0):") + print(" flags = 0") + print(" flagword_index = 0") + print(" while True:") + print( + " partial_flags = struct.unpack_from('>H', encoded, offset)[0]") + print(" offset += 2") + print( + " flags = flags | (partial_flags << (flagword_index * 16))") + print(" if not (partial_flags & 1):") + print(" break") + print(" flagword_index += 1") + for f in c.fields: + if spec.resolveDomain(f.domain) == 'bit': + print(" self.%s = (flags & %s) != 0" % + (pyize(f.name), flagName(c, f))) + else: + print(" if flags & %s:" % (flagName(c, f),)) + genSingleDecode(" ", "self.%s" % (pyize(f.name),), + f.domain) + print(" else:") + print(" self.%s = None" % (pyize(f.name),)) + print(" return self") + print('') + + def genEncodeMethodFields(m): + print(" def encode(self):") + print(" pieces = list()") + bitindex = None + + def finishBits(): + if bitindex is not None: + print(" pieces.append(struct.pack('B', bit_buffer))") + + for f in m.arguments: + if spec.resolveDomain(f.domain) == 'bit': + if bitindex is None: + bitindex = 0 + print(" bit_buffer = 0") + if bitindex >= 8: + finishBits() + print(" bit_buffer = 0") + bitindex = 0 + print(" if self.%s:" % pyize(f.name)) + print(" bit_buffer = bit_buffer | (1 << %d)" % + bitindex) + bitindex += 1 + else: + finishBits() + bitindex = None + genSingleEncode(" ", "self.%s" % (pyize(f.name),), + f.domain) + finishBits() + print(" return pieces") + print('') + + def genEncodeProperties(c): + print(" def encode(self):") + print(" pieces = list()") + print(" flags = 0") + for f in c.fields: + if spec.resolveDomain(f.domain) == 'bit': + print(" if self.%s: flags = flags | %s" % + (pyize(f.name), flagName(c, f))) + else: + print(" if self.%s is not None:" % (pyize(f.name),)) + print(" flags = flags | %s" % (flagName(c, f),)) + genSingleEncode(" ", "self.%s" % (pyize(f.name),), + f.domain) + print(" flag_pieces = list()") + print(" while True:") + print(" remainder = flags >> 16") + print(" partial_flags = flags & 0xFFFE") + print(" if remainder != 0:") + print(" partial_flags |= 1") + print( + " flag_pieces.append(struct.pack('>H', partial_flags))") + print(" flags = remainder") + print(" if not flags:") + print(" break") + print(" return flag_pieces + pieces") + print('') + + def fieldDeclList(fields): + return ''.join([", %s=%s" % (pyize(f.name), fieldvalue(f.defaultvalue)) + for f in fields]) + + def fieldInitList(prefix, fields): + if fields: + return ''.join(["%sself.%s = %s\n" % (prefix, pyize(f.name), pyize(f.name)) \ + for f in fields]) + else: + return '%spass\n' % (prefix,) + + print("""\"\"\" +AMQP Specification +================== +This module implements the constants and classes that comprise AMQP protocol +level constructs. It should rarely be directly referenced outside of Pika's +own internal use. + +.. note:: Auto-generated code by codegen.py, do not edit directly. Pull +requests to this file without accompanying ``utils/codegen.py`` changes will be +rejected. 
+ +\"\"\" + +import struct +from pika import amqp_object +from pika import data +from pika.compat import str_or_bytes, unicode_type + +# Python 3 support for str object +str = bytes +""") + + print("PROTOCOL_VERSION = (%d, %d, %d)" % (spec.major, spec.minor, + spec.revision)) + print("PORT = %d" % spec.port) + print('') + + # Append some constants that arent in the spec json file + spec.constants.append(('FRAME_MAX_SIZE', 131072, '')) + spec.constants.append(('FRAME_HEADER_SIZE', 7, '')) + spec.constants.append(('FRAME_END_SIZE', 1, '')) + spec.constants.append(('TRANSIENT_DELIVERY_MODE', 1, '')) + spec.constants.append(('PERSISTENT_DELIVERY_MODE', 2, '')) + + constants = {} + for c, v, cls in spec.constants: + constants[constantName(c)] = v + + for key in sorted(constants.keys()): + print("%s = %s" % (key, constants[key])) + print('') + + for c in spec.allClasses(): + print('') + print('class %s(amqp_object.Class):' % (camel(c.name),)) + print('') + print(" INDEX = 0x%.04X # %d" % (c.index, c.index)) + print(" NAME = %s" % (fieldvalue(camel(c.name)),)) + print('') + + for m in c.allMethods(): + print(' class %s(amqp_object.Method):' % (camel(m.name),)) + print('') + methodid = m.klass.index << 16 | m.index + print(" INDEX = 0x%.08X # %d, %d; %d" % + (methodid, + m.klass.index, + m.index, + methodid)) + print(" NAME = %s" % (fieldvalue(m.structName(),))) + print('') + print(" def __init__(self%s):" % + (fieldDeclList(m.arguments),)) + print(fieldInitList(' ', m.arguments)) + print(" @property") + print(" def synchronous(self):") + print(" return %s" % m.isSynchronous) + print('') + genDecodeMethodFields(m) + genEncodeMethodFields(m) + + for c in spec.allClasses(): + if c.fields: + print('') + print('class %s(amqp_object.Properties):' % (c.structName(),)) + print('') + print(" CLASS = %s" % (camel(c.name),)) + print(" INDEX = 0x%.04X # %d" % (c.index, c.index)) + print(" NAME = %s" % (fieldvalue(c.structName(),))) + print('') + + index = 0 + if c.fields: + for f in c.fields: + if index % 16 == 15: + index += 1 + shortnum = index / 16 + partialindex = 15 - (index % 16) + bitindex = shortnum * 16 + partialindex + print(' %s = (1 << %d)' % (flagName(None, f), bitindex)) + index += 1 + print('') + + print(" def __init__(self%s):" % (fieldDeclList(c.fields),)) + print(fieldInitList(' ', c.fields)) + genDecodeProperties(c) + genEncodeProperties(c) + + print("methods = {") + print(',\n'.join([" 0x%08X: %s" % (m.klass.index << 16 | m.index, m.structName()) + for m in spec.allMethods()])) + print("}") + print('') + + print("props = {") + print(',\n'.join([" 0x%04X: %s" % (c.index, c.structName()) + for c in spec.allClasses() + if c.fields])) + print("}") + print('') + print('') + + print("def has_content(methodNumber):") + print(' return methodNumber in (') + for m in spec.allMethods(): + if m.hasContent: + print(' %s.INDEX,' % m.structName()) + print(' )') + + +if __name__ == "__main__": + with open(PIKA_SPEC, 'w') as handle: + sys.stdout = handle + generate(['%s/amqp-rabbitmq-0.9.1.json' % CODEGEN_PATH]) diff --git a/NodeRed/NodeRedFiles/rc.local b/NodeRed/NodeRedFiles/rc.local new file mode 100755 index 000000000..c4180c980 --- /dev/null +++ b/NodeRed/NodeRedFiles/rc.local @@ -0,0 +1,28 @@ +#!/bin/bash + +mount -o remount,rw / + +# Source directory +source_dir="/data/dbus-fzsonick-48tl" + +# Destination directory +destination_dir_upper="/opt/victronenergy/" +destination_dir="/opt/victronenergy/dbus-fzsonick-48tl/" + +# Check if the destination directory exists +if [ -d "$destination_dir" ]; then 
+ # Remove the destination directory + rm -r "$destination_dir" +fi + +# Copy the contents of the source directory to the destination directory +cp -r "$source_dir" "$destination_dir_upper" + +# Set MPPT network mode to 0 +sed -i "s|('/Link/NetworkMode', [^)]*)|('/Link/NetworkMode', 0)|g" /opt/victronenergy/dbus-systemcalc-py/delegates/dvcc.py + +find /data/innovenergy/openvpn -type f -exec chmod 777 {} \; + +/data/innovenergy/openvpn/service/run + +exit 0 diff --git a/NodeRed/NodeRedFiles/service/log/run b/NodeRed/NodeRedFiles/service/log/run new file mode 100755 index 000000000..58bf02ea0 --- /dev/null +++ b/NodeRed/NodeRedFiles/service/log/run @@ -0,0 +1,3 @@ +#!/bin/sh +exec 2>&1 +exec multilog t s25000 n4 /var/log/openvpn diff --git a/NodeRed/NodeRedFiles/service/log/supervise/lock b/NodeRed/NodeRedFiles/service/log/supervise/lock new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/service/log/supervise/status b/NodeRed/NodeRedFiles/service/log/supervise/status new file mode 100644 index 000000000..a6a3bf85a Binary files /dev/null and b/NodeRed/NodeRedFiles/service/log/supervise/status differ diff --git a/NodeRed/NodeRedFiles/service/run b/NodeRed/NodeRedFiles/service/run new file mode 100755 index 000000000..fe1d5ca51 --- /dev/null +++ b/NodeRed/NodeRedFiles/service/run @@ -0,0 +1,53 @@ +#!/bin/sh +exec 2>&1 + +ie_url='static.innovenergy.ch' +ie_data_dir='/data/innovenergy' +ovpn_bin_dir='/data/innovenergy/openvpn' +ovpn_data_dir=${ie_data_dir}'/openvpn' +ovpn_status_file='/var/volatile/ovpnstatus' + +# somehow the new (static) openvpn binary lost the ability to dns lookup, so we have to do this: +ie_ip=$(nslookup "$ie_url" | grep -F -A 1 "$ie_url" | grep -o -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+') + +# certificate query +query="https://${ie_url}/get_cert?unique_id=" +serial=$(/sbin/get-unique-id) +delay=1 + + +# create data dirs if necessary +[ -d ${ie_data_dir} ] || mkdir ${ie_data_dir} +[ -d ${ovpn_data_dir} ] || mkdir ${ovpn_data_dir} + + +# download certificates from server if necessary +while [ ! -f ${ovpn_data_dir}/client-certificate ] +do + sleep ${delay} + delay=$(( $delay * 2 )) + curl ${query}${serial} | tar -C ${ovpn_data_dir} -xv +done + +# run ovpn +exe="${ovpn_bin_dir}/openvpn +--client +--nobind +--resolv-retry infinite +--ca ${ovpn_data_dir}/ca-certificate +--cert ${ovpn_data_dir}/client-certificate +--key ${ovpn_data_dir}/client-key +--status ${ovpn_status_file} 5 +--remote ${ie_ip} +--port 7002 +--proto udp +--dev innovenergy +--dev-type tun +--auth SHA256 +--cipher AES-256-CBC +--verb 3 +--keepalive 10 120 +--persist-key +--persist-tun" + +exec ${exe} diff --git a/NodeRed/NodeRedFiles/service/supervise/lock b/NodeRed/NodeRedFiles/service/supervise/lock new file mode 100644 index 000000000..e69de29bb diff --git a/NodeRed/NodeRedFiles/service/supervise/status b/NodeRed/NodeRedFiles/service/supervise/status new file mode 100644 index 000000000..c86dec4a1 Binary files /dev/null and b/NodeRed/NodeRedFiles/service/supervise/status differ diff --git a/NodeRed/NodeRedFiles/settings-user.js b/NodeRed/NodeRedFiles/settings-user.js new file mode 100644 index 000000000..d76cdd8ec --- /dev/null +++ b/NodeRed/NodeRedFiles/settings-user.js @@ -0,0 +1,31 @@ +module.exports = { + uiHost:"", + /* To password protect the Node-RED editor and admin API, the following + property can be used. See https://nodered.org/docs/security.html for details. 
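+
+ The password value below is a bcrypt hash. A replacement hash can be
+ generated, for example, with the node-red-admin tool (assuming it is
+ installed):
+
+ node-red admin hash-pw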
+ */ + adminAuth: { + sessionExpiryTime: 86400, + type: "credentials", + users: [{ + username: "admin", + password: "$2b$08$d7A0gwkDh4KtultiCAVH6eQ.tQUwVApq.tDVOOYQ51EpLIMbYy2GW",//salidomo + permissions: "*" + }] + }, + + /* Context Storage + The following property can be used to enable context storage. The configuration + provided here will enable file-based context that flushes to disk every 30 seconds. + Refer to the documentation for further options: https://nodered.org/docs/api/context/ + */ + //contextStorage: { + // default: { + // module:"localfilesystem" + // }, + //}, + contextStorage: { + default: "memoryOnly", + memoryOnly: { module: 'memory' }, + file: { module: 'localfilesystem' } + }, + } diff --git a/NodeRed/update_Cerbo.py b/NodeRed/update_Cerbo.py new file mode 100644 index 000000000..ffd56bf94 --- /dev/null +++ b/NodeRed/update_Cerbo.py @@ -0,0 +1,226 @@ +import subprocess +import sys +import asyncio +import os + +async def run_remote_command(remote_host, command): + ssh_command = ['ssh', f'root@{remote_host}', command] + process = await asyncio.create_subprocess_exec( + *ssh_command, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + stdout, stderr = await process.communicate() + + stdout_decoded = stdout.decode() + stderr_decoded = stderr.decode() + + if process.returncode == 0: + return stdout_decoded + stderr_decoded + else: + return f"Failed to run the command: {command}, error: {stderr_decoded}" + +async def check_GX_type(remote_host): + command = "cat /etc/venus/machine" + return await run_remote_command(remote_host, command) + +async def resize(remote_host): + command = "sh /opt/victronenergy/swupdate-scripts/resize2fs.sh" + return await run_remote_command(remote_host, command) + +async def update_venus_firmware(remote_host): + # command = "sh /opt/victronenergy/swupdate-scripts/check-updates.sh -swu https://updates.victronenergy.com/feeds/venus/release/images/beaglebone/venus-swu-2-large-beaglebone-20240422090512-v3.31.swu" + command = "sh /opt/victronenergy/swupdate-scripts/check-updates.sh -swu http://updates.victronenergy.com/feeds/venus/release/images/beaglebone/venus-swu-2-beaglebone-20240422090512-v3.31.swu" + return await run_remote_command(remote_host, command) + +async def update_cerbo_firmware(remote_host): + # command = "sh /opt/victronenergy/swupdate-scripts/check-updates.sh -swu http://updates.victronenergy.com/feeds/venus/release/images/einstein/venus-swu-large-einstein-20240422090512-v3.31.swu" + command = "sh /opt/victronenergy/swupdate-scripts/check-updates.sh -swu http://updates.victronenergy.com/feeds/venus/release/images/einstein/venus-swu-einstein-20240422090512-v3.31.swu" + return await run_remote_command(remote_host, command) + +async def set_official_update_feed(remote_host): + command = "dbus -y com.victronenergy.settings /Settings/System/ReleaseType SetValue %0" + return await run_remote_command(remote_host, command) + +async def enable_large_image(remote_host): + command = "dbus -y com.victronenergy.settings /Settings/System/ImageType SetValue %1" + return await run_remote_command(remote_host, command) + +async def check_large_image_or_not(remote_host): + command = "dbus -y com.victronenergy.settings /Settings/System/ImageType GetValue" + return await run_remote_command(remote_host, command) + +async def update_firmware(remote_host): + command = "sh /opt/victronenergy/swupdate-scripts/check-updates.sh -update" + return await run_remote_command(remote_host, command) + +async def enable_NodeRed(remote_host): + 
command = "dbus -y com.victronenergy.platform /Services/NodeRed/Mode SetValue %1" + return await run_remote_command(remote_host, command) + +async def disable_NodeRed(remote_host): + command = "dbus -y com.victronenergy.platform /Services/NodeRed/Mode SetValue %0" + return await run_remote_command(remote_host, command) + +async def check_NodeRed_enabled_or_not(remote_host): + command = "dbus -y com.victronenergy.platform /Services/NodeRed/Mode GetValue" + return await run_remote_command(remote_host, command) + +async def download_node_red_dashboard(remote_host): + change_dir_command = "cd /data/home/nodered/.node-red" + install_command = "npm install --no-audit --no-update-notifier --no-fund --save --save-prefix=~ --production --engine-strict node-red-dashboard@latest" + command = f"{change_dir_command} && {install_command}" + return await run_remote_command(remote_host, command) + +async def disable_BMS_Controlling_MPPT(remote_host): + command = "dbus -y com.victronenergy.solarcharger /Settings/BmsPresent SetValue %0" + return await run_remote_command(remote_host, command) + +async def check_mkVersion(remote_host): + command = "dbus -y com.victronenergy.vebus.ttyS4 /Interfaces/Mk2/Version GetValue" + return await run_remote_command(remote_host, command) + +async def check_allow_mkVersion_update_or_not(remote_host): + command = "dbus -y com.victronenergy.settings /Settings/Vebus/AllowMk3Fw212Update GetValue" + return await run_remote_command(remote_host, command) + +async def update_mkVersion(remote_host): + command = "dbus -y com.victronenergy.settings /Settings/Vebus/AllowMk3Fw212Update SetValue %1" + return await run_remote_command(remote_host, command) + +async def import_pika(remote_host): + change_dir_command = "cd /data/innovenergy/pika-0.13.1/" + install_command = "python3 setup.py install --user" + command = f"{change_dir_command} && {install_command}" + return await run_remote_command(remote_host, command) + +async def reboot(remote_host): + command = "reboot" + return await run_remote_command(remote_host, command) + +async def upload_files(remote_host,which_file): + if which_file == 1: + file_location_mappings = { + "rc.local": "/data/", + "dbus-fzsonick-48tl": "/data/", + "service": "/data/innovenergy/openvpn/", + "openvpn": "/data/innovenergy/openvpn/", + "pika-0.13.1": "/data/innovenergy/" + } + else: + file_location_mappings = { + "flows.json": "/data/home/nodered/.node-red/", + "settings-user.js": "/data/home/nodered/.node-red/", + } + + node_red_files_folder = os.path.join(os.getcwd(), "NodeRedFiles") + if not os.path.exists(node_red_files_folder): + return "NodeRedFiles folder does not exist." + + try: + tasks = [] + for file_name, remote_location in file_location_mappings.items(): + file_path = os.path.join(node_red_files_folder, file_name) + if not os.path.exists(file_path): + raise FileNotFoundError(f"File {file_name} not found in {node_red_files_folder}.") + + command = [ + "rsync", + "-r", + file_path, + f"root@{remote_host}:{remote_location}" + ] + + tasks.append(command) + + # Execute rsync commands asynchronously + for task in tasks: + subprocess.run(task, check=True) + + return "All files uploaded successfully." + + except FileNotFoundError as e: + return str(e) + + except subprocess.CalledProcessError as e: + return f"Error occurred while uploading files: {e}" + + except Exception as e: + return f"An error occurred while uploading files: {str(e)}" + +async def main(remote_host): + # #### 1. 
upload VPN and battery files #####
+ # print("Upload VPN and battery files!")
+ # if(await upload_files(remote_host,1)!="All files uploaded successfully."):
+ # sys.exit("Failed to upload files!")
+ # else:
+ # print(await upload_files(remote_host,1))
+
+ # ##### 2. update firmware with normal image #####
+ # gx_type = await check_GX_type(remote_host)
+ # if gx_type == "beaglebone\n":
+ # print("Update Venus GX firmware now!")
+ # print(await update_venus_firmware(remote_host))
+ # elif gx_type == "einstein\n":
+ # print("Update Cerbo GX firmware now!")
+ # print(await update_cerbo_firmware(remote_host))
+ # else:
+ # sys.exit("It's neither Venus GX nor Cerbo GX!")
+
+ # ##### Need to rerun the script to continue after rebooting due to firmware update ####
+ # #### 3. update to MK3 #####
+ # if (await check_mkVersion(remote_host) == "value = 1170212\n" and await check_allow_mkVersion_update_or_not(remote_host) == "0\n"):
+ # print("Update MK3!")
+ # print(await update_mkVersion(remote_host))
+ # else:
+ # print("No need to update to MK3!")
+
+ # #### 4. import pika ####
+ # print("Import pika!")
+ # print(await import_pika(remote_host))
+
+ # ##### 5. update firmware with large image #####
+ # print("Set update feed to official release now!")
+ # print(await set_official_update_feed(remote_host))
+ # print("Enable large image now!")
+ # print(await enable_large_image(remote_host))
+ # image_type = await check_large_image_or_not(remote_host)
+ # if image_type == "1\n":
+ # print("Update firmware with large image!")
+ # print(await update_firmware(remote_host))
+ # else:
+ # sys.exit("Failed to enable large image!")
+
+ ##### Need to rerun the script to continue after rebooting due to firmware update ####
+ #### 6. resize /dev/root #####
+ print("Resize /dev/root now!")
+ print(await resize(remote_host))
+
+ #### 7. enable Node Red #####
+ print("Enable Node Red now!")
+ print(await enable_NodeRed(remote_host))
+ if(await check_NodeRed_enabled_or_not(remote_host) == "value = 1\n"):
+ ##### 8. download Node Red Dashboard #####
+ print("Download Node Red Dashboard now!")
+ print(await download_node_red_dashboard(remote_host))
+ else:
+ sys.exit("Failed to enable Node Red!")
+
+ ##### 9. upload files related to Node Red #####
+ print("Upload files related to Node Red now!")
+ if(await upload_files(remote_host,2)!="All files uploaded successfully."):
+ sys.exit("Failed to upload files!")
+
+ #### 10.
restart Node Red to load and deploy flows ##### + print("Disable Node Red!") + if(await disable_NodeRed(remote_host) == "retval = 0\n"): + print("Re-enable Node Red!") + print(await enable_NodeRed(remote_host)) + if(await check_NodeRed_enabled_or_not(remote_host) == "value = 1\n"): + print("Node Red is set now!") + else: + sys.exit("Failed to re-enable Node Red!") + +if __name__ == "__main__": + remote_host = sys.argv[1] + asyncio.run(main(remote_host)) diff --git a/csharp/App/ResetBms/ResetBms.csproj b/csharp/App/ResetBms/ResetBms.csproj new file mode 100644 index 000000000..394fd6b96 --- /dev/null +++ b/csharp/App/ResetBms/ResetBms.csproj @@ -0,0 +1,14 @@ + + + + + InnovEnergy.App.ResetBms + + + + + + + + + diff --git a/csharp/App/SaliMax/deploy.sh b/csharp/App/SaliMax/deploy.sh index 9bd0d801b..057e274b5 100755 --- a/csharp/App/SaliMax/deploy.sh +++ b/csharp/App/SaliMax/deploy.sh @@ -21,6 +21,6 @@ rsync -v \ ./bin/Release/$dotnet_version/linux-x64/publish/* \ $username@"$salimax_ip":~/salimax -#echo -e "\n============================ Execute ============================\n" +echo -e "\n============================ Execute ============================\n" -#sshpass -p "$root_password" ssh -o StrictHostKeyChecking=no -t "$username"@"$salimax_ip" "echo '$root_password' | sudo -S sh -c 'cd salimax && ./restart'" 2>/dev/null +sshpass -p "$root_password" ssh -o StrictHostKeyChecking=no -t "$username"@"$salimax_ip" "echo '$root_password' | sudo -S sh -c 'cd salimax && ./restart'" 2>/dev/null diff --git a/csharp/App/SaliMax/src/Ess/Controller.cs b/csharp/App/SaliMax/src/Ess/Controller.cs index 3d35cf2d7..1cc305352 100644 --- a/csharp/App/SaliMax/src/Ess/Controller.cs +++ b/csharp/App/SaliMax/src/Ess/Controller.cs @@ -195,9 +195,9 @@ public static class Controller var mustDoCalibrationCharge = calibrationChargeForced == CalibrationChargeType.ChargePermanently || (calibrationChargeForced == CalibrationChargeType.AdditionallyOnce && additionalCalibrationRequired) || (calibrationChargeForced == CalibrationChargeType.RepetitivelyEvery && repetitiveCalibrationRequired); + Console.WriteLine("Next Repetitive calibration charge date is "+statusRecord.Config.DayAndTimeForRepetitiveCalibration); Console.WriteLine("Next Additional calibration charge date is "+statusRecord.Config.DayAndTimeForAdditionalCalibration); - //Console.WriteLine("Time now is "+DateTime.Now); if (statusRecord.Battery is not null) { diff --git a/csharp/App/SaliMax/src/Program.cs b/csharp/App/SaliMax/src/Program.cs index c51db600d..ee0a76355 100644 --- a/csharp/App/SaliMax/src/Program.cs +++ b/csharp/App/SaliMax/src/Program.cs @@ -1,4 +1,4 @@ -#define Amax +#undef Amax #undef GridLimit using System.IO.Compression; diff --git a/csharp/App/SchneiderMeterDriver/Config.cs b/csharp/App/SchneiderMeterDriver/Config.cs new file mode 100644 index 000000000..dbe60ebf8 --- /dev/null +++ b/csharp/App/SchneiderMeterDriver/Config.cs @@ -0,0 +1,65 @@ +using System.Reflection; +using InnovEnergy.Lib.Victron.VeDBus; + +namespace InnovEnergy.App.SchneiderDriver; + +public static class Config +{ + public const String Version = "1.0"; + public const String BusName = "com.victronenergy.grid.Schnieder"; + public const Byte ModbusNodeId = 1; + public const String OwnAddress = "192.168.1.246"; + public const String PeerAddress = "192.168.1.82"; + //public const String PeerAddress = "127.0.0.1"; + public const UInt16 PeerPort = 502; + + public static TimeSpan TcpTimeout { get; } = TimeSpan.FromSeconds(2); + + + public static readonly TimeSpan UpdatePeriod = 
TimeSpan.FromSeconds(1);
+
+ public static readonly IReadOnlyList<Signal> Signals = new List<Signal>
+ {
+ // new(s => s..CurrentL1, "/Ac/L1/Current", "0.0 A"),
+ // new(s => s..CurrentL2, "/Ac/L2/Current", "0.0 A"),
+ // new(s => s..CurrentL3, "/Ac/L3/Current", "0.0 A"),
+ // new(s => s..CurrentL1 + s.Ac.L2.Current + s.Ac.L3.Current, "/Ac/Current", "0.0 A"),
+
+ // new(s => s.Ac.L1.Voltage, "/Ac/L1/Voltage", "0.0 A"),
+ // new(s => s.Ac.L2.Voltage, "/Ac/L2/Voltage", "0.0 A"),
+ // new(s => s.Ac.L3.Voltage, "/Ac/L3/Voltage", "0.0 A"),
+ // new(s => (s.Ac.L1.Voltage + s.Ac.L2.Voltage + s.Ac.L3.Voltage) / 3.0m, "/Ac/Voltage", "0.0 A"),
+
+ new Signal(s => s.ActivePowerL1, "/Ac/L1/Power", "0 W"),
+ new Signal(s => s.ActivePowerL2, "/Ac/L2/Power", "0 W"),
+ new Signal(s => s.ActivePowerL3, "/Ac/L3/Power", "0 W"),
+ new Signal(s => s.ActivePowerL1 + s.ActivePowerL2 + s.ActivePowerL3, "/Ac/Power", "0 W"),
+
+ // new(s => s.EnergyImportL123, "Ac/Energy/Forward", "0.00 kWh"),
+ // new(s => s.EnergyExportL123, "Ac/Energy/Reverse", "0.00 kWh"),
+ //
+ // new(s => s.EnergyImportL1, "Ac/L1/Energy/Forward", "0.00 kWh"),
+ // new(s => s.EnergyExportL1, "Ac/L1/Energy/Reverse", "0.00 kWh"),
+ //
+ // new(s => s.EnergyImportL2, "Ac/L2/Energy/Forward", "0.00 kWh"),
+ // new(s => s.EnergyExportL2, "Ac/L2/Energy/Reverse", "0.00 kWh"),
+ //
+ // new(s => s.EnergyImportL3, "Ac/L3/Energy/Forward", "0.00 kWh"),
+ // new(s => s.EnergyExportL3, "Ac/L3/Energy/Reverse", "0.00 kWh"),
+ };
+
+ public static VeProperties DefaultProperties => new VeProperties
+ {
+ new("/ProductName" , "Grid meter" ),
+ new("/CustomName" , "Schneider Professional"),
+ new("/DeviceInstance" , 30),
+ new("/DeviceType" , 72),
+ new("/Mgmt/Connection" , "Modbus TCP"),
+ new("/Mgmt/ProcessName" , Assembly.GetEntryAssembly()?.Location ?? "unknown"),
+ new("/Mgmt/ProcessVersion", Version),
+ new("/Connected" , 1),
+ new("/ProductId" , 45058, "b002"),
+ new("/Role" , "grid"),
+ };
+
+}
\ No newline at end of file
diff --git a/csharp/App/SchneiderMeterDriver/Nic.cs b/csharp/App/SchneiderMeterDriver/Nic.cs
new file mode 100644
index 000000000..a580f6fd3
--- /dev/null
+++ b/csharp/App/SchneiderMeterDriver/Nic.cs
@@ -0,0 +1,149 @@
+using System.Text.Json.Nodes;
+using CliWrap;
+using CliWrap.Buffered;
+
+namespace InnovEnergy.App.SchneiderDriver;
+
+public readonly struct Nic
+{
+ private static Command IpCommand { get; } = Cli
+ .Wrap("/sbin/ip")
+ .WithValidation(CommandResultValidation.None);
+
+ private readonly JsonNode _Node;
+
+ private Nic(JsonNode node)
+ {
+ _Node = node;
+ }
+
+ public Boolean IsEthernet
+ {
+ get
+ {
+ try
+ {
+ return _Node["link_type"]!.GetValue<String>() == "ether";
+ }
+ catch
+ {
+ return false;
+ }
+ }
+ }
+
+ public Boolean IsUp
+ {
+ get
+ {
+ // ReSharper disable once StringLiteralTypo
+ try
+ {
+ return _Node["operstate"]!.GetValue<String>() == "UP";
+ }
+ catch
+ {
+ return false;
+ }
+ }
+ }
+
+ public IReadOnlyList<String> Ip4Addresses
+ {
+ get
+ {
+ // ReSharper disable once StringLiteralTypo
+ try
+ {
+ return _Node["addr_info"]!
.AsArray()
+ .TryWhere(n => n!["family"]!.GetValue<String>() == "inet")
+ .TrySelect(n => n!["local"]!.GetValue<String>())
+ .ToList();
+ }
+ catch
+ {
+ return Array.Empty<String>();
+ }
+ }
+ }
+
+ public String Name
+ {
+ get
+ {
+ // ReSharper disable once StringLiteralTypo
+ try
+ {
+ return _Node["ifname"]!.GetValue<String>();
+ }
+ catch
+ {
+ return "";
+ }
+ }
+ }
+
+
+
+ public async Task<Boolean> AddPointToPoint(String sourceAddress, String destinationAddress)
+ {
+ var result = await IpCommand
+ .WithArguments($"address add local {sourceAddress} peer {destinationAddress} dev {Name}")
+ .ExecuteAsync();
+
+ return result.ExitCode == 0;
+ }
+
+ public async Task<Boolean> RemoveAddress(String address)
+ {
+ var result = await IpCommand
+ .WithArguments($"address del {address} dev {Name}")
+ .ExecuteBufferedAsync();
+
+ return result.ExitCode == 0;
+ }
+
+
+ public async Task<Boolean> AddRoute(String route)
+ {
+ var result = await IpCommand
+ .WithArguments($"route add {route} dev {Name}")
+ .ExecuteAsync();
+
+ return result.ExitCode == 0;
+ }
+
+ public async Task<Boolean> RemoveRoute(String route)
+ {
+ var result = await IpCommand
+ .WithArguments($"route del {route} dev {Name}")
+ .ExecuteAsync();
+
+ return result.ExitCode == 0;
+ }
+
+
+ public static async Task<IReadOnlyList<Nic>> GetNetworkInterfaces()
+ {
+
+ try
+ {
+ var result = await IpCommand
+ .WithArguments("-details -pretty -json address")
+ .ExecuteBufferedAsync();
+
+ return JsonNode
+ .Parse(result.StandardOutput)!
+ .AsArray()
+ .Where(n => n != null)
+ .Select(n => new Nic(n!))
+ .ToList();
+ }
+ catch
+ {
+ return Array.Empty<Nic>();
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/csharp/App/SchneiderMeterDriver/Program.cs b/csharp/App/SchneiderMeterDriver/Program.cs
new file mode 100644
index 000000000..b447c8933
--- /dev/null
+++ b/csharp/App/SchneiderMeterDriver/Program.cs
@@ -0,0 +1,60 @@
+using InnovEnergy.App.SchneiderDriver;
+using InnovEnergy.Lib.Protocols.DBus;
+using InnovEnergy.Lib.Utils;
+using InnovEnergy.Lib.Utils.Net;
+
+
+// dotnet publish EmuMeter.csproj -c Release -r linux-arm -p:PublishSingleFile=true --self-contained true && \
+// rsync -av bin/Release/net6.0/linux-arm/publish/ root@10.2.1.6:/home/root/emu && clear && \
+// ssh root@10.2.1.6 /home/root/emu/EmuMeter
+
+
+Console.WriteLine("Starting Schneider Driver " + Config.Version);
+
+var networkInterfaces = await Nic.GetNetworkInterfaces();
+
+var candidates = networkInterfaces.Where(n => n.IsUp &&
+ n.IsEthernet &&
+ (!n.Ip4Addresses.Any() || n.Ip4Addresses.Contains(Config.OwnAddress)));
+
+foreach (var nic in candidates)
+{
+ Console.WriteLine($"Found new network interface: {nic.Name}");
+
+ if (!nic.Ip4Addresses.Contains(Config.OwnAddress))
+ {
+ Console.WriteLine($"Configuring Point-to-Point connection on {nic.Name}");
+ Console.WriteLine($" own address: {Config.OwnAddress}");
+ Console.WriteLine($" peer address: {Config.PeerAddress}");
+
+ var success = await nic.AddPointToPoint($"{Config.OwnAddress}/16", $"{Config.PeerAddress}/16");
+
+ if (!success)
+ {
+ Console.WriteLine($"Failed to configure network interface: {nic.Name}");
+ continue;
+ }
+ }
+
+ Console.WriteLine($"Pinging peer @ {Config.PeerAddress}");
+
+ var ping = await Config.PeerAddress.Ping();
+
+ if (ping)
+ {
+ Console.WriteLine($"Got answer from {Config.PeerAddress}");
+ var ex = await SchneiderMeterDriver.Run($"{Config.PeerAddress}:{Config.PeerPort}", Bus.System);
+
+ Console.WriteLine($"{nameof(SchneiderMeterDriver)} FAILED with\n{ex}");
+ }
+ else
+ {
+ Console.WriteLine($"No answer from {Config.PeerAddress}");
+ }
+
+ 
Console.Write($"Removing Point-to-Point connection on {nic.Name} ..."); + var removed = await nic.RemoveAddress($"{Config.OwnAddress}/16"); + Console.WriteLine(removed ? "done" : "failed"); +} + +Console.WriteLine("Stopping SchneiderMeter Driver"); \ No newline at end of file diff --git a/csharp/App/SchneiderMeterDriver/SchneiderMeterDriver.cs b/csharp/App/SchneiderMeterDriver/SchneiderMeterDriver.cs new file mode 100644 index 000000000..2953e9cd4 --- /dev/null +++ b/csharp/App/SchneiderMeterDriver/SchneiderMeterDriver.cs @@ -0,0 +1,155 @@ +/*using System.Reactive.Linq; +using InnovEnergy.Lib.Devices.IEM3kGridMeter; +using InnovEnergy.Lib.Protocols.DBus; +using InnovEnergy.Lib.Protocols.Modbus.Clients; +using InnovEnergy.Lib.Utils; +using InnovEnergy.Lib.Utils.Reflection; +using InnovEnergy.Lib.Victron.VeDBus; + +namespace InnovEnergy.App.SchneiderDriver; + +public static class SchneiderMeterDriver +{ + public static Task Run(String hostName, Bus dbusAddress) + { + return Run(hostName, ModbusTcpClient.DefaultPort, dbusAddress); + } + + public static async Task Run(String hostName, UInt16 port, Bus dbusAddress) + { + // var ep = new UnixDomainSocketEndPoint("/home/eef/graber_dbus.sock"); + // var auth = AuthenticationMethod.ExternalAsRoot(); + // dbusAddress = new Bus(ep, auth); + + var schneider = new Iem3KGridMeterDevice(hostName, port, Config.ModbusNodeId); + + + var schneiderStatus = Observable + .Interval(Config.UpdatePeriod) + .Select(_ => schneider.Read()) + .Publish(); + + var x = schneider.Read(); + + x?.ActivePowerL1.WriteLine(); + x?.ActivePowerL2.WriteLine(); + x?.ActivePowerL3.WriteLine(); + + + var poller = schneiderStatus.Connect(); + + var properties = Config.DefaultProperties; + + foreach (var p in properties) + { + p.WriteLine(" Signal"); + } + + // Step 1: Access Config.Signals + var signalsCollection = Config.Signals; + + foreach (var s in signalsCollection) + { + s.WriteLine(" Signal"); + } + + var signals = Config + .Signals + .Select(signal => schneiderStatus.Select(signal.ToVeProperty)) + .Merge() + .Do(p => properties.Set(p)); + + // TODO: remove when possible + // Apparently some VE services need to be periodically reminded that + // this service is /Connected + schneiderStatus.Subscribe(_ => properties.Set("/Connected", 1)); + + // Wait until status is read once to make sure all + // properties are set when we go onto the bus. 
+ var dbus = schneiderStatus + .Skip(1) + .Take(1) + .SelectMany(_ => PublishPropertiesOnDBus(properties, dbusAddress)); + + return await signals + .MergeErrors(dbus) + .Finally(poller.Dispose) + .SelectErrors(); + + } + + + private static Task PublishPropertiesOnDBus(VeProperties properties, Bus bus) + { + Console.WriteLine($"Connecting to DBus {bus}"); + return properties.PublishOnDBus(bus, Config.BusName); + } +}*/ + +using System; +using System.Reactive.Linq; +using InnovEnergy.Lib.Devices.IEM3kGridMeter; +using InnovEnergy.Lib.Protocols.DBus; +using InnovEnergy.Lib.Protocols.Modbus.Clients; +using InnovEnergy.Lib.Utils; +using InnovEnergy.Lib.Victron.VeDBus; + +namespace InnovEnergy.App.SchneiderDriver +{ + public static class SchneiderMeterDriver + { + public static Task Run(string hostName, Bus dbusAddress) + { + return Run(hostName, ModbusTcpClient.DefaultPort, dbusAddress); + } + + public static async Task Run(string hostName, ushort port, Bus dbusAddress) + { + var schneider = new Iem3KGridMeterDevice(hostName, port, Config.ModbusNodeId); + var schneiderStatus = Observable + .Interval(Config.UpdatePeriod) + .Select(_ => + { + var status = schneider.Read(); + if (status == null) + { + Console.WriteLine("Failed to read data from Iem3KGridMeterDevice"); + } + return status; + }) + .Where(status => status != null) // Ignore null readings + .Publish(); + + var poller = schneiderStatus.Connect(); + var properties = Config.DefaultProperties; + + var signals = Config + .Signals + .Select(signal => schneiderStatus.Select(signal.ToVeProperty)) + .Merge() + .Do(p => properties.Set(p)); + + schneiderStatus.Subscribe(_ => properties.Set("/Connected", 1)); + + var dbus = schneiderStatus + .Skip(1) + .Take(1) + .SelectMany(_ => PublishPropertiesOnDBus(properties, dbusAddress)); + + return await signals + .MergeErrors(dbus) + .Finally(poller.Dispose) + .SelectErrors(); + } + + private static Task PublishPropertiesOnDBus(VeProperties properties, Bus bus) + { + Console.WriteLine($"Connecting to DBus {bus}"); + return properties.PublishOnDBus(bus, Config.BusName); + } + } +} + + + + diff --git a/csharp/App/SchneiderMeterDriver/SchneiderMeterDriver.csproj b/csharp/App/SchneiderMeterDriver/SchneiderMeterDriver.csproj new file mode 100644 index 000000000..ac5b87bcd --- /dev/null +++ b/csharp/App/SchneiderMeterDriver/SchneiderMeterDriver.csproj @@ -0,0 +1,26 @@ + + + + + InnovEnergy.App.SchneiderDriver + SchniederDriver + + + + + + + + + + + + + + + + + + + + diff --git a/csharp/App/SchneiderMeterDriver/Signal.cs b/csharp/App/SchneiderMeterDriver/Signal.cs new file mode 100644 index 000000000..e7eba2bec --- /dev/null +++ b/csharp/App/SchneiderMeterDriver/Signal.cs @@ -0,0 +1,43 @@ +/*using InnovEnergy.Lib.Devices.IEM3kGridMeter; +using InnovEnergy.Lib.Protocols.DBus.Protocol.DataTypes; +using InnovEnergy.Lib.Victron.VeDBus; + +namespace InnovEnergy.App.SchneiderDriver; + + +// TODO: Does not compile +public record Signal(Func Source, ObjectPath Path, string Format = "") +{ + public VeProperty ToVeProperty(Iem3KGridMeterRegisters status) + { + var value = Source(status); + return new VeProperty(Path, value, string.Format($"{{0:{Format}}}", value)); + } +}*/ + +using InnovEnergy.Lib.Devices.IEM3kGridMeter; +using InnovEnergy.Lib.Protocols.DBus.Protocol.DataTypes; +using InnovEnergy.Lib.Victron.VeDBus; +using System; + +namespace InnovEnergy.App.SchneiderDriver +{ + public record Signal(Func Source, ObjectPath Path, string Format = "") + { + public VeProperty ToVeProperty(Iem3KGridMeterRegisters status) + 
{
+ if (status == null)
+ {
+ Console.WriteLine($"Status is null for path: {Path}");
+ // Return a default VeProperty if status is null
+ return new VeProperty(Path, default, String.Format($"{{0:{Format}}}", default));
+ }
+
+ var value = Source(status);
+ return new VeProperty(Path, value, String.Format($"{{0:{Format}}}", value));
+ }
+ }
+}
+
+
+
diff --git a/csharp/App/SchneiderMeterDriver/Utils.cs b/csharp/App/SchneiderMeterDriver/Utils.cs
new file mode 100644
index 000000000..4b5735a35
--- /dev/null
+++ b/csharp/App/SchneiderMeterDriver/Utils.cs
@@ -0,0 +1,49 @@
+namespace InnovEnergy.App.SchneiderDriver;
+
+public static class Utils
+{
+ public static IEnumerable<T> TryWhere<T>(this IEnumerable<T> src, Func<T, Boolean> predicate)
+ {
+ foreach (var e in src)
+ {
+ var ok = false;
+
+ try
+ {
+ ok = predicate(e);
+ }
+ catch
+ {
+ // ignored
+ }
+
+ if (ok)
+ yield return e;
+ }
+ }
+
+ public static IEnumerable<R> TrySelect<T, R>(this IEnumerable<T> src, Func<T, R> map)
+ {
+ foreach (var e in src)
+ {
+ var ok = false;
+ var result = default(R);
+
+ try
+ {
+ result = map(e);
+ ok = true;
+ }
+ catch
+ {
+ // ignored
+ }
+
+ if (ok)
+ yield return result!;
+ }
+ }
+
+
+
+}
\ No newline at end of file
diff --git a/csharp/App/SchneiderMeterDriver/debug.sh b/csharp/App/SchneiderMeterDriver/debug.sh
new file mode 100755
index 000000000..e68489b70
--- /dev/null
+++ b/csharp/App/SchneiderMeterDriver/debug.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+csproj="SchneiderMeterDriver.csproj"
+exe="SchneiderMeterDriver"
+#remote="10.2.1.6"
+remote="10.2.4.155"
+
+netVersion="net6.0"
+platform="linux-arm"
+config="Release"
+host="root@$remote"
+dir="/opt/innovenergy/$exe"
+
+set -e
+
+dotnet publish "$csproj" -c $config -r $platform -p:SuppressTrimmAnalysisWarnings=true -p:PublishSingleFile=true -p:PublishTrimmed=true -p:DebugType=None -p:DebugSymbols=false --self-contained true
+rsync -av "bin/$config/$netVersion/$platform/publish/" "$host:$dir"
+#clear
+#ssh "$host" "$dir/$exe"
+
\ No newline at end of file
diff --git a/csharp/InnovEnergy.sln b/csharp/InnovEnergy.sln
index 624ace482..10c541c54 100644
--- a/csharp/InnovEnergy.sln
+++ b/csharp/InnovEnergy.sln
@@ -87,6 +87,12 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Doepke", "Lib\Devices\Doepk
 EndProject
 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Amax5070", "Lib\Devices\Amax5070\Amax5070.csproj", "{09E280B0-43D3-47BD-AF15-CF4FCDD24FE6}"
 EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Sofar", "App\Sofar\Sofar.csproj", "{6B98449D-BF75-415A-8893-E49518F9307D}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SofarInverter", "Lib\Devices\SofarInverter\SofarInverter.csproj", "{2C7F3D89-402B-43CB-988E-8D2D853BEF44}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SchneiderMeterDriver", "App\SchneiderMeterDriver\SchneiderMeterDriver.csproj", "{2E7E7657-3A53-4B62-8927-FE9A082B81DE}"
+EndProject
 Global
 GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -226,6 +232,18 @@ Global
 {09E280B0-43D3-47BD-AF15-CF4FCDD24FE6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
 {09E280B0-43D3-47BD-AF15-CF4FCDD24FE6}.Debug|Any CPU.Build.0 = Debug|Any CPU
 {09E280B0-43D3-47BD-AF15-CF4FCDD24FE6}.Release|Any CPU.ActiveCfg = Release|Any CPU
 {09E280B0-43D3-47BD-AF15-CF4FCDD24FE6}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6B98449D-BF75-415A-8893-E49518F9307D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6B98449D-BF75-415A-8893-E49518F9307D}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6B98449D-BF75-415A-8893-E49518F9307D}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6B98449D-BF75-415A-8893-E49518F9307D}.Release|Any
CPU.Build.0 = Release|Any CPU + {2C7F3D89-402B-43CB-988E-8D2D853BEF44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2C7F3D89-402B-43CB-988E-8D2D853BEF44}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2C7F3D89-402B-43CB-988E-8D2D853BEF44}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2C7F3D89-402B-43CB-988E-8D2D853BEF44}.Release|Any CPU.Build.0 = Release|Any CPU + {2E7E7657-3A53-4B62-8927-FE9A082B81DE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2E7E7657-3A53-4B62-8927-FE9A082B81DE}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2E7E7657-3A53-4B62-8927-FE9A082B81DE}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2E7E7657-3A53-4B62-8927-FE9A082B81DE}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution {CF4834CB-91B7-4172-AC13-ECDA8613CD17} = {145597B4-3E30-45E6-9F72-4DD43194539A} @@ -265,5 +283,8 @@ Global {73B97F6E-2BDC-40DA-84A7-7FB0264387D6} = {AD5B98A8-AB7F-4DA2-B66D-5B4E63E7D854} {C2B14CD4-1BCA-4933-96D9-92F40EACD2B9} = {4931A385-24DC-4E78-BFF4-356F8D6D5183} {09E280B0-43D3-47BD-AF15-CF4FCDD24FE6} = {4931A385-24DC-4E78-BFF4-356F8D6D5183} + {6B98449D-BF75-415A-8893-E49518F9307D} = {145597B4-3E30-45E6-9F72-4DD43194539A} + {2C7F3D89-402B-43CB-988E-8D2D853BEF44} = {4931A385-24DC-4E78-BFF4-356F8D6D5183} + {2E7E7657-3A53-4B62-8927-FE9A082B81DE} = {145597B4-3E30-45E6-9F72-4DD43194539A} EndGlobalSection EndGlobal diff --git a/csharp/InnovEnergy.sln.DotSettings b/csharp/InnovEnergy.sln.DotSettings index 33953991f..22bbb44ce 100644 --- a/csharp/InnovEnergy.sln.DotSettings +++ b/csharp/InnovEnergy.sln.DotSettings @@ -2,6 +2,8 @@ <Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /> False + <Policy><Descriptor Staticness="Any" AccessRightKinds="Any" Description="Interfaces"><ElementKinds><Kind Name="INTERFACE" /></ElementKinds></Descriptor><Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /></Policy> + True True True diff --git a/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterDevice.cs b/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterDevice.cs index de9e04b62..7a1106c5f 100644 --- a/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterDevice.cs +++ b/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterDevice.cs @@ -1,4 +1,4 @@ -using InnovEnergy.Lib.Protocols.Modbus.Channels; +/*using InnovEnergy.Lib.Protocols.Modbus.Channels; using InnovEnergy.Lib.Protocols.Modbus.Clients; using InnovEnergy.Lib.Protocols.Modbus.Slaves; using InnovEnergy.Lib.Utils; @@ -10,11 +10,11 @@ public class Iem3KGridMeterDevice: ModbusDevice public Iem3KGridMeterDevice(String hostname, UInt16 port = 502, Byte slaveId = 1) : this(new TcpChannel(hostname, port), slaveId) { } - - public Iem3KGridMeterDevice(Channel channel, Byte slaveId = 1) : base(new ModbusTcpClient(channel, slaveId)) + + private Iem3KGridMeterDevice(Channel channel, Byte slaveId = 1) : base(new ModbusTcpClient(channel, slaveId)) { } - + public Iem3KGridMeterDevice(ModbusClient client) : base(client) { } @@ -26,7 +26,7 @@ public class Iem3KGridMeterDevice: ModbusDevice { return base.Read(); } - catch + catch { // TODO: Log $"Failed to read data from {nameof(Iem3KGridMeterDevice)}".WriteLine(); @@ -41,10 +41,91 @@ public class Iem3KGridMeterDevice: ModbusDevice { base.Write(registers); } - catch + catch { // TODO: Log $"Failed to write data to {nameof(Iem3KGridMeterDevice)}".WriteLine(); } } -} \ No newline at end of file + + +}*/ + +using InnovEnergy.Lib.Protocols.Modbus.Channels; +using InnovEnergy.Lib.Protocols.Modbus.Clients; +using InnovEnergy.Lib.Protocols.Modbus.Slaves; +using InnovEnergy.Lib.Utils; +using 
System; + +namespace InnovEnergy.Lib.Devices.IEM3kGridMeter +{ + public class Iem3KGridMeterDevice : ModbusDevice<Iem3KGridMeterRegisters> + { + private readonly string _hostname; + private readonly ushort _port; + private readonly byte _slaveId; + + public Iem3KGridMeterDevice(string hostname, ushort port = 502, byte slaveId = 1) + : this(new TcpChannel(hostname, port), slaveId) + { + _hostname = hostname ?? throw new ArgumentNullException(nameof(hostname)); + _port = port; + _slaveId = slaveId; + } + + private Iem3KGridMeterDevice(TcpChannel channel, byte slaveId = 1) + : base(new ModbusTcpClient(channel, slaveId)) + { + _hostname = channel.Host; + _port = channel.Port; + _slaveId = slaveId; + Console.WriteLine($"Initializing Iem3KGridMeterDevice with channel: {channel.Host}:{channel.Port}"); + } + + public Iem3KGridMeterDevice(ModbusClient client) + : base(client) + { + if (client is ModbusTcpClient tcpClient) + { + _hostname = tcpClient.Channel.Host; + _port = tcpClient.Channel.Port; + _slaveId = tcpClient.SlaveId; + } + else + { + throw new ArgumentException("Invalid client type", nameof(client)); + } + Console.WriteLine("Initializing Iem3KGridMeterDevice with ModbusClient"); + } + + public new Iem3KGridMeterRegisters? Read() + { + try + { + Console.WriteLine($"Attempting to read data from {_hostname}:{_port} with slaveId {_slaveId}"); + return base.Read(); + } + catch (Exception ex) + { + Console.WriteLine($"Failed to read data from {nameof(Iem3KGridMeterDevice)}: {ex.Message}"); + return null; + } + } + + public new void Write(Iem3KGridMeterRegisters registers) + { + try + { + base.Write(registers); + } + catch (Exception ex) + { + Console.WriteLine($"Failed to write data to {nameof(Iem3KGridMeterDevice)}: {ex.Message}"); + } + } + } +} + + + + diff --git a/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterRegisters.cs b/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterRegisters.cs index d7db41717..fb0183b76 100644 --- a/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterRegisters.cs +++ b/csharp/Lib/Devices/IEM3kGridMeter/IEM3kGridMeterRegisters.cs @@ -12,25 +12,25 @@ using Float32 = Single; [AddressOffset(-2)] // why?
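Each Float32 field in the register map below spans two consecutive 16-bit holding registers. A minimal Python sketch of the decoding step, assuming big-endian word order (high word transmitted first); the register values are illustrative:

    import struct

    def registers_to_float32(hi, lo):
        # two 16-bit holding registers -> one IEEE-754 single-precision value,
        # assuming the meter sends the high word first (big-endian word order)
        return struct.unpack('>f', struct.pack('>HH', hi, lo))[0]

    print(registers_to_float32(0x4496, 0x0000))  # 1200.0, e.g. an ActivePower reading in W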
-public class Iem3KGridMeterRegisters : IAc3Meter +public class Iem3KGridMeterRegisters //: IAc3Meter { private const Float32 ZeroBecauseReactivePowerNotSupported = 0; // TODO - [HoldingRegister(3054)] private Float32 _ActivePowerL1; - [HoldingRegister(3056)] private Float32 _ActivePowerL2; - [HoldingRegister(3058)] private Float32 _ActivePowerL3; + [HoldingRegister(3054)] public Float32 ActivePowerL1; + [HoldingRegister(3056)] public Float32 ActivePowerL2; + [HoldingRegister(3058)] public Float32 ActivePowerL3; - [HoldingRegister(3000)] private Float32 _CurrentL1; - [HoldingRegister(3002)] private Float32 _CurrentL2; - [HoldingRegister(3004)] private Float32 _CurrentL3; - - [HoldingRegister(3028)] private Float32 _VoltageL1N; - [HoldingRegister(3030)] private Float32 _VoltageL2N; - [HoldingRegister(3032)] private Float32 _VoltageL3N; - - [HoldingRegister(3110)] private Float32 _Frequency; + //[HoldingRegister(3000)] private Float32 _CurrentL1; + //[HoldingRegister(3002)] private Float32 _CurrentL2; + //[HoldingRegister(3004)] private Float32 _CurrentL3; + // + //[HoldingRegister(3028)] private Float32 _VoltageL1N; + //[HoldingRegister(3030)] private Float32 _VoltageL2N; + //[HoldingRegister(3032)] private Float32 _VoltageL3N; + // + //[HoldingRegister(3110)] private Float32 _Frequency; //[HoldingRegister(9012)] private Float32 _ReactivePowerL1; //[HoldingRegister(9014)] private Float32 _ReactivePowerL2; @@ -45,7 +45,7 @@ //[HoldingRegister(9026)] private Float32 _ApparentPowerL3; - public Ac3Bus Ac => new Ac3Bus + /*public Ac3Bus Ac => new Ac3Bus { L1 = new () { @@ -66,7 +66,9 @@ public class Iem3KGridMeterRegisters : IAc3Meter Phi = Atan2(ZeroBecauseReactivePowerNotSupported, _ActivePowerL3) }, Frequency = _Frequency - }; + };*/ + + } diff --git a/csharp/Lib/Protocols/Modbus/Channels/Channel.cs b/csharp/Lib/Protocols/Modbus/Channels/Channel.cs index 9fd8ec731..676025c16 100644 --- a/csharp/Lib/Protocols/Modbus/Channels/Channel.cs +++ b/csharp/Lib/Protocols/Modbus/Channels/Channel.cs @@ -1,7 +1,19 @@ -namespace InnovEnergy.Lib.Protocols.Modbus.Channels; +//namespace InnovEnergy.Lib.Protocols.Modbus.Channels; -public abstract class Channel +/*public abstract class Channel { public abstract IReadOnlyList<Byte> Read(Int32 nBytes); public abstract void Write(IReadOnlyList<Byte> bytes); -} \ No newline at end of file +}*/ + +using System; +using System.Collections.Generic; + +namespace InnovEnergy.Lib.Protocols.Modbus.Channels +{ + public abstract class Channel + { + public abstract IReadOnlyList<byte> Read(int nBytes); + public abstract void Write(IReadOnlyList<byte> bytes); + } +} diff --git a/csharp/Lib/Protocols/Modbus/Channels/RemoteSerialChannel.cs b/csharp/Lib/Protocols/Modbus/Channels/RemoteSerialChannel.cs index 98ecd1cd5..c42299221 100644 --- a/csharp/Lib/Protocols/Modbus/Channels/RemoteSerialChannel.cs +++ b/csharp/Lib/Protocols/Modbus/Channels/RemoteSerialChannel.cs @@ -4,7 +4,7 @@ using System.Reactive.Linq; using CliWrap; using InnovEnergy.Lib.Utils; -namespace InnovEnergy.Lib.Protocols.Modbus.Channels; +/*namespace InnovEnergy.Lib.Protocols.Modbus.Channels; public record RemoteSerialConnection ( @@ -207,4 +207,93 @@ public class RemoteSerialChannel : ConnectionChannel<RemoteSerialConnection> { connection.Write(data); } -} \ No newline at end of file +}*/ + +using System; +using System.IO.Ports; +using System.Threading; +using CliWrap; +using InnovEnergy.Lib.Utils; + +namespace InnovEnergy.Lib.Protocols.Modbus.Channels +{ + public class RemoteSerialChannel : ConnectionChannel<TcpChannel>, IDisposable + { + private
readonly Command _Command; + private readonly TcpChannel _TcpChannel; + + const string SsDir = "/opt/victronenergy/serial-starter"; + const string KillTasks = "kill $!"; + + private CancellationTokenSource _CancellationTokenSource = new CancellationTokenSource(); + + private CommandTask<CommandResult>? _CommandTask; + + public RemoteSerialChannel(SshHost host, string tty, int baudRate, Parity parity, int dataBits, int stopBits) + { + const int port = 6855; + + tty = tty.EnsureStartsWith("/dev/"); + + var configureTty = ConfigureTty(tty, baudRate, parity, stopBits, dataBits); + + var stopTty = $"{SsDir}/stop-tty.sh {tty}"; + var startTty = $"{SsDir}/start-tty.sh {tty}"; + + var socat = $"socat TCP-LISTEN:{port},nodelay {tty},raw"; + + var script = $"{configureTty} && {socat}"; + + _Command = host.Command.AppendArgument(script); + _TcpChannel = new TcpChannel(host.HostName, port); + } + + private static string ConfigureTty(string tty, int baudRate, Parity parity, int stopBits, int dataBits) + { + var oParity = parity switch + { + Parity.Even => "parenb -parodd", + Parity.Odd => "parenb parodd", + Parity.None => "-parenb", + _ => throw new NotImplementedException() + }; + + var oStopBits = stopBits switch + { + 1 => "-cstopb", + 2 => "cstopb", + _ => throw new NotImplementedException() + }; + + var oDataBits = "cs" + dataBits; + + return $"stty -F {tty} {baudRate} {oDataBits} {oStopBits} {oParity}"; + } + + protected override TcpChannel Open() + { + return _TcpChannel; + } + + protected override void Close(TcpChannel connection) + { + _CancellationTokenSource.Cancel(); + connection.Dispose(); + _CancellationTokenSource = new CancellationTokenSource(); + } + + protected override IReadOnlyList<byte> Read(TcpChannel connection, int nBytes) + { + return connection.Read(nBytes); + } + + protected override void Write(TcpChannel connection, IReadOnlyList<byte> data) + { + connection.Write(data); + } + + public void Dispose() + { + Close(_TcpChannel); + } + } +} diff --git a/csharp/Lib/Protocols/Modbus/Channels/TcpChannel.cs b/csharp/Lib/Protocols/Modbus/Channels/TcpChannel.cs index 22a20d018..f1f39a09a 100644 --- a/csharp/Lib/Protocols/Modbus/Channels/TcpChannel.cs +++ b/csharp/Lib/Protocols/Modbus/Channels/TcpChannel.cs @@ -2,7 +2,7 @@ using System.Net.Sockets; using InnovEnergy.Lib.Protocols.Modbus.Protocol; using InnovEnergy.Lib.Utils.Net; -namespace InnovEnergy.Lib.Protocols.Modbus.Channels; +/*namespace InnovEnergy.Lib.Protocols.Modbus.Channels; public class TcpChannel : ConnectionChannel<TcpClient> { @@ -82,4 +82,104 @@ var array = data.ToArray(); tcpClient.GetStream().Write(array, 0, array.Length); } -} \ No newline at end of file +}*/ + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net.Sockets; + +namespace InnovEnergy.Lib.Protocols.Modbus.Channels +{ + public class TcpChannel : Channel, IDisposable + { + public string Host { get; } + public ushort Port { get; } + + private const int TimeoutMs = 500; // TODO: parametrize + private Socket? Socket { get; set; } + private byte[] Buffer { get; } + + public TcpChannel(string hostname, ushort port) + { + Host = hostname ??
throw new ArgumentNullException(nameof(hostname)); + Port = port; + Buffer = new byte[8192]; // Buffer size can be adjusted + } + + public override IReadOnlyList<byte> Read(int nBytes) + { + if (Socket == null) + throw new InvalidOperationException("Socket is not connected."); + + var buffer = new byte[nBytes]; + int bytesRead = 0; + + while (bytesRead < nBytes) + { + var read = Socket.Receive(buffer, bytesRead, nBytes - bytesRead, SocketFlags.None); + if (read == 0) + throw new Exception("Socket closed."); + + bytesRead += read; + } + + return buffer; + } + + public override void Write(IReadOnlyList<byte> bytes) + { + if (Socket == null) + throw new InvalidOperationException("Socket is not connected."); + + Socket.Send(bytes.ToArray(), SocketFlags.None); + } + + public void Connect() + { + if (Socket != null) + return; + + Socket = new Socket(SocketType.Stream, ProtocolType.Tcp) + { + Blocking = true, + NoDelay = true, + LingerState = new LingerOption(false, 0), + ReceiveTimeout = TimeoutMs, + SendTimeout = TimeoutMs + }; + + try + { + // Wait() returns false when the connect attempt does not finish within the timeout + if (!Socket.ConnectAsync(Host, Port).Wait(TimeoutMs)) + throw new TimeoutException($"Connecting to {Host}:{Port} timed out."); + } + catch + { + Socket = null; + throw; + } + } + + public void Disconnect() + { + if (Socket == null) + return; + + try + { + Socket.Close(); + } + finally + { + Socket = null; + } + } + + public void Dispose() + { + Disconnect(); + } + } +} + diff --git a/csharp/Lib/Protocols/Modbus/Clients/ModbusTcpClient.cs b/csharp/Lib/Protocols/Modbus/Clients/ModbusTcpClient.cs index ff206717f..7966bacb4 100644 --- a/csharp/Lib/Protocols/Modbus/Clients/ModbusTcpClient.cs +++ b/csharp/Lib/Protocols/Modbus/Clients/ModbusTcpClient.cs @@ -12,7 +12,7 @@ namespace InnovEnergy.Lib.Protocols.Modbus.Clients; using UInt16s = IReadOnlyCollection<UInt16>; using Booleans = IReadOnlyCollection<Boolean>; -public class ModbusTcpClient : ModbusClient +/*public class ModbusTcpClient : ModbusClient { public const UInt16 DefaultPort = 502; private UInt16 _Id; @@ -184,4 +184,171 @@ return new MbData(rxFrm.RegistersRead.RawData, readAddress, Endian); } +}*/ + +public class ModbusTcpClient : ModbusClient +{ + public const ushort DefaultPort = 502; + private ushort _Id; + public TcpChannel Channel { get; } + + public ModbusTcpClient(TcpChannel channel, byte slaveId) : base(channel, slaveId) + { + Channel = channel; + Channel.Connect(); + } + + private ushort NextId() => unchecked(++_Id); + + public override MbData ReadCoils(ushort readAddress, ushort nValues) + { + var id = NextId(); // TODO: check response id + + var cmd = new ReadCoilsCommandFrame(SlaveId, readAddress, nValues); + var hdr = new MbapHeader(id, cmd.Data.Count); + var frm = new ModbusTcpFrame(hdr, cmd); + + Channel.Write(frm.Data); + + var hData = Channel.Read(MbapHeader.Size).ToArray(); + var rxHdr = new MbapHeader(hData); + + var rxFrm = Channel + .Read(rxHdr.FrameLength) + .ToArray() + .Apply(ReadCoilsResponseFrame.Parse) + .Apply(cmd.VerifyResponse); + + return new MbData(rxFrm.Coils.RawData, readAddress, Endian); + } + + public override MbData ReadDiscreteInputs(ushort readAddress, ushort nValues) + { + var id = NextId(); // TODO: check response id + + var cmd = new ReadDiscreteInputsCommandFrame(SlaveId, readAddress, nValues); + var hdr = new MbapHeader(id, cmd.Data.Count); + var frm = new ModbusTcpFrame(hdr, cmd); + + Channel.Write(frm.Data); + + var hData = Channel.Read(MbapHeader.Size).ToArray(); + var rxHdr = new MbapHeader(hData); + + var rxFrm = Channel +
.Read(rxHdr.FrameLength) + .ToArray() + .Apply(ReadDiscreteInputsResponseFrame.Parse) + .Apply(cmd.VerifyResponse); + + return new MbData(rxFrm.Inputs.RawData, readAddress, Endian); + } + + public override MbData ReadInputRegisters(ushort readAddress, ushort nValues) + { + var id = NextId(); // TODO: check response id + + var cmd = new ReadInputRegistersCommandFrame(SlaveId, readAddress, nValues); + var hdr = new MbapHeader(id, cmd.Data.Count); + var frm = new ModbusTcpFrame(hdr, cmd); + + Channel.Write(frm.Data); + + var hData = Channel.Read(MbapHeader.Size).ToArray(); + var rxHdr = new MbapHeader(hData); + + var rxFrm = Channel + .Read(rxHdr.FrameLength) + .ToArray() + .Apply(ReadInputRegistersResponseFrame.Parse) + .Apply(cmd.VerifyResponse); + + return new MbData(rxFrm.RegistersRead.RawData, readAddress, Endian); + } + + public override MbData ReadHoldingRegisters(ushort readAddress, ushort nValues) + { + var id = NextId(); // TODO: check response id + + var cmd = new ReadHoldingRegistersCommandFrame(SlaveId, readAddress, nValues); + var hdr = new MbapHeader(id, cmd.Data.Count); + var frm = new ModbusTcpFrame(hdr, cmd); + + Channel.Write(frm.Data); + + var hData = Channel.Read(MbapHeader.Size).ToArray(); + var rxHdr = new MbapHeader(hData); + + var rxFrm = Channel + .Read(rxHdr.FrameLength) + .ToArray() + .Apply(ReadHoldingRegistersResponseFrame.Parse) + .Apply(cmd.VerifyResponse); + + return new MbData(rxFrm.RegistersRead.RawData, readAddress, Endian); + } + + public override ushort WriteCoils(ushort writeAddress, Booleans coils) + { + var id = NextId(); // TODO: check response id + var cmd = new WriteCoilsCommandFrame(SlaveId, writeAddress, coils); + var hdr = new MbapHeader(id, cmd.Data.Count); + var frm = new ModbusTcpFrame(hdr, cmd); + + Channel.Write(frm.Data); + + var hData = Channel.Read(MbapHeader.Size).ToArray(); + var rxHdr = new MbapHeader(hData); + + var rxFrm = Channel + .Read(rxHdr.FrameLength) + .ToArray() + .Apply(WriteCoilsResponseFrame.Parse) + .Apply(cmd.VerifyResponse); + + return rxFrm.NbWritten; + } + + public override ushort WriteRegisters(ushort writeAddress, UInt16s values) + { + var id = NextId(); // TODO: check response id + var cmd = new WriteRegistersCommandFrame(SlaveId, writeAddress, values); + var hdr = new MbapHeader(id, cmd.Data.Count); + var frm = new ModbusTcpFrame(hdr, cmd); + + Channel.Write(frm.Data); + + var hData = Channel.Read(MbapHeader.Size).ToArray(); + var rxHdr = new MbapHeader(hData); + + var rxFrm = Channel + .Read(rxHdr.FrameLength) + .ToArray() + .Apply(WriteRegistersResponseFrame.Parse) + .Apply(cmd.VerifyResponse); + + return rxFrm.NbWritten; + } + + public override MbData ReadWriteRegisters(ushort readAddress, ushort nbToRead, ushort writeAddress, UInt16s registersToWrite) + { + var id = NextId(); // TODO: check response id + + var cmd = new ReadWriteRegistersCommandFrame(SlaveId, readAddress, nbToRead, writeAddress, registersToWrite); + + var hdr = new MbapHeader(id, cmd.Data.Count); + var frm = new ModbusTcpFrame(hdr, cmd); + + Channel.Write(frm.Data); + + var hData = Enumerable.ToArray(Channel.Read(MbapHeader.Size)); + var rxHdr = new MbapHeader(hData); + + var fData = Enumerable.ToArray(Channel.Read(rxHdr.FrameLength)); + var rxFrm = fData + .Apply(ReadWriteRegistersResponseFrame.Parse) + .Apply(cmd.VerifyResponse); + + return new MbData(rxFrm.RegistersRead.RawData, readAddress, Endian); + } } \ No newline at end of file diff --git a/csharp/Lib/Protocols/Modbus/Slaves/ModbusSlave.cs 
b/csharp/Lib/Protocols/Modbus/Slaves/ModbusSlave.cs index 3b1870058..8c78dbb31 100644 --- a/csharp/Lib/Protocols/Modbus/Slaves/ModbusSlave.cs +++ b/csharp/Lib/Protocols/Modbus/Slaves/ModbusSlave.cs @@ -1,7 +1,7 @@ using InnovEnergy.Lib.Protocols.Modbus.Channels; using InnovEnergy.Lib.Protocols.Modbus.Clients; -namespace InnovEnergy.Lib.Protocols.Modbus.Slaves; +/*namespace InnovEnergy.Lib.Protocols.Modbus.Slaves; public static class ModbusSlave { @@ -11,14 +11,14 @@ ModbusTcpClient SlaveId(Byte slaveId) => new ModbusTcpClient(channel, slaveId); return SlaveId; } - + public static Func<Byte, ModbusRtuClient> ModbusRtu(this Channel channel) { ModbusRtuClient SlaveId(Byte slaveId) => new ModbusRtuClient(channel, slaveId); return SlaveId; } - - + + public static Func<Byte, ModbusTcpClient> ModbusTcp<R>(this Channel channel) where R : notnull, new() { ModbusTcpClient SlaveId(Byte slaveId) @@ -28,28 +28,82 @@ return SlaveId; } - + public static Func<Byte, ModbusRtuClient> ModbusRtu<R>(this Channel channel) where R : notnull, new() { ModbusRtuClient SlaveId(Byte slaveId) => new ModbusRtuClient(channel, slaveId); return SlaveId; } - + public static ModbusDevice<T> TcpSlave<T>(this Channel channel, Byte slaveId) where T : notnull, new() { var client = new ModbusTcpClient(channel, slaveId); return new ModbusDevice<T>(client); } - + public static ModbusDevice<T> RtuSlave<T>(this Channel channel, Byte slaveId) where T : notnull, new() { var client = new ModbusRtuClient(channel, slaveId); return new ModbusDevice<T>(client); } - + public static ModbusDevice<T> Slave<T>(this ModbusClient modbusClient) where T : notnull, new() { return new ModbusDevice<T>(modbusClient); } - -} \ No newline at end of file + +}*/ + +using InnovEnergy.Lib.Protocols.Modbus.Channels; +using InnovEnergy.Lib.Protocols.Modbus.Clients; + +namespace InnovEnergy.Lib.Protocols.Modbus.Slaves +{ + public static class ModbusSlave + { + public static Func<byte, ModbusTcpClient> ModbusTcp(this Channel channel) + { + ModbusTcpClient SlaveId(byte slaveId) => new ModbusTcpClient((TcpChannel)channel, slaveId); + return SlaveId; + } + + public static Func<byte, ModbusRtuClient> ModbusRtu(this Channel channel) + { + ModbusRtuClient SlaveId(byte slaveId) => new ModbusRtuClient(channel, slaveId); + return SlaveId; + } + + public static Func<byte, ModbusTcpClient> ModbusTcp<R>(this Channel channel) where R : notnull, new() + { + ModbusTcpClient SlaveId(byte slaveId) + { + return new ModbusTcpClient((TcpChannel)channel, slaveId); + } + + return SlaveId; + } + + public static Func<byte, ModbusRtuClient> ModbusRtu<R>(this Channel channel) where R : notnull, new() + { + ModbusRtuClient SlaveId(byte slaveId) => new ModbusRtuClient(channel, slaveId); + return SlaveId; + } + + public static ModbusDevice<T> TcpSlave<T>(this Channel channel, byte slaveId) where T : notnull, new() + { + var client = new ModbusTcpClient((TcpChannel)channel, slaveId); + return new ModbusDevice<T>(client); + } + + public static ModbusDevice<T> RtuSlave<T>(this Channel channel, byte slaveId) where T : notnull, new() + { + var client = new ModbusRtuClient(channel, slaveId); + return new ModbusDevice<T>(client); + } + + public static ModbusDevice<T> Slave<T>(this ModbusClient modbusClient) where T : notnull, new() + { + return new ModbusDevice<T>(modbusClient); + } + } +} diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/__init__.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/config.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/config.py new file mode 100755 index 000000000..1d4962daa --- /dev/null +++
b/firmware/opt/dbus-fz-sonick-48tl-with-s3/config.py @@ -0,0 +1,59 @@ +import serial +import logging +from data import read_file_one_line + +# dbus configuration + +CONNECTION = 'Modbus RTU' +PRODUCT_NAME = 'FIAMM 48TL Series Battery' +PRODUCT_ID = 0xB012 # assigned by victron +DEVICE_INSTANCE = 1 +SERVICE_NAME_PREFIX = 'com.victronenergy.battery.' + + +# driver configuration + +SOFTWARE_VERSION = '3.0.0' +UPDATE_INTERVAL = 2000 # milliseconds +#LOG_LEVEL = logging.INFO +LOG_LEVEL = logging.DEBUG + + +# battery config + +V_MAX = 54.2 +V_MIN = 42 +R_STRING_MIN = 0.125 +R_STRING_MAX = 0.250 +I_MAX_PER_STRING = 15 +AH_PER_STRING = 40 +NUM_OF_STRINGS_PER_BATTERY = 5 + +# modbus configuration + +BASE_ADDRESS = 999 +NO_OF_REGISTERS = 64 +MAX_SLAVE_ADDRESS = 25 + + +# RS 485 configuration + +PARITY = serial.PARITY_ODD +TIMEOUT = 0.1 # seconds +BAUD_RATE = 115200 +BYTE_SIZE = 8 +STOP_BITS = 1 +MODE = 'rtu' + +# InnovEnergy IOT configuration + +INSTALLATION_NAME = read_file_one_line('/data/innovenergy/openvpn/installation-name') +INNOVENERGY_SERVER_IP = '10.2.0.1' +INNOVENERGY_SERVER_PORT = 8134 +INNOVENERGY_PROTOCOL_VERSION = '48TL200V3' + + +# S3 Credentials +S3BUCKET = "5-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" +S3KEY = "EXO6bb63d9bbe5f938a68fa444b" +S3SECRET = "A4-5wIjIlAqn-p0cUkQu0f9fBIrX1V5PGTBDwjsrlR8" diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/controller.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/controller.py new file mode 100755 index 000000000..749093592 --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/controller.py @@ -0,0 +1,644 @@ +#!/usr/bin/python -u +# coding=utf-8 + +import logging +import os +import time +import states as State +import target_type as TargetType + +from random import randint +from python_libs.ie_dbus.dbus_service import DBusService +from python_libs.ie_utils.main_loop import run_on_main_loop + +# noinspection PyUnreachableCode +if False: + from typing import NoReturn, Optional, Any, Iterable, List + +logging.basicConfig(level=logging.INFO) +_log = logging.getLogger(__name__) + +VERSION = '1.0.0' +PRODUCT = 'Controller' + +GRID_SERVICE_PREFIX = 'com.victronenergy.grid.' +BATTERY_SERVICE_PREFIX = 'com.victronenergy.battery.' +INVERTER_SERVICE_PREFIX = 'com.victronenergy.vebus.' 
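controller.py locates its peer services on D-Bus purely by these name prefixes (see get_service further down). A minimal Python sketch of that lookup, with illustrative service names; unlike the real method, this one returns None instead of raising:

    GRID_SERVICE_PREFIX = 'com.victronenergy.grid.'

    def get_service(available_services, prefix):
        # first service whose name starts with the prefix, None if there is none
        return next((s for s in available_services if s.startswith(prefix)), None)

    services = ['com.victronenergy.grid.cgwacs_ttyUSB0_mb1',   # illustrative names
                'com.victronenergy.battery.ttyUSB1']
    print(get_service(services, GRID_SERVICE_PREFIX))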
+SYSTEM_SERVICE_PREFIX = 'com.victronenergy.system' +HUB4_SERVICE_PREFIX = 'com.victronenergy.hub4' +SETTINGS_SERVICE_PREFIX = 'com.victronenergy.settings' + +UPDATE_PERIOD_MS = 2000 +MAX_POWER_PER_BATTERY = 2500 + +MAX_DAYS_WITHOUT_EOC = 7 +SECONDS_PER_DAY = 24 * 60 * 60 + +GRID_SET_POINT_SETTING = PRODUCT + '/GridSetPoint' +LAST_EOC_SETTING = PRODUCT + '/LastEOC' +CALIBRATION_CHARGE_START_TIME_OF_DAY_SETTING = PRODUCT + '/CalibrationChargeStartTime' + +HEAT_LOSS = 150 # W +P_CONST = 0.5 # W/W + +Epoch = int +Seconds = int + + +def time_now(): + return int(time.time()) + + +class Controller(object): + + def __init__(self, measurement, target, target_type, state): + # type: (float, float, int, int) -> NoReturn + self.target_type = target_type + self.target = target + self.measurement = measurement + self.state = state + + d_p = target - measurement + self.delta = d_p * P_CONST + + @staticmethod + def min(controllers): + # type: (Iterable[Controller]) -> Controller + return min(controllers, key=lambda c: c.delta) + + @staticmethod + def max(controllers): + # type: (Iterable[Controller]) -> Controller + return max(controllers, key=lambda c: c.delta) + + def clamp(self, lower_limit_controllers, upper_limit_controllers): + # type: (List[Controller],List[Controller]) -> Controller + c_min = Controller.min(upper_limit_controllers + [self]) + return Controller.max(lower_limit_controllers + [c_min]) + + +# noinspection PyMethodMayBeStatic +class InnovEnergyController(DBusService): + + def __init__(self): + + super(InnovEnergyController, self).__init__(PRODUCT.lower()) + + self.settings.add_setting(path=LAST_EOC_SETTING, default_value=0) # unix epoch timestamp + self.settings.add_setting(path=GRID_SET_POINT_SETTING, default_value=0) # grid setpoint, Watts + + self.settings.add_setting(path=CALIBRATION_CHARGE_START_TIME_OF_DAY_SETTING, default_value=32400) # 09:00 + + self.own_properties.set('/ProductName', PRODUCT) + self.own_properties.set('/Mgmt/ProcessName', __file__) + self.own_properties.set('/Mgmt/ProcessVersion', VERSION) + self.own_properties.set('/Mgmt/Connection', 'dbus') + self.own_properties.set('/ProductId', PRODUCT) + self.own_properties.set('/FirmwareVersion', VERSION) + self.own_properties.set('/HardwareVersion', VERSION) + self.own_properties.set('/Connected', 1) + self.own_properties.set('/TimeToCalibrationCharge', 'unknown') + self.own_properties.set('/State', 0) + + self.phases = [ + p for p in ['/Hub4/L1/AcPowerSetpoint', '/Hub4/L2/AcPowerSetpoint', '/Hub4/L3/AcPowerSetpoint'] + if self.remote_properties.exists(self.inverter_service + p) + ] + + self.n_phases = len(self.phases) + print ('The system has ' + str(self.n_phases) + ' phase' + ('s' if self.n_phases != 1 else '')) + + self.max_inverter_power = 32700 + # ^ defined in https://github.com/victronenergy/dbus_modbustcp/blob/master/CCGX-Modbus-TCP-register-list.xlsx + + def clamp_power_command(self, value): + # type: (float) -> int + + value = max(value, -self.max_inverter_power) + value = min(value, self.max_inverter_power) + + return int(value) + + def get_service(self, prefix): + # type: (str) -> Optional[unicode] + service = next((s for s in self.available_services if s.startswith(prefix)), None) + + if service is None: + raise Exception('no service matching ' + prefix + '* available') + + return service + + def is_service_available(self, prefix): + # type: (str) -> bool + return next((True for s in self.available_services if s.startswith(prefix)), False) + + @property + def battery_service(self): + # type: () -> 
Optional[unicode] + return self.get_service(BATTERY_SERVICE_PREFIX) + + @property + def battery_available(self): + # type: () -> bool + return self.is_service_available(BATTERY_SERVICE_PREFIX) + + @property + def grid_service(self): + # type: () -> Optional[unicode] + return self.get_service(GRID_SERVICE_PREFIX) + + @property + def grid_meter_available(self): + # type: () -> bool + return self.is_service_available(GRID_SERVICE_PREFIX) + + @property + def inverter_service(self): + # type: () -> Optional[unicode] + return self.get_service(INVERTER_SERVICE_PREFIX) + + @property + def inverter_available(self): + # type: () -> bool + return self.is_service_available(INVERTER_SERVICE_PREFIX) + + @property + def system_service(self): + # type: () -> Optional[unicode] + return self.get_service(SYSTEM_SERVICE_PREFIX) + + @property + def system_service_available(self): + # type: () -> bool + return self.is_service_available(SYSTEM_SERVICE_PREFIX) + + @property + def hub4_service(self): + # type: () -> Optional[unicode] + return self.get_service(HUB4_SERVICE_PREFIX) + + @property + def hub4_service_available(self): + # type: () -> bool + return self.is_service_available(HUB4_SERVICE_PREFIX) + + @property + def inverter_power_setpoint(self): + # type: () -> float + return sum((self.get_inverter_prop(p) for p in self.phases)) + + def get_battery_prop(self, dbus_path): + # type: (str) -> Any + battery_service = self.battery_service + return self.remote_properties.get(battery_service + dbus_path).value + + def get_grid_prop(self, dbus_path): + # type: (str) -> Any + return self.remote_properties.get(self.grid_service + dbus_path).value + + def get_inverter_prop(self, dbus_path): + # type: (str) -> Any + return self.remote_properties.get(self.inverter_service + dbus_path).value + + def get_system_prop(self, dbus_path): + # type: (str) -> Any + system_service = self.system_service + return self.remote_properties.get(system_service + dbus_path).value + + def get_hub4_prop(self, dbus_path): + # type: (str) -> Any + hub4_service = self.hub4_service + return self.remote_properties.get(hub4_service + dbus_path).value + + def set_settings_prop(self, dbus_path, value): + # type: (str, Any) -> bool + return self.remote_properties.set(SETTINGS_SERVICE_PREFIX + dbus_path, value) + + def set_inverter_prop(self, dbus_path, value): + # type: (str, Any) -> bool + inverter_service = self.inverter_service + return self.remote_properties.set(inverter_service + dbus_path, value) + + @property + def max_battery_charge_power(self): + # type: () -> int + return self.get_battery_prop('/Info/MaxChargePower') + + @property + def max_battery_discharge_power(self): + # type: () -> int + return self.get_battery_prop('/Info/MaxDischargePower') + + @property + def max_configured_charge_power(self): + # type: () -> Optional[int] + max_power = self.settings.get('/Settings/CGwacs/MaxChargePower') + return max_power if max_power >= 0 else None + + @property + def max_configured_discharge_power(self): # unsigned + # type: () -> Optional[int] + max_power = self.settings.get('/Settings/CGwacs/MaxDischargePower') + return max_power if max_power >= 0 else None + + @property + def max_charge_power(self): + # type: () -> int + if self.max_configured_charge_power is None: + return self.max_battery_charge_power + else: + return min(self.max_battery_charge_power, self.max_configured_charge_power) + + @property + def max_discharge_power(self): # unsigned + # type: () -> int + if self.max_configured_discharge_power is None: + return 
self.max_battery_discharge_power + else: + return min(self.max_battery_discharge_power, self.max_configured_discharge_power) + + def set_inverter_power_setpoint(self, power): + # type: (float) -> NoReturn + + if self.settings.get('/Settings/CGwacs/BatteryLife/State') == 9: + self.settings.set('/Settings/CGwacs/BatteryLife/State', 0) # enables scheduled charge + self.settings.set('/Settings/CGwacs/Hub4Mode', 3) # disable hub4 + self.set_inverter_prop('/Hub4/DisableCharge', 0) + self.set_inverter_prop('/Hub4/DisableFeedIn', 0) + + power = self.clamp_power_command(power / self.n_phases) + for p in self.phases: + self.set_inverter_prop(p, power + randint(-1, 1)) # use randint to force dbus re-send + + def set_controller_state(self, state): + # type: (int) -> NoReturn + self.own_properties.set('/State', state) + + @property + def grid_power(self): + # type: () -> Optional[float] + try: + return self.get_grid_prop('/Ac/Power') + except: + return None + + @property + def battery_cold(self): + # type: () -> bool + return self.get_battery_prop('/IoStatus/BatteryCold') == 1 + + @property + def eoc_reached(self): + # type: () -> bool + if not self.battery_available: + return False + + return min(self.get_battery_prop('/EOCReached')) == 1 + + @property + def battery_power(self): + # type: () -> float + return self.get_battery_prop('/Dc/0/Power') + + @property + def inverter_ac_in_power(self): + # type: () -> float + return self.get_inverter_prop('/Ac/ActiveIn/P') + + @property + def inverter_ac_out_power(self): + # type: () -> float + return self.get_inverter_prop('/Ac/Out/P') + + @property + def soc(self): + # type: () -> float + return self.get_battery_prop('/Soc') + + @property + def n_batteries(self): + # type: () -> int + return self.get_battery_prop('/NbOfBatteries') + + @property + def min_soc(self): + # type: () -> float + return self.settings.get('/Settings/CGwacs/BatteryLife/MinimumSocLimit') + + @property + def should_hold_min_soc(self): + # type: () -> bool + return self.min_soc <= self.soc <= self.min_soc + 5 + + @property + def utc_offset(self): + # type: () -> int + + # stackoverflow.com/a/1301528 + # stackoverflow.com/a/3168394 + + os.environ['TZ'] = self.settings.get('/Settings/System/TimeZone') + time.tzset() + is_dst = time.daylight and time.localtime().tm_isdst > 0 + return -(time.altzone if is_dst else time.timezone) + + @property + def grid_set_point(self): + # type: () -> float + return self.settings.get('/Settings/CGwacs/AcPowerSetPoint') + + @property + def time_to_calibration_charge_str(self): + # type: () -> str + return self.own_properties.get('/TimeToCalibrationCharge').text + + @property + def calibration_charge_deadline(self): + # type: () -> Epoch + + utc_offset = self.utc_offset + ultimate_deadline = self.settings.get(LAST_EOC_SETTING) + MAX_DAYS_WITHOUT_EOC * SECONDS_PER_DAY + midnight_before_udl = int((ultimate_deadline + utc_offset) / SECONDS_PER_DAY) * SECONDS_PER_DAY - utc_offset # round off to last midnight + + dead_line = midnight_before_udl + self.calibration_charge_start_time_of_day + + while dead_line > ultimate_deadline: # should fire at most once, but let's be defensive... 
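The midnight rounding above is easier to verify with concrete numbers; a small self-contained sketch with illustrative epoch values, before the loop body continues below:

    SECONDS_PER_DAY = 24 * 60 * 60
    utc_offset = 7200                    # UTC+2, illustrative
    ultimate_deadline = 1721487600       # 17:00 local time on some day
    midnight = int((ultimate_deadline + utc_offset) / SECONDS_PER_DAY) * SECONDS_PER_DAY - utc_offset
    dead_line = midnight + 9 * 3600      # default calibration start time, 09:00
    assert dead_line == 1721458800       # 09:00 local, still before the 17:00 deadline
    assert dead_line <= ultimate_deadline  # so the defensive loop body never runs here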
+ dead_line -= SECONDS_PER_DAY # too late, advance one day + + return dead_line + + @property + def time_to_calibration_charge(self): + # type: () -> Seconds + return self.calibration_charge_deadline - time_now() + + @property + def grid_blackout(self): + # type: () -> bool + return self.get_inverter_prop('/Leds/Mains') < 1 + + @property + def scheduled_charge(self): + # type: () -> bool + return self.get_hub4_prop('/Overrides/ForceCharge') != 0 + + @property + def calibration_charge_start_time_of_day(self): + # type: () -> Seconds + return self.settings.get(CALIBRATION_CHARGE_START_TIME_OF_DAY_SETTING) # seconds since midnight + + @property + def must_do_calibration_charge(self): + # type: () -> bool + return self.time_to_calibration_charge <= 0 + + def controller_charge_to_min_soc(self): + # type: () -> Controller + + return Controller( + measurement=self.battery_power, + target=self.max_charge_power, + target_type=TargetType.BATTERY_DC, + state=State.CHARGE_TO_MIN_SOC + ) + + def controller_hold_min_soc(self): + # type: () -> Controller + + # TODO: explain + + a = -4 * HEAT_LOSS * self.n_batteries + b = -a * (self.min_soc + .5) + + target_dc_power = a * self.soc + b + + return Controller( + measurement = self.battery_power, + target = target_dc_power, + target_type = TargetType.BATTERY_DC, + state = State.HOLD_MIN_SOC + ) + + def controller_calibration_charge(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.CALIBRATION_CHARGE + ) + + def controller_limit_discharge_power(self): # signed + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = -self.max_discharge_power, # add sign! 
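A note on the sign flip above, kept next to the argument it concerns:

    # sign convention used throughout this driver: battery DC power is positive
    # when charging and negative when discharging; max_discharge_power is kept
    # unsigned, so it must be negated before it can serve as the lower
    # (most negative) bound handed to Controller.clamp().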
+ target_type = TargetType.BATTERY_DC, + state = State.LIMIT_DISCHARGE_POWER + ) + + def controller_limit_charge_power(self): + # type: () -> Controller + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.LIMIT_CHARGE_POWER + ) + + def controller_optimize_self_consumption(self): + # type: () -> Controller + + return Controller( + measurement = self.grid_power, + target = self.grid_set_point, + target_type = TargetType.GRID_AC, + state = State.OPTIMIZE_SELF_CONSUMPTION + ) + + def controller_heating(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.HEATING + ) + + def controller_scheduled_charge(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.SCHEDULED_CHARGE + ) + + def controller_no_grid_meter(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.NO_GRID_METER_AVAILABLE + ) + + def controller_no_battery(self): + # type: () -> Controller + + return Controller( + measurement = self.inverter_ac_in_power, + target = 0, + target_type = TargetType.INVERTER_AC_IN, + state = State.NO_BATTERY_AVAILABLE + ) + + def controller_bridge_grid_blackout(self): + # type: () -> Controller + + return Controller( + measurement = 0, + target = 0, + target_type = TargetType.GRID_AC, + state = State.BRIDGE_GRID_BLACKOUT + ) + + def update_eoc(self): + + if self.eoc_reached: + print('battery has reached EOC') + self.settings.set(LAST_EOC_SETTING, time_now()) + + self.publish_time_to_calibration_charge() + + def publish_time_to_calibration_charge(self): + + total_seconds = self.time_to_calibration_charge + + if total_seconds <= 0: + time_to_eoc_str = 'now' + else: + total_minutes, seconds = divmod(total_seconds, 60) + total_hours, minutes = divmod(total_minutes, 60) + total_days, hours = divmod(total_hours, 24) + + days_str = (str(total_days) + 'd') if total_days > 0 else '' + hours_str = (str(hours) + 'h') if total_hours > 0 else '' + minutes_str = (str(minutes) + 'm') if total_days == 0 else '' + + time_to_eoc_str = "{0} {1} {2}".format(days_str, hours_str, minutes_str).strip() + + self.own_properties.set('/TimeToCalibrationCharge', time_to_eoc_str) + + def print_system_stats(self, controller): + # type: (Controller) -> NoReturn + + def soc_setpoint(): + if controller.state == State.CALIBRATION_CHARGE or controller.state == State.NO_GRID_METER_AVAILABLE: + return ' => 100%' + if controller.state == State.CHARGE_TO_MIN_SOC: + return ' => ' + str(int(self.min_soc)) + '%' + return '' + + def setpoint(target_type): + if target_type != controller.target_type: + return '' + return ' => ' + str(int(controller.target)) + 'W' + + def p(power): + # type: (Optional[float]) -> str + if power is None: + return ' --- W' + else: + return str(int(power)) + 'W' + + ac_loads = None if self.grid_power is None else self.grid_power - self.inverter_ac_in_power + delta = p(controller.delta) if controller.delta < 0 else '+' + p(controller.delta) + battery_power = self.battery_power if self.battery_available else None + soc_ = str(self.soc) + '%' if self.battery_available else '---' + + print (State.name_of[controller.state]) + print ('') + print ('time to 
CC: ' + self.time_to_calibration_charge_str) + print (' SOC: ' + soc_ + soc_setpoint()) + print (' grid: ' + p(self.grid_power) + setpoint(TargetType.GRID_AC)) + print (' battery: ' + p(battery_power) + setpoint(TargetType.BATTERY_DC)) + print (' AC in: ' + p(self.inverter_ac_in_power) + ' ' + delta) + print (' AC out: ' + p(self.inverter_ac_out_power)) + print (' AC loads: ' + p(ac_loads)) + + def choose_controller(self): + # type: () -> Controller + + if self.grid_blackout: + return self.controller_bridge_grid_blackout() + + if not self.battery_available: + return self.controller_no_battery() + + if self.battery_cold: + return self.controller_heating() + + if self.scheduled_charge: + return self.controller_scheduled_charge() + + if self.must_do_calibration_charge: + return self.controller_calibration_charge() + + if self.soc < self.min_soc: + return self.controller_charge_to_min_soc() + + if not self.grid_meter_available: + return self.controller_no_grid_meter() + + hold_min_soc = self.controller_hold_min_soc() + limit_discharge_power = self.controller_limit_discharge_power() # signed + + lower_limit = [limit_discharge_power, hold_min_soc] + + # No upper limit. We no longer actively limit charge power. DC/DC Charger inside the BMS will do that for us. + upper_limit = [] + + optimize_self_consumption = self.controller_optimize_self_consumption() + + return optimize_self_consumption.clamp(lower_limit, upper_limit) + + def update(self): + + print('iteration started\n') + + self.update_eoc() + + if self.inverter_available: + + controller = self.choose_controller() + power = self.inverter_ac_in_power + controller.delta + + self.set_inverter_power_setpoint(power) + self.set_controller_state(controller.state) + self.print_system_stats(controller) # for debug + + else: + self.set_controller_state(State.NO_INVERTER_AVAILABLE) + print('inverter not available!') + + print('\niteration finished\n') + + +def main(): + + print('starting ' + __file__) + + with InnovEnergyController() as service: + run_on_main_loop(service.update, UPDATE_PERIOD_MS) + + print(__file__ + ' has shut down') + + +if __name__ == '__main__': + main() diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/convert.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/convert.py new file mode 100755 index 000000000..7138d856a --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/convert.py @@ -0,0 +1,192 @@ +import struct + +import config as cfg +from data import LedState, BatteryStatus + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Iterable, Union, AnyStr, Any + + +def read_bool(base_register, bit): + # type: (int, int) -> Callable[[BatteryStatus], bool] + + # TODO: explain base register offset + register = base_register + int(bit/16) + bit = bit % 16 + + def get_value(status): + # type: (BatteryStatus) -> bool + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value & (1 << bit) > 0 + + return get_value + + +def read_float(register, scale_factor=1.0, offset=0.0): + # type: (int, float, float) -> Callable[[BatteryStatus], float] + + def get_value(status): + # type: (BatteryStatus) -> float + value = status.modbus_data[register - cfg.BASE_ADDRESS] + + if value >= 0x8000: # convert to signed int16 + value -= 0x10000 # fiamm stores their integers signed AND with sign-offset @#%^&! 
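A worked instance of the conversion above, using an illustrative raw register value:

    # raw value 0xFFFE: 0xFFFE >= 0x8000, so 0xFFFE - 0x10000 = -2;
    # with offset=0 and scale_factor=0.1 the returned reading is -0.2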
+ + return (value + offset) * scale_factor + + return get_value + + +def read_registers(register, count): + # type: (int, int) -> Callable[[BatteryStatus], List[int]] + + start = register - cfg.BASE_ADDRESS + end = start + count + + def get_value(status): + # type: (BatteryStatus) -> List[int] + return [x for x in status.modbus_data[start:end]] + + return get_value + + +def comma_separated(values): + # type: (Iterable[str]) -> str + return ", ".join(set(values)) + + +def count_bits(base_register, nb_of_registers, nb_of_bits, first_bit=0): + # type: (int, int, int, int) -> Callable[[BatteryStatus], int] + + get_registers = read_registers(base_register, nb_of_registers) + end_bit = first_bit + nb_of_bits + + def get_value(status): + # type: (BatteryStatus) -> int + + registers = get_registers(status) + bin_registers = [bin(x)[-1:1:-1] for x in registers] # reverse the bits in each register so that bit0 is to the left + str_registers = [str(x).ljust(16, "0") for x in bin_registers] # add leading zeroes, so all registers are 16 chars long + bit_string = ''.join(str_registers) # join them, one long string of 0s and 1s + filtered_bits = bit_string[first_bit:end_bit] # take the first nb_of_bits bits starting at first_bit + + return filtered_bits.count('1') # count 1s + + return get_value + + +def read_led_state(register, led): + # type: (int, int) -> Callable[[BatteryStatus], int] + + read_lo = read_bool(register, led * 2) + read_hi = read_bool(register, led * 2 + 1) + + def get_value(status): + # type: (BatteryStatus) -> int + + lo = read_lo(status) + hi = read_hi(status) + + if hi: + if lo: + return LedState.blinking_fast + else: + return LedState.blinking_slow + else: + if lo: + return LedState.on + else: + return LedState.off + + return get_value + + +# noinspection PyShadowingNames +def unit(unit): + # type: (unicode) -> Callable[[unicode], unicode] + + def get_text(v): + # type: (unicode) -> unicode + return "{0}{1}".format(str(v), unit) + + return get_text + + +def const(constant): + # type: (any) -> Callable[[any], any] + def get(*args): + return constant + return get + + +def mean(numbers): + # type: (List[Union[float,int]]) -> float + return float(sum(numbers)) / len(numbers) + + +def first(ts, default=None): + return next((t for t in ts), default) + + +def bitfields_to_str(lists): + # type: (List[List[int]]) -> str + + def or_lists(): + # type: () -> Iterable[int] + + length = len(first(lists)) + n_lists = len(lists) + + for i in range(0, length): + e = 0 + for l in range(0, n_lists): + e = e | lists[l][i] + yield e + + hexed = [ + '{0:0>4X}'.format(x) + for x in or_lists() + ] + + return ' '.join(hexed) + + +def pack_string(string): + # type: (AnyStr) -> Any + data = string.encode('UTF-8') + return struct.pack('B', len(data)) + data + + +def read_bitmap(register): + # type: (int) -> Callable[[BatteryStatus], int] + + def get_value(status): + # type: (BatteryStatus) -> int + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value + + return get_value + +def return_in_list(ts): + return ts + +def first(ts): + return next(t for t in ts) + +def read_hex_string(register, count): + # type: (int, int) -> Callable[[BatteryStatus], str] + """ + reads count consecutive modbus registers from start_address, + and returns a hex representation of it: + e.g. for count=4: DEAD BEEF DEAD BEEF. 
+ """ + start = register - cfg.BASE_ADDRESS + end = start + count + + def get_value(status): + # type: (BatteryStatus) -> str + return ' '.join(['{0:0>4X}'.format(x) for x in status.modbus_data[start:end]]) + + return get_value diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/data.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/data.py new file mode 100755 index 000000000..9bff4ff93 --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/data.py @@ -0,0 +1,134 @@ +import config as cfg + + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Optional, AnyStr, Union, Any + + +class LedState(object): + """ + from page 6 of the '48TLxxx ModBus Protocol doc' + """ + off = 0 + on = 1 + blinking_slow = 2 + blinking_fast = 3 + + +class LedColor(object): + green = 0 + amber = 1 + blue = 2 + red = 3 + + +class ServiceSignal(object): + + def __init__(self, dbus_path, get_value_or_const, unit=''): + # type: (str, Union[Callable[[],Any],Any], Optional[AnyStr] )->None + + self.get_value_or_const = get_value_or_const + self.dbus_path = dbus_path + self.unit = unit + + @property + def value(self): + try: + return self.get_value_or_const() # callable + except: + return self.get_value_or_const # value + + +class BatterySignal(object): + + def __init__(self, dbus_path, aggregate, get_value, unit=''): + # type: (str, Callable[[List[any]],any], Callable[[BatteryStatus],any], Optional[AnyStr] )->None + """ + A Signal holds all information necessary for the handling of a + certain datum (e.g. voltage) published by the battery. + + :param dbus_path: str + object_path on DBus where the datum needs to be published + + :param aggregate: Iterable[any] -> any + function that combines the values of multiple batteries into one. + e.g. 
sum for currents, or mean for voltages + + :param get_value: (BatteryStatus) -> any + function to extract the datum from the modbus record, + """ + + self.dbus_path = dbus_path + self.aggregate = aggregate + self.get_value = get_value + self.unit = unit + + +class Battery(object): + + """ Data record to hold hardware and firmware specs of the battery """ + + def __init__(self, slave_address, hardware_version, firmware_version, bms_version, ampere_hours): + # type: (int, str, str, str, int) -> None + self.slave_address = slave_address + self.hardware_version = hardware_version + self.firmware_version = firmware_version + self.bms_version = bms_version + self.ampere_hours = ampere_hours + self.n_strings = int(ampere_hours/cfg.AH_PER_STRING) + self.i_max = self.n_strings * cfg.I_MAX_PER_STRING + self.v_min = cfg.V_MIN + self.v_max = cfg.V_MAX + self.r_int_min = cfg.R_STRING_MIN / self.n_strings + self.r_int_max = cfg.R_STRING_MAX / self.n_strings + + def __str__(self): + return 'slave address = {0}\nhardware version = {1}\nfirmware version = {2}\nbms version = {3}\nampere hours = {4}'.format( + self.slave_address, self.hardware_version, self.firmware_version, self.bms_version, str(self.ampere_hours)) + + +class BatteryStatus(object): + """ + record holding the current status of a battery + """ + def __init__(self, battery, modbus_data): + # type: (Battery, List[int]) -> None + + self.battery = battery + self.modbus_data = modbus_data + + def serialize(self): + # type: () -> str + + b = self.battery + + s = cfg.INNOVENERGY_PROTOCOL_VERSION + '\n' + s += cfg.INSTALLATION_NAME + '\n' + s += str(b.slave_address) + '\n' + s += b.hardware_version + '\n' + s += b.firmware_version + '\n' + s += b.bms_version + '\n' + s += str(b.ampere_hours) + '\n' + + for d in self.modbus_data: + s += str(d) + '\n' + + return s + + +def read_file_one_line(file_name): + + with open(file_name, 'r') as file: + return file.read().replace('\n', '').replace('\r', '').strip() + + +class CsvSignal(object): + def __init__(self, name, get_value, get_text=None): + self.name = name + self.get_value = get_value if callable(get_value) else lambda _: get_value + self.get_text = get_text + + if get_text is None: + self.get_text = "" diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/dbus-fzsonick-48tl.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/dbus-fzsonick-48tl.py new file mode 100755 index 000000000..87a64c631 --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/dbus-fzsonick-48tl.py @@ -0,0 +1,674 @@ +#!/usr/bin/python2 -u +# coding=utf-8 + +import logging +import re +import socket +import sys +import gobject +import signals +import config as cfg + +from dbus.mainloop.glib import DBusGMainLoop +from pymodbus.client.sync import ModbusSerialClient as Modbus +from pymodbus.exceptions import ModbusException, ModbusIOException +from pymodbus.other_message import ReportSlaveIdRequest +from pymodbus.pdu import ExceptionResponse +from pymodbus.register_read_message import ReadInputRegistersResponse +from data import BatteryStatus, BatterySignal, Battery, ServiceSignal +from python_libs.ie_dbus.dbus_service import DBusService + +import time +import os +import csv +import pika +import zipfile +import hashlib +import base64 +import hmac +import requests +from datetime import datetime +import io +import json +from convert import first +CSV_DIR = "/data/csv_files/" +INSTALLATION_NAME_FILE = '/data/innovenergy/openvpn/installation-name' + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime 
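The guarded import referenced here keeps Python 2 from importing typing at runtime while editors still resolve the names; a minimal sketch of the same pattern:

    # noinspection PyUnreachableCode
    if False:                     # never executed at runtime...
        from typing import List  # ...but static checkers still see the names

    def mean(xs):
        # type: (List[float]) -> float
        return sum(xs) / float(len(xs))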
+# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Iterable, NoReturn + + +RESET_REGISTER = 0x2087 + + +def compress_csv_data(csv_data, file_name="data.csv"): + memory_stream = io.BytesIO() + + # Create a zip archive in the memory buffer + with zipfile.ZipFile(memory_stream, 'w', zipfile.ZIP_DEFLATED) as archive: + # Add CSV data to the ZIP archive using writestr + archive.writestr(file_name, csv_data.encode('utf-8')) + + # Get the compressed byte array from the memory buffer + compressed_bytes = memory_stream.getvalue() + + # Encode the compressed byte array as a Base64 string + base64_string = base64.b64encode(compressed_bytes).decode('utf-8') + + return base64_string + +class S3config: + def __init__(self): + self.bucket = cfg.S3BUCKET + self.region = "sos-ch-dk-2" + self.provider = "exo.io" + self.key = cfg.S3KEY + self.secret = cfg.S3SECRET + self.content_type = "application/base64; charset=utf-8" + + @property + def host(self): + return "{}.{}.{}".format(self.bucket, self.region, self.provider) + + @property + def url(self): + return "https://{}".format(self.host) + + def create_put_request(self, s3_path, data): + headers = self._create_request("PUT", s3_path) + url = "{}/{}".format(self.url, s3_path) + response = requests.put(url, headers=headers, data=data) + return response + + def _create_request(self, method, s3_path): + date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + auth = self._create_authorization(method, self.bucket, s3_path, date, self.key, self.secret, self.content_type) + headers = { + "Host": self.host, + "Date": date, + "Authorization": auth, + "Content-Type": self.content_type + } + return headers + + @staticmethod + def _create_authorization(method, bucket, s3_path, date, s3_key, s3_secret, content_type="", md5_hash=""): + payload = "{}\n{}\n{}\n{}\n/{}/{}".format( + method, md5_hash, content_type, date, bucket.strip('/'), s3_path.strip('/') + ) + signature = base64.b64encode( + hmac.new(s3_secret.encode(), payload.encode(), hashlib.sha1).digest() + ).decode() + return "AWS {}:{}".format(s3_key, signature) + + +def SubscribeToQueue(): + channel = None # keep the name bound so a failed connection cannot raise NameError below + try: + connection = pika.BlockingConnection(pika.ConnectionParameters(host="10.2.0.11", + port=5672, + virtual_host="/", + credentials=pika.PlainCredentials("producer", "b187ceaddb54d5485063ddc1d41af66f"))) + channel = connection.channel() + channel.queue_declare(queue="statusQueue", durable=True) + print("Subscribed to queue") + except Exception as ex: + print("An error occurred while connecting to the RabbitMQ queue:", ex) + return channel + + +previous_warnings = {} +previous_alarms = {} + +class MessageType: + ALARM_OR_WARNING = "AlarmOrWarning" + HEARTBEAT = "Heartbeat" + +class AlarmOrWarning: + def __init__(self, description, created_by): + self.date = datetime.now().strftime('%Y-%m-%d') + self.time = datetime.now().strftime('%H:%M:%S') + self.description = description + self.created_by = created_by + + def to_dict(self): + return { + "Date": self.date, + "Time": self.time, + "Description": self.description, + "CreatedBy": self.created_by + } + +channel = SubscribeToQueue() +# Create an S3config instance +s3_config = S3config() +INSTALLATION_ID=int(s3_config.bucket.split('-')[0]) +PRODUCT_ID = 1 +is_first_update = True +prev_status = 0 +subscribed_to_queue_first_time = False +heartbit_interval = 0 + +def update_state_from_dictionaries(current_warnings, current_alarms, node_numbers): + global previous_warnings, previous_alarms, INSTALLATION_ID, PRODUCT_ID, is_first_update, channel,
prev_status, heartbit_interval, subscribed_to_queue_first_time + + heartbit_interval += 1 + + if is_first_update: + changed_warnings = current_warnings + changed_alarms = current_alarms + is_first_update = False + else: + changed_alarms = {} + changed_warnings = {} + # calculate the diff in warnings and alarms + prev_alarm_value_list = list(previous_alarms.values()) + alarm_keys = list(previous_alarms.keys()) + + for i, alarm in enumerate(current_alarms.values()): + if alarm != prev_alarm_value_list[i]: + changed_alarms[alarm_keys[i]] = True + else: + changed_alarms[alarm_keys[i]] = False + + prev_warning_value_list=list(previous_warnings.values()) + warning_keys=list(previous_warnings.keys()) + + for i, warning in enumerate(current_warnings.values()): + if warning!=prev_warning_value_list[i]: + changed_warnings[warning_keys[i]]=True + else: + changed_warnings[warning_keys[i]]=False + + status_message = { + "InstallationId": INSTALLATION_ID, + "Product": PRODUCT_ID, + "Status": 0, + "Type": 1, + "Warnings": [], + "Alarms": [] + } + + alarms_number_list = [] + for node_number in node_numbers: + cnt = 0 + for alarm_value in current_alarms.values(): + if alarm_value: + cnt+=1 + alarms_number_list.append(cnt) + + warnings_number_list = [] + for node_number in node_numbers: + cnt = 0 + for warning_value in current_warnings.values(): + if warning_value: + cnt+=1 + warnings_number_list.append(cnt) + + # Evaluate alarms + if any(changed_alarms.values()): + for i, changed_alarm in enumerate(changed_alarms.values()): + if changed_alarm and list(current_alarms.values())[i]: + status_message["Alarms"].append(AlarmOrWarning(list(current_alarms.keys())[i],"System").to_dict()) + + if any(changed_warnings.values()): + for i, changed_warning in enumerate(changed_warnings.values()): + if changed_warning and list(current_warnings.values())[i]: + status_message["Warnings"].append(AlarmOrWarning(list(current_warnings.keys())[i],"System").to_dict()) + + if any(current_alarms.values()): + status_message["Status"]=2 + + if not any(current_alarms.values()) and any(current_warnings.values()): + status_message["Status"]=1 + + if not any(current_alarms.values()) and not any(current_warnings.values()): + status_message["Status"]=0 + + if status_message["Status"]!=prev_status or len(status_message["Warnings"])>0 or len(status_message["Alarms"])>0: + prev_status=status_message["Status"] + status_message["Type"]=0 + status_message = json.dumps(status_message) + channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message) + print(status_message) + print("Message sent successfully") + elif heartbit_interval>=15 or not subscribed_to_queue_first_time: + print("Send heartbit message to rabbitmq") + heartbit_interval=0 + subscribed_to_queue_first_time=True + status_message = json.dumps(status_message) + channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message) + + previous_warnings = current_warnings.copy() + previous_alarms = current_alarms.copy() + + return status_message, alarms_number_list, warnings_number_list + +def read_csv_as_string(file_path): + """ + Reads a CSV file from the given path and returns its content as a single string. + """ + try: + # Note: 'encoding' is not available in open() in Python 2.7, so we'll use 'codecs' module. 
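The codecs import that follows matters because plain open() on Python 2 returns undecoded bytes; a minimal illustration (the path is illustrative):

    import codecs

    # Python 2: open(path).read() yields str (raw bytes);
    # codecs.open with an encoding yields unicode, like io.open / Python 3 open()
    with codecs.open('/tmp/example.csv', 'r', encoding='utf-8') as f:
        text = f.read()  # unicode, decoded from UTF-8 while reading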
+ import codecs + with codecs.open(file_path, 'r', encoding='utf-8') as file: + return file.read() + except IOError as e: + if e.errno == 2: # errno 2 corresponds to "No such file or directory" + print("Error: The file {} does not exist.".format(file_path)) + else: + print("IO error occurred: {}".format(str(e))) + return None + + + +def init_modbus(tty): + # type: (str) -> Modbus + + logging.debug('initializing Modbus') + + return Modbus( + port='/dev/' + tty, + method=cfg.MODE, + baudrate=cfg.BAUD_RATE, + stopbits=cfg.STOP_BITS, + bytesize=cfg.BYTE_SIZE, + timeout=cfg.TIMEOUT, + parity=cfg.PARITY) + + +def init_udp_socket(): + # type: () -> socket + + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.setblocking(False) + + return s + + +def report_slave_id(modbus, slave_address): + # type: (Modbus, int) -> str + + slave = str(slave_address) + + logging.debug('requesting slave id from node ' + slave) + + with modbus: + + request = ReportSlaveIdRequest(unit=slave_address) + response = modbus.execute(request) + + if isinstance(response, ExceptionResponse) or isinstance(response, ModbusException): + raise Exception('failed to get slave id from ' + slave + ' : ' + str(response)) + + return response.identifier + + +def identify_battery(modbus, slave_address): + # type: (Modbus, int) -> Battery + + logging.info('identifying battery...') + + hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address) + firmware_version = read_firmware_version(modbus, slave_address) + + specs = Battery( + slave_address=slave_address, + hardware_version=hardware_version, + firmware_version=firmware_version, + bms_version=bms_version, + ampere_hours=ampere_hours) + + logging.info('battery identified:\n{0}'.format(str(specs))) + + return specs + + +def identify_batteries(modbus): + # type: (Modbus) -> List[Battery] + + def _identify_batteries(): + slave_address = 0 + n_missing = -255 + + while n_missing < 3: + slave_address += 1 + try: + yield identify_battery(modbus, slave_address) + n_missing = 0 + except Exception as e: + logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e))) + n_missing += 1 + + logging.info('giving up searching for further batteries') + + batteries = list(_identify_batteries()) # don't be lazy!
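The list(...) call above is what actually drives the bus scan: generators are lazy and do nothing until consumed. A minimal illustration:

    def probe():
        print('scanning...')   # side effect runs only when iterated
        yield 'battery @ 1'

    gen = probe()       # nothing printed yet: the generator is lazy
    found = list(gen)   # 'scanning...' printed; results now cached in a list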
+
+    n = len(batteries)
+    logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries'))
+
+    return batteries
+
+
+def parse_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> (str, str, int)
+
+    slave_id = report_slave_id(modbus, slave_address)
+
+    sid = re.sub(r'[^\x20-\x7E]', '', slave_id)  # strip non-printable chars
+
+    # the slave id encodes the hardware version ("48TL" + amp hours), followed by the BMS version
+    match = re.match(r'(?P<hw>48TL(?P<ah>[0-9]+)) *(?P<bms>.*)', sid)
+
+    if match is None:
+        raise Exception('no known battery found')
+
+    return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip())
+
+
+def read_firmware_version(modbus, slave_address):
+    # type: (Modbus, int) -> str
+
+    logging.debug('reading firmware version')
+
+    with modbus:
+
+        response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
+        register = response.registers[0]
+
+        return '{0:0>4X}'.format(register)
+
+
+def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS):
+    # type: (Modbus, int, int, int) -> ReadInputRegistersResponse
+
+    logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))
+
+    return modbus.read_input_registers(
+        address=base_address,
+        count=count,
+        unit=slave_address)
+
+
+def read_battery_status(modbus, battery):
+    # type: (Modbus, Battery) -> BatteryStatus
+    """
+    Read the modbus registers containing the battery's status info.
+    """
+
+    logging.debug('reading battery status')
+
+    with modbus:
+        data = read_modbus_registers(modbus, battery.slave_address)
+        return BatteryStatus(battery, data.registers)
+
+
+def publish_values_on_dbus(service, battery_signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    publish_individuals(service, battery_signals, battery_statuses)
+    publish_aggregates(service, battery_signals, battery_statuses)
+
+
+def publish_aggregates(service, signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    for s in signals:
+        if s.aggregate is None:
+            continue
+        values = [s.get_value(battery_status) for battery_status in battery_statuses]
+        value = s.aggregate(values)
+        service.own_properties.set(s.dbus_path, value, s.unit)
+
+
+def publish_individuals(service, signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    for signal in signals:
+        for battery_status in battery_statuses:
+            address = battery_status.battery.slave_address
+            dbus_path = '/_Battery/' + str(address) + signal.dbus_path
+            value = signal.get_value(battery_status)
+            service.own_properties.set(dbus_path, value, signal.unit)
+
+
+def publish_service_signals(service, signals):
+    # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn
+
+    for signal in signals:
+        service.own_properties.set(signal.dbus_path, signal.value, signal.unit)
+
+
+def upload_status_to_innovenergy(sock, statuses):
+    # type: (socket, Iterable[BatteryStatus]) -> bool
+
+    logging.debug('upload status')
+
+    try:
+        for s in statuses:
+            sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT))
+    except Exception as e:
+        logging.debug('FAILED: ' + str(e))
+        return False
+    else:
+        return True
+
+
+def print_usage():
+    print('Usage: ' + __file__ + ' <serial device>')
+    print('Example: ' + __file__ + ' ttyUSB0')
+
+
+def parse_cmdline_args(argv):
+    # type: (List[str]) -> str
+
+    if len(argv) == 0:
+        logging.info('missing command line argument for tty device')
+        print_usage()
+        sys.exit(1)
+
+    return argv[0]
+
+
+def 
reset_batteries(modbus, batteries): + # type: (Modbus, Iterable[Battery]) -> NoReturn + + logging.info('Resetting batteries...') + + for battery in batteries: + + result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address) + + # expecting a ModbusIOException (timeout) + # BMS can no longer reply because it is already reset + success = isinstance(result, ModbusIOException) + + outcome = 'successfully' if success else 'FAILED to' + logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome)) + + logging.info('Shutting down fz-sonick driver') + exit(0) + + +alive = True # global alive flag, watchdog_task clears it, update_task sets it + + +def create_update_task(modbus, service, batteries): + # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool] + """ + Creates an update task which runs the main update function + and resets the alive flag + """ + _socket = init_udp_socket() + _signals = signals.init_battery_signals() + + csv_signals = signals.create_csv_signals(first(batteries).firmware_version) + node_numbers = [battery.slave_address for battery in batteries] + warnings_signals, alarm_signals = signals.read_warning_and_alarm_flags() + current_warnings = {} + current_alarms = {} + + def update_task(): + # type: () -> bool + + global alive + + logging.debug('starting update cycle') + + if service.own_properties.get('/ResetBatteries').value == 1: + reset_batteries(modbus, batteries) + + statuses = [read_battery_status(modbus, battery) for battery in batteries] + + # Iterate over each node and signal to create rows in the new format + for i, node in enumerate(node_numbers): + for s in warnings_signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + current_warnings[signal_name] = value + for s in alarm_signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + current_alarms[signal_name] = value + + status_message, alarms_number_list, warnings_number_list = update_state_from_dictionaries(current_warnings, current_alarms, node_numbers) + + publish_values_on_dbus(service, _signals, statuses) + + create_csv_files(csv_signals, statuses, node_numbers, alarms_number_list, warnings_number_list) + + upload_status_to_innovenergy(_socket, statuses) + + logging.debug('finished update cycle\n') + + alive = True + + return True + + return update_task + +def manage_csv_files(directory_path, max_files=20): + csv_files = [f for f in os.listdir(directory_path)] + csv_files.sort(key=lambda x: os.path.getctime(os.path.join(directory_path, x))) + # Remove oldest files if exceeds maximum + while len(csv_files) > max_files: + file_to_delete = os.path.join(directory_path, csv_files.pop(0)) + os.remove(file_to_delete) +def insert_id(path, id_number): + parts = path.split("/") + insert_position = parts.index("Devices") + 1 + parts.insert(insert_position, str(id_number)) + return "/".join(parts) + +def create_csv_files(signals, statuses, node_numbers, alarms_number_list, warnings_number_list): + timestamp = int(time.time()) + if timestamp % 2 != 0: + timestamp-=1 + if not os.path.exists(CSV_DIR): + os.makedirs(CSV_DIR) + csv_filename = "{}.csv".format(timestamp) + csv_path = os.path.join(CSV_DIR, csv_filename) + + with open(csv_path, 'ab') as csvfile: + csv_writer = csv.writer(csvfile, delimiter=';') + nodes_config_path = "/Config/Devices/BatteryNodes" + nodes_list = ",".join(str(node) for node in node_numbers) + config_row = [nodes_config_path, nodes_list, ""] + csv_writer.writerow(config_row) + for i, node 
in enumerate(node_numbers): + csv_writer.writerow(["/Battery/Devices/{}/Alarms".format(str(i+1)), alarms_number_list[i], ""]) + csv_writer.writerow(["/Battery/Devices/{}/Warnings".format(str(i+1)), warnings_number_list[i], ""]) + for s in signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + row_values = [signal_name, value, s.get_text] + csv_writer.writerow(row_values) + + csv_data = read_csv_as_string(csv_path) + + if csv_data is None: + print("error while reading csv as string") + return + + # zip-comp additions + compressed_csv = compress_csv_data(csv_data) + compressed_filename = "{}.csv".format(timestamp) + + response = s3_config.create_put_request(compressed_filename, compressed_csv) + if response.status_code == 200: + #os.remove(csv_path) + print("Success") + else: + failed_dir = os.path.join(CSV_DIR, "failed") + if not os.path.exists(failed_dir): + os.makedirs(failed_dir) + failed_path = os.path.join(failed_dir, csv_filename) + os.rename(csv_path, failed_path) + print("Uploading failed") + manage_csv_files(failed_dir, 10) + + manage_csv_files(CSV_DIR) + + +def create_watchdog_task(main_loop): + # type: (DBusGMainLoop) -> Callable[[],bool] + """ + Creates a Watchdog task that monitors the alive flag. + The watchdog kills the main loop if the alive flag is not periodically reset by the update task. + Who watches the watchdog? + """ + def watchdog_task(): + # type: () -> bool + + global alive + + if alive: + logging.debug('watchdog_task: update_task is alive') + alive = False + return True + else: + logging.info('watchdog_task: killing main loop because update_task is no longer alive') + main_loop.quit() + return False + + return watchdog_task + + +def main(argv): + # type: (List[str]) -> () + print("INSIDE DBUS SONICK") + logging.basicConfig(level=cfg.LOG_LEVEL) + logging.info('starting ' + __file__) + + tty = parse_cmdline_args(argv) + modbus = init_modbus(tty) + + batteries = identify_batteries(modbus) + + if len(batteries) <= 0: + sys.exit(2) + + service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty) + + service.own_properties.set('/ResetBatteries', value=False, writable=True) # initial value = False + + main_loop = gobject.MainLoop() + + service_signals = signals.init_service_signals(batteries) + publish_service_signals(service, service_signals) + + update_task = create_update_task(modbus, service, batteries) + update_task() # run it right away, so that all props are initialized before anyone can ask + watchdog_task = create_watchdog_task(main_loop) + + gobject.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority = gobject.PRIORITY_LOW) # add watchdog first + gobject.timeout_add(cfg.UPDATE_INTERVAL, update_task, priority = gobject.PRIORITY_LOW) # call update once every update_interval + + logging.info('starting gobject.MainLoop') + main_loop.run() + logging.info('gobject.MainLoop was shut down') + + sys.exit(0xFF) # reaches this only on error + + +main(sys.argv[1:]) diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/dbus_types.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/dbus_types.py new file mode 100644 index 000000000..a5fcc6e8a --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/dbus_types.py @@ -0,0 +1,156 @@ +from logging import getLogger + +import dbus + + +_log = getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import Any, Union, Dict + DbusString = Union[dbus.String, dbus.UTF8String, dbus.ObjectPath, dbus.Signature] + DbusInt = Union[dbus.Int16, dbus.Int32, dbus.Int64] + 
DbusDouble = dbus.Double + DbusBool = dbus.Boolean + + DbusStringVariant = DbusString # TODO: variant_level constraint ? + DbusIntVariant = DbusInt + DbusDoubleVariant = DbusDouble + DbusBoolVariant = DbusBool + + DbusValue = Union[DbusString, DbusInt, DbusDouble, DbusBool, DBUS_NONE] + DbusVariant = Union[DbusStringVariant, DbusIntVariant, DbusDoubleVariant, DbusBoolVariant, DBUS_NONE] + + DbusTextDict = dbus.Dictionary + DbusVariantDict = dbus.Dictionary + + DbusType = Union[DbusValue, DbusVariant, DbusVariantDict, DbusTextDict] + +DBUS_NONE = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) # DEFINED by victron + +MAX_INT16 = 2 ** 15 - 1 +MAX_INT32 = 2 ** 31 - 1 + + +def dbus_uint32(value): + # type: (int) -> dbus.UInt32 + if value < 0: + raise Exception('cannot convert negative value to UInt32') + + return dbus.UInt32(value) + + +def dbus_int(value): + # type: (Union[int, long]) -> Union[dbus.Int16, dbus.Int32, dbus.Int64] + abs_value = abs(value) + if abs_value < MAX_INT16: + return dbus.Int16(value) + elif abs_value < MAX_INT32: + return dbus.Int32(value) + else: + return dbus.Int64(value) + + +def dbus_string(value): + # type: (Union[str, unicode]) -> DbusString + if isinstance(value, unicode): + return dbus.UTF8String(value) + else: + return dbus.String(value) + + +def dbus_double(value): + # type: (float) -> DbusDouble + return dbus.Double(value) + + +def dbus_bool(value): + # type: (bool) -> DbusBool + return dbus.Boolean(value) + + +# VARIANTS + +def dbus_int_variant(value): + # type: (Union[int, long]) -> DbusIntVariant + abs_value = abs(value) + if abs_value < MAX_INT16: + return dbus.Int16(value, variant_level=1) + elif abs_value < MAX_INT32: + return dbus.Int32(value, variant_level=1) + else: + return dbus.Int64(value, variant_level=1) + + +def dbus_string_variant(value): + # type: (Union[str, unicode]) -> DbusStringVariant + if isinstance(value, unicode): + return dbus.UTF8String(value, variant_level=1) + else: + return dbus.String(value, variant_level=1) + + +def dbus_double_variant(value): + # type: (float) -> DbusDoubleVariant + return dbus.Double(value, variant_level=1) + + +def dbus_bool_variant(value): + # type: (bool) -> DbusBoolVariant + return dbus.Boolean(value, variant_level=1) + + +def dbus_variant(value): + # type: (Any) -> DbusVariant + + if value is None: + return DBUS_NONE + if isinstance(value, float): + return dbus_double_variant(value) + if isinstance(value, bool): + return dbus_bool_variant(value) + if isinstance(value, (int, long)): + return dbus_int_variant(value) + if isinstance(value, (str, unicode)): + return dbus_string_variant(value) + # TODO: container types + if isinstance(value, list): + # Convert each element in the list to a dbus variant + dbus_array = [dbus_variant(item) for item in value] + if not dbus_array: + return dbus.Array([], signature='v') # Empty array with variant type + first_element = value[0] + if isinstance(first_element, float): + signature = 'd' + elif isinstance(first_element, bool): + signature = 'b' + elif isinstance(first_element, (int, long)): + signature = 'x' + elif isinstance(first_element, (str, unicode)): + signature = 's' + else: + signature = 'v' # default to variant if unknown + return dbus.Array(dbus_array, signature=signature) + + raise TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value)) + + +def dbus_value(value): + # type: (Any) -> DbusVariant + + if value is None: + return DBUS_NONE + if isinstance(value, float): + return dbus_double(value) + if isinstance(value, 
bool):
+        return dbus_bool(value)
+    if isinstance(value, (int, long)):
+        return dbus_int(value)
+    if isinstance(value, (str, unicode)):
+        # dbus_value returns plain (non-variant) values throughout;
+        # the variant wrappers belong in dbus_variant() above
+        return dbus_string(value)
+    # TODO: container types
+
+    raise TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value))
+
+
diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/ext/velib_python/ve_utils.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/ext/velib_python/ve_utils.py
new file mode 100644
index 000000000..459584bab
--- /dev/null
+++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/ext/velib_python/ve_utils.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from traceback import print_exc
+from os import _exit as os_exit
+from os import statvfs
+import logging
+from functools import update_wrapper
+import dbus
+logger = logging.getLogger(__name__)
+
+VEDBUS_INVALID = dbus.Array([], signature=dbus.Signature('i'), variant_level=1)
+
+# Use this function to make sure the code quits on an unexpected exception. Make sure to use it
+# when using gobject.idle_add and also gobject.timeout_add.
+# Without this, the code will just keep running, since gobject does not stop the mainloop on an
+# exception.
+# Example: gobject.idle_add(exit_on_error, myfunc, arg1, arg2)
+def exit_on_error(func, *args, **kwargs):
+	try:
+		return func(*args, **kwargs)
+	except:
+		try:
+			print 'exit_on_error: there was an exception. Printing the stacktrace will be tried, then exiting'
+			print_exc()
+		except:
+			pass
+
+		# sys.exit() is not used, since that throws an exception, which does not lead to a program
+		# halt when used in a dbus callback, see connection.py in the Python/Dbus libraries, line 230.
+		os_exit(1)
+
+
+__vrm_portal_id = None
+def get_vrm_portal_id():
+	# For the CCGX, the definition of the VRM Portal ID is that it is the mac address of the onboard-
+	# ethernet port (eth0), stripped of its colons (:) and lower case.
+
+	# A nice coincidence is that this also works fine when running on your (linux) development computer.
+
+	global __vrm_portal_id
+
+	if __vrm_portal_id:
+		return __vrm_portal_id
+
+	# Assume we are on linux
+	import fcntl, socket, struct
+
+	s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+	info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', 'eth0'[:15]))
+	__vrm_portal_id = ''.join(['%02x' % ord(char) for char in info[18:24]])
+
+	return __vrm_portal_id
+
+
+# See VE.Can registers - public.docx for definition of this conversion
+def convert_vreg_version_to_readable(version):
+	def str_to_arr(x, length):
+		a = []
+		for i in range(0, len(x), length):
+			a.append(x[i:i+length])
+		return a
+
+	x = "%x" % version
+	x = x.upper()
+
+	if len(x) == 5 or len(x) == 3 or len(x) == 1:
+		x = '0' + x
+
+	a = str_to_arr(x, 2)
+
+	# drop the first byte if there are three bytes and it is '00'
+	# (the entries are strings, so we compare and delete by index)
+	if len(a) == 3 and a[0] == '00':
+		del a[0]
+
+	# if we have two or three bytes now, and the first character is a 0, remove it
+	if len(a) >= 2 and a[0][0:1] == '0':
+		a[0] = a[0][1]
+
+	result = ''
+	for item in a:
+		result += ('.' if result != '' else '') + item
+
+	result = 'v' + result
+
+	return result
+
+
+def get_free_space(path):
+	result = -1
+
+	try:
+		s = statvfs(path)
+		result = s.f_frsize * s.f_bavail  # number of free bytes that ordinary users are allowed to use
+	except Exception, ex:
+		logger.info("Error while retrieving free space for path %s: %s" % (path, ex))
+
+	return result
+
+
+def get_load_averages():
+	c = read_file('/proc/loadavg')
+	return c.split(' ')[:3]
+
+
+# Returns False if it cannot find a machine name. 
Otherwise returns the string +# containing the name +def get_machine_name(): + c = read_file('/proc/device-tree/model') + + if c != False: + return c.strip('\x00') + + return read_file('/etc/venus/machine') + + +# Returns False if it cannot open the file. Otherwise returns its rstripped contents +def read_file(path): + content = False + + try: + with open(path, 'r') as f: + content = f.read().rstrip() + except Exception, ex: + logger.debug("Error while reading %s: %s" % (path, ex)) + + return content + + +def wrap_dbus_value(value): + if value is None: + return VEDBUS_INVALID + if isinstance(value, float): + return dbus.Double(value, variant_level=1) + if isinstance(value, bool): + return dbus.Boolean(value, variant_level=1) + if isinstance(value, int): + return dbus.Int32(value, variant_level=1) + if isinstance(value, str): + return dbus.String(value, variant_level=1) + if isinstance(value, unicode): + return dbus.String(value, variant_level=1) + if isinstance(value, list): + if len(value) == 0: + # If the list is empty we cannot infer the type of the contents. So assume unsigned integer. + # A (signed) integer is dangerous, because an empty list of signed integers is used to encode + # an invalid value. + return dbus.Array([], signature=dbus.Signature('u'), variant_level=1) + return dbus.Array([wrap_dbus_value(x) for x in value], variant_level=1) + if isinstance(value, long): + return dbus.Int64(value, variant_level=1) + if isinstance(value, dict): + # Wrapping the keys of the dictionary causes D-Bus errors like: + # 'arguments to dbus_message_iter_open_container() were incorrect, + # assertion "(type == DBUS_TYPE_ARRAY && contained_signature && + # *contained_signature == DBUS_DICT_ENTRY_BEGIN_CHAR) || (contained_signature == NULL || + # _dbus_check_is_valid_signature (contained_signature))" failed in file ...' + return dbus.Dictionary({(k, wrap_dbus_value(v)) for k, v in value.items()}, variant_level=1) + return value + + +dbus_int_types = (dbus.Int32, dbus.UInt32, dbus.Byte, dbus.Int16, dbus.UInt16, dbus.UInt32, dbus.Int64, dbus.UInt64) + + +def unwrap_dbus_value(val): + """Converts D-Bus values back to the original type. For example if val is of type DBus.Double, + a float will be returned.""" + if isinstance(val, dbus_int_types): + return int(val) + if isinstance(val, dbus.Double): + return float(val) + if isinstance(val, dbus.Array): + v = [unwrap_dbus_value(x) for x in val] + return None if len(v) == 0 else v + if isinstance(val, (dbus.Signature, dbus.String)): + return unicode(val) + # Python has no byte type, so we convert to an integer. + if isinstance(val, dbus.Byte): + return int(val) + if isinstance(val, dbus.ByteArray): + return "".join([str(x) for x in val]) + if isinstance(val, (list, tuple)): + return [unwrap_dbus_value(x) for x in val] + if isinstance(val, (dbus.Dictionary, dict)): + # Do not unwrap the keys, see comment in wrap_dbus_value + return dict([(x, unwrap_dbus_value(y)) for x, y in val.items()]) + if isinstance(val, dbus.Boolean): + return bool(val) + return val + +class reify(object): + """ Decorator to replace a property of an object with the calculated value, + to make it concrete. 
""" + def __init__(self, wrapped): + self.wrapped = wrapped + update_wrapper(self, wrapped) + def __get__(self, inst, objtype=None): + if inst is None: + return self + v = self.wrapped(inst) + setattr(inst, self.wrapped.__name__, v) + return v diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/ext/velib_python/vedbus.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/ext/velib_python/vedbus.py new file mode 100644 index 000000000..2dbed13e2 --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/ext/velib_python/vedbus.py @@ -0,0 +1,496 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import dbus.service +import logging +import traceback +import os +import weakref +from ve_utils import wrap_dbus_value, unwrap_dbus_value + +# vedbus contains three classes: +# VeDbusItemImport -> use this to read data from the dbus, ie import +# VeDbusItemExport -> use this to export data to the dbus (one value) +# VeDbusService -> use that to create a service and export several values to the dbus + +# Code for VeDbusItemImport is copied from busitem.py and thereafter modified. +# All projects that used busitem.py need to migrate to this package. And some +# projects used to define there own equivalent of VeDbusItemExport. Better to +# use VeDbusItemExport, or even better the VeDbusService class that does it all for you. + +# TODOS +# 1 check for datatypes, it works now, but not sure if all is compliant with +# com.victronenergy.BusItem interface definition. See also the files in +# tests_and_examples. And see 'if type(v) == dbus.Byte:' on line 102. Perhaps +# something similar should also be done in VeDbusBusItemExport? +# 2 Shouldn't VeDbusBusItemExport inherit dbus.service.Object? +# 7 Make hard rules for services exporting data to the D-Bus, in order to make tracking +# changes possible. Does everybody first invalidate its data before leaving the bus? +# And what about before taking one object away from the bus, instead of taking the +# whole service offline? +# They should! And after taking one value away, do we need to know that someone left +# the bus? Or we just keep that value in invalidated for ever? Result is that we can't +# see the difference anymore between an invalidated value and a value that was first on +# the bus and later not anymore. See comments above VeDbusItemImport as well. +# 9 there are probably more todos in the code below. + +# Some thoughts with regards to the data types: +# +# Text from: http://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html#data-types +# --- +# Variants are represented by setting the variant_level keyword argument in the +# constructor of any D-Bus data type to a value greater than 0 (variant_level 1 +# means a variant containing some other data type, variant_level 2 means a variant +# containing a variant containing some other data type, and so on). If a non-variant +# is passed as an argument but introspection indicates that a variant is expected, +# it'll automatically be wrapped in a variant. +# --- +# +# Also the different dbus datatypes, such as dbus.Int32, and dbus.UInt32 are a subclass +# of Python int. dbus.String is a subclass of Python standard class unicode, etcetera +# +# So all together that explains why we don't need to explicitly convert back and forth +# between the dbus datatypes and the standard python datatypes. Note that all datatypes +# in python are objects. Even an int is an object. + +# The signature of a variant is 'v'. + +# Export ourselves as a D-Bus service. 
+class VeDbusService(object): + def __init__(self, servicename, bus=None): + # dict containing the VeDbusItemExport objects, with their path as the key. + self._dbusobjects = {} + self._dbusnodes = {} + + # dict containing the onchange callbacks, for each object. Object path is the key + self._onchangecallbacks = {} + + # Connect to session bus whenever present, else use the system bus + self._dbusconn = bus or (dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus()) + + # make the dbus connection available to outside, could make this a true property instead, but ach.. + self.dbusconn = self._dbusconn + + # Register ourselves on the dbus, trigger an error if already in use (do_not_queue) + self._dbusname = dbus.service.BusName(servicename, self._dbusconn, do_not_queue=True) + + # Add the root item that will return all items as a tree + self._dbusnodes['/'] = self._create_tree_export(self._dbusconn, '/', self._get_tree_dict) + + logging.info("registered ourselves on D-Bus as %s" % servicename) + + def _get_tree_dict(self, path, get_text=False): + logging.debug("_get_tree_dict called for %s" % path) + r = {} + px = path + if not px.endswith('/'): + px += '/' + for p, item in self._dbusobjects.items(): + if p.startswith(px): + v = item.GetText() if get_text else wrap_dbus_value(item.local_get_value()) + r[p[len(px):]] = v + logging.debug(r) + return r + + # To force immediate deregistering of this dbus service and all its object paths, explicitly + # call __del__(). + def __del__(self): + for node in self._dbusnodes.values(): + node.__del__() + self._dbusnodes.clear() + for item in self._dbusobjects.values(): + item.__del__() + self._dbusobjects.clear() + if self._dbusname: + self._dbusname.__del__() # Forces call to self._bus.release_name(self._name), see source code + self._dbusname = None + + # @param callbackonchange function that will be called when this value is changed. First parameter will + # be the path of the object, second the new value. This callback should return + # True to accept the change, False to reject it. + def add_path(self, path, value, description="", writeable=False, + onchangecallback=None, gettextcallback=None): + + if onchangecallback is not None: + self._onchangecallbacks[path] = onchangecallback + + item = VeDbusItemExport( + self._dbusconn, path, value, description, writeable, + self._value_changed, gettextcallback, deletecallback=self._item_deleted) + + spl = path.split('/') + for i in range(2, len(spl)): + subPath = '/'.join(spl[:i]) + if subPath not in self._dbusnodes and subPath not in self._dbusobjects: + self._dbusnodes[subPath] = self._create_tree_export(self._dbusconn, subPath, self._get_tree_dict) + self._dbusobjects[path] = item + logging.debug('added %s with start value %s. 
Writeable is %s' % (path, value, writeable)) + + # Add the mandatory paths, as per victron dbus api doc + def add_mandatory_paths(self, processname, processversion, connection, + deviceinstance, productid, productname, firmwareversion, hardwareversion, connected): + self.add_path('/Mgmt/ProcessName', processname) + self.add_path('/Mgmt/ProcessVersion', processversion) + self.add_path('/Mgmt/Connection', connection) + + # Create rest of the mandatory objects + self.add_path('/DeviceInstance', deviceinstance) + self.add_path('/ProductId', productid) + self.add_path('/ProductName', productname) + self.add_path('/FirmwareVersion', firmwareversion) + self.add_path('/HardwareVersion', hardwareversion) + self.add_path('/Connected', connected) + + def _create_tree_export(self, bus, objectPath, get_value_handler): + return VeDbusTreeExport(bus, objectPath, get_value_handler) + + # Callback function that is called from the VeDbusItemExport objects when a value changes. This function + # maps the change-request to the onchangecallback given to us for this specific path. + def _value_changed(self, path, newvalue): + if path not in self._onchangecallbacks: + return True + + return self._onchangecallbacks[path](path, newvalue) + + def _item_deleted(self, path): + self._dbusobjects.pop(path) + for np in self._dbusnodes.keys(): + if np != '/': + for ip in self._dbusobjects: + if ip.startswith(np + '/'): + break + else: + self._dbusnodes[np].__del__() + self._dbusnodes.pop(np) + + def __getitem__(self, path): + return self._dbusobjects[path].local_get_value() + + def __setitem__(self, path, newvalue): + self._dbusobjects[path].local_set_value(newvalue) + + def __delitem__(self, path): + self._dbusobjects[path].__del__() # Invalidates and then removes the object path + assert path not in self._dbusobjects + + def __contains__(self, path): + return path in self._dbusobjects + +""" +Importing basics: + - If when we power up, the D-Bus service does not exist, or it does exist and the path does not + yet exist, still subscribe to a signal: as soon as it comes online it will send a signal with its + initial value, which VeDbusItemImport will receive and use to update local cache. And, when set, + call the eventCallback. + - If when we power up, save it + - When using get_value, know that there is no difference between services (or object paths) that don't + exist and paths that are invalid (= empty array, see above). Both will return None. In case you do + really want to know ifa path exists or not, use the exists property. + - When a D-Bus service leaves the D-Bus, it will first invalidate all its values, and send signals + with that update, and only then leave the D-Bus. (or do we need to subscribe to the NameOwnerChanged- + signal!?!) To be discussed and make sure. Not really urgent, since all existing code that uses this + class already subscribes to the NameOwnerChanged signal, and subsequently removes instances of this + class. + +Read when using this class: +Note that when a service leaves that D-Bus without invalidating all its exported objects first, for +example because it is killed, VeDbusItemImport doesn't have a clue. So when using VeDbusItemImport, +make sure to also subscribe to the NamerOwnerChanged signal on bus-level. Or just use dbusmonitor, +because that takes care of all of that for you. +""" +class VeDbusItemImport(object): + ## Constructor + # @param bus the bus-object (SESSION or SYSTEM). 
+ # @param serviceName the dbus-service-name (string), for example 'com.victronenergy.battery.ttyO1' + # @param path the object-path, for example '/Dc/V' + # @param eventCallback function that you want to be called on a value change + # @param createSignal only set this to False if you use this function to one time read a value. When + # leaving it to True, make sure to also subscribe to the NameOwnerChanged signal + # elsewhere. See also note some 15 lines up. + def __init__(self, bus, serviceName, path, eventCallback=None, createsignal=True): + # TODO: is it necessary to store _serviceName and _path? Isn't it + # stored in the bus_getobjectsomewhere? + self._serviceName = serviceName + self._path = path + self._match = None + # TODO: _proxy is being used in settingsdevice.py, make a getter for that + self._proxy = bus.get_object(serviceName, path, introspect=False) + self.eventCallback = eventCallback + + assert eventCallback is None or createsignal == True + if createsignal: + self._match = self._proxy.connect_to_signal( + "PropertiesChanged", weak_functor(self._properties_changed_handler)) + + # store the current value in _cachedvalue. When it doesn't exists set _cachedvalue to + # None, same as when a value is invalid + self._cachedvalue = None + try: + v = self._proxy.GetValue() + except dbus.exceptions.DBusException: + pass + else: + self._cachedvalue = unwrap_dbus_value(v) + + def __del__(self): + if self._match != None: + self._match.remove() + self._match = None + self._proxy = None + + def _refreshcachedvalue(self): + self._cachedvalue = unwrap_dbus_value(self._proxy.GetValue()) + + ## Returns the path as a string, for example '/AC/L1/V' + @property + def path(self): + return self._path + + ## Returns the dbus service name as a string, for example com.victronenergy.vebus.ttyO1 + @property + def serviceName(self): + return self._serviceName + + ## Returns the value of the dbus-item. + # the type will be a dbus variant, for example dbus.Int32(0, variant_level=1) + # this is not a property to keep the name consistant with the com.victronenergy.busitem interface + # returns None when the property is invalid + def get_value(self): + return self._cachedvalue + + ## Writes a new value to the dbus-item + def set_value(self, newvalue): + r = self._proxy.SetValue(wrap_dbus_value(newvalue)) + + # instead of just saving the value, go to the dbus and get it. So we have the right type etc. + if r == 0: + self._refreshcachedvalue() + + return r + + ## Returns the text representation of the value. + # For example when the value is an enum/int GetText might return the string + # belonging to that enum value. Another example, for a voltage, GetValue + # would return a float, 12.0Volt, and GetText could return 12 VDC. + # + # Note that this depends on how the dbus-producer has implemented this. + def get_text(self): + return self._proxy.GetText() + + ## Returns true of object path exists, and false if it doesn't + @property + def exists(self): + # TODO: do some real check instead of this crazy thing. + r = False + try: + r = self._proxy.GetValue() + r = True + except dbus.exceptions.DBusException: + pass + + return r + + ## callback for the trigger-event. + # @param eventCallback the event-callback-function. + @property + def eventCallback(self): + return self._eventCallback + + @eventCallback.setter + def eventCallback(self, eventCallback): + self._eventCallback = eventCallback + + ## Is called when the value of the imported bus-item changes. 
+ # Stores the new value in our local cache, and calls the eventCallback, if set. + def _properties_changed_handler(self, changes): + if "Value" in changes: + changes['Value'] = unwrap_dbus_value(changes['Value']) + self._cachedvalue = changes['Value'] + if self._eventCallback: + # The reason behind this try/except is to prevent errors silently ending up the an error + # handler in the dbus code. + try: + self._eventCallback(self._serviceName, self._path, changes) + except: + traceback.print_exc() + os._exit(1) # sys.exit() is not used, since that also throws an exception + + +class VeDbusTreeExport(dbus.service.Object): + def __init__(self, bus, objectPath, get_value_handler): + dbus.service.Object.__init__(self, bus, objectPath) + self._get_value_handler = get_value_handler + logging.debug("VeDbusTreeExport %s has been created" % objectPath) + + def __del__(self): + # self._get_path() will raise an exception when retrieved after the call to .remove_from_connection, + # so we need a copy. + path = self._get_path() + if path is None: + return + self.remove_from_connection() + logging.debug("VeDbusTreeExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetValue(self): + value = self._get_value_handler(self._get_path()) + return dbus.Dictionary(value, signature=dbus.Signature('sv'), variant_level=1) + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetText(self): + return self._get_value_handler(self._get_path(), True) + + def local_get_value(self): + return self._get_value_handler(self.path) + + +class VeDbusItemExport(dbus.service.Object): + ## Constructor of VeDbusItemExport + # + # Use this object to export (publish), values on the dbus + # Creates the dbus-object under the given dbus-service-name. + # @param bus The dbus object. + # @param objectPath The dbus-object-path. + # @param value Value to initialize ourselves with, defaults to None which means Invalid + # @param description String containing a description. Can be called over the dbus with GetDescription() + # @param writeable what would this do!? :). + # @param callback Function that will be called when someone else changes the value of this VeBusItem + # over the dbus. First parameter passed to callback will be our path, second the new + # value. This callback should return True to accept the change, False to reject it. + def __init__(self, bus, objectPath, value=None, description=None, writeable=False, + onchangecallback=None, gettextcallback=None, deletecallback=None): + dbus.service.Object.__init__(self, bus, objectPath) + self._onchangecallback = onchangecallback + self._gettextcallback = gettextcallback + self._value = value + self._description = description + self._writeable = writeable + self._deletecallback = deletecallback + + # To force immediate deregistering of this dbus object, explicitly call __del__(). + def __del__(self): + # self._get_path() will raise an exception when retrieved after the + # call to .remove_from_connection, so we need a copy. + path = self._get_path() + if path == None: + return + if self._deletecallback is not None: + self._deletecallback(path) + self.local_set_value(None) + self.remove_from_connection() + logging.debug("VeDbusItemExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + ## Sets the value. 
And in case the value is different from what it was, a signal
+	# will be emitted to the dbus. This function is to be used in the python code that
+	# is using this class to export values to the dbus.
+	# Set the value to None to indicate that it is Invalid.
+	def local_set_value(self, newvalue):
+		if self._value == newvalue:
+			return
+
+		self._value = newvalue
+
+		changes = {}
+		changes['Value'] = wrap_dbus_value(newvalue)
+		changes['Text'] = self.GetText()
+		self.PropertiesChanged(changes)
+
+	def local_get_value(self):
+		return self._value
+
+	# ==== ALL FUNCTIONS BELOW THIS LINE WILL BE CALLED BY OTHER PROCESSES OVER THE DBUS ====
+
+	## Dbus exported method SetValue
+	# Function is called over the D-Bus by other processes. It will first check (via callback) if the
+	# new value is accepted. If it is, it stores the value and emits a changed-signal.
+	# @param value The new value.
+	# @return completion-code 0 on success, 1 when the item is not writeable, 2 when the
+	#         onchange-callback rejected the new value.
+	@dbus.service.method('com.victronenergy.BusItem', in_signature='v', out_signature='i')
+	def SetValue(self, newvalue):
+		if not self._writeable:
+			return 1  # NOT OK
+
+		newvalue = unwrap_dbus_value(newvalue)
+
+		if newvalue == self._value:
+			return 0  # OK
+
+		# call the callback given to us, and check if the new value is OK
+		if self._onchangecallback is None or self._onchangecallback(self.__dbus_object_path__, newvalue):
+			self.local_set_value(newvalue)
+			return 0  # OK
+
+		return 2  # NOT OK
+
+	## Dbus exported method GetDescription
+	#
+	# Returns the description.
+	# @param language A language code (e.g. ISO 639-1 en-US).
+	# @param length Length of the language string.
+	# @return description
+	@dbus.service.method('com.victronenergy.BusItem', in_signature='si', out_signature='s')
+	def GetDescription(self, language, length):
+		return self._description if self._description is not None else 'No description given'
+
+	## Dbus exported method GetValue
+	# Returns the value.
+	# @return the value when valid, and otherwise an empty array
+	@dbus.service.method('com.victronenergy.BusItem', out_signature='v')
+	def GetValue(self):
+		return wrap_dbus_value(self._value)
+
+	## Dbus exported method GetText
+	# Returns the value as string of the dbus-object-path.
+	# @return text A text-value. '---' when the local value is invalid
+	@dbus.service.method('com.victronenergy.BusItem', out_signature='s')
+	def GetText(self):
+		if self._value is None:
+			return '---'
+
+		# Default conversion from dbus.Byte will get you a character (so 'T' instead of '84'), so we
+		# have to convert to int first. Note that if a dbus.Byte turns up here, it must have come from
+		# the application itself, as all data from the D-Bus should have been unwrapped by now.
+		if self._gettextcallback is None and type(self._value) == dbus.Byte:
+			return str(int(self._value))
+
+		if self._gettextcallback is None and self.__dbus_object_path__ == '/ProductId':
+			return "0x%X" % self._value
+
+		if self._gettextcallback is None:
+			return str(self._value)
+
+		return self._gettextcallback(self.__dbus_object_path__, self._value)
+
+	## The signal that indicates that the value has changed.
+	# Other processes connected to this BusItem object will have subscribed to the
+	# event when they want to track our state.
+	@dbus.service.signal('com.victronenergy.BusItem', signature='a{sv}')
+	def PropertiesChanged(self, changes):
+		pass
+
+## This class behaves like a regular reference to a class method (eg. 
self.foo), but keeps a weak reference +## to the object which method is to be called. +## Use this object to break circular references. +class weak_functor: + def __init__(self, f): + self._r = weakref.ref(f.__self__) + self._f = weakref.ref(f.__func__) + + def __call__(self, *args, **kargs): + r = self._r() + f = self._f() + if r == None or f == None: + return + f(r, *args, **kargs) diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/down b/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/down new file mode 100644 index 000000000..e69de29bb diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/log/down b/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/log/down new file mode 100644 index 000000000..e69de29bb diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/log/run b/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/log/run new file mode 100755 index 000000000..74e759d9b --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/log/run @@ -0,0 +1,3 @@ +#!/bin/sh +exec 2>&1 +exec multilog t s25000 n4 /var/log/dbus-fzsonick-48tl.TTY diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/run b/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/run new file mode 100755 index 000000000..7f5301435 --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/service/run @@ -0,0 +1,4 @@ +#!/bin/sh +exec 2>&1 + +exec softlimit -d 100000000 -s 1000000 -a 100000000 /opt/innovenergy/dbus-fzsonick-48tl/start.sh TTY diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/signals.py b/firmware/opt/dbus-fz-sonick-48tl-with-s3/signals.py new file mode 100644 index 000000000..e35c95603 --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/signals.py @@ -0,0 +1,374 @@ +# coding=utf-8 + +import config as cfg +from convert import mean, read_float, read_led_state, read_bool, count_bits, comma_separated, read_bitmap, return_in_list, first, read_hex_string +from data import BatterySignal, Battery, LedColor, ServiceSignal, BatteryStatus, LedState, CsvSignal + +# noinspection PyUnreachableCode +if False: + from typing import List, Iterable + +def read_voltage(): + return read_float(register=999, scale_factor=0.01, offset=0) + +def read_current(): + return read_float(register=1000, scale_factor=0.01, offset=-10000) + +def read_limb_bitmap(): + return read_bitmap(1059) + +def read_power(status): + return int(read_current()(status) * read_voltage()(status)) + +def interpret_limb_bitmap(bitmap_value): + string1_disabled = int((bitmap_value & 0b00001) != 0) + string2_disabled = int((bitmap_value & 0b00010) != 0) + string3_disabled = int((bitmap_value & 0b00100) != 0) + string4_disabled = int((bitmap_value & 0b01000) != 0) + string5_disabled = int((bitmap_value & 0b10000) != 0) + n_limb_strings = string1_disabled + string2_disabled + string3_disabled + string4_disabled + string5_disabled + return n_limb_strings + +def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap()(status)) + +def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int): + dv = v_limit - v + di = dv / r_int + p_limit = v_limit * (i + di) + return p_limit + +def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int): + di = i_limit - i + dv = di * r_int + p_limit = i_limit * (v + dv) + return p_limit + +def calc_max_charge_power(status): + n_strings = cfg.NUM_OF_STRINGS_PER_BATTERY - limp_strings_value(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + 
v = read_voltage()(status) + i = read_current()(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) + p_limit = max(p_limit, 0) + return int(p_limit) + +def calc_max_discharge_power(status): + n_strings = cfg.NUM_OF_STRINGS_PER_BATTERY - limp_strings_value(status) + max_discharge_current = n_strings * cfg.I_MAX_PER_STRING + return int(max_discharge_current * read_voltage()(status)) + +def read_switch_closed(status): + value = read_bool(base_register=1013, bit=0)(status) + if value: + return False + return True + +def read_alarm_out_active(status): + value = read_bool(base_register=1013, bit=1)(status) + if value: + return False + return True + +def read_aux_relay(status): + value = read_bool(base_register=1013, bit=4)(status) + if value: + return False + return True + +def hex_string_to_ascii(hex_string): + hex_string = hex_string.replace(" ", "") + ascii_string = ''.join([chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2)]) + return ascii_string + +def init_service_signals(batteries): + print("INSIDE INIT SERVICE SIGNALS") + n_batteries = len(batteries) + product_name = cfg.PRODUCT_NAME + ' x' + str(n_batteries) + return [ + ServiceSignal('/NbOfBatteries', n_batteries), + ServiceSignal('/Mgmt/ProcessName', __file__), + ServiceSignal('/Mgmt/ProcessVersion', cfg.SOFTWARE_VERSION), + ServiceSignal('/Mgmt/Connection', cfg.CONNECTION), + ServiceSignal('/DeviceInstance', cfg.DEVICE_INSTANCE), + ServiceSignal('/ProductName', product_name), + ServiceSignal('/ProductId', cfg.PRODUCT_ID), + ServiceSignal('/Connected', 1) + ] + +def init_battery_signals(): + print("START INIT SIGNALS") + battery_status_reader = read_hex_string(1060, 2) + + def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + + def read_battery_cold(status): + return \ + read_led_state(register=1004, led=LedColor.green)(status) >= LedState.blinking_slow and \ + read_led_state(register=1004, led=LedColor.blue)(status) >= LedState.blinking_slow + + def read_soc(status): + soc = read_float(register=1053, scale_factor=0.1, offset=0)(status) + if soc > 99.9 and not read_eoc_reached(status): + return 99.9 + if soc >= 99.9 and read_eoc_reached(status): + return 100 + return soc + + def number_of_active_strings(status): + return cfg.NUM_OF_STRINGS_PER_BATTERY - limp_strings_value(status) + + def max_discharge_current(status): + return number_of_active_strings(status) * cfg.I_MAX_PER_STRING + + def max_charge_current(status): + return status.battery.ampere_hours / 2 + + return [ + BatterySignal('/TimeToTOCRequest', max, read_float(register=1052)), + BatterySignal('/EOCReached', return_in_list, read_eoc_reached), + BatterySignal('/NumOfLimbStrings', return_in_list, limp_strings_value), + BatterySignal('/Dc/0/Voltage', mean, get_value=read_voltage(), unit='V'), + BatterySignal('/Dc/0/Current', sum, get_value=read_current(), unit='A'), + BatterySignal('/Dc/0/Power', sum, get_value=read_power, unit='W'), + BatterySignal('/BussVoltage', mean, read_float(register=1001, scale_factor=0.01, offset=0), unit='V'), + BatterySignal('/Soc', mean, read_soc, unit='%'), + BatterySignal('/LowestSoc', min, read_float(register=1053, scale_factor=0.1, offset=0), unit='%'), 
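+	# Each BatterySignal couples a dbus path with an aggregation function and a
+	# per-battery reader: publish_individuals() publishes the raw per-node value
+	# under /_Battery/<slave address>/..., while publish_aggregates() folds the
+	# per-battery values with the aggregate (mean, sum, min, max, ...) onto the
+	# path itself.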
+ BatterySignal('/Dc/0/Temperature', mean, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'), + BatterySignal('/Dc/0/LowestTemperature', min, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'), + BatterySignal('/WarningFlags/TaM1', return_in_list, read_bool(base_register=1005, bit=1)), + BatterySignal('/WarningFlags/TbM1', return_in_list, read_bool(base_register=1005, bit=4)), + BatterySignal('/WarningFlags/VBm1', return_in_list, read_bool(base_register=1005, bit=6)), + BatterySignal('/WarningFlags/VBM1', return_in_list, read_bool(base_register=1005, bit=8)), + BatterySignal('/WarningFlags/IDM1', return_in_list, read_bool(base_register=1005, bit=10)), + BatterySignal('/WarningFlags/vsm1', return_in_list, read_bool(base_register=1005, bit=22)), + BatterySignal('/WarningFlags/vsM1', return_in_list, read_bool(base_register=1005, bit=24)), + BatterySignal('/WarningFlags/iCM1', return_in_list, read_bool(base_register=1005, bit=26)), + BatterySignal('/WarningFlags/iDM1', return_in_list, read_bool(base_register=1005, bit=28)), + BatterySignal('/WarningFlags/MID1', return_in_list, read_bool(base_register=1005, bit=30)), + BatterySignal('/WarningFlags/BLPW', return_in_list, read_bool(base_register=1005, bit=32)), + BatterySignal('/WarningFlags/CCBF', return_in_list, read_bool(base_register=1005, bit=33)), + BatterySignal('/WarningFlags/Ah_W', return_in_list, read_bool(base_register=1005, bit=35)), + BatterySignal('/WarningFlags/MPMM', return_in_list, read_bool(base_register=1005, bit=38)), + BatterySignal('/WarningFlags/TCdi', return_in_list, read_bool(base_register=1005, bit=40)), + BatterySignal('/WarningFlags/LMPW', return_in_list, read_bool(base_register=1005, bit=44)), + BatterySignal('/WarningFlags/TOCW', return_in_list, read_bool(base_register=1005, bit=47)), + BatterySignal('/WarningFlags/BUSL', return_in_list, read_bool(base_register=1005, bit=49)), + BatterySignal('/AlarmFlags/Tam', return_in_list, read_bool(base_register=1005, bit=0)), + BatterySignal('/AlarmFlags/TaM2', return_in_list, read_bool(base_register=1005, bit=2)), + BatterySignal('/AlarmFlags/Tbm', return_in_list, read_bool(base_register=1005, bit=3)), + BatterySignal('/AlarmFlags/TbM2', return_in_list, read_bool(base_register=1005, bit=5)), + BatterySignal('/AlarmFlags/VBm2', return_in_list, read_bool(base_register=1005, bit=7)), + BatterySignal('/AlarmFlags/VBM2', return_in_list, read_bool(base_register=1005, bit=9)), + BatterySignal('/AlarmFlags/IDM2', return_in_list, read_bool(base_register=1005, bit=11)), + BatterySignal('/AlarmFlags/ISOB', return_in_list, read_bool(base_register=1005, bit=12)), + BatterySignal('/AlarmFlags/MSWE', return_in_list, read_bool(base_register=1005, bit=13)), + BatterySignal('/AlarmFlags/FUSE', return_in_list, read_bool(base_register=1005, bit=14)), + BatterySignal('/AlarmFlags/HTRE', return_in_list, read_bool(base_register=1005, bit=15)), + BatterySignal('/AlarmFlags/TCPE', return_in_list, read_bool(base_register=1005, bit=16)), + BatterySignal('/AlarmFlags/STRE', return_in_list, read_bool(base_register=1005, bit=17)), + BatterySignal('/AlarmFlags/CME', return_in_list, read_bool(base_register=1005, bit=18)), + BatterySignal('/AlarmFlags/HWFL', return_in_list, read_bool(base_register=1005, bit=19)), + BatterySignal('/AlarmFlags/HWEM', return_in_list, read_bool(base_register=1005, bit=20)), + BatterySignal('/AlarmFlags/ThM', return_in_list, read_bool(base_register=1005, bit=21)), + BatterySignal('/AlarmFlags/vsm2', return_in_list, read_bool(base_register=1005, 
bit=23)), + BatterySignal('/AlarmFlags/vsM2', return_in_list, read_bool(base_register=1005, bit=25)), + BatterySignal('/AlarmFlags/iCM2', return_in_list, read_bool(base_register=1005, bit=27)), + BatterySignal('/AlarmFlags/iDM2', return_in_list, read_bool(base_register=1005, bit=29)), + BatterySignal('/AlarmFlags/MID2', return_in_list, read_bool(base_register=1005, bit=31)), + BatterySignal('/AlarmFlags/HTFS', return_in_list, read_bool(base_register=1005, bit=42)), + BatterySignal('/AlarmFlags/DATA', return_in_list, read_bool(base_register=1005, bit=43)), + BatterySignal('/AlarmFlags/LMPA', return_in_list, read_bool(base_register=1005, bit=45)), + BatterySignal('/AlarmFlags/HEBT', return_in_list, read_bool(base_register=1005, bit=46)), + BatterySignal('/AlarmFlags/CURM', return_in_list, read_bool(base_register=1005, bit=48)), + BatterySignal('/Diagnostics/LedStatus/Red', first, read_led_state(register=1004, led=LedColor.red)), + BatterySignal('/Diagnostics/LedStatus/Blue', first, read_led_state(register=1004, led=LedColor.blue)), + BatterySignal('/Diagnostics/LedStatus/Green', first, read_led_state(register=1004, led=LedColor.green)), + BatterySignal('/Diagnostics/LedStatus/Amber', first, read_led_state(register=1004, led=LedColor.amber)), + BatterySignal('/Diagnostics/IoStatus/MainSwitchClosed', return_in_list, read_switch_closed), + BatterySignal('/Diagnostics/IoStatus/AlarmOutActive', return_in_list, read_alarm_out_active), + BatterySignal('/Diagnostics/IoStatus/InternalFanActive', return_in_list, read_bool(base_register=1013, bit=2)), + BatterySignal('/Diagnostics/IoStatus/VoltMeasurementAllowed', return_in_list, read_bool(base_register=1013, bit=3)), + BatterySignal('/Diagnostics/IoStatus/AuxRelay', return_in_list, read_aux_relay), + BatterySignal('/Diagnostics/IoStatus/RemoteState', return_in_list, read_bool(base_register=1013, bit=5)), + BatterySignal('/Diagnostics/IoStatus/RiscOn', return_in_list, read_bool(base_register=1013, bit=6)), + BatterySignal('/IoStatus/BatteryCold', any, read_battery_cold), + BatterySignal('/Info/MaxDischargeCurrent', sum, max_discharge_current, unit='A'), + BatterySignal('/Info/MaxChargeCurrent', sum, max_charge_current, unit='A'), + BatterySignal('/Info/MaxChargeVoltage', min, lambda bs: bs.battery.v_max, unit='V'), + BatterySignal('/Info/MinDischargeVoltage', max, lambda bs: bs.battery.v_min, unit='V'), + BatterySignal('/Info/BatteryLowVoltage', max, lambda bs: bs.battery.v_min - 2, unit='V'), + BatterySignal('/Info/NumberOfStrings', sum, number_of_active_strings), + BatterySignal('/Info/MaxChargePower', sum, calc_max_charge_power), + BatterySignal('/Info/MaxDischargePower', sum, calc_max_discharge_power), + BatterySignal('/FirmwareVersion', comma_separated, lambda bs: bs.battery.firmware_version), + BatterySignal('/HardwareVersion', comma_separated, lambda bs: bs.battery.hardware_version), + BatterySignal('/BmsVersion', comma_separated, lambda bs: bs.battery.bms_version) + ] + +def create_csv_signals(firmware_version): + total_current = read_float(register=1062, scale_factor=0.01, offset=-10000) + + def read_total_current(status): + return total_current(status) + + def read_heating_current(status): + return total_current(status) - read_current()(status) + + def read_heating_power(status): + return read_voltage()(status) * read_heating_current(status) + + soc_ah = read_float(register=1002, scale_factor=0.1, offset=-10000) + + def read_soc_ah(status): + return soc_ah(status) + + def return_led_state(status, color): + led_state = 
read_led_state(register=1004, led=color)(status) + if led_state == LedState.blinking_fast or led_state == LedState.blinking_slow: + return "Blinking" + elif led_state == LedState.on: + return "On" + elif led_state == LedState.off: + return "Off" + return "Unknown" + + def return_led_state_blue(status): + return return_led_state(status, LedColor.blue) + + def return_led_state_red(status): + return return_led_state(status, LedColor.red) + + def return_led_state_green(status): + return return_led_state(status, LedColor.green) + + def return_led_state_amber(status): + return return_led_state(status, LedColor.amber) + + battery_status_reader = read_hex_string(1060, 2) + + def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + + def read_serial_number(status): + serial_regs = [1055, 1056, 1057, 1058] + serial_parts = [] + for reg in serial_regs: + hex_value_fun = read_hex_string(reg, 1) + hex_value = hex_value_fun(status) + serial_parts.append(hex_value.replace(' ', '')) + serial_number = ''.join(serial_parts).rstrip('0') + return serial_number + + def time_since_toc_in_time_format(status): + time_in_minutes = read_float(register=1052)(status) + total_seconds = int(time_in_minutes * 60) + days = total_seconds // (24 * 3600) + total_seconds = total_seconds % (24 * 3600) + hours = total_seconds // 3600 + total_seconds %= 3600 + minutes = total_seconds // 60 + seconds = total_seconds % 60 + return "{}.{:02}:{:02}:{:02}".format(days, hours, minutes, seconds) + + return [ + CsvSignal('/Battery/Devices/FwVersion', firmware_version), + CsvSignal('/Battery/Devices/Dc/Power', read_power, 'W'), + CsvSignal('/Battery/Devices/Dc/Voltage', read_voltage(), 'V'), + CsvSignal('/Battery/Devices/Soc', read_float(register=1053, scale_factor=0.1, offset=0), '%'), + CsvSignal('/Battery/Devices/Temperatures/Cells/Average', read_float(register=1003, scale_factor=0.1, offset=-400), 'C'), + CsvSignal('/Battery/Devices/Dc/Current', read_current(), 'A'), + CsvSignal('/Battery/Devices/BusCurrent', read_total_current, 'A'), + CsvSignal('/Battery/Devices/CellsCurrent', read_current(), 'A'), + CsvSignal('/Battery/Devices/HeatingCurrent', read_heating_current, 'A'), + CsvSignal('/Battery/Devices/HeatingPower', read_heating_power, 'W'), + CsvSignal('/Battery/Devices/SOCAh', read_soc_ah), + CsvSignal('/Battery/Devices/Leds/Blue', return_led_state_blue), + CsvSignal('/Battery/Devices/Leds/Red', return_led_state_red), + CsvSignal('/Battery/Devices/Leds/Green', return_led_state_green), + CsvSignal('/Battery/Devices/Leds/Amber', return_led_state_amber), + CsvSignal('/Battery/Devices/BatteryStrings/String1Active', lambda status: int((read_limb_bitmap()(status) & 0b00001) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String2Active', lambda status: int((read_limb_bitmap()(status) & 0b00010) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String3Active', lambda status: int((read_limb_bitmap()(status) & 0b00100) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String4Active', lambda status: int((read_limb_bitmap()(status) & 0b01000) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String5Active', lambda status: int((read_limb_bitmap()(status) & 0b10000) != 0)), + CsvSignal('/Battery/Devices/IoStatus/ConnectedToDcBus', read_switch_closed), + CsvSignal('/Battery/Devices/IoStatus/AlarmOutActive', read_alarm_out_active), + CsvSignal('/Battery/Devices/IoStatus/InternalFanActive', read_bool(base_register=1013, bit=2)), + 
CsvSignal('/Battery/Devices/IoStatus/VoltMeasurementAllowed', read_bool(base_register=1013, bit=3)), + CsvSignal('/Battery/Devices/IoStatus/AuxRelayBus', read_aux_relay), + CsvSignal('/Battery/Devices/IoStatus/RemoteStateActive', read_bool(base_register=1013, bit=5)), + CsvSignal('/Battery/Devices/IoStatus/RiscActive', read_bool(base_register=1013, bit=6)), + CsvSignal('/Battery/Devices/Eoc', read_eoc_reached), + CsvSignal('/Battery/Devices/SerialNumber', read_serial_number), + CsvSignal('/Battery/Devices/TimeSinceTOC', time_since_toc_in_time_format), + CsvSignal('/Battery/Devices/MaxChargePower', calc_max_charge_power), + CsvSignal('/Battery/Devices/MaxDischargePower', calc_max_discharge_power), + ] + +def read_warning_and_alarm_flags(): + return [ + # Warnings + CsvSignal('/Battery/Devices/WarningFlags/TaM1', read_bool(base_register=1005, bit=1)), + CsvSignal('/Battery/Devices/WarningFlags/TbM1', read_bool(base_register=1005, bit=4)), + CsvSignal('/Battery/Devices/WarningFlags/VBm1', read_bool(base_register=1005, bit=6)), + CsvSignal('/Battery/Devices/WarningFlags/VBM1', read_bool(base_register=1005, bit=8)), + CsvSignal('/Battery/Devices/WarningFlags/IDM1', read_bool(base_register=1005, bit=10)), + CsvSignal('/Battery/Devices/WarningFlags/vsm1', read_bool(base_register=1005, bit=22)), + CsvSignal('/Battery/Devices/WarningFlags/vsM1', read_bool(base_register=1005, bit=24)), + CsvSignal('/Battery/Devices/WarningFlags/iCM1', read_bool(base_register=1005, bit=26)), + CsvSignal('/Battery/Devices/WarningFlags/iDM1', read_bool(base_register=1005, bit=28)), + CsvSignal('/Battery/Devices/WarningFlags/MID1', read_bool(base_register=1005, bit=30)), + CsvSignal('/Battery/Devices/WarningFlags/BLPW', read_bool(base_register=1005, bit=32)), + CsvSignal('/Battery/Devices/WarningFlags/CCBF', read_bool(base_register=1005, bit=33)), + CsvSignal('/Battery/Devices/WarningFlags/Ah_W', read_bool(base_register=1005, bit=35)), + CsvSignal('/Battery/Devices/WarningFlags/MPMM', read_bool(base_register=1005, bit=38)), + CsvSignal('/Battery/Devices/WarningFlags/TCdi', read_bool(base_register=1005, bit=40)), + CsvSignal('/Battery/Devices/WarningFlags/LMPW', read_bool(base_register=1005, bit=44)), + CsvSignal('/Battery/Devices/WarningFlags/TOCW', read_bool(base_register=1005, bit=47)), + CsvSignal('/Battery/Devices/WarningFlags/BUSL', read_bool(base_register=1005, bit=49)), + ], [ + # Alarms + CsvSignal('/Battery/Devices/AlarmFlags/Tam', read_bool(base_register=1005, bit=0)), + CsvSignal('/Battery/Devices/AlarmFlags/TaM2', read_bool(base_register=1005, bit=2)), + CsvSignal('/Battery/Devices/AlarmFlags/Tbm', read_bool(base_register=1005, bit=3)), + CsvSignal('/Battery/Devices/AlarmFlags/TbM2', read_bool(base_register=1005, bit=5)), + CsvSignal('/Battery/Devices/AlarmFlags/VBm2', read_bool(base_register=1005, bit=7)), + CsvSignal('/Battery/Devices/AlarmFlags/VBM2', read_bool(base_register=1005, bit=9)), + CsvSignal('/Battery/Devices/AlarmFlags/IDM2', read_bool(base_register=1005, bit=11)), + CsvSignal('/Battery/Devices/AlarmFlags/ISOB', read_bool(base_register=1005, bit=12)), + CsvSignal('/Battery/Devices/AlarmFlags/MSWE', read_bool(base_register=1005, bit=13)), + CsvSignal('/Battery/Devices/AlarmFlags/FUSE', read_bool(base_register=1005, bit=14)), + CsvSignal('/Battery/Devices/AlarmFlags/HTRE', read_bool(base_register=1005, bit=15)), + CsvSignal('/Battery/Devices/AlarmFlags/TCPE', read_bool(base_register=1005, bit=16)), + CsvSignal('/Battery/Devices/AlarmFlags/STRE', read_bool(base_register=1005, bit=17)), + 
CsvSignal('/Battery/Devices/AlarmFlags/CME', read_bool(base_register=1005, bit=18)), + CsvSignal('/Battery/Devices/AlarmFlags/HWFL', read_bool(base_register=1005, bit=19)), + CsvSignal('/Battery/Devices/AlarmFlags/HWEM', read_bool(base_register=1005, bit=20)), + CsvSignal('/Battery/Devices/AlarmFlags/ThM', read_bool(base_register=1005, bit=21)), + CsvSignal('/Battery/Devices/AlarmFlags/vsm2', read_bool(base_register=1005, bit=23)), + CsvSignal('/Battery/Devices/AlarmFlags/vsM2', read_bool(base_register=1005, bit=25)), + CsvSignal('/Battery/Devices/AlarmFlags/iCM2', read_bool(base_register=1005, bit=27)), + CsvSignal('/Battery/Devices/AlarmFlags/iDM2', read_bool(base_register=1005, bit=29)), + CsvSignal('/Battery/Devices/AlarmFlags/MID2', read_bool(base_register=1005, bit=31)), + CsvSignal('/Battery/Devices/AlarmFlags/HTFS', read_bool(base_register=1005, bit=42)), + CsvSignal('/Battery/Devices/AlarmFlags/DATA', read_bool(base_register=1005, bit=43)), + CsvSignal('/Battery/Devices/AlarmFlags/LMPA', read_bool(base_register=1005, bit=45)), + CsvSignal('/Battery/Devices/AlarmFlags/HEBT', read_bool(base_register=1005, bit=46)), + CsvSignal('/Battery/Devices/AlarmFlags/CURM', read_bool(base_register=1005, bit=48)), + ] diff --git a/firmware/opt/dbus-fz-sonick-48tl-with-s3/start.sh b/firmware/opt/dbus-fz-sonick-48tl-with-s3/start.sh new file mode 100755 index 000000000..83860d3e4 --- /dev/null +++ b/firmware/opt/dbus-fz-sonick-48tl-with-s3/start.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +. /opt/victronenergy/serial-starter/run-service.sh + +app="/opt/innovenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py" +args="$tty" +start $args diff --git a/firmware/opt/innovenergy/scripts/extractS3data.py b/firmware/opt/innovenergy/scripts/extractS3data.py index 41cd81f03..4aeb99ee7 100644 --- a/firmware/opt/innovenergy/scripts/extractS3data.py +++ b/firmware/opt/innovenergy/scripts/extractS3data.py @@ -4,6 +4,9 @@ import subprocess import argparse import matplotlib.pyplot as plt from collections import defaultdict +import zipfile +import base64 +import shutil def extract_timestamp(filename): timestamp_str = filename[:10] @@ -14,7 +17,6 @@ def extract_timestamp(filename): return 0 def extract_values_by_key(csv_file, key, exact_match): - # Initialize a defaultdict for lists matched_values = defaultdict(list) with open(csv_file, 'r') as file: reader = csv.reader(file) @@ -31,37 +33,26 @@ def extract_values_by_key(csv_file, key, exact_match): else: if key_item.lower() in first_column.lower(): matched_values[path_key].append(row[0]) - #return matched_values - # Concatenate all keys to create a single final_key final_key = ''.join(matched_values.keys()) - # Combine all lists of values into a single list combined_values = [] for values in matched_values.values(): combined_values.extend(values) - # Create the final dictionary with final_key and all combined values final_dict = {final_key: combined_values} - #return dict(matched_values) return final_dict def list_files_in_range(start_timestamp, end_timestamp, sampling_stepsize): filenames_in_range = [f"{timestamp:10d}" for timestamp in range(start_timestamp, end_timestamp + 1, 2*sampling_stepsize)] return filenames_in_range -def check_s3_files_exist(bucket_number, filename): - s3cmd_ls_command = f"s3cmd ls s3://{bucket_number}-3e5b3069-214a-43ee-8d85-57d72000c19d/{filename}*" - try: - result = subprocess.run(s3cmd_ls_command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - lines = result.stdout.decode().split('\n')[:-1] - filenames = 
[line.split()[-1].split('/')[-1] for line in lines] - return filenames - except subprocess.CalledProcessError as e: - print(f"Error checking S3 files: {e}") - return [] - -def download_files(bucket_number, filenames_to_download): +def download_files(bucket_number, filenames_to_download, product_type): + if product_type == 0: + hash = "3e5b3069-214a-43ee-8d85-57d72000c19d" + elif product_type == 1: + hash = "c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" + else: + raise ValueError("Invalid product type option. Use 0 or 1") output_directory = f"S3cmdData_{bucket_number}" - if not os.path.exists(output_directory): os.makedirs(output_directory) print(f"Directory '{output_directory}' created.") @@ -70,7 +61,7 @@ def download_files(bucket_number, filenames_to_download): stripfilename = filename.strip() local_path = os.path.join(output_directory, stripfilename + ".csv") if not os.path.exists(local_path): - s3cmd_command = f"s3cmd get s3://{bucket_number}-3e5b3069-214a-43ee-8d85-57d72000c19d/{stripfilename}.csv {output_directory}/" + s3cmd_command = f"s3cmd get s3://{bucket_number}-{hash}/{stripfilename}.csv {output_directory}/" try: subprocess.run(s3cmd_command, shell=True, check=True) downloaded_files = [file for file in os.listdir(output_directory) if file.startswith(filename)] @@ -84,44 +75,48 @@ def download_files(bucket_number, filenames_to_download): else: print(f"File '{filename}.csv' already exists locally. Skipping download.") +def decompress_file(compressed_file, output_directory): + base_name = os.path.splitext(os.path.basename(compressed_file))[0] -def visualize_data(data, output_directory): - # Extract data for visualization (replace this with your actual data extraction) - x_values = [int(entry[0]) for entry in data] - y_values = [float(entry[1]) for entry in data] + with open(compressed_file, 'rb') as file: + compressed_data = file.read() - # Plotting - plt.plot(x_values, y_values, marker='o', linestyle='-', color='b') - plt.xlabel('Timestamp') - plt.ylabel('Your Y-axis Label') - plt.title('Your Plot Title') - plt.grid(True) - plt.savefig(os.path.join(output_directory, f"{start_timestamp}_{key}_plot.png")) - plt.close() # Close the plot window + # Decode the base64 encoded content + decoded_data = base64.b64decode(compressed_data) + zip_path = os.path.join(output_directory, 'temp.zip') + with open(zip_path, 'wb') as zip_file: + zip_file.write(decoded_data) + + with zipfile.ZipFile(zip_path, 'r') as zip_ref: + zip_ref.extractall(output_directory) + + # Rename the extracted data.csv file to the original timestamp-based name + extracted_csv_path = os.path.join(output_directory, 'data.csv') + if os.path.exists(extracted_csv_path): + new_csv_path = os.path.join(output_directory, f"{base_name}.csv") + os.rename(extracted_csv_path, new_csv_path) + + os.remove(zip_path) + #os.remove(compressed_file) + print(f"Decompressed and renamed '{compressed_file}' to '{new_csv_path}'.") - # Save data to CSV - csv_file_path = os.path.join(output_directory, f"{start_timestamp}_{key}_extracted.csv") - with open(csv_file_path, 'w', newline='') as csvfile: - csv_writer = csv.writer(csvfile) - csv_writer.writerow(['Timestamp', 'Value']) # Adjust column names as needed - csv_writer.writerows(data) def get_last_component(path): path_without_slashes = path.replace('/', '') return path_without_slashes - -def download_and_process_files(bucket_number, start_timestamp, end_timestamp, sampling_stepsize, key, booleans_as_numbers, exact_match): +def download_and_process_files(bucket_number, start_timestamp, end_timestamp, 
sampling_stepsize, key, booleans_as_numbers, exact_match, product_type): output_directory = f"S3cmdData_{bucket_number}" + if os.path.exists(output_directory): + shutil.rmtree(output_directory) + if not os.path.exists(output_directory): os.makedirs(output_directory) print(f"Directory '{output_directory}' created.") - - filenames_to_check = list_files_in_range(start_timestamp, end_timestamp, sampling_stepsize) - #filenames_on_s3 = check_s3_files_exist(bucket_number, filenames_to_check, key) + filenames_to_check = list_files_in_range(start_timestamp, end_timestamp, sampling_stepsize) existing_files = [filename for filename in filenames_to_check if os.path.exists(os.path.join(output_directory, f"{filename}.csv"))] files_to_download = set(filenames_to_check) - set(existing_files) @@ -129,15 +124,20 @@ def download_and_process_files(bucket_number, start_timestamp, end_timestamp, sa print("Files already exist in the local folder. Skipping download.") else: if files_to_download: - download_files(bucket_number, files_to_download) + download_files(bucket_number, files_to_download, product_type) + # Decompress all downloaded .csv files (which are actually compressed) + compressed_files = [os.path.join(output_directory, file) for file in os.listdir(output_directory) if file.endswith('.csv')] + for compressed_file in compressed_files: + decompress_file(compressed_file, output_directory) - # Process CSV files csv_files = [file for file in os.listdir(output_directory) if file.endswith('.csv')] csv_files.sort(key=extract_timestamp) + + keypath = '' for key_item in key: - keypath+= get_last_component(key_item) + keypath += get_last_component(key_item) output_csv_filename = f"{keypath}_{start_timestamp}_{bucket_number}.csv" with open(output_csv_filename, 'w', newline='') as csvfile: csv_writer = csv.writer(csvfile) @@ -171,42 +171,35 @@ def download_and_process_files(bucket_number, start_timestamp, end_timestamp, sa print(f"Extracted data saved in '{output_csv_filename}'.") def parse_keys(input_string): - # Split the input string by commas and strip whitespace keys = [key.strip() for key in input_string.split(',')] - # Return keys as a list if more than one, else return the single key - #return keys if len(keys) > 1 else keys[0] return keys - def main(): parser = argparse.ArgumentParser(description='Download files from S3 using s3cmd and extract specific values from CSV files.') parser.add_argument('start_timestamp', type=int, help='The start timestamp for the range (even number)') parser.add_argument('end_timestamp', type=int, help='The end timestamp for the range (even number)') - #parser.add_argument('--key', type=str, required=True, help='The part to match from each CSV file') parser.add_argument('--keys', type=parse_keys, required=True, help='The part to match from each CSV file, can be a single key or a comma-separated list of keys') parser.add_argument('--bucket-number', type=int, required=True, help='The number of the bucket to download from') parser.add_argument('--sampling_stepsize', type=int, required=False, default=1, help='The number of 2sec intervals, which define the length of the sampling interval in S3 file retrieval') parser.add_argument('--booleans_as_numbers', action="store_true", required=False, help='If key used, then booleans are converted to numbers [0/1], if key not used, then booleans maintained as text [False/True]') parser.add_argument('--exact_match', action="store_true", required=False, help='If key used, then key has to match exactly "=", else it is enough that key is found 
"in" text') + parser.add_argument('--product_type', required=True, help='Use 0 for Salimax and 1 for Salidomo') - - args = parser.parse_args(); + args = parser.parse_args() start_timestamp = args.start_timestamp end_timestamp = args.end_timestamp keys = args.keys bucket_number = args.bucket_number sampling_stepsize = args.sampling_stepsize booleans_as_numbers = args.booleans_as_numbers - exact_match = args.exact_match + exact_match = args.exact_match + # new arg for product type + product_type = int(args.product_type) - - - # Check if start_timestamp is smaller than end_timestamp if start_timestamp >= end_timestamp: print("Error: start_timestamp must be smaller than end_timestamp.") return - download_and_process_files(bucket_number, start_timestamp, end_timestamp, sampling_stepsize, keys, booleans_as_numbers, exact_match) + download_and_process_files(bucket_number, start_timestamp, end_timestamp, sampling_stepsize, keys, booleans_as_numbers, exact_match, product_type) if __name__ == "__main__": main() - diff --git a/firmware/opt/victronenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py b/firmware/opt/victronenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py index 283e73bf3..f0af6f187 100755 --- a/firmware/opt/victronenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py +++ b/firmware/opt/victronenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py @@ -1,18 +1,16 @@ -#!/usr/bin/python3 -u +#!/usr/bin/python2 -u # coding=utf-8 import logging import re import socket import sys -import typing - -from gi.repository import GLib as glib +import gobject import signals import config as cfg from dbus.mainloop.glib import DBusGMainLoop -from pymodbus.client import ModbusSerialClient as Modbus +from pymodbus.client.sync import ModbusSerialClient as Modbus from pymodbus.exceptions import ModbusException, ModbusIOException from pymodbus.other_message import ReportSlaveIdRequest from pymodbus.pdu import ExceptionResponse @@ -23,353 +21,334 @@ from python_libs.ie_dbus.dbus_service import DBusService # trick the pycharm type-checker into thinking Callable is in scope, not used at runtime # noinspection PyUnreachableCode if False: - from typing import Callable, List, Iterable, NoReturn + from typing import Callable, List, Iterable, NoReturn + RESET_REGISTER = 0x2087 -SETTINGS_SERVICE_PREFIX = 'com.victronenergy.settings' -INVERTER_SERVICE_PREFIX = 'com.victronenergy.vebus.' 
- def init_modbus(tty): - # type: (str) -> Modbus + # type: (str) -> Modbus - logging.debug('initializing Modbus') + logging.debug('initializing Modbus') - return Modbus( - port='/dev/' + tty, - method=cfg.MODE, - baudrate=cfg.BAUD_RATE, - stopbits=cfg.STOP_BITS, - bytesize=cfg.BYTE_SIZE, - timeout=cfg.TIMEOUT, - parity=cfg.PARITY) + return Modbus( + port='/dev/' + tty, + method=cfg.MODE, + baudrate=cfg.BAUD_RATE, + stopbits=cfg.STOP_BITS, + bytesize=cfg.BYTE_SIZE, + timeout=cfg.TIMEOUT, + parity=cfg.PARITY) def init_udp_socket(): - # type: () -> socket + # type: () -> socket - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.setblocking(False) + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.setblocking(False) - return s + return s def report_slave_id(modbus, slave_address): - # type: (Modbus, int) -> str + # type: (Modbus, int) -> str - slave = str(slave_address) + slave = str(slave_address) - logging.debug('requesting slave id from node ' + slave) + logging.debug('requesting slave id from node ' + slave) - with modbus: + with modbus: - request = ReportSlaveIdRequest(unit=slave_address) - response = modbus.execute(request) + request = ReportSlaveIdRequest(unit=slave_address) + response = modbus.execute(request) - if response is ExceptionResponse or issubclass(type(response), ModbusException): - raise Exception('failed to get slave id from ' + slave + ' : ' + str(response)) + if response is ExceptionResponse or issubclass(type(response), ModbusException): + raise Exception('failed to get slave id from ' + slave + ' : ' + str(response)) - return response.identifier + return response.identifier def identify_battery(modbus, slave_address): - # type: (Modbus, int) -> Battery + # type: (Modbus, int) -> Battery - logging.info('identifying battery...') + logging.info('identifying battery...') - hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address) - firmware_version = read_firmware_version(modbus, slave_address) + hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address) + firmware_version = read_firmware_version(modbus, slave_address) - specs = Battery( - slave_address=slave_address, - hardware_version=hardware_version, - firmware_version=firmware_version, - bms_version=bms_version, - ampere_hours=ampere_hours) + specs = Battery( + slave_address=slave_address, + hardware_version=hardware_version, + firmware_version=firmware_version, + bms_version=bms_version, + ampere_hours=ampere_hours) - logging.info('battery identified:\n{0}'.format(str(specs))) + logging.info('battery identified:\n{0}'.format(str(specs))) - return specs + return specs def identify_batteries(modbus): - # type: (Modbus) -> List[Battery] + # type: (Modbus) -> List[Battery] - def _identify_batteries(): - slave_address = 0 - n_missing = -255 + def _identify_batteries(): + slave_address = 0 + n_missing = -255 - while n_missing < 3: - slave_address += 1 - try: - yield identify_battery(modbus, slave_address) - n_missing = 0 - except Exception as e: - logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e))) - n_missing += 1 + while n_missing < 3: + slave_address += 1 + try: + yield identify_battery(modbus, slave_address) + n_missing = 0 + except Exception as e: + logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e))) + n_missing += 1 - logging.info('giving up searching for further batteries') + logging.info('giving up searching for further batteries') - batteries = 
list(_identify_batteries()) # don't be lazy!
+ batteries = list(_identify_batteries()) # don't be lazy!
- n = len(batteries)
- logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries'))
+ n = len(batteries)
+ logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries'))
- return batteries
+ return batteries
 def parse_slave_id(modbus, slave_address):
- # type: (Modbus, int) -> (str, str, int)
+ # type: (Modbus, int) -> (str, str, int)
- slave_id = report_slave_id(modbus, slave_address)
+ slave_id = report_slave_id(modbus, slave_address)
- sid = re.sub(r'[^\x20-\x7E]', '', slave_id) # remove weird special chars
+ sid = re.sub(r'[^\x20-\x7E]', '', slave_id) # remove weird special chars
- match = re.match('(?P<hw>48TL(?P<ah>[0-9]+)) *(?P<bms>.*)', sid)
+ match = re.match('(?P<hw>48TL(?P<ah>[0-9]+)) *(?P<bms>.*)', sid)
- if match is None:
- raise Exception('no known battery found')
+ if match is None:
+ raise Exception('no known battery found')
- return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip())
+ return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip())
 def read_firmware_version(modbus, slave_address):
- # type: (Modbus, int) -> str
+ # type: (Modbus, int) -> str
- logging.debug('reading firmware version')
+ logging.debug('reading firmware version')
- with modbus:
+ with modbus:
- response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
- register = response.registers[0]
+ response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
+ register = response.registers[0]
- return '{0:0>4X}'.format(register)
+ return '{0:0>4X}'.format(register)
 def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS):
- # type: (Modbus, int, int, int) -> ReadInputRegistersResponse
+ # type: (Modbus, int, int, int) -> ReadInputRegistersResponse
- logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))
+ logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))
- return modbus.read_input_registers(
- address=base_address,
- count=count,
- unit=slave_address)
+ return modbus.read_input_registers(
+ address=base_address,
+ count=count,
+ unit=slave_address)
 def read_battery_status(modbus, battery):
- # type: (Modbus, Battery) -> BatteryStatus
- """
- Read the modbus registers containing the battery's status info.
+ """ - logging.debug('reading battery status') + logging.debug('reading battery status') - with modbus: - data = read_modbus_registers(modbus, battery.slave_address) - return BatteryStatus(battery, data.registers) + with modbus: + data = read_modbus_registers(modbus, battery.slave_address) + return BatteryStatus(battery, data.registers) def publish_values_on_dbus(service, battery_signals, battery_statuses): - # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () + # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () - publish_individuals(service, battery_signals, battery_statuses) - publish_aggregates(service, battery_signals, battery_statuses) + publish_individuals(service, battery_signals, battery_statuses) + publish_aggregates(service, battery_signals, battery_statuses) def publish_aggregates(service, signals, battery_statuses): - # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () + # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () - for s in signals: - if s.aggregate is None: - continue - values = [s.get_value(battery_status) for battery_status in battery_statuses] - value = s.aggregate(values) - - service.own_properties.set(s.dbus_path, value, s.unit) + for s in signals: + if s.aggregate is None: + continue + values = [s.get_value(battery_status) for battery_status in battery_statuses] + value = s.aggregate(values) + service.own_properties.set(s.dbus_path, value, s.unit) def publish_individuals(service, signals, battery_statuses): - # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () + # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> () - for signal in signals: - for battery_status in battery_statuses: - address = battery_status.battery.slave_address - dbus_path = '/_Battery/' + str(address) + signal.dbus_path - value = signal.get_value(battery_status) - service.own_properties.set(dbus_path, value, signal.unit) + for signal in signals: + for battery_status in battery_statuses: + address = battery_status.battery.slave_address + dbus_path = '/_Battery/' + str(address) + signal.dbus_path + value = signal.get_value(battery_status) + service.own_properties.set(dbus_path, value, signal.unit) def publish_service_signals(service, signals): - # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn + # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn - for signal in signals: - service.own_properties.set(signal.dbus_path, signal.value, signal.unit) + for signal in signals: + service.own_properties.set(signal.dbus_path, signal.value, signal.unit) def upload_status_to_innovenergy(sock, statuses): - # type: (socket, Iterable[BatteryStatus]) -> bool + # type: (socket, Iterable[BatteryStatus]) -> bool - logging.debug('upload status') + logging.debug('upload status') - try: - for s in statuses: - sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT)) - except: - logging.debug('FAILED') - return False - else: - return True + try: + for s in statuses: + sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT)) + except: + logging.debug('FAILED') + return False + else: + return True def print_usage(): - print ('Usage: ' + __file__ + ' ') - print ('Example: ' + __file__ + ' ttyUSB0') + print ('Usage: ' + __file__ + ' ') + print ('Example: ' + __file__ + ' ttyUSB0') def parse_cmdline_args(argv): - # type: (List[str]) -> str + # type: (List[str]) -> str - if len(argv) == 0: - 
logging.info('missing command line argument for tty device') - print_usage() - sys.exit(1) + if len(argv) == 0: + logging.info('missing command line argument for tty device') + print_usage() + sys.exit(1) - return argv[0] + return argv[0] def reset_batteries(modbus, batteries): - # type: (Modbus, Iterable[Battery]) -> NoReturn + # type: (Modbus, Iterable[Battery]) -> NoReturn - logging.info('Resetting batteries...') + logging.info('Resetting batteries...') - for battery in batteries: + for battery in batteries: - result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address) + result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address) - # expecting a ModbusIOException (timeout) - # BMS can no longer reply because it is already reset - success = isinstance(result, ModbusIOException) + # expecting a ModbusIOException (timeout) + # BMS can no longer reply because it is already reset + success = isinstance(result, ModbusIOException) - outcome = 'successfully' if success else 'FAILED to' - logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome)) + outcome = 'successfully' if success else 'FAILED to' + logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome)) - logging.info('Shutting down fz-sonick driver') - exit(0) + logging.info('Shutting down fz-sonick driver') + exit(0) -alive = True # global alive flag, watchdog_task clears it, update_task sets it +alive = True # global alive flag, watchdog_task clears it, update_task sets it def create_update_task(modbus, service, batteries): - # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool] - """ - Creates an update task which runs the main update function - and resets the alive flag - """ - _socket = init_udp_socket() - _signals = signals.init_battery_signals() + # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool] + """ + Creates an update task which runs the main update function + and resets the alive flag + """ + _socket = init_udp_socket() + _signals = signals.init_battery_signals() - def update_task(): - # type: () -> bool + def update_task(): + # type: () -> bool - global alive + global alive - logging.debug('starting update cycle') + logging.debug('starting update cycle') - # Checking if we have excess power and if so charge batteries more + if service.own_properties.get('/ResetBatteries').value == 1: + reset_batteries(modbus, batteries) - target = service.remote_properties.get(get_service(SETTINGS_SERVICE_PREFIX) + '/Settings/CGwacs/AcPowerSetPoint').value or 0 - actual = service.remote_properties.get(get_service(INVERTER_SERVICE_PREFIX) + '/Ac/Out/P').value or 0 + statuses = [read_battery_status(modbus, battery) for battery in batteries] - if actual>target: - service.own_properties.set('/Info/MaxChargeCurrent').value = min([battery.i_max for battery in batteries]) + publish_values_on_dbus(service, _signals, statuses) + upload_status_to_innovenergy(_socket, statuses) - if service.own_properties.get('/ResetBatteries').value == 1: - reset_batteries(modbus, batteries) + logging.debug('finished update cycle\n') - statuses = [read_battery_status(modbus, battery) for battery in batteries] + alive = True - publish_values_on_dbus(service, _signals, statuses) - upload_status_to_innovenergy(_socket, statuses) + return True - logging.debug('finished update cycle\n') - - alive = True - - return True - - return update_task + return update_task def create_watchdog_task(main_loop): - # type: (DBusGMainLoop) -> Callable[[],bool] - """ 
- Creates a Watchdog task that monitors the alive flag. - The watchdog kills the main loop if the alive flag is not periodically reset by the update task. - Who watches the watchdog? - """ + # type: (DBusGMainLoop) -> Callable[[],bool] + """ + Creates a Watchdog task that monitors the alive flag. + The watchdog kills the main loop if the alive flag is not periodically reset by the update task. + Who watches the watchdog? + """ + def watchdog_task(): + # type: () -> bool - def watchdog_task(): - # type: () -> bool + global alive - global alive + if alive: + logging.debug('watchdog_task: update_task is alive') + alive = False + return True + else: + logging.info('watchdog_task: killing main loop because update_task is no longer alive') + main_loop.quit() + return False - if alive: - logging.debug('watchdog_task: update_task is alive') - alive = False - return True - else: - logging.info('watchdog_task: killing main loop because update_task is no longer alive') - main_loop.quit() - return False - - return watchdog_task - -def get_service(self, prefix: str) -> Optional[unicode]: - service = next((s for s in self.available_services if s.startswith(prefix)), None) - if service is None: - raise Exception('no service matching ' + prefix + '* available') - - return service + return watchdog_task def main(argv): - # type: (List[str]) -> () + # type: (List[str]) -> () - logging.basicConfig(level=cfg.LOG_LEVEL) - logging.info('starting ' + __file__) + logging.basicConfig(level=cfg.LOG_LEVEL) + logging.info('starting ' + __file__) - tty = parse_cmdline_args(argv) - modbus = init_modbus(tty) + tty = parse_cmdline_args(argv) + modbus = init_modbus(tty) - batteries = identify_batteries(modbus) + batteries = identify_batteries(modbus) - if len(batteries) <= 0: - sys.exit(2) + if len(batteries) <= 0: + sys.exit(2) - service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty) + service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty) - service.own_properties.set('/ResetBatteries', value=False, writable=True) # initial value = False + service.own_properties.set('/ResetBatteries', value=False, writable=True) # initial value = False - main_loop = GLib.MainLoop() + main_loop = gobject.MainLoop() - service_signals = signals.init_service_signals(batteries) - publish_service_signals(service, service_signals) + service_signals = signals.init_service_signals(batteries) + publish_service_signals(service, service_signals) - update_task = create_update_task(modbus, service, batteries) - update_task() # run it right away, so that all props are initialized before anyone can ask - watchdog_task = create_watchdog_task(main_loop) + update_task = create_update_task(modbus, service, batteries) + update_task() # run it right away, so that all props are initialized before anyone can ask + watchdog_task = create_watchdog_task(main_loop) - GLib.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority = GLib.PRIORITY_LOW) # add watchdog first - GLib.timeout_add(cfg.UPDATE_INTERVAL, update_task, priority = GLib.PRIORITY_LOW) # call update once every update_interval + gobject.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority = gobject.PRIORITY_LOW) # add watchdog first + gobject.timeout_add(cfg.UPDATE_INTERVAL, update_task, priority = gobject.PRIORITY_LOW) # call update once every update_interval - logging.info('starting gobject.MainLoop') - main_loop.run() - logging.info('gobject.MainLoop was shut down') + logging.info('starting gobject.MainLoop') + main_loop.run() + logging.info('gobject.MainLoop was 
shut down') - sys.exit(0xFF) # reaches this only on error + sys.exit(0xFF) # reaches this only on error main(sys.argv[1:]) diff --git a/python/dbus-fz-sonick-48tl-with-s3/__init__.py b/python/dbus-fz-sonick-48tl-with-s3/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/dbus-fz-sonick-48tl-with-s3/config.py b/python/dbus-fz-sonick-48tl-with-s3/config.py new file mode 100755 index 000000000..1d4962daa --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/config.py @@ -0,0 +1,59 @@ +import serial +import logging +from data import read_file_one_line + +# dbus configuration + +CONNECTION = 'Modbus RTU' +PRODUCT_NAME = 'FIAMM 48TL Series Battery' +PRODUCT_ID = 0xB012 # assigned by victron +DEVICE_INSTANCE = 1 +SERVICE_NAME_PREFIX = 'com.victronenergy.battery.' + + +# driver configuration + +SOFTWARE_VERSION = '3.0.0' +UPDATE_INTERVAL = 2000 # milliseconds +#LOG_LEVEL = logging.INFO +LOG_LEVEL = logging.DEBUG + + +# battery config + +V_MAX = 54.2 +V_MIN = 42 +R_STRING_MIN = 0.125 +R_STRING_MAX = 0.250 +I_MAX_PER_STRING = 15 +AH_PER_STRING = 40 +NUM_OF_STRINGS_PER_BATTERY = 5 + +# modbus configuration + +BASE_ADDRESS = 999 +NO_OF_REGISTERS = 64 +MAX_SLAVE_ADDRESS = 25 + + +# RS 485 configuration + +PARITY = serial.PARITY_ODD +TIMEOUT = 0.1 # seconds +BAUD_RATE = 115200 +BYTE_SIZE = 8 +STOP_BITS = 1 +MODE = 'rtu' + +# InnovEnergy IOT configuration + +INSTALLATION_NAME = read_file_one_line('/data/innovenergy/openvpn/installation-name') +INNOVENERGY_SERVER_IP = '10.2.0.1' +INNOVENERGY_SERVER_PORT = 8134 +INNOVENERGY_PROTOCOL_VERSION = '48TL200V3' + + +# S3 Credentials +S3BUCKET = "5-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" +S3KEY = "EXO6bb63d9bbe5f938a68fa444b" +S3SECRET = "A4-5wIjIlAqn-p0cUkQu0f9fBIrX1V5PGTBDwjsrlR8" diff --git a/python/dbus-fz-sonick-48tl-with-s3/controller.py b/python/dbus-fz-sonick-48tl-with-s3/controller.py new file mode 100755 index 000000000..749093592 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/controller.py @@ -0,0 +1,644 @@ +#!/usr/bin/python -u +# coding=utf-8 + +import logging +import os +import time +import states as State +import target_type as TargetType + +from random import randint +from python_libs.ie_dbus.dbus_service import DBusService +from python_libs.ie_utils.main_loop import run_on_main_loop + +# noinspection PyUnreachableCode +if False: + from typing import NoReturn, Optional, Any, Iterable, List + +logging.basicConfig(level=logging.INFO) +_log = logging.getLogger(__name__) + +VERSION = '1.0.0' +PRODUCT = 'Controller' + +GRID_SERVICE_PREFIX = 'com.victronenergy.grid.' +BATTERY_SERVICE_PREFIX = 'com.victronenergy.battery.' +INVERTER_SERVICE_PREFIX = 'com.victronenergy.vebus.' 
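+# Note: these prefixes are matched against the names of the D-Bus services
+# currently on the bus (see get_service() below); the concrete service name
+# carries a device suffix, e.g. the battery driver in this repo registers
+# itself as SERVICE_NAME_PREFIX + tty, giving something like
+# 'com.victronenergy.battery.ttyUSB0'.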
+SYSTEM_SERVICE_PREFIX = 'com.victronenergy.system' +HUB4_SERVICE_PREFIX = 'com.victronenergy.hub4' +SETTINGS_SERVICE_PREFIX = 'com.victronenergy.settings' + +UPDATE_PERIOD_MS = 2000 +MAX_POWER_PER_BATTERY = 2500 + +MAX_DAYS_WITHOUT_EOC = 7 +SECONDS_PER_DAY = 24 * 60 * 60 + +GRID_SET_POINT_SETTING = PRODUCT + '/GridSetPoint' +LAST_EOC_SETTING = PRODUCT + '/LastEOC' +CALIBRATION_CHARGE_START_TIME_OF_DAY_SETTING = PRODUCT + '/CalibrationChargeStartTime' + +HEAT_LOSS = 150 # W +P_CONST = 0.5 # W/W + +Epoch = int +Seconds = int + + +def time_now(): + return int(time.time()) + + +class Controller(object): + + def __init__(self, measurement, target, target_type, state): + # type: (float, float, int, int) -> NoReturn + self.target_type = target_type + self.target = target + self.measurement = measurement + self.state = state + + d_p = target - measurement + self.delta = d_p * P_CONST + + @staticmethod + def min(controllers): + # type: (Iterable[Controller]) -> Controller + return min(controllers, key=lambda c: c.delta) + + @staticmethod + def max(controllers): + # type: (Iterable[Controller]) -> Controller + return max(controllers, key=lambda c: c.delta) + + def clamp(self, lower_limit_controllers, upper_limit_controllers): + # type: (List[Controller],List[Controller]) -> Controller + c_min = Controller.min(upper_limit_controllers + [self]) + return Controller.max(lower_limit_controllers + [c_min]) + + +# noinspection PyMethodMayBeStatic +class InnovEnergyController(DBusService): + + def __init__(self): + + super(InnovEnergyController, self).__init__(PRODUCT.lower()) + + self.settings.add_setting(path=LAST_EOC_SETTING, default_value=0) # unix epoch timestamp + self.settings.add_setting(path=GRID_SET_POINT_SETTING, default_value=0) # grid setpoint, Watts + + self.settings.add_setting(path=CALIBRATION_CHARGE_START_TIME_OF_DAY_SETTING, default_value=32400) # 09:00 + + self.own_properties.set('/ProductName', PRODUCT) + self.own_properties.set('/Mgmt/ProcessName', __file__) + self.own_properties.set('/Mgmt/ProcessVersion', VERSION) + self.own_properties.set('/Mgmt/Connection', 'dbus') + self.own_properties.set('/ProductId', PRODUCT) + self.own_properties.set('/FirmwareVersion', VERSION) + self.own_properties.set('/HardwareVersion', VERSION) + self.own_properties.set('/Connected', 1) + self.own_properties.set('/TimeToCalibrationCharge', 'unknown') + self.own_properties.set('/State', 0) + + self.phases = [ + p for p in ['/Hub4/L1/AcPowerSetpoint', '/Hub4/L2/AcPowerSetpoint', '/Hub4/L3/AcPowerSetpoint'] + if self.remote_properties.exists(self.inverter_service + p) + ] + + self.n_phases = len(self.phases) + print ('The system has ' + str(self.n_phases) + ' phase' + ('s' if self.n_phases != 1 else '')) + + self.max_inverter_power = 32700 + # ^ defined in https://github.com/victronenergy/dbus_modbustcp/blob/master/CCGX-Modbus-TCP-register-list.xlsx + + def clamp_power_command(self, value): + # type: (float) -> int + + value = max(value, -self.max_inverter_power) + value = min(value, self.max_inverter_power) + + return int(value) + + def get_service(self, prefix): + # type: (str) -> Optional[unicode] + service = next((s for s in self.available_services if s.startswith(prefix)), None) + + if service is None: + raise Exception('no service matching ' + prefix + '* available') + + return service + + def is_service_available(self, prefix): + # type: (str) -> bool + return next((True for s in self.available_services if s.startswith(prefix)), False) + + @property + def battery_service(self): + # type: () -> 
Optional[unicode] + return self.get_service(BATTERY_SERVICE_PREFIX) + + @property + def battery_available(self): + # type: () -> bool + return self.is_service_available(BATTERY_SERVICE_PREFIX) + + @property + def grid_service(self): + # type: () -> Optional[unicode] + return self.get_service(GRID_SERVICE_PREFIX) + + @property + def grid_meter_available(self): + # type: () -> bool + return self.is_service_available(GRID_SERVICE_PREFIX) + + @property + def inverter_service(self): + # type: () -> Optional[unicode] + return self.get_service(INVERTER_SERVICE_PREFIX) + + @property + def inverter_available(self): + # type: () -> bool + return self.is_service_available(INVERTER_SERVICE_PREFIX) + + @property + def system_service(self): + # type: () -> Optional[unicode] + return self.get_service(SYSTEM_SERVICE_PREFIX) + + @property + def system_service_available(self): + # type: () -> bool + return self.is_service_available(SYSTEM_SERVICE_PREFIX) + + @property + def hub4_service(self): + # type: () -> Optional[unicode] + return self.get_service(HUB4_SERVICE_PREFIX) + + @property + def hub4_service_available(self): + # type: () -> bool + return self.is_service_available(HUB4_SERVICE_PREFIX) + + @property + def inverter_power_setpoint(self): + # type: () -> float + return sum((self.get_inverter_prop(p) for p in self.phases)) + + def get_battery_prop(self, dbus_path): + # type: (str) -> Any + battery_service = self.battery_service + return self.remote_properties.get(battery_service + dbus_path).value + + def get_grid_prop(self, dbus_path): + # type: (str) -> Any + return self.remote_properties.get(self.grid_service + dbus_path).value + + def get_inverter_prop(self, dbus_path): + # type: (str) -> Any + return self.remote_properties.get(self.inverter_service + dbus_path).value + + def get_system_prop(self, dbus_path): + # type: (str) -> Any + system_service = self.system_service + return self.remote_properties.get(system_service + dbus_path).value + + def get_hub4_prop(self, dbus_path): + # type: (str) -> Any + hub4_service = self.hub4_service + return self.remote_properties.get(hub4_service + dbus_path).value + + def set_settings_prop(self, dbus_path, value): + # type: (str, Any) -> bool + return self.remote_properties.set(SETTINGS_SERVICE_PREFIX + dbus_path, value) + + def set_inverter_prop(self, dbus_path, value): + # type: (str, Any) -> bool + inverter_service = self.inverter_service + return self.remote_properties.set(inverter_service + dbus_path, value) + + @property + def max_battery_charge_power(self): + # type: () -> int + return self.get_battery_prop('/Info/MaxChargePower') + + @property + def max_battery_discharge_power(self): + # type: () -> int + return self.get_battery_prop('/Info/MaxDischargePower') + + @property + def max_configured_charge_power(self): + # type: () -> Optional[int] + max_power = self.settings.get('/Settings/CGwacs/MaxChargePower') + return max_power if max_power >= 0 else None + + @property + def max_configured_discharge_power(self): # unsigned + # type: () -> Optional[int] + max_power = self.settings.get('/Settings/CGwacs/MaxDischargePower') + return max_power if max_power >= 0 else None + + @property + def max_charge_power(self): + # type: () -> int + if self.max_configured_charge_power is None: + return self.max_battery_charge_power + else: + return min(self.max_battery_charge_power, self.max_configured_charge_power) + + @property + def max_discharge_power(self): # unsigned + # type: () -> int + if self.max_configured_discharge_power is None: + return 
self.max_battery_discharge_power + else: + return min(self.max_battery_discharge_power, self.max_configured_discharge_power) + + def set_inverter_power_setpoint(self, power): + # type: (float) -> NoReturn + + if self.settings.get('/Settings/CGwacs/BatteryLife/State') == 9: + self.settings.set('/Settings/CGwacs/BatteryLife/State', 0) # enables scheduled charge + self.settings.set('/Settings/CGwacs/Hub4Mode', 3) # disable hub4 + self.set_inverter_prop('/Hub4/DisableCharge', 0) + self.set_inverter_prop('/Hub4/DisableFeedIn', 0) + + power = self.clamp_power_command(power / self.n_phases) + for p in self.phases: + self.set_inverter_prop(p, power + randint(-1, 1)) # use randint to force dbus re-send + + def set_controller_state(self, state): + # type: (int) -> NoReturn + self.own_properties.set('/State', state) + + @property + def grid_power(self): + # type: () -> Optional[float] + try: + return self.get_grid_prop('/Ac/Power') + except: + return None + + @property + def battery_cold(self): + # type: () -> bool + return self.get_battery_prop('/IoStatus/BatteryCold') == 1 + + @property + def eoc_reached(self): + # type: () -> bool + if not self.battery_available: + return False + + return min(self.get_battery_prop('/EOCReached')) == 1 + + @property + def battery_power(self): + # type: () -> float + return self.get_battery_prop('/Dc/0/Power') + + @property + def inverter_ac_in_power(self): + # type: () -> float + return self.get_inverter_prop('/Ac/ActiveIn/P') + + @property + def inverter_ac_out_power(self): + # type: () -> float + return self.get_inverter_prop('/Ac/Out/P') + + @property + def soc(self): + # type: () -> float + return self.get_battery_prop('/Soc') + + @property + def n_batteries(self): + # type: () -> int + return self.get_battery_prop('/NbOfBatteries') + + @property + def min_soc(self): + # type: () -> float + return self.settings.get('/Settings/CGwacs/BatteryLife/MinimumSocLimit') + + @property + def should_hold_min_soc(self): + # type: () -> bool + return self.min_soc <= self.soc <= self.min_soc + 5 + + @property + def utc_offset(self): + # type: () -> int + + # stackoverflow.com/a/1301528 + # stackoverflow.com/a/3168394 + + os.environ['TZ'] = self.settings.get('/Settings/System/TimeZone') + time.tzset() + is_dst = time.daylight and time.localtime().tm_isdst > 0 + return -(time.altzone if is_dst else time.timezone) + + @property + def grid_set_point(self): + # type: () -> float + return self.settings.get('/Settings/CGwacs/AcPowerSetPoint') + + @property + def time_to_calibration_charge_str(self): + # type: () -> str + return self.own_properties.get('/TimeToCalibrationCharge').text + + @property + def calibration_charge_deadline(self): + # type: () -> Epoch + + utc_offset = self.utc_offset + ultimate_deadline = self.settings.get(LAST_EOC_SETTING) + MAX_DAYS_WITHOUT_EOC * SECONDS_PER_DAY + midnight_before_udl = int((ultimate_deadline + utc_offset) / SECONDS_PER_DAY) * SECONDS_PER_DAY - utc_offset # round off to last midnight + + dead_line = midnight_before_udl + self.calibration_charge_start_time_of_day + + while dead_line > ultimate_deadline: # should fire at most once, but let's be defensive... 
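+ # illustrative numbers (assumed, not from the code): with LAST_EOC at
+ # Mon 10:00, the ultimate deadline is Mon+7d 10:00; rounding to midnight
+ # gives Mon+7d 00:00, and adding a 09:00 start time yields Mon+7d 09:00,
+ # so the loop does not fire. Only a start time later than 10:00 would
+ # push the deadline past the limit and trigger one subtraction here.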
+ dead_line -= SECONDS_PER_DAY # too late, advance one day + + return dead_line + + @property + def time_to_calibration_charge(self): + # type: () -> Seconds + return self.calibration_charge_deadline - time_now() + + @property + def grid_blackout(self): + # type: () -> bool + return self.get_inverter_prop('/Leds/Mains') < 1 + + @property + def scheduled_charge(self): + # type: () -> bool + return self.get_hub4_prop('/Overrides/ForceCharge') != 0 + + @property + def calibration_charge_start_time_of_day(self): + # type: () -> Seconds + return self.settings.get(CALIBRATION_CHARGE_START_TIME_OF_DAY_SETTING) # seconds since midnight + + @property + def must_do_calibration_charge(self): + # type: () -> bool + return self.time_to_calibration_charge <= 0 + + def controller_charge_to_min_soc(self): + # type: () -> Controller + + return Controller( + measurement=self.battery_power, + target=self.max_charge_power, + target_type=TargetType.BATTERY_DC, + state=State.CHARGE_TO_MIN_SOC + ) + + def controller_hold_min_soc(self): + # type: () -> Controller + + # TODO: explain + + a = -4 * HEAT_LOSS * self.n_batteries + b = -a * (self.min_soc + .5) + + target_dc_power = a * self.soc + b + + return Controller( + measurement = self.battery_power, + target = target_dc_power, + target_type = TargetType.BATTERY_DC, + state = State.HOLD_MIN_SOC + ) + + def controller_calibration_charge(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.CALIBRATION_CHARGE + ) + + def controller_limit_discharge_power(self): # signed + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = -self.max_discharge_power, # add sign! 
+ target_type = TargetType.BATTERY_DC, + state = State.LIMIT_DISCHARGE_POWER + ) + + def controller_limit_charge_power(self): + # type: () -> Controller + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.LIMIT_CHARGE_POWER + ) + + def controller_optimize_self_consumption(self): + # type: () -> Controller + + return Controller( + measurement = self.grid_power, + target = self.grid_set_point, + target_type = TargetType.GRID_AC, + state = State.OPTIMIZE_SELF_CONSUMPTION + ) + + def controller_heating(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.HEATING + ) + + def controller_scheduled_charge(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.SCHEDULED_CHARGE + ) + + def controller_no_grid_meter(self): + # type: () -> Controller + + return Controller( + measurement = self.battery_power, + target = self.max_charge_power, + target_type = TargetType.BATTERY_DC, + state = State.NO_GRID_METER_AVAILABLE + ) + + def controller_no_battery(self): + # type: () -> Controller + + return Controller( + measurement = self.inverter_ac_in_power, + target = 0, + target_type = TargetType.INVERTER_AC_IN, + state = State.NO_BATTERY_AVAILABLE + ) + + def controller_bridge_grid_blackout(self): + # type: () -> Controller + + return Controller( + measurement = 0, + target = 0, + target_type = TargetType.GRID_AC, + state = State.BRIDGE_GRID_BLACKOUT + ) + + def update_eoc(self): + + if self.eoc_reached: + print('battery has reached EOC') + self.settings.set(LAST_EOC_SETTING, time_now()) + + self.publish_time_to_calibration_charge() + + def publish_time_to_calibration_charge(self): + + total_seconds = self.time_to_calibration_charge + + if total_seconds <= 0: + time_to_eoc_str = 'now' + else: + total_minutes, seconds = divmod(total_seconds, 60) + total_hours, minutes = divmod(total_minutes, 60) + total_days, hours = divmod(total_hours, 24) + + days_str = (str(total_days) + 'd') if total_days > 0 else '' + hours_str = (str(hours) + 'h') if total_hours > 0 else '' + minutes_str = (str(minutes) + 'm') if total_days == 0 else '' + + time_to_eoc_str = "{0} {1} {2}".format(days_str, hours_str, minutes_str).strip() + + self.own_properties.set('/TimeToCalibrationCharge', time_to_eoc_str) + + def print_system_stats(self, controller): + # type: (Controller) -> NoReturn + + def soc_setpoint(): + if controller.state == State.CALIBRATION_CHARGE or controller.state == State.NO_GRID_METER_AVAILABLE: + return ' => 100%' + if controller.state == State.CHARGE_TO_MIN_SOC: + return ' => ' + str(int(self.min_soc)) + '%' + return '' + + def setpoint(target_type): + if target_type != controller.target_type: + return '' + return ' => ' + str(int(controller.target)) + 'W' + + def p(power): + # type: (Optional[float]) -> str + if power is None: + return ' --- W' + else: + return str(int(power)) + 'W' + + ac_loads = None if self.grid_power is None else self.grid_power - self.inverter_ac_in_power + delta = p(controller.delta) if controller.delta < 0 else '+' + p(controller.delta) + battery_power = self.battery_power if self.battery_available else None + soc_ = str(self.soc) + '%' if self.battery_available else '---' + + print (State.name_of[controller.state]) + print ('') + print ('time to 
CC: ' + self.time_to_calibration_charge_str) + print (' SOC: ' + soc_ + soc_setpoint()) + print (' grid: ' + p(self.grid_power) + setpoint(TargetType.GRID_AC)) + print (' battery: ' + p(battery_power) + setpoint(TargetType.BATTERY_DC)) + print (' AC in: ' + p(self.inverter_ac_in_power) + ' ' + delta) + print (' AC out: ' + p(self.inverter_ac_out_power)) + print (' AC loads: ' + p(ac_loads)) + + def choose_controller(self): + # type: () -> Controller + + if self.grid_blackout: + return self.controller_bridge_grid_blackout() + + if not self.battery_available: + return self.controller_no_battery() + + if self.battery_cold: + return self.controller_heating() + + if self.scheduled_charge: + return self.controller_scheduled_charge() + + if self.must_do_calibration_charge: + return self.controller_calibration_charge() + + if self.soc < self.min_soc: + return self.controller_charge_to_min_soc() + + if not self.grid_meter_available: + return self.controller_no_grid_meter() + + hold_min_soc = self.controller_hold_min_soc() + limit_discharge_power = self.controller_limit_discharge_power() # signed + + lower_limit = [limit_discharge_power, hold_min_soc] + + # No upper limit. We no longer actively limit charge power. DC/DC Charger inside the BMS will do that for us. + upper_limit = [] + + optimize_self_consumption = self.controller_optimize_self_consumption() + + return optimize_self_consumption.clamp(lower_limit, upper_limit) + + def update(self): + + print('iteration started\n') + + self.update_eoc() + + if self.inverter_available: + + controller = self.choose_controller() + power = self.inverter_ac_in_power + controller.delta + + self.set_inverter_power_setpoint(power) + self.set_controller_state(controller.state) + self.print_system_stats(controller) # for debug + + else: + self.set_controller_state(State.NO_INVERTER_AVAILABLE) + print('inverter not available!') + + print('\niteration finished\n') + + +def main(): + + print('starting ' + __file__) + + with InnovEnergyController() as service: + run_on_main_loop(service.update, UPDATE_PERIOD_MS) + + print(__file__ + ' has shut down') + + +if __name__ == '__main__': + main() diff --git a/python/dbus-fz-sonick-48tl-with-s3/convert.py b/python/dbus-fz-sonick-48tl-with-s3/convert.py new file mode 100755 index 000000000..7138d856a --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/convert.py @@ -0,0 +1,192 @@ +import struct + +import config as cfg +from data import LedState, BatteryStatus + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Iterable, Union, AnyStr, Any + + +def read_bool(base_register, bit): + # type: (int, int) -> Callable[[BatteryStatus], bool] + + # TODO: explain base register offset + register = base_register + int(bit/16) + bit = bit % 16 + + def get_value(status): + # type: (BatteryStatus) -> bool + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value & (1 << bit) > 0 + + return get_value + + +def read_float(register, scale_factor=1.0, offset=0.0): + # type: (int, float, float) -> Callable[[BatteryStatus], float] + + def get_value(status): + # type: (BatteryStatus) -> float + value = status.modbus_data[register - cfg.BASE_ADDRESS] + + if value >= 0x8000: # convert to signed int16 + value -= 0x10000 # fiamm stores their integers signed AND with sign-offset @#%^&! 
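+ # worked example, using the total-current parameters defined elsewhere in
+ # this repo (register=1062, scale_factor=0.01, offset=-10000):
+ # raw 10500 -> (10500 - 10000) * 0.01 = 5.0 A
+ # raw 9500 -> ( 9500 - 10000) * 0.01 = -5.0 A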
+ + return (value + offset) * scale_factor + + return get_value + + +def read_registers(register, count): + # type: (int, int) -> Callable[[BatteryStatus], List[int]] + + start = register - cfg.BASE_ADDRESS + end = start + count + + def get_value(status): + # type: (BatteryStatus) -> List[int] + return [x for x in status.modbus_data[start:end]] + + return get_value + + +def comma_separated(values): + # type: (Iterable[str]) -> str + return ", ".join(set(values)) + + +def count_bits(base_register, nb_of_registers, nb_of_bits, first_bit=0): + # type: (int, int, int, int) -> Callable[[BatteryStatus], int] + + get_registers = read_registers(base_register, nb_of_registers) + end_bit = first_bit + nb_of_bits + + def get_value(status): + # type: (BatteryStatus) -> int + + registers = get_registers(status) + bin_registers = [bin(x)[-1:1:-1] for x in registers] # reverse the bits in each register so that bit0 is to the left + str_registers = [str(x).ljust(16, "0") for x in bin_registers] # add leading zeroes, so all registers are 16 chars long + bit_string = ''.join(str_registers) # join them, one long string of 0s and 1s + filtered_bits = bit_string[first_bit:end_bit] # take the first nb_of_bits bits starting at first_bit + + return filtered_bits.count('1') # count 1s + + return get_value + + +def read_led_state(register, led): + # type: (int, int) -> Callable[[BatteryStatus], int] + + read_lo = read_bool(register, led * 2) + read_hi = read_bool(register, led * 2 + 1) + + def get_value(status): + # type: (BatteryStatus) -> int + + lo = read_lo(status) + hi = read_hi(status) + + if hi: + if lo: + return LedState.blinking_fast + else: + return LedState.blinking_slow + else: + if lo: + return LedState.on + else: + return LedState.off + + return get_value + + +# noinspection PyShadowingNames +def unit(unit): + # type: (unicode) -> Callable[[unicode], unicode] + + def get_text(v): + # type: (unicode) -> unicode + return "{0}{1}".format(str(v), unit) + + return get_text + + +def const(constant): + # type: (any) -> Callable[[any], any] + def get(*args): + return constant + return get + + +def mean(numbers): + # type: (List[Union[float,int]]) -> float + return float(sum(numbers)) / len(numbers) + + +def first(ts, default=None): + return next((t for t in ts), default) + + +def bitfields_to_str(lists): + # type: (List[List[int]]) -> str + + def or_lists(): + # type: () -> Iterable[int] + + length = len(first(lists)) + n_lists = len(lists) + + for i in range(0, length): + e = 0 + for l in range(0, n_lists): + e = e | lists[l][i] + yield e + + hexed = [ + '{0:0>4X}'.format(x) + for x in or_lists() + ] + + return ' '.join(hexed) + + +def pack_string(string): + # type: (AnyStr) -> Any + data = string.encode('UTF-8') + return struct.pack('B', len(data)) + data + + +def read_bitmap(register): + # type: (int) -> Callable[[BatteryStatus], int] + + def get_value(status): + # type: (BatteryStatus) -> int + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value + + return get_value + +def return_in_list(ts): + return ts + +def first(ts): + return next(t for t in ts) + +def read_hex_string(register, count): + # type: (int, int) -> Callable[[BatteryStatus], str] + """ + reads count consecutive modbus registers from start_address, + and returns a hex representation of it: + e.g. for count=4: DEAD BEEF DEAD BEEF. 
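+ read_hex_string(1060, 2), for instance, is what the CSV signal code in
+ this repo uses to fetch the two battery-status registers when detecting
+ the "EOC_" marker.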
+ """ + start = register - cfg.BASE_ADDRESS + end = start + count + + def get_value(status): + # type: (BatteryStatus) -> str + return ' '.join(['{0:0>4X}'.format(x) for x in status.modbus_data[start:end]]) + + return get_value diff --git a/python/dbus-fz-sonick-48tl-with-s3/data.py b/python/dbus-fz-sonick-48tl-with-s3/data.py new file mode 100755 index 000000000..9bff4ff93 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/data.py @@ -0,0 +1,134 @@ +import config as cfg + + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable, List, Optional, AnyStr, Union, Any + + +class LedState(object): + """ + from page 6 of the '48TLxxx ModBus Protocol doc' + """ + off = 0 + on = 1 + blinking_slow = 2 + blinking_fast = 3 + + +class LedColor(object): + green = 0 + amber = 1 + blue = 2 + red = 3 + + +class ServiceSignal(object): + + def __init__(self, dbus_path, get_value_or_const, unit=''): + # type: (str, Union[Callable[[],Any],Any], Optional[AnyStr] )->None + + self.get_value_or_const = get_value_or_const + self.dbus_path = dbus_path + self.unit = unit + + @property + def value(self): + try: + return self.get_value_or_const() # callable + except: + return self.get_value_or_const # value + + +class BatterySignal(object): + + def __init__(self, dbus_path, aggregate, get_value, unit=''): + # type: (str, Callable[[List[any]],any], Callable[[BatteryStatus],any], Optional[AnyStr] )->None + """ + A Signal holds all information necessary for the handling of a + certain datum (e.g. voltage) published by the battery. + + :param dbus_path: str + object_path on DBus where the datum needs to be published + + :param aggregate: Iterable[any] -> any + function that combines the values of multiple batteries into one. + e.g. 
sum for currents, or mean for voltages + + :param get_value: (BatteryStatus) -> any + function to extract the datum from the modbus record, + """ + + self.dbus_path = dbus_path + self.aggregate = aggregate + self.get_value = get_value + self.unit = unit + + +class Battery(object): + + """ Data record to hold hardware and firmware specs of the battery """ + + def __init__(self, slave_address, hardware_version, firmware_version, bms_version, ampere_hours): + # type: (int, str, str, str, int) -> None + self.slave_address = slave_address + self.hardware_version = hardware_version + self.firmware_version = firmware_version + self.bms_version = bms_version + self.ampere_hours = ampere_hours + self.n_strings = int(ampere_hours/cfg.AH_PER_STRING) + self.i_max = self.n_strings * cfg.I_MAX_PER_STRING + self.v_min = cfg.V_MIN + self.v_max = cfg.V_MAX + self.r_int_min = cfg.R_STRING_MIN / self.n_strings + self.r_int_max = cfg.R_STRING_MAX / self.n_strings + + def __str__(self): + return 'slave address = {0}\nhardware version = {1}\nfirmware version = {2}\nbms version = {3}\nampere hours = {4}'.format( + self.slave_address, self.hardware_version, self.firmware_version, self.bms_version, str(self.ampere_hours)) + + +class BatteryStatus(object): + """ + record holding the current status of a battery + """ + def __init__(self, battery, modbus_data): + # type: (Battery, List[int]) -> None + + self.battery = battery + self.modbus_data = modbus_data + + def serialize(self): + # type: () -> str + + b = self.battery + + s = cfg.INNOVENERGY_PROTOCOL_VERSION + '\n' + s += cfg.INSTALLATION_NAME + '\n' + s += str(b.slave_address) + '\n' + s += b.hardware_version + '\n' + s += b.firmware_version + '\n' + s += b.bms_version + '\n' + s += str(b.ampere_hours) + '\n' + + for d in self.modbus_data: + s += str(d) + '\n' + + return s + + +def read_file_one_line(file_name): + + with open(file_name, 'r') as file: + return file.read().replace('\n', '').replace('\r', '').strip() + + +class CsvSignal(object): + def __init__(self, name, get_value, get_text=None): + self.name = name + self.get_value = get_value if callable(get_value) else lambda _: get_value + self.get_text = get_text + + if get_text is None: + self.get_text = "" diff --git a/python/dbus-fz-sonick-48tl-with-s3/dbus-fzsonick-48tl.py b/python/dbus-fz-sonick-48tl-with-s3/dbus-fzsonick-48tl.py new file mode 100755 index 000000000..cec68cd43 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/dbus-fzsonick-48tl.py @@ -0,0 +1,678 @@ +#!/usr/bin/python2 -u +# coding=utf-8 + +import logging +import re +import socket +import sys +import gobject +import signals +import config as cfg + +from dbus.mainloop.glib import DBusGMainLoop +from pymodbus.client.sync import ModbusSerialClient as Modbus +from pymodbus.exceptions import ModbusException, ModbusIOException +from pymodbus.other_message import ReportSlaveIdRequest +from pymodbus.pdu import ExceptionResponse +from pymodbus.register_read_message import ReadInputRegistersResponse +from data import BatteryStatus, BatterySignal, Battery, ServiceSignal +from python_libs.ie_dbus.dbus_service import DBusService + +import time +import os +import csv +import pika +import zipfile +import hashlib +import base64 +import hmac +import requests +from datetime import datetime +import io +import json +from convert import first +CSV_DIR = "/data/csv_files/" +INSTALLATION_NAME_FILE = '/data/innovenergy/openvpn/installation-name' + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection 
PyUnreachableCode
+if False:
+    from typing import Callable, List, Iterable, NoReturn
+
+
+RESET_REGISTER = 0x2087
+
+
+def compress_csv_data(csv_data, file_name="data.csv"):
+    memory_stream = io.BytesIO()
+
+    # Create a zip archive in the memory buffer
+    with zipfile.ZipFile(memory_stream, 'w', zipfile.ZIP_DEFLATED) as archive:
+        # Add CSV data to the ZIP archive using writestr
+        archive.writestr(file_name, csv_data.encode('utf-8'))
+
+    # Get the compressed byte array from the memory buffer
+    compressed_bytes = memory_stream.getvalue()
+
+    # Encode the compressed byte array as a Base64 string
+    base64_string = base64.b64encode(compressed_bytes).decode('utf-8')
+
+    return base64_string
+
+
+class S3config(object):
+    def __init__(self):
+        self.bucket = cfg.S3BUCKET
+        self.region = "sos-ch-dk-2"
+        self.provider = "exo.io"
+        self.key = cfg.S3KEY
+        self.secret = cfg.S3SECRET
+        self.content_type = "application/base64; charset=utf-8"
+
+    @property
+    def host(self):
+        return "{}.{}.{}".format(self.bucket, self.region, self.provider)
+
+    @property
+    def url(self):
+        return "https://{}".format(self.host)
+
+    def create_put_request(self, s3_path, data):
+        headers = self._create_request("PUT", s3_path)
+        url = "{}/{}".format(self.url, s3_path)
+        response = requests.put(url, headers=headers, data=data)
+        return response
+
+    def _create_request(self, method, s3_path):
+        date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
+        auth = self._create_authorization(method, self.bucket, s3_path, date, self.key, self.secret, self.content_type)
+        headers = {
+            "Host": self.host,
+            "Date": date,
+            "Authorization": auth,
+            "Content-Type": self.content_type
+        }
+        return headers
+
+    @staticmethod
+    def _create_authorization(method, bucket, s3_path, date, s3_key, s3_secret, content_type="", md5_hash=""):
+        # AWS signature v2: sign "verb\nmd5\ncontent-type\ndate\n/bucket/path" with HMAC-SHA1
+        payload = "{}\n{}\n{}\n{}\n/{}/{}".format(
+            method, md5_hash, content_type, date, bucket.strip('/'), s3_path.strip('/')
+        )
+        signature = base64.b64encode(
+            hmac.new(s3_secret.encode(), payload.encode(), hashlib.sha1).digest()
+        ).decode()
+        return "AWS {}:{}".format(s3_key, signature)
+
+
+def SubscribeToQueue():
+    channel = None  # stays None if the connection attempt fails
+    try:
+        connection = pika.BlockingConnection(pika.ConnectionParameters(host="10.2.0.11",
+                                                                       port=5672,
+                                                                       virtual_host="/",
+                                                                       credentials=pika.PlainCredentials("producer", "b187ceaddb54d5485063ddc1d41af66f")))
+        channel = connection.channel()
+        channel.queue_declare(queue="statusQueue", durable=True)
+        print("Subscribed to queue")
+    except Exception as ex:
+        print("An error occurred while connecting to the RabbitMQ queue: {}".format(ex))
+    return channel
+
+
+previous_warnings = {}
+previous_alarms = {}
+
+
+class MessageType(object):
+    ALARM_OR_WARNING = "AlarmOrWarning"
+    HEARTBEAT = "Heartbeat"
+
+
+class AlarmOrWarning(object):
+    def __init__(self, description, created_by):
+        self.date = datetime.now().strftime('%Y-%m-%d')
+        self.time = datetime.now().strftime('%H:%M:%S')
+        self.description = description
+        self.created_by = created_by
+
+    def to_dict(self):
+        return {
+            "Date": self.date,
+            "Time": self.time,
+            "Description": self.description,
+            "CreatedBy": self.created_by
+        }
+
+
+channel = SubscribeToQueue()
+# Create an S3config instance
+s3_config = S3config()
+INSTALLATION_ID = int(s3_config.bucket.split('-')[0])
+PRODUCT_ID = 1
+is_first_update = True
+prev_status = 0
+subscribed_to_queue_first_time = False
+heartbeat_interval = 0
+
+
+def update_state_from_dictionaries(current_warnings, current_alarms, node_numbers):
+    global previous_warnings, previous_alarms, INSTALLATION_ID, PRODUCT_ID, is_first_update, channel, prev_status, heartbeat_interval, subscribed_to_queue_first_time
+
+    heartbeat_interval += 1
+
+    if is_first_update:
+        changed_warnings = current_warnings
+        changed_alarms = current_alarms
+        is_first_update = False
+    else:
+        changed_alarms = {}
+        changed_warnings = {}
+        # calculate the diff in warnings and alarms
+        # NB: this assumes current_* and previous_* hold the same keys in the
+        # same order, which holds here because both are filled from the same
+        # signal list on every update cycle
+        prev_alarm_value_list = list(previous_alarms.values())
+        alarm_keys = list(previous_alarms.keys())
+
+        for i, alarm in enumerate(current_alarms.values()):
+            changed_alarms[alarm_keys[i]] = alarm != prev_alarm_value_list[i]
+
+        prev_warning_value_list = list(previous_warnings.values())
+        warning_keys = list(previous_warnings.keys())
+
+        for i, warning in enumerate(current_warnings.values()):
+            changed_warnings[warning_keys[i]] = warning != prev_warning_value_list[i]
+
+    status_message = {
+        "InstallationId": INSTALLATION_ID,
+        "Product": PRODUCT_ID,
+        "Status": 0,
+        "Type": 1,
+        "Warnings": [],
+        "Alarms": []
+    }
+
+    # NB: every node currently gets the same total count; the loops below do
+    # not filter by node
+    alarms_number_list = []
+    for node_number in node_numbers:
+        cnt = 0
+        for alarm_value in current_alarms.values():
+            if alarm_value:
+                cnt += 1
+        alarms_number_list.append(cnt)
+
+    warnings_number_list = []
+    for node_number in node_numbers:
+        cnt = 0
+        for warning_value in current_warnings.values():
+            if warning_value:
+                cnt += 1
+        warnings_number_list.append(cnt)
+
+    # report every alarm/warning that changed and is currently raised
+    if any(changed_alarms.values()):
+        for i, changed_alarm in enumerate(changed_alarms.values()):
+            if changed_alarm and list(current_alarms.values())[i]:
+                status_message["Alarms"].append(AlarmOrWarning(list(current_alarms.keys())[i], "System").to_dict())
+
+    if any(changed_warnings.values()):
+        for i, changed_warning in enumerate(changed_warnings.values()):
+            if changed_warning and list(current_warnings.values())[i]:
+                status_message["Warnings"].append(AlarmOrWarning(list(current_warnings.keys())[i], "System").to_dict())
+
+    if any(current_alarms.values()):
+        status_message["Status"] = 2
+    elif any(current_warnings.values()):
+        status_message["Status"] = 1
+    else:
+        status_message["Status"] = 0
+
+    if status_message["Status"] != prev_status or len(status_message["Warnings"]) > 0 or len(status_message["Alarms"]) > 0:
+        prev_status = status_message["Status"]
+        status_message["Type"] = 0
+        status_message = json.dumps(status_message)
+        channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
+        print(status_message)
+        print("Message sent successfully")
+    elif heartbeat_interval >= 15 or not subscribed_to_queue_first_time:
+        # send a heartbeat roughly every 15 update cycles, and once at startup
+        print("Sending heartbeat message to RabbitMQ")
+        heartbeat_interval = 0
+        subscribed_to_queue_first_time = True
+        status_message = json.dumps(status_message)
+        channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
+
+    previous_warnings = current_warnings.copy()
+    previous_alarms = current_alarms.copy()
+
+    return status_message, alarms_number_list, warnings_number_list
+
+
+def read_csv_as_string(file_path):
+    """
+    Reads a CSV file from the given path and returns its content as a single string.
+    """
+    try:
+        # Note: 'encoding' is not available in open() in Python 2.7, so we use the 'codecs' module.
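+        # (a sketch of the Python 3 equivalent, kept as a comment only because
+        # this driver runs under Python 2.7:
+        #     with open(file_path, 'r', encoding='utf-8') as f:
+        #         return f.read()
+        # )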
+        import codecs
+        with codecs.open(file_path, 'r', encoding='utf-8') as f:  # 'f', not 'file': don't shadow the Python 2 builtin
+            return f.read()
+    except IOError as e:
+        if e.errno == 2:  # errno 2 corresponds to "No such file or directory"
+            print("Error: The file {} does not exist.".format(file_path))
+        else:
+            print("IO error occurred: {}".format(str(e)))
+        return None
+
+
+def init_modbus(tty):
+    # type: (str) -> Modbus
+
+    logging.debug('initializing Modbus')
+
+    return Modbus(
+        port='/dev/' + tty,
+        method=cfg.MODE,
+        baudrate=cfg.BAUD_RATE,
+        stopbits=cfg.STOP_BITS,
+        bytesize=cfg.BYTE_SIZE,
+        timeout=cfg.TIMEOUT,
+        parity=cfg.PARITY)
+
+
+def init_udp_socket():
+    # type: () -> socket
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    s.setblocking(False)
+
+    return s
+
+
+def report_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> str
+
+    slave = str(slave_address)
+
+    logging.debug('requesting slave id from node ' + slave)
+
+    with modbus:
+
+        request = ReportSlaveIdRequest(unit=slave_address)
+        response = modbus.execute(request)
+
+        # ExceptionResponse is a response PDU, not a ModbusException, so it
+        # needs its own isinstance check
+        if isinstance(response, ExceptionResponse) or issubclass(type(response), ModbusException):
+            raise Exception('failed to get slave id from ' + slave + ' : ' + str(response))
+
+        return response.identifier
+
+
+def identify_battery(modbus, slave_address):
+    # type: (Modbus, int) -> Battery
+
+    logging.info('identifying battery...')
+
+    hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address)
+    firmware_version = read_firmware_version(modbus, slave_address)
+
+    specs = Battery(
+        slave_address=slave_address,
+        hardware_version=hardware_version,
+        firmware_version=firmware_version,
+        bms_version=bms_version,
+        ampere_hours=ampere_hours)
+
+    logging.info('battery identified:\n{0}'.format(str(specs)))
+
+    return specs
+
+
+def identify_batteries(modbus):
+    # type: (Modbus) -> List[Battery]
+
+    def _identify_batteries():
+        slave_address = 0
+        n_missing = -255  # start deeply negative: scan hundreds of addresses before the first hit, but stop after 3 consecutive misses once a battery was found
+
+        while n_missing < 3:
+            slave_address += 1
+            try:
+                yield identify_battery(modbus, slave_address)
+                n_missing = 0
+            except Exception as e:
+                logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e)))
+                n_missing += 1
+
+        logging.info('giving up searching for further batteries')
+
+    batteries = list(_identify_batteries())  # don't be lazy!
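+    # materializing the generator right away matters: probing the modbus for
+    # batteries (and the logging above) should happen exactly once, here, and
+    # len() below needs a concrete list anyway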
+
+    n = len(batteries)
+    logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries'))
+
+    return batteries
+
+
+def parse_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> (str, str, int)
+
+    slave_id = report_slave_id(modbus, slave_address)
+
+    sid = re.sub(r'[^\x20-\x7E]', '', slave_id)  # remove weird special chars
+
+    match = re.match('(?P<hw>48TL(?P<ah>[0-9]+)) *(?P<bms>.*)', sid)
+
+    if match is None:
+        raise Exception('no known battery found')
+
+    return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip())
+
+
+def read_firmware_version(modbus, slave_address):
+    # type: (Modbus, int) -> str
+
+    logging.debug('reading firmware version')
+
+    with modbus:
+
+        response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
+        register = response.registers[0]
+
+        return '{0:0>4X}'.format(register)
+
+
+def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS):
+    # type: (Modbus, int, int, int) -> ReadInputRegistersResponse
+
+    logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))
+
+    return modbus.read_input_registers(
+        address=base_address,
+        count=count,
+        unit=slave_address)
+
+
+def read_battery_status(modbus, battery):
+    # type: (Modbus, Battery) -> BatteryStatus
+    """
+    Read the modbus registers containing the battery's status info.
+    """
+
+    logging.debug('reading battery status')
+
+    with modbus:
+        data = read_modbus_registers(modbus, battery.slave_address)
+        return BatteryStatus(battery, data.registers)
+
+
+def publish_values_on_dbus(service, battery_signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    publish_individuals(service, battery_signals, battery_statuses)
+    publish_aggregates(service, battery_signals, battery_statuses)
+
+
+def publish_aggregates(service, signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    for s in signals:
+        if s.aggregate is None:
+            continue
+        values = [s.get_value(battery_status) for battery_status in battery_statuses]
+        value = s.aggregate(values)
+        service.own_properties.set(s.dbus_path, value, s.unit)
+
+
+def publish_individuals(service, signals, battery_statuses):
+    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()
+
+    for signal in signals:
+        for battery_status in battery_statuses:
+            address = battery_status.battery.slave_address
+            dbus_path = '/_Battery/' + str(address) + signal.dbus_path
+            value = signal.get_value(battery_status)
+            service.own_properties.set(dbus_path, value, signal.unit)
+
+
+def publish_service_signals(service, signals):
+    # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn
+
+    for signal in signals:
+        service.own_properties.set(signal.dbus_path, signal.value, signal.unit)
+
+
+def upload_status_to_innovenergy(sock, statuses):
+    # type: (socket, Iterable[BatteryStatus]) -> bool
+
+    logging.debug('upload status')
+
+    try:
+        for s in statuses:
+            sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT))
+    except:
+        logging.debug('FAILED')
+        return False
+    else:
+        return True
+
+
+def print_usage():
+    print('Usage: ' + __file__ + ' <tty device>')
+    print('Example: ' + __file__ + ' ttyUSB0')
+
+
+def parse_cmdline_args(argv):
+    # type: (List[str]) -> str
+
+    if len(argv) == 0:
+        logging.info('missing command line argument for tty device')
+        print_usage()
+        sys.exit(1)
+
+    return argv[0]
+
+
+def
reset_batteries(modbus, batteries): + # type: (Modbus, Iterable[Battery]) -> NoReturn + + logging.info('Resetting batteries...') + + for battery in batteries: + + result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address) + + # expecting a ModbusIOException (timeout) + # BMS can no longer reply because it is already reset + success = isinstance(result, ModbusIOException) + + outcome = 'successfully' if success else 'FAILED to' + logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome)) + + logging.info('Shutting down fz-sonick driver') + exit(0) + + +alive = True # global alive flag, watchdog_task clears it, update_task sets it + +start_time = time.time() +def create_update_task(modbus, service, batteries): + global start_time + # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool] + """ + Creates an update task which runs the main update function + and resets the alive flag + """ + _socket = init_udp_socket() + _signals = signals.init_battery_signals() + + csv_signals = signals.create_csv_signals(first(batteries).firmware_version) + node_numbers = [battery.slave_address for battery in batteries] + warnings_signals, alarm_signals = signals.read_warning_and_alarm_flags() + current_warnings = {} + current_alarms = {} + + def update_task(): + # type: () -> bool + global alive, start_time + + logging.debug('starting update cycle') + + if service.own_properties.get('/ResetBatteries').value == 1: + reset_batteries(modbus, batteries) + + statuses = [read_battery_status(modbus, battery) for battery in batteries] + + # Iterate over each node and signal to create rows in the new format + for i, node in enumerate(node_numbers): + for s in warnings_signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + current_warnings[signal_name] = value + for s in alarm_signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + current_alarms[signal_name] = value + + status_message, alarms_number_list, warnings_number_list = update_state_from_dictionaries(current_warnings, current_alarms, node_numbers) + + publish_values_on_dbus(service, _signals, statuses) + + elapsed_time = time.time() - start_time + if elapsed_time >= 30: + create_csv_files(csv_signals, statuses, node_numbers, alarms_number_list, warnings_number_list) + start_time = time.time() + print("Elapsed time: {:.2f} seconds".format(elapsed_time)) + + upload_status_to_innovenergy(_socket, statuses) + + logging.debug('finished update cycle\n') + + alive = True + + return True + + return update_task + +def manage_csv_files(directory_path, max_files=20): + csv_files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))] + csv_files.sort(key=lambda x: os.path.getctime(os.path.join(directory_path, x))) + # Remove oldest files if exceeds maximum + while len(csv_files) > max_files: + file_to_delete = os.path.join(directory_path, csv_files.pop(0)) + os.remove(file_to_delete) +def insert_id(path, id_number): + parts = path.split("/") + insert_position = parts.index("Devices") + 1 + parts.insert(insert_position, str(id_number)) + return "/".join(parts) + +def create_csv_files(signals, statuses, node_numbers, alarms_number_list, warnings_number_list): + timestamp = int(time.time()) + if timestamp % 2 != 0: + timestamp-=1 + if not os.path.exists(CSV_DIR): + os.makedirs(CSV_DIR) + csv_filename = "{}.csv".format(timestamp) + csv_path = os.path.join(CSV_DIR, csv_filename) + + with open(csv_path, 'ab') as csvfile: + 
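+        # 'ab' = append in binary mode: the Python 2 csv module expects a
+        # binary-mode file object, and appending preserves any rows already
+        # written for this timestamp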
csv_writer = csv.writer(csvfile, delimiter=';') + nodes_config_path = "/Config/Devices/BatteryNodes" + nodes_list = ",".join(str(node) for node in node_numbers) + config_row = [nodes_config_path, nodes_list, ""] + csv_writer.writerow(config_row) + for i, node in enumerate(node_numbers): + csv_writer.writerow(["/Battery/Devices/{}/Alarms".format(str(i+1)), alarms_number_list[i], ""]) + csv_writer.writerow(["/Battery/Devices/{}/Warnings".format(str(i+1)), warnings_number_list[i], ""]) + for s in signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + row_values = [signal_name, value, s.get_text] + csv_writer.writerow(row_values) + + csv_data = read_csv_as_string(csv_path) + + if csv_data is None: + print("error while reading csv as string") + return + + # zip-comp additions + compressed_csv = compress_csv_data(csv_data) + compressed_filename = "{}.csv".format(timestamp) + + response = s3_config.create_put_request(compressed_filename, compressed_csv) + if response.status_code == 200: + #os.remove(csv_path) + print("Success") + else: + failed_dir = os.path.join(CSV_DIR, "failed") + if not os.path.exists(failed_dir): + os.makedirs(failed_dir) + failed_path = os.path.join(failed_dir, csv_filename) + os.rename(csv_path, failed_path) + print("Uploading failed") + manage_csv_files(failed_dir, 10) + + manage_csv_files(CSV_DIR) + + +def create_watchdog_task(main_loop): + # type: (DBusGMainLoop) -> Callable[[],bool] + """ + Creates a Watchdog task that monitors the alive flag. + The watchdog kills the main loop if the alive flag is not periodically reset by the update task. + Who watches the watchdog? + """ + def watchdog_task(): + # type: () -> bool + + global alive + + if alive: + logging.debug('watchdog_task: update_task is alive') + alive = False + return True + else: + logging.info('watchdog_task: killing main loop because update_task is no longer alive') + main_loop.quit() + return False + + return watchdog_task + + +def main(argv): + # type: (List[str]) -> () + print("INSIDE DBUS SONICK") + logging.basicConfig(level=cfg.LOG_LEVEL) + logging.info('starting ' + __file__) + + tty = parse_cmdline_args(argv) + modbus = init_modbus(tty) + + batteries = identify_batteries(modbus) + + if len(batteries) <= 0: + sys.exit(2) + + service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty) + + service.own_properties.set('/ResetBatteries', value=False, writable=True) # initial value = False + + main_loop = gobject.MainLoop() + + service_signals = signals.init_service_signals(batteries) + publish_service_signals(service, service_signals) + + update_task = create_update_task(modbus, service, batteries) + update_task() # run it right away, so that all props are initialized before anyone can ask + watchdog_task = create_watchdog_task(main_loop) + + gobject.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority = gobject.PRIORITY_LOW) # add watchdog first + gobject.timeout_add(cfg.UPDATE_INTERVAL, update_task, priority = gobject.PRIORITY_LOW) # call update once every update_interval + + logging.info('starting gobject.MainLoop') + main_loop.run() + logging.info('gobject.MainLoop was shut down') + + sys.exit(0xFF) # reaches this only on error + + +main(sys.argv[1:]) diff --git a/python/dbus-fz-sonick-48tl-with-s3/dbus_types.py b/python/dbus-fz-sonick-48tl-with-s3/dbus_types.py new file mode 100644 index 000000000..a5fcc6e8a --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/dbus_types.py @@ -0,0 +1,156 @@ +from logging import getLogger + +import dbus + + +_log = 
getLogger(__name__) + +# noinspection PyUnreachableCode +if False: + from typing import Any, Union, Dict + DbusString = Union[dbus.String, dbus.UTF8String, dbus.ObjectPath, dbus.Signature] + DbusInt = Union[dbus.Int16, dbus.Int32, dbus.Int64] + DbusDouble = dbus.Double + DbusBool = dbus.Boolean + + DbusStringVariant = DbusString # TODO: variant_level constraint ? + DbusIntVariant = DbusInt + DbusDoubleVariant = DbusDouble + DbusBoolVariant = DbusBool + + DbusValue = Union[DbusString, DbusInt, DbusDouble, DbusBool, DBUS_NONE] + DbusVariant = Union[DbusStringVariant, DbusIntVariant, DbusDoubleVariant, DbusBoolVariant, DBUS_NONE] + + DbusTextDict = dbus.Dictionary + DbusVariantDict = dbus.Dictionary + + DbusType = Union[DbusValue, DbusVariant, DbusVariantDict, DbusTextDict] + +DBUS_NONE = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) # DEFINED by victron + +MAX_INT16 = 2 ** 15 - 1 +MAX_INT32 = 2 ** 31 - 1 + + +def dbus_uint32(value): + # type: (int) -> dbus.UInt32 + if value < 0: + raise Exception('cannot convert negative value to UInt32') + + return dbus.UInt32(value) + + +def dbus_int(value): + # type: (Union[int, long]) -> Union[dbus.Int16, dbus.Int32, dbus.Int64] + abs_value = abs(value) + if abs_value < MAX_INT16: + return dbus.Int16(value) + elif abs_value < MAX_INT32: + return dbus.Int32(value) + else: + return dbus.Int64(value) + + +def dbus_string(value): + # type: (Union[str, unicode]) -> DbusString + if isinstance(value, unicode): + return dbus.UTF8String(value) + else: + return dbus.String(value) + + +def dbus_double(value): + # type: (float) -> DbusDouble + return dbus.Double(value) + + +def dbus_bool(value): + # type: (bool) -> DbusBool + return dbus.Boolean(value) + + +# VARIANTS + +def dbus_int_variant(value): + # type: (Union[int, long]) -> DbusIntVariant + abs_value = abs(value) + if abs_value < MAX_INT16: + return dbus.Int16(value, variant_level=1) + elif abs_value < MAX_INT32: + return dbus.Int32(value, variant_level=1) + else: + return dbus.Int64(value, variant_level=1) + + +def dbus_string_variant(value): + # type: (Union[str, unicode]) -> DbusStringVariant + if isinstance(value, unicode): + return dbus.UTF8String(value, variant_level=1) + else: + return dbus.String(value, variant_level=1) + + +def dbus_double_variant(value): + # type: (float) -> DbusDoubleVariant + return dbus.Double(value, variant_level=1) + + +def dbus_bool_variant(value): + # type: (bool) -> DbusBoolVariant + return dbus.Boolean(value, variant_level=1) + + +def dbus_variant(value): + # type: (Any) -> DbusVariant + + if value is None: + return DBUS_NONE + if isinstance(value, float): + return dbus_double_variant(value) + if isinstance(value, bool): + return dbus_bool_variant(value) + if isinstance(value, (int, long)): + return dbus_int_variant(value) + if isinstance(value, (str, unicode)): + return dbus_string_variant(value) + # TODO: container types + if isinstance(value, list): + # Convert each element in the list to a dbus variant + dbus_array = [dbus_variant(item) for item in value] + if not dbus_array: + return dbus.Array([], signature='v') # Empty array with variant type + first_element = value[0] + if isinstance(first_element, float): + signature = 'd' + elif isinstance(first_element, bool): + signature = 'b' + elif isinstance(first_element, (int, long)): + signature = 'x' + elif isinstance(first_element, (str, unicode)): + signature = 's' + else: + signature = 'v' # default to variant if unknown + return dbus.Array(dbus_array, signature=signature) + + raise 
TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value)) + + +def dbus_value(value): + # type: (Any) -> DbusVariant + + if value is None: + return DBUS_NONE + if isinstance(value, float): + return dbus_double(value) + if isinstance(value, bool): + return dbus_bool(value) + if isinstance(value, (int, long)): + return dbus_int(value) + if isinstance(value, (str, unicode)): + return dbus_string_variant(value) + # TODO: container types + + raise TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value)) + + + diff --git a/python/dbus-fz-sonick-48tl-with-s3/ext/velib_python/ve_utils.py b/python/dbus-fz-sonick-48tl-with-s3/ext/velib_python/ve_utils.py new file mode 100644 index 000000000..459584bab --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/ext/velib_python/ve_utils.py @@ -0,0 +1,202 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from traceback import print_exc +from os import _exit as os_exit +from os import statvfs +import logging +from functools import update_wrapper +import dbus +logger = logging.getLogger(__name__) + +VEDBUS_INVALID = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) + +# Use this function to make sure the code quits on an unexpected exception. Make sure to use it +# when using gobject.idle_add and also gobject.timeout_add. +# Without this, the code will just keep running, since gobject does not stop the mainloop on an +# exception. +# Example: gobject.idle_add(exit_on_error, myfunc, arg1, arg2) +def exit_on_error(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except: + try: + print 'exit_on_error: there was an exception. Printing stacktrace will be tryed and then exit' + print_exc() + except: + pass + + # sys.exit() is not used, since that throws an exception, which does not lead to a program + # halt when used in a dbus callback, see connection.py in the Python/Dbus libraries, line 230. + os_exit(1) + + +__vrm_portal_id = None +def get_vrm_portal_id(): + # For the CCGX, the definition of the VRM Portal ID is that it is the mac address of the onboard- + # ethernet port (eth0), stripped from its colons (:) and lower case. + + # nice coincidence is that this also works fine when running on your (linux) development computer. + + global __vrm_portal_id + + if __vrm_portal_id: + return __vrm_portal_id + + # Assume we are on linux + import fcntl, socket, struct + + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', 'eth0'[:15])) + __vrm_portal_id = ''.join(['%02x' % ord(char) for char in info[18:24]]) + + return __vrm_portal_id + + +# See VE.Can registers - public.docx for definition of this conversion +def convert_vreg_version_to_readable(version): + def str_to_arr(x, length): + a = [] + for i in range(0, len(x), length): + a.append(x[i:i+length]) + return a + + x = "%x" % version + x = x.upper() + + if len(x) == 5 or len(x) == 3 or len(x) == 1: + x = '0' + x + + a = str_to_arr(x, 2); + + # remove the first 00 if there are three bytes and it is 00 + if len(a) == 3 and a[0] == '00': + a.remove(0); + + # if we have two or three bytes now, and the first character is a 0, remove it + if len(a) >= 2 and a[0][0:1] == '0': + a[0] = a[0][1]; + + result = '' + for item in a: + result += ('.' 
if result != '' else '') + item + + + result = 'v' + result + + return result + + +def get_free_space(path): + result = -1 + + try: + s = statvfs(path) + result = s.f_frsize * s.f_bavail # Number of free bytes that ordinary users + except Exception, ex: + logger.info("Error while retrieving free space for path %s: %s" % (path, ex)) + + return result + + +def get_load_averages(): + c = read_file('/proc/loadavg') + return c.split(' ')[:3] + + +# Returns False if it cannot find a machine name. Otherwise returns the string +# containing the name +def get_machine_name(): + c = read_file('/proc/device-tree/model') + + if c != False: + return c.strip('\x00') + + return read_file('/etc/venus/machine') + + +# Returns False if it cannot open the file. Otherwise returns its rstripped contents +def read_file(path): + content = False + + try: + with open(path, 'r') as f: + content = f.read().rstrip() + except Exception, ex: + logger.debug("Error while reading %s: %s" % (path, ex)) + + return content + + +def wrap_dbus_value(value): + if value is None: + return VEDBUS_INVALID + if isinstance(value, float): + return dbus.Double(value, variant_level=1) + if isinstance(value, bool): + return dbus.Boolean(value, variant_level=1) + if isinstance(value, int): + return dbus.Int32(value, variant_level=1) + if isinstance(value, str): + return dbus.String(value, variant_level=1) + if isinstance(value, unicode): + return dbus.String(value, variant_level=1) + if isinstance(value, list): + if len(value) == 0: + # If the list is empty we cannot infer the type of the contents. So assume unsigned integer. + # A (signed) integer is dangerous, because an empty list of signed integers is used to encode + # an invalid value. + return dbus.Array([], signature=dbus.Signature('u'), variant_level=1) + return dbus.Array([wrap_dbus_value(x) for x in value], variant_level=1) + if isinstance(value, long): + return dbus.Int64(value, variant_level=1) + if isinstance(value, dict): + # Wrapping the keys of the dictionary causes D-Bus errors like: + # 'arguments to dbus_message_iter_open_container() were incorrect, + # assertion "(type == DBUS_TYPE_ARRAY && contained_signature && + # *contained_signature == DBUS_DICT_ENTRY_BEGIN_CHAR) || (contained_signature == NULL || + # _dbus_check_is_valid_signature (contained_signature))" failed in file ...' + return dbus.Dictionary({(k, wrap_dbus_value(v)) for k, v in value.items()}, variant_level=1) + return value + + +dbus_int_types = (dbus.Int32, dbus.UInt32, dbus.Byte, dbus.Int16, dbus.UInt16, dbus.UInt32, dbus.Int64, dbus.UInt64) + + +def unwrap_dbus_value(val): + """Converts D-Bus values back to the original type. For example if val is of type DBus.Double, + a float will be returned.""" + if isinstance(val, dbus_int_types): + return int(val) + if isinstance(val, dbus.Double): + return float(val) + if isinstance(val, dbus.Array): + v = [unwrap_dbus_value(x) for x in val] + return None if len(v) == 0 else v + if isinstance(val, (dbus.Signature, dbus.String)): + return unicode(val) + # Python has no byte type, so we convert to an integer. 
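+	# (e.g. dbus.Byte(84) is returned as the integer 84, not the character 'T')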
+ if isinstance(val, dbus.Byte): + return int(val) + if isinstance(val, dbus.ByteArray): + return "".join([str(x) for x in val]) + if isinstance(val, (list, tuple)): + return [unwrap_dbus_value(x) for x in val] + if isinstance(val, (dbus.Dictionary, dict)): + # Do not unwrap the keys, see comment in wrap_dbus_value + return dict([(x, unwrap_dbus_value(y)) for x, y in val.items()]) + if isinstance(val, dbus.Boolean): + return bool(val) + return val + +class reify(object): + """ Decorator to replace a property of an object with the calculated value, + to make it concrete. """ + def __init__(self, wrapped): + self.wrapped = wrapped + update_wrapper(self, wrapped) + def __get__(self, inst, objtype=None): + if inst is None: + return self + v = self.wrapped(inst) + setattr(inst, self.wrapped.__name__, v) + return v diff --git a/python/dbus-fz-sonick-48tl-with-s3/ext/velib_python/vedbus.py b/python/dbus-fz-sonick-48tl-with-s3/ext/velib_python/vedbus.py new file mode 100644 index 000000000..2dbed13e2 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/ext/velib_python/vedbus.py @@ -0,0 +1,496 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import dbus.service +import logging +import traceback +import os +import weakref +from ve_utils import wrap_dbus_value, unwrap_dbus_value + +# vedbus contains three classes: +# VeDbusItemImport -> use this to read data from the dbus, ie import +# VeDbusItemExport -> use this to export data to the dbus (one value) +# VeDbusService -> use that to create a service and export several values to the dbus + +# Code for VeDbusItemImport is copied from busitem.py and thereafter modified. +# All projects that used busitem.py need to migrate to this package. And some +# projects used to define there own equivalent of VeDbusItemExport. Better to +# use VeDbusItemExport, or even better the VeDbusService class that does it all for you. + +# TODOS +# 1 check for datatypes, it works now, but not sure if all is compliant with +# com.victronenergy.BusItem interface definition. See also the files in +# tests_and_examples. And see 'if type(v) == dbus.Byte:' on line 102. Perhaps +# something similar should also be done in VeDbusBusItemExport? +# 2 Shouldn't VeDbusBusItemExport inherit dbus.service.Object? +# 7 Make hard rules for services exporting data to the D-Bus, in order to make tracking +# changes possible. Does everybody first invalidate its data before leaving the bus? +# And what about before taking one object away from the bus, instead of taking the +# whole service offline? +# They should! And after taking one value away, do we need to know that someone left +# the bus? Or we just keep that value in invalidated for ever? Result is that we can't +# see the difference anymore between an invalidated value and a value that was first on +# the bus and later not anymore. See comments above VeDbusItemImport as well. +# 9 there are probably more todos in the code below. + +# Some thoughts with regards to the data types: +# +# Text from: http://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html#data-types +# --- +# Variants are represented by setting the variant_level keyword argument in the +# constructor of any D-Bus data type to a value greater than 0 (variant_level 1 +# means a variant containing some other data type, variant_level 2 means a variant +# containing a variant containing some other data type, and so on). If a non-variant +# is passed as an argument but introspection indicates that a variant is expected, +# it'll automatically be wrapped in a variant. 
+# --- +# +# Also the different dbus datatypes, such as dbus.Int32, and dbus.UInt32 are a subclass +# of Python int. dbus.String is a subclass of Python standard class unicode, etcetera +# +# So all together that explains why we don't need to explicitly convert back and forth +# between the dbus datatypes and the standard python datatypes. Note that all datatypes +# in python are objects. Even an int is an object. + +# The signature of a variant is 'v'. + +# Export ourselves as a D-Bus service. +class VeDbusService(object): + def __init__(self, servicename, bus=None): + # dict containing the VeDbusItemExport objects, with their path as the key. + self._dbusobjects = {} + self._dbusnodes = {} + + # dict containing the onchange callbacks, for each object. Object path is the key + self._onchangecallbacks = {} + + # Connect to session bus whenever present, else use the system bus + self._dbusconn = bus or (dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus()) + + # make the dbus connection available to outside, could make this a true property instead, but ach.. + self.dbusconn = self._dbusconn + + # Register ourselves on the dbus, trigger an error if already in use (do_not_queue) + self._dbusname = dbus.service.BusName(servicename, self._dbusconn, do_not_queue=True) + + # Add the root item that will return all items as a tree + self._dbusnodes['/'] = self._create_tree_export(self._dbusconn, '/', self._get_tree_dict) + + logging.info("registered ourselves on D-Bus as %s" % servicename) + + def _get_tree_dict(self, path, get_text=False): + logging.debug("_get_tree_dict called for %s" % path) + r = {} + px = path + if not px.endswith('/'): + px += '/' + for p, item in self._dbusobjects.items(): + if p.startswith(px): + v = item.GetText() if get_text else wrap_dbus_value(item.local_get_value()) + r[p[len(px):]] = v + logging.debug(r) + return r + + # To force immediate deregistering of this dbus service and all its object paths, explicitly + # call __del__(). + def __del__(self): + for node in self._dbusnodes.values(): + node.__del__() + self._dbusnodes.clear() + for item in self._dbusobjects.values(): + item.__del__() + self._dbusobjects.clear() + if self._dbusname: + self._dbusname.__del__() # Forces call to self._bus.release_name(self._name), see source code + self._dbusname = None + + # @param callbackonchange function that will be called when this value is changed. First parameter will + # be the path of the object, second the new value. This callback should return + # True to accept the change, False to reject it. + def add_path(self, path, value, description="", writeable=False, + onchangecallback=None, gettextcallback=None): + + if onchangecallback is not None: + self._onchangecallbacks[path] = onchangecallback + + item = VeDbusItemExport( + self._dbusconn, path, value, description, writeable, + self._value_changed, gettextcallback, deletecallback=self._item_deleted) + + spl = path.split('/') + for i in range(2, len(spl)): + subPath = '/'.join(spl[:i]) + if subPath not in self._dbusnodes and subPath not in self._dbusobjects: + self._dbusnodes[subPath] = self._create_tree_export(self._dbusconn, subPath, self._get_tree_dict) + self._dbusobjects[path] = item + logging.debug('added %s with start value %s. 
Writeable is %s' % (path, value, writeable)) + + # Add the mandatory paths, as per victron dbus api doc + def add_mandatory_paths(self, processname, processversion, connection, + deviceinstance, productid, productname, firmwareversion, hardwareversion, connected): + self.add_path('/Mgmt/ProcessName', processname) + self.add_path('/Mgmt/ProcessVersion', processversion) + self.add_path('/Mgmt/Connection', connection) + + # Create rest of the mandatory objects + self.add_path('/DeviceInstance', deviceinstance) + self.add_path('/ProductId', productid) + self.add_path('/ProductName', productname) + self.add_path('/FirmwareVersion', firmwareversion) + self.add_path('/HardwareVersion', hardwareversion) + self.add_path('/Connected', connected) + + def _create_tree_export(self, bus, objectPath, get_value_handler): + return VeDbusTreeExport(bus, objectPath, get_value_handler) + + # Callback function that is called from the VeDbusItemExport objects when a value changes. This function + # maps the change-request to the onchangecallback given to us for this specific path. + def _value_changed(self, path, newvalue): + if path not in self._onchangecallbacks: + return True + + return self._onchangecallbacks[path](path, newvalue) + + def _item_deleted(self, path): + self._dbusobjects.pop(path) + for np in self._dbusnodes.keys(): + if np != '/': + for ip in self._dbusobjects: + if ip.startswith(np + '/'): + break + else: + self._dbusnodes[np].__del__() + self._dbusnodes.pop(np) + + def __getitem__(self, path): + return self._dbusobjects[path].local_get_value() + + def __setitem__(self, path, newvalue): + self._dbusobjects[path].local_set_value(newvalue) + + def __delitem__(self, path): + self._dbusobjects[path].__del__() # Invalidates and then removes the object path + assert path not in self._dbusobjects + + def __contains__(self, path): + return path in self._dbusobjects + +""" +Importing basics: + - If when we power up, the D-Bus service does not exist, or it does exist and the path does not + yet exist, still subscribe to a signal: as soon as it comes online it will send a signal with its + initial value, which VeDbusItemImport will receive and use to update local cache. And, when set, + call the eventCallback. + - If when we power up, save it + - When using get_value, know that there is no difference between services (or object paths) that don't + exist and paths that are invalid (= empty array, see above). Both will return None. In case you do + really want to know ifa path exists or not, use the exists property. + - When a D-Bus service leaves the D-Bus, it will first invalidate all its values, and send signals + with that update, and only then leave the D-Bus. (or do we need to subscribe to the NameOwnerChanged- + signal!?!) To be discussed and make sure. Not really urgent, since all existing code that uses this + class already subscribes to the NameOwnerChanged signal, and subsequently removes instances of this + class. + +Read when using this class: +Note that when a service leaves that D-Bus without invalidating all its exported objects first, for +example because it is killed, VeDbusItemImport doesn't have a clue. So when using VeDbusItemImport, +make sure to also subscribe to the NamerOwnerChanged signal on bus-level. Or just use dbusmonitor, +because that takes care of all of that for you. +""" +class VeDbusItemImport(object): + ## Constructor + # @param bus the bus-object (SESSION or SYSTEM). 
+ # @param serviceName the dbus-service-name (string), for example 'com.victronenergy.battery.ttyO1' + # @param path the object-path, for example '/Dc/V' + # @param eventCallback function that you want to be called on a value change + # @param createSignal only set this to False if you use this function to one time read a value. When + # leaving it to True, make sure to also subscribe to the NameOwnerChanged signal + # elsewhere. See also note some 15 lines up. + def __init__(self, bus, serviceName, path, eventCallback=None, createsignal=True): + # TODO: is it necessary to store _serviceName and _path? Isn't it + # stored in the bus_getobjectsomewhere? + self._serviceName = serviceName + self._path = path + self._match = None + # TODO: _proxy is being used in settingsdevice.py, make a getter for that + self._proxy = bus.get_object(serviceName, path, introspect=False) + self.eventCallback = eventCallback + + assert eventCallback is None or createsignal == True + if createsignal: + self._match = self._proxy.connect_to_signal( + "PropertiesChanged", weak_functor(self._properties_changed_handler)) + + # store the current value in _cachedvalue. When it doesn't exists set _cachedvalue to + # None, same as when a value is invalid + self._cachedvalue = None + try: + v = self._proxy.GetValue() + except dbus.exceptions.DBusException: + pass + else: + self._cachedvalue = unwrap_dbus_value(v) + + def __del__(self): + if self._match != None: + self._match.remove() + self._match = None + self._proxy = None + + def _refreshcachedvalue(self): + self._cachedvalue = unwrap_dbus_value(self._proxy.GetValue()) + + ## Returns the path as a string, for example '/AC/L1/V' + @property + def path(self): + return self._path + + ## Returns the dbus service name as a string, for example com.victronenergy.vebus.ttyO1 + @property + def serviceName(self): + return self._serviceName + + ## Returns the value of the dbus-item. + # the type will be a dbus variant, for example dbus.Int32(0, variant_level=1) + # this is not a property to keep the name consistant with the com.victronenergy.busitem interface + # returns None when the property is invalid + def get_value(self): + return self._cachedvalue + + ## Writes a new value to the dbus-item + def set_value(self, newvalue): + r = self._proxy.SetValue(wrap_dbus_value(newvalue)) + + # instead of just saving the value, go to the dbus and get it. So we have the right type etc. + if r == 0: + self._refreshcachedvalue() + + return r + + ## Returns the text representation of the value. + # For example when the value is an enum/int GetText might return the string + # belonging to that enum value. Another example, for a voltage, GetValue + # would return a float, 12.0Volt, and GetText could return 12 VDC. + # + # Note that this depends on how the dbus-producer has implemented this. + def get_text(self): + return self._proxy.GetText() + + ## Returns true of object path exists, and false if it doesn't + @property + def exists(self): + # TODO: do some real check instead of this crazy thing. + r = False + try: + r = self._proxy.GetValue() + r = True + except dbus.exceptions.DBusException: + pass + + return r + + ## callback for the trigger-event. + # @param eventCallback the event-callback-function. + @property + def eventCallback(self): + return self._eventCallback + + @eventCallback.setter + def eventCallback(self, eventCallback): + self._eventCallback = eventCallback + + ## Is called when the value of the imported bus-item changes. 
+ # Stores the new value in our local cache, and calls the eventCallback, if set. + def _properties_changed_handler(self, changes): + if "Value" in changes: + changes['Value'] = unwrap_dbus_value(changes['Value']) + self._cachedvalue = changes['Value'] + if self._eventCallback: + # The reason behind this try/except is to prevent errors silently ending up the an error + # handler in the dbus code. + try: + self._eventCallback(self._serviceName, self._path, changes) + except: + traceback.print_exc() + os._exit(1) # sys.exit() is not used, since that also throws an exception + + +class VeDbusTreeExport(dbus.service.Object): + def __init__(self, bus, objectPath, get_value_handler): + dbus.service.Object.__init__(self, bus, objectPath) + self._get_value_handler = get_value_handler + logging.debug("VeDbusTreeExport %s has been created" % objectPath) + + def __del__(self): + # self._get_path() will raise an exception when retrieved after the call to .remove_from_connection, + # so we need a copy. + path = self._get_path() + if path is None: + return + self.remove_from_connection() + logging.debug("VeDbusTreeExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetValue(self): + value = self._get_value_handler(self._get_path()) + return dbus.Dictionary(value, signature=dbus.Signature('sv'), variant_level=1) + + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetText(self): + return self._get_value_handler(self._get_path(), True) + + def local_get_value(self): + return self._get_value_handler(self.path) + + +class VeDbusItemExport(dbus.service.Object): + ## Constructor of VeDbusItemExport + # + # Use this object to export (publish), values on the dbus + # Creates the dbus-object under the given dbus-service-name. + # @param bus The dbus object. + # @param objectPath The dbus-object-path. + # @param value Value to initialize ourselves with, defaults to None which means Invalid + # @param description String containing a description. Can be called over the dbus with GetDescription() + # @param writeable what would this do!? :). + # @param callback Function that will be called when someone else changes the value of this VeBusItem + # over the dbus. First parameter passed to callback will be our path, second the new + # value. This callback should return True to accept the change, False to reject it. + def __init__(self, bus, objectPath, value=None, description=None, writeable=False, + onchangecallback=None, gettextcallback=None, deletecallback=None): + dbus.service.Object.__init__(self, bus, objectPath) + self._onchangecallback = onchangecallback + self._gettextcallback = gettextcallback + self._value = value + self._description = description + self._writeable = writeable + self._deletecallback = deletecallback + + # To force immediate deregistering of this dbus object, explicitly call __del__(). + def __del__(self): + # self._get_path() will raise an exception when retrieved after the + # call to .remove_from_connection, so we need a copy. + path = self._get_path() + if path == None: + return + if self._deletecallback is not None: + self._deletecallback(path) + self.local_set_value(None) + self.remove_from_connection() + logging.debug("VeDbusItemExport %s has been removed" % path) + + def _get_path(self): + if len(self._locations) == 0: + return None + return self._locations[0][1] + + ## Sets the value. 
And in case the value is different from what it was, a signal + # will be emitted to the dbus. This function is to be used in the python code that + # is using this class to export values to the dbus. + # set value to None to indicate that it is Invalid + def local_set_value(self, newvalue): + if self._value == newvalue: + return + + self._value = newvalue + + changes = {} + changes['Value'] = wrap_dbus_value(newvalue) + changes['Text'] = self.GetText() + self.PropertiesChanged(changes) + + def local_get_value(self): + return self._value + + # ==== ALL FUNCTIONS BELOW THIS LINE WILL BE CALLED BY OTHER PROCESSES OVER THE DBUS ==== + + ## Dbus exported method SetValue + # Function is called over the D-Bus by other process. It will first check (via callback) if new + # value is accepted. And it is, stores it and emits a changed-signal. + # @param value The new value. + # @return completion-code When successful a 0 is return, and when not a -1 is returned. + @dbus.service.method('com.victronenergy.BusItem', in_signature='v', out_signature='i') + def SetValue(self, newvalue): + if not self._writeable: + return 1 # NOT OK + + newvalue = unwrap_dbus_value(newvalue) + + if newvalue == self._value: + return 0 # OK + + # call the callback given to us, and check if new value is OK. + if (self._onchangecallback is None or + (self._onchangecallback is not None and self._onchangecallback(self.__dbus_object_path__, newvalue))): + + self.local_set_value(newvalue) + return 0 # OK + + return 2 # NOT OK + + ## Dbus exported method GetDescription + # + # Returns the a description. + # @param language A language code (e.g. ISO 639-1 en-US). + # @param length Lenght of the language string. + # @return description + @dbus.service.method('com.victronenergy.BusItem', in_signature='si', out_signature='s') + def GetDescription(self, language, length): + return self._description if self._description is not None else 'No description given' + + ## Dbus exported method GetValue + # Returns the value. + # @return the value when valid, and otherwise an empty array + @dbus.service.method('com.victronenergy.BusItem', out_signature='v') + def GetValue(self): + return wrap_dbus_value(self._value) + + ## Dbus exported method GetText + # Returns the value as string of the dbus-object-path. + # @return text A text-value. '---' when local value is invalid + @dbus.service.method('com.victronenergy.BusItem', out_signature='s') + def GetText(self): + if self._value is None: + return '---' + + # Default conversion from dbus.Byte will get you a character (so 'T' instead of '84'), so we + # have to convert to int first. Note that if a dbus.Byte turns up here, it must have come from + # the application itself, as all data from the D-Bus should have been unwrapped by now. + if self._gettextcallback is None and type(self._value) == dbus.Byte: + return str(int(self._value)) + + if self._gettextcallback is None and self.__dbus_object_path__ == '/ProductId': + return "0x%X" % self._value + + if self._gettextcallback is None: + return str(self._value) + + return self._gettextcallback(self.__dbus_object_path__, self._value) + + ## The signal that indicates that the value has changed. + # Other processes connected to this BusItem object will have subscribed to the + # event when they want to track our state. + @dbus.service.signal('com.victronenergy.BusItem', signature='a{sv}') + def PropertiesChanged(self, changes): + pass + +## This class behaves like a regular reference to a class method (eg. 
self.foo), but keeps a weak reference +## to the object which method is to be called. +## Use this object to break circular references. +class weak_functor: + def __init__(self, f): + self._r = weakref.ref(f.__self__) + self._f = weakref.ref(f.__func__) + + def __call__(self, *args, **kargs): + r = self._r() + f = self._f() + if r == None or f == None: + return + f(r, *args, **kargs) diff --git a/python/dbus-fz-sonick-48tl-with-s3/old_signals.py b/python/dbus-fz-sonick-48tl-with-s3/old_signals.py new file mode 100755 index 000000000..79bdc97a1 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/old_signals.py @@ -0,0 +1,547 @@ +# coding=utf-8 + +import config as cfg +from convert import mean, read_float, read_led_state, read_bool, count_bits, comma_separated, read_bitmap, return_in_list, first, read_hex_string +from data import BatterySignal, Battery, LedColor, ServiceSignal, BatteryStatus, LedState, CsvSignal + +# noinspection PyUnreachableCode +if False: + from typing import List, Iterable + + +def init_service_signals(batteries): + print("INSIDE INIT SERVICE SIGNALS") + # type: (List[Battery]) -> Iterable[ServiceSignal] + + n_batteries = len(batteries) + product_name = cfg.PRODUCT_NAME + ' x' + str(n_batteries) + + return [ + ServiceSignal('/NbOfBatteries', n_batteries), # TODO: nb of operational batteries + ServiceSignal('/Mgmt/ProcessName', __file__), + ServiceSignal('/Mgmt/ProcessVersion', cfg.SOFTWARE_VERSION), + ServiceSignal('/Mgmt/Connection', cfg.CONNECTION), + ServiceSignal('/DeviceInstance', cfg.DEVICE_INSTANCE), + ServiceSignal('/ProductName', product_name), + ServiceSignal('/ProductId', cfg.PRODUCT_ID), + ServiceSignal('/Connected', 1) + ] + + +def init_battery_signals(): + # type: () -> Iterable[BatterySignal] + print("START INIT SIGNALS") + read_voltage = read_float(register=999, scale_factor=0.01, offset=0) + read_current = read_float(register=1000, scale_factor=0.01, offset=-10000) + + read_led_amber = read_led_state(register=1004, led=LedColor.amber) + read_led_green = read_led_state(register=1004, led=LedColor.green) + read_led_blue = read_led_state(register=1004, led=LedColor.blue) + read_led_red = read_led_state(register=1004, led=LedColor.red) + + def read_power(status): + # type: (BatteryStatus) -> int + return int(read_current(status) * read_voltage(status)) + + def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int): + # type: (float, float, float, float) -> float + + dv = v_limit - v + di = dv / r_int + p_limit = v_limit * (i + di) + + return p_limit + + def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int): + # type: (float, float, float, float) -> float + + di = i_limit - i + dv = di * r_int + p_limit = i_limit * (v + dv) + + return p_limit + + def calc_max_charge_power(status): + # type: (BatteryStatus) -> int + n_strings = number_of_active_strings(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + v = read_voltage(status) + i = read_current(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) # p_limit is normally positive here (signed) + p_limit = max(p_limit, 0) # charge power must not become negative + + return int(p_limit) + + def 
def calc_max_discharge_power(status): + n_strings = number_of_active_strings(status) + max_discharge_current = n_strings*cfg.I_MAX_PER_STRING + return int(max_discharge_current*read_voltage(status)) + + def read_battery_cold(status): + return \ + read_led_green(status) >= LedState.blinking_slow and \ + read_led_blue(status) >= LedState.blinking_slow + + def read_soc(status): + soc = read_float(register=1053, scale_factor=0.1, offset=0)(status) + + # if the SOC is 100 but EOC is not yet reached, report 99.9 instead of 100 + if soc > 99.9 and not read_eoc_reached(status): + return 99.9 + if soc >= 99.9 and read_eoc_reached(status): + return 100 + + return soc + + def hex_string_to_ascii(hex_string): + # Ensure the hex_string is correctly formatted without spaces + hex_string = hex_string.replace(" ", "") + # Convert every two characters (a byte) in the hex string to ASCII + ascii_string = ''.join([chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2)]) + return ascii_string + + battery_status_reader = read_hex_string(1060,2) + + def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + + read_limb_bitmap = read_bitmap(1059) + + def interpret_limb_bitmap(bitmap_value): + # Each bit flags one disabled string: 0b00000 means all 5 strings are active, + # 0b00001 means string 1 is disabled, and so on. Returns the number of disabled strings. + string1_disabled = int((bitmap_value & 0b00001) != 0) + string2_disabled = int((bitmap_value & 0b00010) != 0) + string3_disabled = int((bitmap_value & 0b00100) != 0) + string4_disabled = int((bitmap_value & 0b01000) != 0) + string5_disabled = int((bitmap_value & 0b10000) != 0) + n_limb_strings = string1_disabled+string2_disabled+string3_disabled+string4_disabled+string5_disabled + return n_limb_strings + + def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap(status)) + + def number_of_active_strings(status): + return cfg.NUM_OF_STRINGS_PER_BATTERY - limp_strings_value(status) + + def max_discharge_current(status): + return number_of_active_strings(status) * cfg.I_MAX_PER_STRING + + def max_charge_current(status): + return status.battery.ampere_hours/2 + + def read_switch_closed(status): + value = read_bool(base_register=1013, bit=0)(status) + if value: + return False + return True + + def read_alarm_out_active(status): + value = read_bool(base_register=1013, bit=1)(status) + if value: + return False + return True + + def read_aux_relay(status): + value = read_bool(base_register=1013, bit=4)(status) + if value: + return False + return True + + return [ + BatterySignal('/TimeToTOCRequest', max, read_float(register=1052)), + BatterySignal('/EOCReached', return_in_list, read_eoc_reached), + BatterySignal('/NumOfLimbStrings', return_in_list, limp_strings_value), + BatterySignal('/Dc/0/Voltage', mean, get_value=read_voltage, unit='V'), + BatterySignal('/Dc/0/Current', sum, get_value=read_current, unit='A'), + BatterySignal('/Dc/0/Power', sum, get_value=read_power, unit='W'), + + BatterySignal('/BussVoltage', mean, read_float(register=1001, scale_factor=0.01, offset=0), unit='V'), + BatterySignal('/Soc', mean, read_soc, unit='%'), + BatterySignal('/LowestSoc', min, read_float(register=1053, scale_factor=0.1, offset=0), 
unit='%'), + BatterySignal('/Dc/0/Temperature', mean, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'), + BatterySignal('/Dc/0/LowestTemperature', min, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'), + + #BatterySignal('/NumberOfWarningFlags', sum, count_bits(base_register=1005, nb_of_registers=3, nb_of_bits=47)), + BatterySignal('/WarningFlags/TaM1', return_in_list, read_bool(base_register=1005, bit=1)), + BatterySignal('/WarningFlags/TbM1', return_in_list, read_bool(base_register=1005, bit=4)), + BatterySignal('/WarningFlags/VBm1', return_in_list, read_bool(base_register=1005, bit=6)), + BatterySignal('/WarningFlags/VBM1', return_in_list, read_bool(base_register=1005, bit=8)), + BatterySignal('/WarningFlags/IDM1', return_in_list, read_bool(base_register=1005, bit=10)), + BatterySignal('/WarningFlags/vsm1', return_in_list, read_bool(base_register=1005, bit=22)), + BatterySignal('/WarningFlags/vsM1', return_in_list, read_bool(base_register=1005, bit=24)), + BatterySignal('/WarningFlags/iCM1', return_in_list, read_bool(base_register=1005, bit=26)), + BatterySignal('/WarningFlags/iDM1', return_in_list, read_bool(base_register=1005, bit=28)), + BatterySignal('/WarningFlags/MID1', return_in_list, read_bool(base_register=1005, bit=30)), + BatterySignal('/WarningFlags/BLPW', return_in_list, read_bool(base_register=1005, bit=32)), + BatterySignal('/WarningFlags/CCBF', return_in_list, read_bool(base_register=1005, bit=33)), + BatterySignal('/WarningFlags/Ah_W', return_in_list, read_bool(base_register=1005, bit=35)), + BatterySignal('/WarningFlags/MPMM', return_in_list, read_bool(base_register=1005, bit=38)), + #BatterySignal('/WarningFlags/TCMM', any, read_bool(base_register=1005, bit=39)), + BatterySignal('/WarningFlags/TCdi', return_in_list, read_bool(base_register=1005, bit=40)), + #BatterySignal('/WarningFlags/WMTO', any, read_bool(base_register=1005, bit=41)), + BatterySignal('/WarningFlags/LMPW', return_in_list, read_bool(base_register=1005, bit=44)), + #BatterySignal('/WarningFlags/CELL1', any, read_bool(base_register=1005, bit=46)), + BatterySignal('/WarningFlags/TOCW', return_in_list, read_bool(base_register=1005, bit=47)), + BatterySignal('/WarningFlags/BUSL', return_in_list, read_bool(base_register=1005, bit=49)), + + #BatterySignal('/NumberOfAlarmFlags', sum, count_bits(base_register=1009, nb_of_registers=3, nb_of_bits=47)), + BatterySignal('/AlarmFlags/Tam', return_in_list, read_bool(base_register=1005, bit=0)), + BatterySignal('/AlarmFlags/TaM2', return_in_list, read_bool(base_register=1005, bit=2)), + BatterySignal('/AlarmFlags/Tbm', return_in_list, read_bool(base_register=1005, bit=3)), + BatterySignal('/AlarmFlags/TbM2', return_in_list, read_bool(base_register=1005, bit=5)), + BatterySignal('/AlarmFlags/VBm2', return_in_list, read_bool(base_register=1005, bit=7)), + BatterySignal('/AlarmFlags/VBM2', return_in_list, read_bool(base_register=1005, bit=9)), + BatterySignal('/AlarmFlags/IDM2', return_in_list, read_bool(base_register=1005, bit=11)), + BatterySignal('/AlarmFlags/ISOB', return_in_list, read_bool(base_register=1005, bit=12)), + BatterySignal('/AlarmFlags/MSWE', return_in_list, read_bool(base_register=1005, bit=13)), + BatterySignal('/AlarmFlags/FUSE', return_in_list, read_bool(base_register=1005, bit=14)), + BatterySignal('/AlarmFlags/HTRE', return_in_list, read_bool(base_register=1005, bit=15)), + BatterySignal('/AlarmFlags/TCPE', return_in_list, read_bool(base_register=1005, bit=16)), + BatterySignal('/AlarmFlags/STRE', 
return_in_list, read_bool(base_register=1005, bit=17)), + BatterySignal('/AlarmFlags/CME', return_in_list, read_bool(base_register=1005, bit=18)), + BatterySignal('/AlarmFlags/HWFL', return_in_list, read_bool(base_register=1005, bit=19)), + BatterySignal('/AlarmFlags/HWEM', return_in_list, read_bool(base_register=1005, bit=20)), + BatterySignal('/AlarmFlags/ThM', return_in_list, read_bool(base_register=1005, bit=21)), + #BatterySignal('/AlarmFlags/vsm1', any, read_bool(base_register=1005, bit=22)), + BatterySignal('/AlarmFlags/vsm2', return_in_list, read_bool(base_register=1005, bit=23)), + BatterySignal('/AlarmFlags/vsM2', return_in_list, read_bool(base_register=1005, bit=25)), + BatterySignal('/AlarmFlags/iCM2', return_in_list, read_bool(base_register=1005, bit=27)), + BatterySignal('/AlarmFlags/iDM2', return_in_list, read_bool(base_register=1005, bit=29)), + BatterySignal('/AlarmFlags/MID2', return_in_list, read_bool(base_register=1005, bit=31)), + #BatterySignal('/AlarmFlags/CCBF', any, read_bool(base_register=1005, bit=33)), + #BatterySignal('/AlarmFlags/AhFL', any, read_bool(base_register=1005, bit=34)), + #BatterySignal('/AlarmFlags/TbCM', any, read_bool(base_register=1005, bit=36)), + #BatterySignal('/AlarmFlags/BRNF', any, read_bool(base_register=1005, bit=37)), + BatterySignal('/AlarmFlags/HTFS', return_in_list, read_bool(base_register=1005, bit=42)), + BatterySignal('/AlarmFlags/DATA', return_in_list, read_bool(base_register=1005, bit=43)), + BatterySignal('/AlarmFlags/LMPA', return_in_list, read_bool(base_register=1005, bit=45)), + BatterySignal('/AlarmFlags/HEBT', return_in_list, read_bool(base_register=1005, bit=46)), + #BatterySignal('/AlarmFlags/bit47AlarmDummy', any,read_bool(base_register=1005, bit=47)), + BatterySignal('/AlarmFlags/CURM', return_in_list, read_bool(base_register=1005, bit=48)), + + BatterySignal('/Diagnostics/LedStatus/Red', first, read_led_red), + BatterySignal('/Diagnostics/LedStatus/Blue', first, read_led_blue), + BatterySignal('/Diagnostics/LedStatus/Green', first, read_led_green), + BatterySignal('/Diagnostics/LedStatus/Amber', first, read_led_amber), + + BatterySignal('/Diagnostics/IoStatus/MainSwitchClosed', return_in_list, read_switch_closed), + BatterySignal('/Diagnostics/IoStatus/AlarmOutActive', return_in_list, read_alarm_out_active), + BatterySignal('/Diagnostics/IoStatus/InternalFanActive', return_in_list, read_bool(base_register=1013, bit=2)), + BatterySignal('/Diagnostics/IoStatus/VoltMeasurementAllowed', return_in_list, read_bool(base_register=1013, bit=3)), + BatterySignal('/Diagnostics/IoStatus/AuxRelay', return_in_list, read_aux_relay), + BatterySignal('/Diagnostics/IoStatus/RemoteState', return_in_list, read_bool(base_register=1013, bit=5)), + BatterySignal('/Diagnostics/IoStatus/RiscOn', return_in_list, read_bool(base_register=1013, bit=6)), + + BatterySignal('/IoStatus/BatteryCold', any, read_battery_cold), + + # see protocol doc page 7 + BatterySignal('/Info/MaxDischargeCurrent', sum, max_discharge_current, unit='A'), + BatterySignal('/Info/MaxChargeCurrent', sum, max_charge_current, unit='A'), + BatterySignal('/Info/MaxChargeVoltage', min, lambda bs: bs.battery.v_max, unit='V'), + BatterySignal('/Info/MinDischargeVoltage', max, lambda bs: bs.battery.v_min, unit='V'), + BatterySignal('/Info/BatteryLowVoltage' , max, lambda bs: bs.battery.v_min-2, unit='V'), + BatterySignal('/Info/NumberOfStrings', sum, number_of_active_strings), + + BatterySignal('/Info/MaxChargePower', sum, calc_max_charge_power), + 
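        # The two current limits above come from different places (one figure is
        # assumed, not from the source): discharge is limited per string at
        # I_MAX_PER_STRING = 15 A, charge at C/2. For one healthy battery of
        # 5 strings x 40 Ah, assuming battery.ampere_hours = 200:
        #
        #   MaxDischargeCurrent = 5 * 15 A   = 75 A
        #   MaxChargeCurrent    = 200 Ah / 2 = 100 A
        #
        # 'sum' then aggregates each limit over all batteries on the bus.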
BatterySignal('/Info/MaxDischargePower', sum, calc_max_discharge_power), + + BatterySignal('/FirmwareVersion', comma_separated, lambda bs: bs.battery.firmware_version), + BatterySignal('/HardwareVersion', comma_separated, lambda bs: bs.battery.hardware_version), + BatterySignal('/BmsVersion', comma_separated, lambda bs: bs.battery.bms_version) + + ] + + +def create_csv_signals(firmware_version): + read_voltage = read_float(register=999, scale_factor=0.01, offset=0) + read_current = read_float(register=1000, scale_factor=0.01, offset=-10000) + read_limb_bitmap = read_bitmap(1059) + + def read_power(status): + return int(read_current(status) * read_voltage(status)) + + def string1_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00001) != 0) + + def string2_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00010) != 0) + + def string3_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00100) != 0) + + def string4_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b01000) != 0) + + def string5_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b10000) != 0) + + def interpret_limb_bitmap(bitmap_value): + # Each bit flags one disabled string: 0b00000 means all 5 strings are active, + # 0b00001 means string 1 is disabled, and so on. Returns the number of disabled strings. + string1_disabled = int((bitmap_value & 0b00001) != 0) + string2_disabled = int((bitmap_value & 0b00010) != 0) + string3_disabled = int((bitmap_value & 0b00100) != 0) + string4_disabled = int((bitmap_value & 0b01000) != 0) + string5_disabled = int((bitmap_value & 0b10000) != 0) + n_limb_strings = string1_disabled+string2_disabled+string3_disabled+string4_disabled+string5_disabled + return n_limb_strings + + + def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap(status)) + + def calc_max_charge_power(status): + # type: (BatteryStatus) -> int + n_strings = cfg.NUM_OF_STRINGS_PER_BATTERY-limp_strings_value(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + v = read_voltage(status) + i = read_current(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) # p_limit is normally positive here (signed) + p_limit = max(p_limit, 0) # charge power must not become negative + + return int(p_limit) + + def calc_max_discharge_power(status): + n_strings = cfg.NUM_OF_STRINGS_PER_BATTERY-limp_strings_value(status) + max_discharge_current = n_strings*cfg.I_MAX_PER_STRING + return int(max_discharge_current*read_voltage(status)) + + total_current = read_float(register=1062, scale_factor=0.01, offset=-10000) + + def read_total_current(status): + return total_current(status) + + def read_heating_current(status): + return total_current(status) - read_current(status) + + def read_heating_power(status): + return read_voltage(status) * read_heating_current(status) + + soc_ah = read_float(register=1002, scale_factor=0.1, offset=-10000) + + 
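    # Sanity sketch for the derived heating signals above (not part of the
    # driver; the readings are assumed, purely illustrative): register 1062 is
    # the total current drawn from the DC bus, register 1000 the current into
    # the cells, so the difference is what the built-in heaters draw.
    def _heating_example(v=51.8, i_bus=6.5, i_cells=4.0):
        i_heat = i_bus - i_cells   # 2.5 A drawn by the heaters
        return v * i_heat          # ~129.5 W of heating power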
def read_soc_ah(status): + return soc_ah(status) + + def return_led_state(status, color): + led_state = read_led_state(register=1004, led=color)(status) + if led_state == LedState.blinking_fast or led_state == LedState.blinking_slow: + return "Blinking" + elif led_state == LedState.on: + return "On" + elif led_state == LedState.off: + return "Off" + return "Unknown" + + def return_led_state_blue(status): + return return_led_state(status, LedColor.blue) + + def return_led_state_red(status): + return return_led_state(status, LedColor.red) + + def return_led_state_green(status): + return return_led_state(status, LedColor.green) + + def return_led_state_amber(status): + return return_led_state(status, LedColor.amber) + + def read_switch_closed(status): + value = read_bool(base_register=1013, bit=0)(status) + if value: + return False + return True + + def read_alarm_out_active(status): + value = read_bool(base_register=1013, bit=1)(status) + if value: + return False + return True + + def read_aux_relay(status): + value = read_bool(base_register=1013, bit=4)(status) + if value: + return False + return True + + battery_status_reader = read_hex_string(1060,2) + + def hex_string_to_ascii(hex_string): + # Ensure the hex_string is correctly formatted without spaces + hex_string = hex_string.replace(" ", "") + # Convert every two characters (a byte) in the hex string to ASCII + ascii_string = ''.join([chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2)]) + return ascii_string + + def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + + def read_serial_number(status): + serial_regs = [1055, 1056, 1057, 1058] + serial_parts = [] + for reg in serial_regs: + # reading each register as a single hex value + hex_value_fun = read_hex_string(reg, 1) + hex_value = hex_value_fun(status) + # append without spaces and leading zeros stripped if any + serial_parts.append(hex_value.replace(' ', '')) + # concatenate all parts to form the full serial number + serial_number = ''.join(serial_parts).rstrip('0') + return serial_number + + def time_since_toc_in_time_format(status): + time_in_minutes = read_float(register=1052)(status) + # Convert minutes to total seconds + total_seconds = int(time_in_minutes * 60) + # Calculate days, hours, minutes, and seconds + days = total_seconds // (24 * 3600) + total_seconds = total_seconds % (24 * 3600) + hours = total_seconds // 3600 + total_seconds %= 3600 + minutes = total_seconds // 60 + seconds = total_seconds % 60 + # Format the string to show days.hours:minutes:seconds + return "{}.{:02}:{:02}:{:02}".format(days, hours, minutes, seconds) + + def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int): + # type: (float, float, float, float) -> float + + dv = v_limit - v + di = dv / r_int + p_limit = v_limit * (i + di) + + return p_limit + + def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int): + # type: (float, float, float, float) -> float + + di = i_limit - i + dv = di * r_int + p_limit = i_limit * (v + dv) + + return p_limit + + + return [ + CsvSignal('/Battery/Devices/FwVersion', firmware_version), + CsvSignal('/Battery/Devices/Dc/Power', read_power, 'W'), + CsvSignal('/Battery/Devices/Dc/Voltage', read_voltage, 'V'), + CsvSignal('/Battery/Devices/Soc', read_float(register=1053, scale_factor=0.1, offset=0), '%'), + CsvSignal('/Battery/Devices/Temperatures/Cells/Average', read_float(register=1003, scale_factor=0.1, offset=-400), 'C'), + 
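        # Worked example for time_since_toc_in_time_format above: a register
        # value of 1523.5 minutes is 91410 s = 1 day, 1 h, 23 min, 30 s,
        # rendered as '1.01:23:30' in the 'days.hh:mm:ss' format.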
CsvSignal('/Battery/Devices/Dc/Current', read_current, 'A'), + CsvSignal('/Battery/Devices/BusCurrent', read_total_current, 'A'), + CsvSignal('/Battery/Devices/CellsCurrent', read_current, 'A'), + CsvSignal('/Battery/Devices/HeatingCurrent', read_heating_current, 'A'), + CsvSignal('/Battery/Devices/HeatingPower', read_heating_power, 'W'), + CsvSignal('/Battery/Devices/SOCAh', read_soc_ah), + CsvSignal('/Battery/Devices/Leds/Blue', return_led_state_blue), + CsvSignal('/Battery/Devices/Leds/Red', return_led_state_red), + CsvSignal('/Battery/Devices/Leds/Green', return_led_state_green), + CsvSignal('/Battery/Devices/Leds/Amber', return_led_state_amber), + CsvSignal('/Battery/Devices/BatteryStrings/String1Active', string1_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String2Active', string2_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String3Active', string3_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String4Active', string4_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String5Active', string5_disabled), + CsvSignal('/Battery/Devices/IoStatus/ConnectedToDcBus', read_switch_closed), + CsvSignal('/Battery/Devices/IoStatus/AlarmOutActive', read_alarm_out_active), + CsvSignal('/Battery/Devices/IoStatus/InternalFanActive', read_bool(base_register=1013, bit=2)), + CsvSignal('/Battery/Devices/IoStatus/VoltMeasurementAllowed', read_bool(base_register=1013, bit=3)), + CsvSignal('/Battery/Devices/IoStatus/AuxRelayBus', read_aux_relay), + CsvSignal('/Battery/Devices/IoStatus/RemoteStateActive', read_bool(base_register=1013, bit=5)), + CsvSignal('/Battery/Devices/IoStatus/RiscActive', read_bool(base_register=1013, bit=6)), + CsvSignal('/Battery/Devices/Eoc', read_eoc_reached), + CsvSignal('/Battery/Devices/SerialNumber', read_serial_number), + CsvSignal('/Battery/Devices/TimeSinceTOC', time_since_toc_in_time_format), + CsvSignal('/Battery/Devices/MaxChargePower', calc_max_charge_power), + CsvSignal('/Battery/Devices/MaxDischargePower', calc_max_discharge_power), + ] + + +def read_warning_and_alarm_flags(): + return [ + # Warnings + CsvSignal('/Battery/Devices/WarningFlags/TaM1', read_bool(base_register=1005, bit=1)), + CsvSignal('/Battery/Devices/WarningFlags/TbM1', read_bool(base_register=1005, bit=4)), + CsvSignal('/Battery/Devices/WarningFlags/VBm1', read_bool(base_register=1005, bit=6)), + CsvSignal('/Battery/Devices/WarningFlags/VBM1', read_bool(base_register=1005, bit=8)), + CsvSignal('/Battery/Devices/WarningFlags/IDM1', read_bool(base_register=1005, bit=10)), + CsvSignal('/Battery/Devices/WarningFlags/vsm1', read_bool(base_register=1005, bit=22)), + CsvSignal('/Battery/Devices/WarningFlags/vsM1', read_bool(base_register=1005, bit=24)), + CsvSignal('/Battery/Devices/WarningFlags/iCM1', read_bool(base_register=1005, bit=26)), + CsvSignal('/Battery/Devices/WarningFlags/iDM1', read_bool(base_register=1005, bit=28)), + CsvSignal('/Battery/Devices/WarningFlags/MID1', read_bool(base_register=1005, bit=30)), + CsvSignal('/Battery/Devices/WarningFlags/BLPW', read_bool(base_register=1005, bit=32)), + CsvSignal('/Battery/Devices/WarningFlags/CCBF', read_bool(base_register=1005, bit=33)), + CsvSignal('/Battery/Devices/WarningFlags/Ah_W', read_bool(base_register=1005, bit=35)), + CsvSignal('/Battery/Devices/WarningFlags/MPMM', read_bool(base_register=1005, bit=38)), + CsvSignal('/Battery/Devices/WarningFlags/TCdi', read_bool(base_register=1005, bit=40)), + CsvSignal('/Battery/Devices/WarningFlags/LMPW', read_bool(base_register=1005, bit=44)), + 
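        # How these flag bits map onto modbus registers: read_bool resolves
        # base_register + bit // 16 and tests bit % 16 inside that word, so e.g.
        #   bit  0 (Tam)  -> register 1005, bit  0
        #   bit 21 (ThM)  -> register 1006, bit  5
        #   bit 47 (TOCW) -> register 1007, bit 15
        #   bit 49 (BUSL) -> register 1008, bit  1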
CsvSignal('/Battery/Devices/WarningFlags/TOCW', read_bool(base_register=1005, bit=47)), + CsvSignal('/Battery/Devices/WarningFlags/BUSL', read_bool(base_register=1005, bit=49)), + ], [ + # Alarms + CsvSignal('/Battery/Devices/AlarmFlags/Tam', read_bool(base_register=1005, bit=0)), + CsvSignal('/Battery/Devices/AlarmFlags/TaM2', read_bool(base_register=1005, bit=2)), + CsvSignal('/Battery/Devices/AlarmFlags/Tbm', read_bool(base_register=1005, bit=3)), + CsvSignal('/Battery/Devices/AlarmFlags/TbM2', read_bool(base_register=1005, bit=5)), + CsvSignal('/Battery/Devices/AlarmFlags/VBm2', read_bool(base_register=1005, bit=7)), + CsvSignal('/Battery/Devices/AlarmFlags/VBM2', read_bool(base_register=1005, bit=9)), + CsvSignal('/Battery/Devices/AlarmFlags/IDM2', read_bool(base_register=1005, bit=11)), + CsvSignal('/Battery/Devices/AlarmFlags/ISOB', read_bool(base_register=1005, bit=12)), + CsvSignal('/Battery/Devices/AlarmFlags/MSWE', read_bool(base_register=1005, bit=13)), + CsvSignal('/Battery/Devices/AlarmFlags/FUSE', read_bool(base_register=1005, bit=14)), + CsvSignal('/Battery/Devices/AlarmFlags/HTRE', read_bool(base_register=1005, bit=15)), + CsvSignal('/Battery/Devices/AlarmFlags/TCPE', read_bool(base_register=1005, bit=16)), + CsvSignal('/Battery/Devices/AlarmFlags/STRE', read_bool(base_register=1005, bit=17)), + CsvSignal('/Battery/Devices/AlarmFlags/CME', read_bool(base_register=1005, bit=18)), + CsvSignal('/Battery/Devices/AlarmFlags/HWFL', read_bool(base_register=1005, bit=19)), + CsvSignal('/Battery/Devices/AlarmFlags/HWEM', read_bool(base_register=1005, bit=20)), + CsvSignal('/Battery/Devices/AlarmFlags/ThM', read_bool(base_register=1005, bit=21)), + CsvSignal('/Battery/Devices/AlarmFlags/vsm2', read_bool(base_register=1005, bit=23)), + CsvSignal('/Battery/Devices/AlarmFlags/vsM2', read_bool(base_register=1005, bit=25)), + CsvSignal('/Battery/Devices/AlarmFlags/iCM2', read_bool(base_register=1005, bit=27)), + CsvSignal('/Battery/Devices/AlarmFlags/iDM2', read_bool(base_register=1005, bit=29)), + CsvSignal('/Battery/Devices/AlarmFlags/MID2', read_bool(base_register=1005, bit=31)), + CsvSignal('/Battery/Devices/AlarmFlags/HTFS', read_bool(base_register=1005, bit=42)), + CsvSignal('/Battery/Devices/AlarmFlags/DATA', read_bool(base_register=1005, bit=43)), + CsvSignal('/Battery/Devices/AlarmFlags/LMPA', read_bool(base_register=1005, bit=45)), + CsvSignal('/Battery/Devices/AlarmFlags/HEBT', read_bool(base_register=1005, bit=46)), + CsvSignal('/Battery/Devices/AlarmFlags/CURM', read_bool(base_register=1005, bit=48)), + ] diff --git a/python/dbus-fz-sonick-48tl-with-s3/service/down b/python/dbus-fz-sonick-48tl-with-s3/service/down new file mode 100644 index 000000000..e69de29bb diff --git a/python/dbus-fz-sonick-48tl-with-s3/service/log/down b/python/dbus-fz-sonick-48tl-with-s3/service/log/down new file mode 100644 index 000000000..e69de29bb diff --git a/python/dbus-fz-sonick-48tl-with-s3/service/log/run b/python/dbus-fz-sonick-48tl-with-s3/service/log/run new file mode 100755 index 000000000..74e759d9b --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/service/log/run @@ -0,0 +1,3 @@ +#!/bin/sh +exec 2>&1 +exec multilog t s25000 n4 /var/log/dbus-fzsonick-48tl.TTY diff --git a/python/dbus-fz-sonick-48tl-with-s3/service/run b/python/dbus-fz-sonick-48tl-with-s3/service/run new file mode 100755 index 000000000..7f5301435 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/service/run @@ -0,0 +1,4 @@ +#!/bin/sh +exec 2>&1 + +exec softlimit -d 100000000 -s 1000000 -a 100000000 
/opt/innovenergy/dbus-fzsonick-48tl/start.sh TTY diff --git a/python/dbus-fz-sonick-48tl-with-s3/signals.py b/python/dbus-fz-sonick-48tl-with-s3/signals.py new file mode 100644 index 000000000..e35c95603 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/signals.py @@ -0,0 +1,374 @@ +# coding=utf-8 + +import config as cfg +from convert import mean, read_float, read_led_state, read_bool, count_bits, comma_separated, read_bitmap, return_in_list, first, read_hex_string +from data import BatterySignal, Battery, LedColor, ServiceSignal, BatteryStatus, LedState, CsvSignal + +# noinspection PyUnreachableCode +if False: + from typing import List, Iterable + +def read_voltage(): + return read_float(register=999, scale_factor=0.01, offset=0) + +def read_current(): + return read_float(register=1000, scale_factor=0.01, offset=-10000) + +def read_limb_bitmap(): + return read_bitmap(1059) + +def read_power(status): + return int(read_current()(status) * read_voltage()(status)) + +def interpret_limb_bitmap(bitmap_value): + string1_disabled = int((bitmap_value & 0b00001) != 0) + string2_disabled = int((bitmap_value & 0b00010) != 0) + string3_disabled = int((bitmap_value & 0b00100) != 0) + string4_disabled = int((bitmap_value & 0b01000) != 0) + string5_disabled = int((bitmap_value & 0b10000) != 0) + n_limb_strings = string1_disabled + string2_disabled + string3_disabled + string4_disabled + string5_disabled + return n_limb_strings + +def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap()(status)) + +def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int): + dv = v_limit - v + di = dv / r_int + p_limit = v_limit * (i + di) + return p_limit + +def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int): + di = i_limit - i + dv = di * r_int + p_limit = i_limit * (v + dv) + return p_limit + +def calc_max_charge_power(status): + n_strings = cfg.NUM_OF_STRINGS_PER_BATTERY - limp_strings_value(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + v = read_voltage()(status) + i = read_current()(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) + p_limit = max(p_limit, 0) + return int(p_limit) + +def calc_max_discharge_power(status): + n_strings = cfg.NUM_OF_STRINGS_PER_BATTERY - limp_strings_value(status) + max_discharge_current = n_strings * cfg.I_MAX_PER_STRING + return int(max_discharge_current * read_voltage()(status)) + +def read_switch_closed(status): + value = read_bool(base_register=1013, bit=0)(status) + if value: + return False + return True + +def read_alarm_out_active(status): + value = read_bool(base_register=1013, bit=1)(status) + if value: + return False + return True + +def read_aux_relay(status): + value = read_bool(base_register=1013, bit=4)(status) + if value: + return False + return True + +def hex_string_to_ascii(hex_string): + hex_string = hex_string.replace(" ", "") + ascii_string = ''.join([chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2)]) + return ascii_string + +def init_service_signals(batteries): + print("INSIDE INIT SERVICE SIGNALS") + n_batteries = len(batteries) + product_name = cfg.PRODUCT_NAME + ' x' + str(n_batteries) + return 
[ + ServiceSignal('/NbOfBatteries', n_batteries), + ServiceSignal('/Mgmt/ProcessName', __file__), + ServiceSignal('/Mgmt/ProcessVersion', cfg.SOFTWARE_VERSION), + ServiceSignal('/Mgmt/Connection', cfg.CONNECTION), + ServiceSignal('/DeviceInstance', cfg.DEVICE_INSTANCE), + ServiceSignal('/ProductName', product_name), + ServiceSignal('/ProductId', cfg.PRODUCT_ID), + ServiceSignal('/Connected', 1) + ] + +def init_battery_signals(): + print("START INIT SIGNALS") + battery_status_reader = read_hex_string(1060, 2) + + def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + + def read_battery_cold(status): + return \ + read_led_state(register=1004, led=LedColor.green)(status) >= LedState.blinking_slow and \ + read_led_state(register=1004, led=LedColor.blue)(status) >= LedState.blinking_slow + + def read_soc(status): + soc = read_float(register=1053, scale_factor=0.1, offset=0)(status) + if soc > 99.9 and not read_eoc_reached(status): + return 99.9 + if soc >= 99.9 and read_eoc_reached(status): + return 100 + return soc + + def number_of_active_strings(status): + return cfg.NUM_OF_STRINGS_PER_BATTERY - limp_strings_value(status) + + def max_discharge_current(status): + return number_of_active_strings(status) * cfg.I_MAX_PER_STRING + + def max_charge_current(status): + return status.battery.ampere_hours / 2 + + return [ + BatterySignal('/TimeToTOCRequest', max, read_float(register=1052)), + BatterySignal('/EOCReached', return_in_list, read_eoc_reached), + BatterySignal('/NumOfLimbStrings', return_in_list, limp_strings_value), + BatterySignal('/Dc/0/Voltage', mean, get_value=read_voltage(), unit='V'), + BatterySignal('/Dc/0/Current', sum, get_value=read_current(), unit='A'), + BatterySignal('/Dc/0/Power', sum, get_value=read_power, unit='W'), + BatterySignal('/BussVoltage', mean, read_float(register=1001, scale_factor=0.01, offset=0), unit='V'), + BatterySignal('/Soc', mean, read_soc, unit='%'), + BatterySignal('/LowestSoc', min, read_float(register=1053, scale_factor=0.1, offset=0), unit='%'), + BatterySignal('/Dc/0/Temperature', mean, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'), + BatterySignal('/Dc/0/LowestTemperature', min, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'), + BatterySignal('/WarningFlags/TaM1', return_in_list, read_bool(base_register=1005, bit=1)), + BatterySignal('/WarningFlags/TbM1', return_in_list, read_bool(base_register=1005, bit=4)), + BatterySignal('/WarningFlags/VBm1', return_in_list, read_bool(base_register=1005, bit=6)), + BatterySignal('/WarningFlags/VBM1', return_in_list, read_bool(base_register=1005, bit=8)), + BatterySignal('/WarningFlags/IDM1', return_in_list, read_bool(base_register=1005, bit=10)), + BatterySignal('/WarningFlags/vsm1', return_in_list, read_bool(base_register=1005, bit=22)), + BatterySignal('/WarningFlags/vsM1', return_in_list, read_bool(base_register=1005, bit=24)), + BatterySignal('/WarningFlags/iCM1', return_in_list, read_bool(base_register=1005, bit=26)), + BatterySignal('/WarningFlags/iDM1', return_in_list, read_bool(base_register=1005, bit=28)), + BatterySignal('/WarningFlags/MID1', return_in_list, read_bool(base_register=1005, bit=30)), + BatterySignal('/WarningFlags/BLPW', return_in_list, read_bool(base_register=1005, bit=32)), + BatterySignal('/WarningFlags/CCBF', return_in_list, read_bool(base_register=1005, bit=33)), + BatterySignal('/WarningFlags/Ah_W', return_in_list, read_bool(base_register=1005, 
bit=35)), + BatterySignal('/WarningFlags/MPMM', return_in_list, read_bool(base_register=1005, bit=38)), + BatterySignal('/WarningFlags/TCdi', return_in_list, read_bool(base_register=1005, bit=40)), + BatterySignal('/WarningFlags/LMPW', return_in_list, read_bool(base_register=1005, bit=44)), + BatterySignal('/WarningFlags/TOCW', return_in_list, read_bool(base_register=1005, bit=47)), + BatterySignal('/WarningFlags/BUSL', return_in_list, read_bool(base_register=1005, bit=49)), + BatterySignal('/AlarmFlags/Tam', return_in_list, read_bool(base_register=1005, bit=0)), + BatterySignal('/AlarmFlags/TaM2', return_in_list, read_bool(base_register=1005, bit=2)), + BatterySignal('/AlarmFlags/Tbm', return_in_list, read_bool(base_register=1005, bit=3)), + BatterySignal('/AlarmFlags/TbM2', return_in_list, read_bool(base_register=1005, bit=5)), + BatterySignal('/AlarmFlags/VBm2', return_in_list, read_bool(base_register=1005, bit=7)), + BatterySignal('/AlarmFlags/VBM2', return_in_list, read_bool(base_register=1005, bit=9)), + BatterySignal('/AlarmFlags/IDM2', return_in_list, read_bool(base_register=1005, bit=11)), + BatterySignal('/AlarmFlags/ISOB', return_in_list, read_bool(base_register=1005, bit=12)), + BatterySignal('/AlarmFlags/MSWE', return_in_list, read_bool(base_register=1005, bit=13)), + BatterySignal('/AlarmFlags/FUSE', return_in_list, read_bool(base_register=1005, bit=14)), + BatterySignal('/AlarmFlags/HTRE', return_in_list, read_bool(base_register=1005, bit=15)), + BatterySignal('/AlarmFlags/TCPE', return_in_list, read_bool(base_register=1005, bit=16)), + BatterySignal('/AlarmFlags/STRE', return_in_list, read_bool(base_register=1005, bit=17)), + BatterySignal('/AlarmFlags/CME', return_in_list, read_bool(base_register=1005, bit=18)), + BatterySignal('/AlarmFlags/HWFL', return_in_list, read_bool(base_register=1005, bit=19)), + BatterySignal('/AlarmFlags/HWEM', return_in_list, read_bool(base_register=1005, bit=20)), + BatterySignal('/AlarmFlags/ThM', return_in_list, read_bool(base_register=1005, bit=21)), + BatterySignal('/AlarmFlags/vsm2', return_in_list, read_bool(base_register=1005, bit=23)), + BatterySignal('/AlarmFlags/vsM2', return_in_list, read_bool(base_register=1005, bit=25)), + BatterySignal('/AlarmFlags/iCM2', return_in_list, read_bool(base_register=1005, bit=27)), + BatterySignal('/AlarmFlags/iDM2', return_in_list, read_bool(base_register=1005, bit=29)), + BatterySignal('/AlarmFlags/MID2', return_in_list, read_bool(base_register=1005, bit=31)), + BatterySignal('/AlarmFlags/HTFS', return_in_list, read_bool(base_register=1005, bit=42)), + BatterySignal('/AlarmFlags/DATA', return_in_list, read_bool(base_register=1005, bit=43)), + BatterySignal('/AlarmFlags/LMPA', return_in_list, read_bool(base_register=1005, bit=45)), + BatterySignal('/AlarmFlags/HEBT', return_in_list, read_bool(base_register=1005, bit=46)), + BatterySignal('/AlarmFlags/CURM', return_in_list, read_bool(base_register=1005, bit=48)), + BatterySignal('/Diagnostics/LedStatus/Red', first, read_led_state(register=1004, led=LedColor.red)), + BatterySignal('/Diagnostics/LedStatus/Blue', first, read_led_state(register=1004, led=LedColor.blue)), + BatterySignal('/Diagnostics/LedStatus/Green', first, read_led_state(register=1004, led=LedColor.green)), + BatterySignal('/Diagnostics/LedStatus/Amber', first, read_led_state(register=1004, led=LedColor.amber)), + BatterySignal('/Diagnostics/IoStatus/MainSwitchClosed', return_in_list, read_switch_closed), + BatterySignal('/Diagnostics/IoStatus/AlarmOutActive', return_in_list, 
read_alarm_out_active), + BatterySignal('/Diagnostics/IoStatus/InternalFanActive', return_in_list, read_bool(base_register=1013, bit=2)), + BatterySignal('/Diagnostics/IoStatus/VoltMeasurementAllowed', return_in_list, read_bool(base_register=1013, bit=3)), + BatterySignal('/Diagnostics/IoStatus/AuxRelay', return_in_list, read_aux_relay), + BatterySignal('/Diagnostics/IoStatus/RemoteState', return_in_list, read_bool(base_register=1013, bit=5)), + BatterySignal('/Diagnostics/IoStatus/RiscOn', return_in_list, read_bool(base_register=1013, bit=6)), + BatterySignal('/IoStatus/BatteryCold', any, read_battery_cold), + BatterySignal('/Info/MaxDischargeCurrent', sum, max_discharge_current, unit='A'), + BatterySignal('/Info/MaxChargeCurrent', sum, max_charge_current, unit='A'), + BatterySignal('/Info/MaxChargeVoltage', min, lambda bs: bs.battery.v_max, unit='V'), + BatterySignal('/Info/MinDischargeVoltage', max, lambda bs: bs.battery.v_min, unit='V'), + BatterySignal('/Info/BatteryLowVoltage', max, lambda bs: bs.battery.v_min - 2, unit='V'), + BatterySignal('/Info/NumberOfStrings', sum, number_of_active_strings), + BatterySignal('/Info/MaxChargePower', sum, calc_max_charge_power), + BatterySignal('/Info/MaxDischargePower', sum, calc_max_discharge_power), + BatterySignal('/FirmwareVersion', comma_separated, lambda bs: bs.battery.firmware_version), + BatterySignal('/HardwareVersion', comma_separated, lambda bs: bs.battery.hardware_version), + BatterySignal('/BmsVersion', comma_separated, lambda bs: bs.battery.bms_version) + ] + +def create_csv_signals(firmware_version): + total_current = read_float(register=1062, scale_factor=0.01, offset=-10000) + + def read_total_current(status): + return total_current(status) + + def read_heating_current(status): + return total_current(status) - read_current()(status) + + def read_heating_power(status): + return read_voltage()(status) * read_heating_current(status) + + soc_ah = read_float(register=1002, scale_factor=0.1, offset=-10000) + + def read_soc_ah(status): + return soc_ah(status) + + def return_led_state(status, color): + led_state = read_led_state(register=1004, led=color)(status) + if led_state == LedState.blinking_fast or led_state == LedState.blinking_slow: + return "Blinking" + elif led_state == LedState.on: + return "On" + elif led_state == LedState.off: + return "Off" + return "Unknown" + + def return_led_state_blue(status): + return return_led_state(status, LedColor.blue) + + def return_led_state_red(status): + return return_led_state(status, LedColor.red) + + def return_led_state_green(status): + return return_led_state(status, LedColor.green) + + def return_led_state_amber(status): + return return_led_state(status, LedColor.amber) + + battery_status_reader = read_hex_string(1060, 2) + + def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + + def read_serial_number(status): + serial_regs = [1055, 1056, 1057, 1058] + serial_parts = [] + for reg in serial_regs: + hex_value_fun = read_hex_string(reg, 1) + hex_value = hex_value_fun(status) + serial_parts.append(hex_value.replace(' ', '')) + serial_number = ''.join(serial_parts).rstrip('0') + return serial_number + + def time_since_toc_in_time_format(status): + time_in_minutes = read_float(register=1052)(status) + total_seconds = int(time_in_minutes * 60) + days = total_seconds // (24 * 3600) + total_seconds = total_seconds % (24 * 3600) + hours = total_seconds // 3600 + total_seconds %= 3600 + minutes = 
total_seconds // 60 + seconds = total_seconds % 60 + return "{}.{:02}:{:02}:{:02}".format(days, hours, minutes, seconds) + + return [ + CsvSignal('/Battery/Devices/FwVersion', firmware_version), + CsvSignal('/Battery/Devices/Dc/Power', read_power, 'W'), + CsvSignal('/Battery/Devices/Dc/Voltage', read_voltage(), 'V'), + CsvSignal('/Battery/Devices/Soc', read_float(register=1053, scale_factor=0.1, offset=0), '%'), + CsvSignal('/Battery/Devices/Temperatures/Cells/Average', read_float(register=1003, scale_factor=0.1, offset=-400), 'C'), + CsvSignal('/Battery/Devices/Dc/Current', read_current(), 'A'), + CsvSignal('/Battery/Devices/BusCurrent', read_total_current, 'A'), + CsvSignal('/Battery/Devices/CellsCurrent', read_current(), 'A'), + CsvSignal('/Battery/Devices/HeatingCurrent', read_heating_current, 'A'), + CsvSignal('/Battery/Devices/HeatingPower', read_heating_power, 'W'), + CsvSignal('/Battery/Devices/SOCAh', read_soc_ah), + CsvSignal('/Battery/Devices/Leds/Blue', return_led_state_blue), + CsvSignal('/Battery/Devices/Leds/Red', return_led_state_red), + CsvSignal('/Battery/Devices/Leds/Green', return_led_state_green), + CsvSignal('/Battery/Devices/Leds/Amber', return_led_state_amber), + CsvSignal('/Battery/Devices/BatteryStrings/String1Active', lambda status: int((read_limb_bitmap()(status) & 0b00001) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String2Active', lambda status: int((read_limb_bitmap()(status) & 0b00010) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String3Active', lambda status: int((read_limb_bitmap()(status) & 0b00100) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String4Active', lambda status: int((read_limb_bitmap()(status) & 0b01000) != 0)), + CsvSignal('/Battery/Devices/BatteryStrings/String5Active', lambda status: int((read_limb_bitmap()(status) & 0b10000) != 0)), + CsvSignal('/Battery/Devices/IoStatus/ConnectedToDcBus', read_switch_closed), + CsvSignal('/Battery/Devices/IoStatus/AlarmOutActive', read_alarm_out_active), + CsvSignal('/Battery/Devices/IoStatus/InternalFanActive', read_bool(base_register=1013, bit=2)), + CsvSignal('/Battery/Devices/IoStatus/VoltMeasurementAllowed', read_bool(base_register=1013, bit=3)), + CsvSignal('/Battery/Devices/IoStatus/AuxRelayBus', read_aux_relay), + CsvSignal('/Battery/Devices/IoStatus/RemoteStateActive', read_bool(base_register=1013, bit=5)), + CsvSignal('/Battery/Devices/IoStatus/RiscActive', read_bool(base_register=1013, bit=6)), + CsvSignal('/Battery/Devices/Eoc', read_eoc_reached), + CsvSignal('/Battery/Devices/SerialNumber', read_serial_number), + CsvSignal('/Battery/Devices/TimeSinceTOC', time_since_toc_in_time_format), + CsvSignal('/Battery/Devices/MaxChargePower', calc_max_charge_power), + CsvSignal('/Battery/Devices/MaxDischargePower', calc_max_discharge_power), + ] + +def read_warning_and_alarm_flags(): + return [ + # Warnings + CsvSignal('/Battery/Devices/WarningFlags/TaM1', read_bool(base_register=1005, bit=1)), + CsvSignal('/Battery/Devices/WarningFlags/TbM1', read_bool(base_register=1005, bit=4)), + CsvSignal('/Battery/Devices/WarningFlags/VBm1', read_bool(base_register=1005, bit=6)), + CsvSignal('/Battery/Devices/WarningFlags/VBM1', read_bool(base_register=1005, bit=8)), + CsvSignal('/Battery/Devices/WarningFlags/IDM1', read_bool(base_register=1005, bit=10)), + CsvSignal('/Battery/Devices/WarningFlags/vsm1', read_bool(base_register=1005, bit=22)), + CsvSignal('/Battery/Devices/WarningFlags/vsM1', read_bool(base_register=1005, bit=24)), + CsvSignal('/Battery/Devices/WarningFlags/iCM1', 
read_bool(base_register=1005, bit=26)), + CsvSignal('/Battery/Devices/WarningFlags/iDM1', read_bool(base_register=1005, bit=28)), + CsvSignal('/Battery/Devices/WarningFlags/MID1', read_bool(base_register=1005, bit=30)), + CsvSignal('/Battery/Devices/WarningFlags/BLPW', read_bool(base_register=1005, bit=32)), + CsvSignal('/Battery/Devices/WarningFlags/CCBF', read_bool(base_register=1005, bit=33)), + CsvSignal('/Battery/Devices/WarningFlags/Ah_W', read_bool(base_register=1005, bit=35)), + CsvSignal('/Battery/Devices/WarningFlags/MPMM', read_bool(base_register=1005, bit=38)), + CsvSignal('/Battery/Devices/WarningFlags/TCdi', read_bool(base_register=1005, bit=40)), + CsvSignal('/Battery/Devices/WarningFlags/LMPW', read_bool(base_register=1005, bit=44)), + CsvSignal('/Battery/Devices/WarningFlags/TOCW', read_bool(base_register=1005, bit=47)), + CsvSignal('/Battery/Devices/WarningFlags/BUSL', read_bool(base_register=1005, bit=49)), + ], [ + # Alarms + CsvSignal('/Battery/Devices/AlarmFlags/Tam', read_bool(base_register=1005, bit=0)), + CsvSignal('/Battery/Devices/AlarmFlags/TaM2', read_bool(base_register=1005, bit=2)), + CsvSignal('/Battery/Devices/AlarmFlags/Tbm', read_bool(base_register=1005, bit=3)), + CsvSignal('/Battery/Devices/AlarmFlags/TbM2', read_bool(base_register=1005, bit=5)), + CsvSignal('/Battery/Devices/AlarmFlags/VBm2', read_bool(base_register=1005, bit=7)), + CsvSignal('/Battery/Devices/AlarmFlags/VBM2', read_bool(base_register=1005, bit=9)), + CsvSignal('/Battery/Devices/AlarmFlags/IDM2', read_bool(base_register=1005, bit=11)), + CsvSignal('/Battery/Devices/AlarmFlags/ISOB', read_bool(base_register=1005, bit=12)), + CsvSignal('/Battery/Devices/AlarmFlags/MSWE', read_bool(base_register=1005, bit=13)), + CsvSignal('/Battery/Devices/AlarmFlags/FUSE', read_bool(base_register=1005, bit=14)), + CsvSignal('/Battery/Devices/AlarmFlags/HTRE', read_bool(base_register=1005, bit=15)), + CsvSignal('/Battery/Devices/AlarmFlags/TCPE', read_bool(base_register=1005, bit=16)), + CsvSignal('/Battery/Devices/AlarmFlags/STRE', read_bool(base_register=1005, bit=17)), + CsvSignal('/Battery/Devices/AlarmFlags/CME', read_bool(base_register=1005, bit=18)), + CsvSignal('/Battery/Devices/AlarmFlags/HWFL', read_bool(base_register=1005, bit=19)), + CsvSignal('/Battery/Devices/AlarmFlags/HWEM', read_bool(base_register=1005, bit=20)), + CsvSignal('/Battery/Devices/AlarmFlags/ThM', read_bool(base_register=1005, bit=21)), + CsvSignal('/Battery/Devices/AlarmFlags/vsm2', read_bool(base_register=1005, bit=23)), + CsvSignal('/Battery/Devices/AlarmFlags/vsM2', read_bool(base_register=1005, bit=25)), + CsvSignal('/Battery/Devices/AlarmFlags/iCM2', read_bool(base_register=1005, bit=27)), + CsvSignal('/Battery/Devices/AlarmFlags/iDM2', read_bool(base_register=1005, bit=29)), + CsvSignal('/Battery/Devices/AlarmFlags/MID2', read_bool(base_register=1005, bit=31)), + CsvSignal('/Battery/Devices/AlarmFlags/HTFS', read_bool(base_register=1005, bit=42)), + CsvSignal('/Battery/Devices/AlarmFlags/DATA', read_bool(base_register=1005, bit=43)), + CsvSignal('/Battery/Devices/AlarmFlags/LMPA', read_bool(base_register=1005, bit=45)), + CsvSignal('/Battery/Devices/AlarmFlags/HEBT', read_bool(base_register=1005, bit=46)), + CsvSignal('/Battery/Devices/AlarmFlags/CURM', read_bool(base_register=1005, bit=48)), + ] diff --git a/python/dbus-fz-sonick-48tl-with-s3/start.sh b/python/dbus-fz-sonick-48tl-with-s3/start.sh new file mode 100755 index 000000000..83860d3e4 --- /dev/null +++ b/python/dbus-fz-sonick-48tl-with-s3/start.sh @@ -0,0 
+1,7 @@ +#!/bin/bash + +. /opt/victronenergy/serial-starter/run-service.sh + +app="/opt/innovenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py" +args="$tty" +start $args diff --git a/python/dbus-fzsonick-48tl-nofork/config.py b/python/dbus-fzsonick-48tl-nofork/config.py new file mode 100644 index 000000000..5bccd15c6 --- /dev/null +++ b/python/dbus-fzsonick-48tl-nofork/config.py @@ -0,0 +1,55 @@ +import serial +import logging + +# dbus configuration + +FIRMWARE_VERSION = 1 # value returned by getValue (getText returns string value reported by battery) +HARDWARE_VERSION = 1 # value returned by getValue (getText returns string value reported by battery) + +CONNECTION = 'Modbus RTU' +PRODUCT_NAME = 'FZS 48TL200' +PRODUCT_ID = 0xB012 # assigned by victron +DEVICE_INSTANCE = 1 +SERVICE_NAME_PREFIX = 'com.victronenergy.battery.' + +#s3 configuration +S3BUCKET = "2-c0436b6a-d276-4cd8-9c44-1eae86cf5d0e" +S3KEY = "EXO5b2e35442791260eaaa7bdc8" +S3SECRET = "XFFOVzenDiEQoLPmhK6ML9RfQfsAMhrAs25MfJxi-24" + +# driver configuration + +SOFTWARE_VERSION = '3.0.3' +UPDATE_INTERVAL = 2000 # milliseconds +#LOG_LEVEL = logging.INFO +LOG_LEVEL = logging.DEBUG + +# modbus configuration + +BASE_ADDRESS = 999 +#NO_OF_REGISTERS = 63 +NO_OF_REGISTERS = 64 +MAX_SLAVE_ADDRESS = 10 + + +# RS 485 configuration + +PARITY = serial.PARITY_ODD +TIMEOUT = 0.1 # seconds +BAUD_RATE = 115200 +BYTE_SIZE = 8 +STOP_BITS = 1 +MODE = 'rtu' + + +# battery configuration + +MAX_CHARGE_VOLTAGE = 58 +I_MAX_PER_STRING = 15 +NUM_OF_STRING_PER_BATTERY = 5 +AH_PER_STRING = 40 +V_MAX = 54.2 +R_STRING_MIN = 0.125 +R_STRING_MAX = 0.250 + + diff --git a/python/dbus-fzsonick-48tl-nofork/convert.py b/python/dbus-fzsonick-48tl-nofork/convert.py new file mode 100644 index 000000000..2696f1664 --- /dev/null +++ b/python/dbus-fzsonick-48tl-nofork/convert.py @@ -0,0 +1,119 @@ +from collections.abc import Iterable # 'Iterable' was removed from 'collections' in Python 3.10 + +import config as cfg +from data import LedState, BatteryStatus + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable + + +def read_bool(register, bit): + # type: (int, int) -> Callable[[BatteryStatus], bool] + + def get_value(status): + # type: (BatteryStatus) -> bool + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value & (1 << bit) > 0 + + return get_value + + +def read_float(register, scale_factor=1.0, offset=0.0, places=2): + # type: (int, float, float, int) -> Callable[[BatteryStatus], float] + + def get_value(status): + # type: (BatteryStatus) -> float + value = status.modbus_data[register - cfg.BASE_ADDRESS] + + if value >= 0x8000: # convert to signed int16 + value -= 0x10000 # fiamm stores their integers signed AND with sign-offset @#%^&! + + result = (value+offset)*scale_factor + return round(result, places) + + return get_value + + +def read_hex_string(register, count): + # type: (int, int) -> Callable[[BatteryStatus], str] + """ + reads count consecutive modbus registers starting at register, + and returns a hex representation of it: + e.g. for count=4: DEAD BEEF DEAD BEEF. 
+ """ + start = register - cfg.BASE_ADDRESS + end = start + count + + def get_value(status): + # type: (BatteryStatus) -> str + return ' '.join(['{0:0>4X}'.format(x) for x in status.modbus_data[start:end]]) + + return get_value + + +def read_led_state(register, led): + # type: (int, int) -> Callable[[BatteryStatus], int] + + read_lo = read_bool(register, led * 2) + read_hi = read_bool(register, led * 2 + 1) + + def get_value(status): + # type: (BatteryStatus) -> int + + lo = read_lo(status) + hi = read_hi(status) + + if hi: + if lo: + return LedState.blinking_fast + else: + return LedState.blinking_slow + else: + if lo: + return LedState.on + else: + return LedState.off + + return get_value + + +def read_bitmap(register): + # type: (int) -> Callable[[BatteryStatus], bitmap] + + def get_value(status): + # type: (BatteryStatus) -> bitmap + value = status.modbus_data[register - cfg.BASE_ADDRESS] + return value + + return get_value + + +def append_unit(unit): + # type: (unicode) -> Callable[[unicode], unicode] + + def get_text(v): + # type: (unicode) -> unicode + return "{0}{1}".format(str(v), unit) + + return get_text + + +def mean(numbers): + # type: (Iterable[float] | Iterable[int]) -> float + return float("{:.2f}".format(float(sum(numbers)) / len(numbers))) + +def ssum(numbers): + # type: (Iterable[float] | Iterable[int]) -> float + return float("{:.2f}".format(float(sum(numbers)))) + + +def first(ts): + return next(t for t in ts) + +def return_in_list(ts): + return ts + + diff --git a/python/dbus-fzsonick-48tl-nofork/data.py b/python/dbus-fzsonick-48tl-nofork/data.py new file mode 100644 index 000000000..05cdd1aa7 --- /dev/null +++ b/python/dbus-fzsonick-48tl-nofork/data.py @@ -0,0 +1,97 @@ +import config as cfg +from collections import Iterable + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable + + +class LedState(object): + """ + from page 6 of the '48TLxxx ModBus Protocol doc' + """ + off = 0 + on = 1 + blinking_slow = 2 + blinking_fast = 3 + + +class LedColor(object): + green = 0 + amber = 1 + blue = 2 + red = 3 + + + +class CsvSignal(object): + def __init__(self, name, get_value, get_text = None): + self.name = name + self.get_value = get_value if callable(get_value) else lambda _: get_value + self.get_text = get_text + + if get_text is None: + self.get_text = "" + +class Signal(object): + + def __init__(self, dbus_path, aggregate, get_value, get_text=None): + # type: (str, Callable[[Iterable[object]],object], Callable[[BatteryStatus],object] | object, Callable[[object],unicode] | object)->None + """ + A Signal holds all information necessary for the handling of a + certain datum (e.g. voltage) published by the battery. + + :param dbus_path: str + object_path on DBus where the datum needs to be published + + :param aggregate: Iterable[object] -> object + function that combines the values of multiple batteries into one. + e.g. 
sum for currents, or mean for voltages + + :param get_value: (BatteryStatus) -> object + function to extract the datum from the modbus record, + alternatively: a constant + + :param get_text: (object) -> unicode [optional] + function to render datum to text, needed by DBus + alternatively: a constant + """ + + self.dbus_path = dbus_path + self.aggregate = aggregate + self.get_value = get_value if callable(get_value) else lambda _: get_value + self.get_text = get_text if callable(get_text) else lambda _: str(get_text) + + # if no 'get_text' is provided, fall back to str() + if get_text is None: + self.get_text = str + + +class Battery(object): + + """ Data record to hold hardware and firmware specs of the battery """ + + def __init__(self, slave_address, hardware_version, firmware_version, bms_version, ampere_hours): + # type: (int, str, str, str, int) -> None + self.slave_address = slave_address + self.hardware_version = hardware_version + self.firmware_version = firmware_version + self.bms_version = bms_version + self.ampere_hours = ampere_hours + + + def __str__(self): + return 'slave address = {0}\nhardware version = {1}\nfirmware version = {2}\nbms version = {3}\nampere hours = {4}'.format( + self.slave_address, self.hardware_version, self.firmware_version, self.bms_version, str(self.ampere_hours)) + + +class BatteryStatus(object): + """ + record holding the current status of a battery + """ + def __init__(self, battery, modbus_data): + # type: (Battery, list[int]) -> None + + self.battery = battery + self.modbus_data = modbus_data diff --git a/python/dbus-fzsonick-48tl-nofork/dbus-fzsonick-48tl.py b/python/dbus-fzsonick-48tl-nofork/dbus-fzsonick-48tl.py new file mode 100755 index 000000000..b06c51204 --- /dev/null +++ b/python/dbus-fzsonick-48tl-nofork/dbus-fzsonick-48tl.py @@ -0,0 +1,1063 @@ +#!/usr/bin/python3 -u +# coding=utf-8 + +import re +import sys +import logging +from gi.repository import GLib + +import config as cfg +import convert as c + +from pymodbus.register_read_message import ReadInputRegistersResponse +from pymodbus.client.sync import ModbusSerialClient as Modbus +from pymodbus.other_message import ReportSlaveIdRequest +from pymodbus.exceptions import ModbusException +from pymodbus.pdu import ExceptionResponse + +from dbus.mainloop.glib import DBusGMainLoop +from data import BatteryStatus, Signal, Battery, LedColor, CsvSignal, LedState + +from collections.abc import Iterable # 'Iterable' was removed from 'collections' in Python 3.10 +from os import path + +app_dir = path.dirname(path.realpath(__file__)) +sys.path.insert(1, path.join(app_dir, 'ext', 'velib_python')) + +from vedbus import VeDbusService as DBus + +import time +import os +import csv + +import requests +import hmac +import hashlib +import base64 +from datetime import datetime +import io +import json +import pika + + +# zip-comp additions +import zipfile + +def compress_csv_data(csv_data, file_name="data.csv"): + + memory_stream = io.BytesIO() + + # Create a zip archive in the memory buffer + with zipfile.ZipFile(memory_stream, 'w', zipfile.ZIP_DEFLATED) as archive: + # Add the CSV data to the ZIP archive under the given entry name + with archive.open(file_name, 'w') as entry_stream: + entry_stream.write(csv_data.encode('utf-8')) + + # Get the compressed byte array from the memory buffer + compressed_bytes = memory_stream.getvalue() + + # Encode the compressed byte array as a Base64 string + base64_string = base64.b64encode(compressed_bytes).decode('utf-8') 
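# Receiver-side counterpart of compress_csv_data, shown for clarity (a sketch,
# not part of the driver): base64-decode the payload, open the in-memory zip
# and read the CSV entry back out. Assumes the same entry name as above.
def decompress_csv_data(base64_string, file_name="data.csv"):
    import base64, io, zipfile
    compressed_bytes = base64.b64decode(base64_string)
    with zipfile.ZipFile(io.BytesIO(compressed_bytes)) as archive:
        with archive.open(file_name) as entry_stream:
            return entry_stream.read().decode('utf-8')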
+ + return base64_string + +class S3config: + def __init__(self): + self.bucket = cfg.S3BUCKET + self.region = "sos-ch-dk-2" + self.provider = "exo.io" + self.key = cfg.S3KEY + self.secret = cfg.S3SECRET + self.content_type = "application/base64; charset=utf-8" + + @property + def host(self): + return f"{self.bucket}.{self.region}.{self.provider}" + + @property + def url(self): + return f"https://{self.host}" + + def create_put_request(self, s3_path, data): + headers = self._create_request("PUT", s3_path) + url = f"{self.url}/{s3_path}" + response = requests.put(url, headers=headers, data=data) + return response + + def _create_request(self, method, s3_path): + date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + auth = self._create_authorization(method, self.bucket, s3_path, date, self.key, self.secret, self.content_type) + headers = { + "Host": self.host, + "Date": date, + "Authorization": auth, + "Content-Type": self.content_type + } + return headers + + @staticmethod + def _create_authorization(method, bucket, s3_path, date, s3_key, s3_secret, content_type="", md5_hash=""): + payload = f"{method}\n{md5_hash}\n{content_type}\n{date}\n/{bucket.strip('/')}/{s3_path.strip('/')}" + signature = base64.b64encode( + hmac.new(s3_secret.encode(), payload.encode(), hashlib.sha1).digest() + ).decode() + return f"AWS {s3_key}:{signature}" + +def read_csv_as_string(file_path): + """ + Reads a CSV file from the given path and returns its content as a single string. + """ + try: + with open(file_path, 'r', encoding='utf-8') as file: + return file.read() + except FileNotFoundError: + print(f"Error: The file {file_path} does not exist.") + return None + except IOError as e: + print(f"IO error occurred: {str(e)}") + return None + +CSV_DIR = "/data/csv_files/" +#CSV_DIR = "csv_files/" + +# Define the path to the file containing the installation name +INSTALLATION_NAME_FILE = '/data/innovenergy/openvpn/installation-name' + + +# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime +# noinspection PyUnreachableCode +if False: + from typing import Callable + +def interpret_limb_bitmap(bitmap_value): + # Each bit flags one disabled string: 0b00000 means all 5 strings are active, 0b00001 means string 1 is disabled, and so on. 
+ string1_disabled = int((bitmap_value & 0b00001) != 0) + string2_disabled = int((bitmap_value & 0b00010) != 0) + string3_disabled = int((bitmap_value & 0b00100) != 0) + string4_disabled = int((bitmap_value & 0b01000) != 0) + string5_disabled = int((bitmap_value & 0b10000) != 0) + n_limb_strings = string1_disabled+string2_disabled+string3_disabled+string4_disabled+string5_disabled + return n_limb_strings + +def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int): + # type: (float, float, float, float) -> float + dv = v_limit - v + di = dv / r_int + p_limit = v_limit * (i + di) + return p_limit + +def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int): + # type: (float, float, float, float) -> float + di = i_limit - i + dv = di * r_int + p_limit = i_limit * (v + dv) + return p_limit + +def read_switch_closed(status): + value = c.read_bool(register=1013, bit=0)(status) + if value: + return False + return True + +def read_alarm_out_active(status): + value = c.read_bool(register=1013, bit=1)(status) + if value: + return False + return True + +def read_aux_relay(status): + value = c.read_bool(register=1013, bit=4)(status) + if value: + return False + return True + +def hex_string_to_ascii(hex_string): + # Ensure the hex_string is correctly formatted without spaces + hex_string = hex_string.replace(" ", "") + # Convert every two characters (a byte) in the hex string to ASCII + ascii_string = ''.join([chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2)]) + return ascii_string + +battery_status_reader = c.read_hex_string(1060,2) + +def read_eoc_reached(status): + battery_status_string = battery_status_reader(status) + return hex_string_to_ascii(battery_status_string) == "EOC_" + +def return_led_state(status, color): + led_state = c.read_led_state(register=1004, led=color)(status) + if led_state == LedState.blinking_fast or led_state == LedState.blinking_slow: + return "Blinking" + elif led_state == LedState.on: + return "On" + elif led_state == LedState.off: + return "Off" + return "Unknown" + +def return_led_state_blue(status): + return return_led_state(status, LedColor.blue) + +def return_led_state_red(status): + return return_led_state(status, LedColor.red) + +def return_led_state_green(status): + return return_led_state(status, LedColor.green) + +def return_led_state_amber(status): + return return_led_state(status, LedColor.amber) + +def read_serial_number(status): + serial_regs = [1055, 1056, 1057, 1058] + serial_parts = [] + for reg in serial_regs: + # reading each register as a single hex value + hex_value_fun = c.read_hex_string(reg, 1) + hex_value = hex_value_fun(status) + # append without spaces and leading zeros stripped if any + serial_parts.append(hex_value.replace(' ', '')) + # concatenate all parts to form the full serial number + serial_number = ''.join(serial_parts).rstrip('0') + return serial_number + +def time_since_toc_in_time_format(status): + time_in_minutes = c.read_float(register=1052)(status) + # Convert minutes to total seconds + total_seconds = int(time_in_minutes * 60) + # Calculate days, hours, minutes, and seconds + days = total_seconds // (24 * 3600) + total_seconds = total_seconds % (24 * 3600) + hours = total_seconds // 3600 + total_seconds %= 3600 + minutes = total_seconds // 60 + seconds = total_seconds % 60 + # Format the string to show days.hours:minutes:seconds + return f"{days}.{hours:02}:{minutes:02}:{seconds:02}" + +def create_csv_signals(firmware_version): + read_voltage = c.read_float(register=999, 
scale_factor=0.01, offset=0, places=2) + read_current = c.read_float(register=1000, scale_factor=0.01, offset=-10000, places=2) + read_limb_bitmap = c.read_bitmap(1059) + + def read_power(status): + return int(read_current(status) * read_voltage(status)) + + def string1_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00001) != 0) + + def string2_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00010) != 0) + + def string3_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b00100) != 0) + + def string4_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b01000) != 0) + + def string5_disabled(status): + bitmap_value = read_limb_bitmap(status) + return int((bitmap_value & 0b10000) != 0) + + def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap(status)) + + def calc_max_charge_power(status): + # type: (BatteryStatus) -> int + n_strings = cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + v = read_voltage(status) + i = read_current(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) # p_limit is normally positive here (signed) + p_limit = max(p_limit, 0) # charge power must not become negative + + return int(p_limit) + + def calc_max_discharge_power(status): + n_strings = cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status) + max_discharge_current = n_strings*cfg.I_MAX_PER_STRING + return int(max_discharge_current*read_voltage(status)) + + total_current = c.read_float(register=1062, scale_factor=0.01, offset=-10000, places=1) + + def read_total_current(status): + return total_current(status) + + def read_heating_current(status): + return total_current(status) - read_current(status) + + def read_heating_power(status): + return read_voltage(status) * read_heating_current(status) + + soc_ah = c.read_float(register=1002, scale_factor=0.1, offset=-10000, places=1) + + def read_soc_ah(status): + return soc_ah(status) + + return [ + CsvSignal('/Battery/Devices/FwVersion', firmware_version), + CsvSignal('/Battery/Devices/Dc/Power', read_power, 'W'), + CsvSignal('/Battery/Devices/Dc/Voltage', read_voltage, 'V'), + CsvSignal('/Battery/Devices/Soc', c.read_float(register=1053, scale_factor=0.1, offset=0, places=1), '%'), + CsvSignal('/Battery/Devices/Temperatures/Cells/Average', c.read_float(register=1003, scale_factor=0.1, offset=-400, places=1), 'C'), + CsvSignal('/Battery/Devices/Dc/Current', read_current, 'A'), + CsvSignal('/Battery/Devices/BusCurrent', read_total_current, 'A'), + CsvSignal('/Battery/Devices/CellsCurrent', read_current, 'A'), + CsvSignal('/Battery/Devices/HeatingCurrent', read_heating_current, 'A'), + CsvSignal('/Battery/Devices/HeatingPower', read_heating_power, 'W'), + CsvSignal('/Battery/Devices/SOCAh', read_soc_ah), + CsvSignal('/Battery/Devices/Leds/Blue', return_led_state_blue), + CsvSignal('/Battery/Devices/Leds/Red', return_led_state_red), + CsvSignal('/Battery/Devices/Leds/Green', return_led_state_green), + CsvSignal('/Battery/Devices/Leds/Amber', 
return_led_state_amber), + CsvSignal('/Battery/Devices/BatteryStrings/String1Active', string1_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String2Active', string2_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String3Active', string3_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String4Active', string4_disabled), + CsvSignal('/Battery/Devices/BatteryStrings/String5Active', string5_disabled), + CsvSignal('/Battery/Devices/IoStatus/ConnectedToDcBus', read_switch_closed), + CsvSignal('/Battery/Devices/IoStatus/AlarmOutActive', read_alarm_out_active), + CsvSignal('/Battery/Devices/IoStatus/InternalFanActive', c.read_bool(register=1013, bit=2)), + CsvSignal('/Battery/Devices/IoStatus/VoltMeasurementAllowed', c.read_bool(register=1013, bit=3)), + CsvSignal('/Battery/Devices/IoStatus/AuxRelayBus', read_aux_relay), + CsvSignal('/Battery/Devices/IoStatus/RemoteStateActive', c.read_bool(register=1013, bit=5)), + CsvSignal('/Battery/Devices/IoStatus/RiscActive', c.read_bool(register=1013, bit=6)), + CsvSignal('/Battery/Devices/Eoc', read_eoc_reached), + CsvSignal('/Battery/Devices/SerialNumber', read_serial_number), + CsvSignal('/Battery/Devices/TimeSinceTOC', time_since_toc_in_time_format), + CsvSignal('/Battery/Devices/MaxChargePower', calc_max_charge_power), + CsvSignal('/Battery/Devices/MaxDischargePower', calc_max_discharge_power), + ] + +def init_signals(hardware_version, firmware_version, n_batteries): + # type: (str,str,int) -> Iterable[Signal] + """ + A Signal holds all information necessary for the handling of a + certain datum (e.g. voltage) published by the battery. + + Signal(dbus_path, aggregate, get_value, get_text = str) + + dbus_path: str + object_path on DBus where the datum needs to be published + + aggregate: Iterable[object] -> object + function that combines the values of multiple batteries into one. + e.g. 
sum for currents, or mean for voltages + + get_value: (BatteryStatus) -> object [optional] + function to extract the datum from the modbus record, + alternatively: a constant + + get_text: (object) -> unicode [optional] + function to render datum to text, needed by DBus + alternatively: a constant + + The conversion functions use the same parameters (e.g scale_factor, offset) + as described in the document 'T48TLxxx ModBus Protocol Rev.7.1' which can + be found in the /doc folder + """ + + product_id_hex = '0x{0:04x}'.format(cfg.PRODUCT_ID) + + read_voltage = c.read_float(register=999, scale_factor=0.01, offset=0, places=2) + read_current = c.read_float(register=1000, scale_factor=0.01, offset=-10000, places=2) + read_limb_bitmap = c.read_bitmap(1059) + + def read_power(status): + return int(read_current(status) * read_voltage(status)) + + def limp_strings_value(status): + return interpret_limb_bitmap(read_limb_bitmap(status)) + + def max_discharge_current(status): + return (cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status))*cfg.I_MAX_PER_STRING + + def max_charge_current(status): + return status.battery.ampere_hours/2 + + def calc_max_charge_power(status): + # type: (BatteryStatus) -> int + n_strings = cfg.NUM_OF_STRING_PER_BATTERY-limp_strings_value(status) + i_max = n_strings * cfg.I_MAX_PER_STRING + v_max = cfg.V_MAX + r_int_min = cfg.R_STRING_MIN / n_strings + r_int_max = cfg.R_STRING_MAX / n_strings + + v = read_voltage(status) + i = read_current(status) + + p_limits = [ + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_min), + calc_power_limit_imposed_by_voltage_limit(v, i, v_max, r_int_max), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_min), + calc_power_limit_imposed_by_current_limit(v, i, i_max, r_int_max), + ] + + p_limit = min(p_limits) # p_limit is normally positive here (signed) + p_limit = max(p_limit, 0) # charge power must not become negative + + return int(p_limit) + + product_name = cfg.PRODUCT_NAME + if n_batteries > 1: + product_name = cfg.PRODUCT_NAME + ' x' + str(n_batteries) + + return [ + # Node Red related dbus paths + Signal('/TimeToTOCRequest', max, c.read_float(register=1052)), + Signal('/EOCReached', c.return_in_list, read_eoc_reached), + Signal('/NumOfLimbStrings', c.return_in_list, get_value=limp_strings_value), + Signal('/NumOfBatteries', max, get_value=n_batteries), + Signal('/Dc/0/Voltage', c.mean, get_value=read_voltage, get_text=c.append_unit('V')), + Signal('/Dc/0/Current', c.ssum, get_value=read_current, get_text=c.append_unit('A')), + Signal('/Dc/0/Power', c.ssum, get_value=read_power, get_text=c.append_unit('W')), + Signal('/BussVoltage', c.mean, c.read_float(register=1001, scale_factor=0.01, offset=0, places=2), c.append_unit('V')), + Signal('/Soc', min, c.read_float(register=1053, scale_factor=0.1, offset=0, places=1), c.append_unit('%')), + Signal('/LowestSoc', min, c.read_float(register=1053, scale_factor=0.1, offset=0, places=1), c.append_unit('%')), + Signal('/Dc/0/Temperature', c.mean, c.read_float(register=1003, scale_factor=0.1, offset=-400, places=1), c.append_unit(u'°C')), + Signal('/Dc/0/LowestTemperature', min, c.read_float(register=1003, scale_factor=0.1, offset=-400, places=1), c.append_unit(u'°C')), + # Charge/Discharge current, voltage and power + Signal('/Info/MaxDischargeCurrent', c.ssum, max_discharge_current,c.append_unit('A')), + Signal('/Info/MaxChargeCurrent', c.ssum, max_charge_current, c.append_unit('A')), + Signal('/Info/MaxChargeVoltage', min, cfg.MAX_CHARGE_VOLTAGE, 
c.append_unit('V')), + Signal('/Info/MaxChargePower', c.ssum, calc_max_charge_power), + # Victron mandatory dbus paths + Signal('/Mgmt/ProcessName', c.first, __file__), + Signal('/Mgmt/ProcessVersion', c.first, cfg.SOFTWARE_VERSION), + Signal('/Mgmt/Connection', c.first, cfg.CONNECTION), + Signal('/DeviceInstance', c.first, cfg.DEVICE_INSTANCE), + Signal('/ProductName', c.first, product_name), + Signal('/ProductId', c.first, cfg.PRODUCT_ID, product_id_hex), + Signal('/Connected', c.first, 1), + Signal('/FirmwareVersion', c.return_in_list, firmware_version), + Signal('/HardwareVersion', c.first, cfg.HARDWARE_VERSION, hardware_version), + # Diagnostics + Signal('/Diagnostics/BmsVersion', c.first, lambda s: s.battery.bms_version), + # Warnings + Signal('/WarningFlags/TaM1', c.return_in_list, c.read_bool(register=1005, bit=1)), + Signal('/WarningFlags/TbM1', c.return_in_list, c.read_bool(register=1005, bit=4)), + Signal('/WarningFlags/VBm1', c.return_in_list, c.read_bool(register=1005, bit=6)), + Signal('/WarningFlags/VBM1', c.return_in_list, c.read_bool(register=1005, bit=8)), + Signal('/WarningFlags/IDM1', c.return_in_list, c.read_bool(register=1005, bit=10)), + Signal('/WarningFlags/vsm1', c.return_in_list, c.read_bool(register=1005, bit=22)), + Signal('/WarningFlags/vsM1', c.return_in_list, c.read_bool(register=1005, bit=24)), + Signal('/WarningFlags/iCM1', c.return_in_list, c.read_bool(register=1005, bit=26)), + Signal('/WarningFlags/iDM1', c.return_in_list, c.read_bool(register=1005, bit=28)), + Signal('/WarningFlags/MID1', c.return_in_list, c.read_bool(register=1005, bit=30)), + Signal('/WarningFlags/BLPW', c.return_in_list, c.read_bool(register=1005, bit=32)), + Signal('/WarningFlags/CCBF', c.return_in_list, c.read_bool(register=1005, bit=33)), + Signal('/WarningFlags/Ah_W', c.return_in_list, c.read_bool(register=1005, bit=35)), + Signal('/WarningFlags/MPMM', c.return_in_list, c.read_bool(register=1005, bit=38)), + Signal('/WarningFlags/TCdi', c.return_in_list, c.read_bool(register=1005, bit=40)), + Signal('/WarningFlags/LMPW', c.return_in_list, c.read_bool(register=1005, bit=44)), + Signal('/WarningFlags/TOCW', c.return_in_list, c.read_bool(register=1005, bit=47)), + Signal('/WarningFlags/BUSL', c.return_in_list, c.read_bool(register=1005, bit=49)), + # Alarms + Signal('/AlarmFlags/Tam', c.return_in_list, c.read_bool(register=1005, bit=0)), + Signal('/AlarmFlags/TaM2', c.return_in_list, c.read_bool(register=1005, bit=2)), + Signal('/AlarmFlags/Tbm', c.return_in_list, c.read_bool(register=1005, bit=3)), + Signal('/AlarmFlags/TbM2', c.return_in_list, c.read_bool(register=1005, bit=5)), + Signal('/AlarmFlags/VBm2', c.return_in_list, c.read_bool(register=1005, bit=7)), + Signal('/AlarmFlags/VBM2', c.return_in_list, c.read_bool(register=1005, bit=9)), + Signal('/AlarmFlags/IDM2', c.return_in_list, c.read_bool(register=1005, bit=11)), + Signal('/AlarmFlags/ISOB', c.return_in_list, c.read_bool(register=1005, bit=12)), + Signal('/AlarmFlags/MSWE', c.return_in_list, c.read_bool(register=1005, bit=13)), + Signal('/AlarmFlags/FUSE', c.return_in_list, c.read_bool(register=1005, bit=14)), + Signal('/AlarmFlags/HTRE', c.return_in_list, c.read_bool(register=1005, bit=15)), + Signal('/AlarmFlags/TCPE', c.return_in_list, c.read_bool(register=1005, bit=16)), + Signal('/AlarmFlags/STRE', c.return_in_list, c.read_bool(register=1005, bit=17)), + Signal('/AlarmFlags/CME', c.return_in_list, c.read_bool(register=1005, bit=18)), + Signal('/AlarmFlags/HWFL', c.return_in_list, c.read_bool(register=1005, 
bit=19)),
+        Signal('/AlarmFlags/HWEM', c.return_in_list, c.read_bool(register=1005, bit=20)),
+        Signal('/AlarmFlags/ThM', c.return_in_list, c.read_bool(register=1005, bit=21)),
+        Signal('/AlarmFlags/vsm2', c.return_in_list, c.read_bool(register=1005, bit=23)),
+        Signal('/AlarmFlags/vsM2', c.return_in_list, c.read_bool(register=1005, bit=25)),
+        Signal('/AlarmFlags/iCM2', c.return_in_list, c.read_bool(register=1005, bit=27)),
+        Signal('/AlarmFlags/iDM2', c.return_in_list, c.read_bool(register=1005, bit=29)),
+        Signal('/AlarmFlags/MID2', c.return_in_list, c.read_bool(register=1005, bit=31)),
+        Signal('/AlarmFlags/HTFS', c.return_in_list, c.read_bool(register=1005, bit=42)),
+        Signal('/AlarmFlags/DATA', c.return_in_list, c.read_bool(register=1005, bit=43)),
+        Signal('/AlarmFlags/LMPA', c.return_in_list, c.read_bool(register=1005, bit=45)),
+        Signal('/AlarmFlags/HEBT', c.return_in_list, c.read_bool(register=1005, bit=46)),
+        Signal('/AlarmFlags/CURM', c.return_in_list, c.read_bool(register=1005, bit=48)),
+        # LedStatus
+        Signal('/Diagnostics/LedStatus/Red', c.first, c.read_led_state(register=1004, led=LedColor.red)),
+        Signal('/Diagnostics/LedStatus/Blue', c.first, c.read_led_state(register=1004, led=LedColor.blue)),
+        Signal('/Diagnostics/LedStatus/Green', c.first, c.read_led_state(register=1004, led=LedColor.green)),
+        Signal('/Diagnostics/LedStatus/Amber', c.first, c.read_led_state(register=1004, led=LedColor.amber)),
+        # IO Status
+        Signal('/Diagnostics/IoStatus/MainSwitchClosed', c.return_in_list, read_switch_closed),
+        Signal('/Diagnostics/IoStatus/AlarmOutActive', c.return_in_list, read_alarm_out_active),
+        Signal('/Diagnostics/IoStatus/InternalFanActive', c.return_in_list, c.read_bool(register=1013, bit=2)),
+        Signal('/Diagnostics/IoStatus/VoltMeasurementAllowed', c.return_in_list, c.read_bool(register=1013, bit=3)),
+        Signal('/Diagnostics/IoStatus/AuxRelay', c.return_in_list, read_aux_relay),
+        Signal('/Diagnostics/IoStatus/RemoteState', c.return_in_list, c.read_bool(register=1013, bit=5)),
+        Signal('/Diagnostics/IoStatus/RiscOn', c.return_in_list, c.read_bool(register=1013, bit=6)),
+    ]
+
+def init_modbus(tty):
+    # type: (str) -> Modbus
+    logging.debug('initializing Modbus')
+    return Modbus(
+        port='/dev/' + tty,
+        method=cfg.MODE,
+        baudrate=cfg.BAUD_RATE,
+        stopbits=cfg.STOP_BITS,
+        bytesize=cfg.BYTE_SIZE,
+        timeout=cfg.TIMEOUT,
+        parity=cfg.PARITY)
+
+def init_dbus(tty, signals):
+    # type: (str, Iterable[Signal]) -> DBus
+    logging.debug('initializing DBus service')
+    dbus = DBus(servicename=cfg.SERVICE_NAME_PREFIX + tty)
+    logging.debug('initializing DBus paths')
+    for signal in signals:
+        init_dbus_path(dbus, signal)
+    return dbus
+
+# noinspection PyBroadException
+def try_get_value(sig):
+    # type: (Signal) -> object
+    # calling the getter with no status only works for constant signals;
+    # it is used to seed the DBus paths with start values
+    try:
+        return sig.get_value(None)
+    except:
+        return None
+
+def init_dbus_path(dbus, sig):
+    # type: (DBus, Signal) -> ()
+    dbus.add_path(
+        sig.dbus_path,
+        try_get_value(sig),
+        gettextcallback=lambda _, v: sig.get_text(v))
+
+def init_main_loop():
+    # type: () -> GLib.MainLoop
+    logging.debug('initializing DBusGMainLoop Loop')
+    DBusGMainLoop(set_as_default=True)
+    return GLib.MainLoop()
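+
+# Wiring sketch (mirrors main() below; all names are functions defined in
+# this file):
+#   main_loop = init_main_loop()    # before init_dbus, see note in main()
+#   dbus = init_dbus(tty, signals)
+#   GLib.timeout_add(cfg.UPDATE_INTERVAL, update_task)
+#   main_loop.run()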
+def report_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> bytes
+    slave = str(slave_address)
+    logging.debug('requesting slave id from node ' + slave)
+    try:
+        modbus.connect()
+        request = ReportSlaveIdRequest(unit=slave_address)
+        response = modbus.execute(request)
+        if isinstance(response, ExceptionResponse) or isinstance(response, ModbusException):
+            raise Exception('failed to get slave id from ' + slave + ' : ' + str(response))
+        return response.identifier
+    finally:
+        modbus.close()
+
+def identify_battery(modbus, slave_address):
+    # type: (Modbus, int) -> Battery
+    logging.info('identifying battery...')
+    hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address)
+    firmware_version = read_firmware_version(modbus, slave_address)
+    specs = Battery(
+        slave_address=slave_address,
+        hardware_version=hardware_version,
+        firmware_version=firmware_version,
+        bms_version=bms_version,
+        ampere_hours=ampere_hours)
+    logging.info('battery identified:\n{0}'.format(str(specs)))
+    return specs
+
+def identify_batteries(modbus):
+    # type: (Modbus) -> list[Battery]
+    def _identify_batteries():
+        address_range = range(1, cfg.MAX_SLAVE_ADDRESS + 1)
+        for slave_address in address_range:
+            try:
+                yield identify_battery(modbus, slave_address)
+            except Exception as e:
+                logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e)))
+    return list(_identify_batteries())  # force that lazy iterable!
+
+def parse_slave_id(modbus, slave_address):
+    # type: (Modbus, int) -> (str, str, int)
+    slave_id = report_slave_id(modbus, slave_address)
+    sid = re.sub(b'[^\x20-\x7E]', b'', slave_id)  # remove non-printable chars
+    match = re.match(r'(?P<hw>48TL(?P<ah>\d+)) *(?P<bms>.*)', sid.decode('ascii'))
+    if match is None:
+        raise Exception('no known battery found')
+    return match.group('hw'), match.group('bms'), int(match.group('ah'))
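+
+# Illustrative parse (assumption: the battery reports an id string such as
+# b'48TL200 1.10'; the exact format comes from the battery firmware):
+#   re.match(r'(?P<hw>48TL(?P<ah>\d+)) *(?P<bms>.*)', '48TL200 1.10')
+#   -> hw='48TL200', ah='200', bms='1.10'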
+
+def read_firmware_version(modbus, slave_address):
+    # type: (Modbus, int) -> str
+    logging.debug('reading firmware version')
+    try:
+        modbus.connect()
+        response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
+        register = response.registers[0]
+        return '{0:0>4X}'.format(register)
+    finally:
+        modbus.close()  # close in any case
+
+def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS):
+    # type: (Modbus, int, int, int) -> ReadInputRegistersResponse
+    logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))
+    return modbus.read_input_registers(
+        address=base_address,
+        count=count,
+        unit=slave_address)
+
+def read_battery_status(modbus, battery):
+    # type: (Modbus, Battery) -> BatteryStatus
+    """
+    Read the modbus registers containing the battery's status info.
+    """
+    logging.debug('reading battery status')
+    try:
+        modbus.connect()
+        data = read_modbus_registers(modbus, battery.slave_address)
+        return BatteryStatus(battery, data.registers)
+    finally:
+        modbus.close()  # close in any case
+
+def publish_values(dbus, signals, statuses):
+    # type: (DBus, Iterable[Signal], Iterable[BatteryStatus]) -> ()
+    for s in signals:
+        values = [s.get_value(status) for status in statuses]
+        with dbus as srv:
+            srv[s.dbus_path] = s.aggregate(values)
+
+previous_warnings = {}
+previous_alarms = {}
+
+class MessageType:
+    ALARM_OR_WARNING = "AlarmOrWarning"
+    HEARTBEAT = "Heartbeat"
+
+class AlarmOrWarning:
+    def __init__(self, description, created_by):
+        self.date = datetime.now().strftime('%Y-%m-%d')
+        self.time = datetime.now().strftime('%H:%M:%S')
+        self.description = description
+        self.created_by = created_by
+
+    def to_dict(self):
+        return {
+            "Date": self.date,
+            "Time": self.time,
+            "Description": self.description,
+            "CreatedBy": self.created_by
+        }
+
+def SubscribeToQueue():
+    # returns None when the connection could not be established; callers
+    # publish unconditionally, so a failed connection surfaces on first use
+    channel = None
+    try:
+        connection = pika.BlockingConnection(pika.ConnectionParameters(host="10.2.0.11",
+                                                                       port=5672,
+                                                                       virtual_host="/",
+                                                                       credentials=pika.PlainCredentials("producer", "b187ceaddb54d5485063ddc1d41af66f")))
+        channel = connection.channel()
+        channel.queue_declare(queue="statusQueue", durable=True)
+        print("Subscribed to queue")
+    except Exception as ex:
+        print("An error occurred while connecting to the RabbitMQ queue:", ex)
+    return channel
+
+is_first_update = True
+first_subscribe = False
+prev_status = 0
+subscribed_to_queue_first_time = False
+channel = SubscribeToQueue()
+heartbeat_interval = 0
+# Create an S3config instance
+s3_config = S3config()
+INSTALLATION_ID = int(s3_config.bucket.split('-')[0])
+PRODUCT_ID = 1
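+
+# Shape of the status message published to "statusQueue" (example values only;
+# the fields are filled in by update_state_from_dictionaries below):
+#   {"InstallationId": 1, "Product": 1, "Status": 2, "Type": 0,
+#    "Warnings": [],
+#    "Alarms": [{"Date": "2024-01-01", "Time": "12:00:00",
+#                "Description": "...", "CreatedBy": "System"}]}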
+def update_state_from_dictionaries(current_warnings, current_alarms):
+    global previous_warnings, previous_alarms, INSTALLATION_ID, PRODUCT_ID, is_first_update, first_subscribe, channel, prev_status, heartbeat_interval, subscribed_to_queue_first_time
+
+    heartbeat_interval += 1
+
+    if is_first_update:
+        changed_warnings = current_warnings
+        changed_alarms = current_alarms
+        is_first_update = False
+    else:
+        changed_alarms = {}
+        changed_warnings = {}
+        # mark each warning/alarm as changed or unchanged w.r.t. the previous cycle
+        prev_alarm_value_list = list(previous_alarms.values())
+        alarm_keys = list(previous_alarms.keys())
+
+        for i, alarm in enumerate(current_alarms.values()):
+            changed_alarms[alarm_keys[i]] = (alarm != prev_alarm_value_list[i])
+
+        prev_warning_value_list = list(previous_warnings.values())
+        warning_keys = list(previous_warnings.keys())
+
+        for i, warning in enumerate(current_warnings.values()):
+            changed_warnings[warning_keys[i]] = (warning != prev_warning_value_list[i])
+
+    status_message = {
+        "InstallationId": INSTALLATION_ID,
+        "Product": PRODUCT_ID,
+        "Status": 0,
+        "Type": 1,
+        "Warnings": [],
+        "Alarms": []
+    }
+
+    # report every alarm/warning that flipped to active since the last cycle
+    if any(changed_alarms.values()):
+        for i, changed_alarm in enumerate(changed_alarms.values()):
+            if changed_alarm and list(current_alarms.values())[i]:
+                status_message["Alarms"].append(AlarmOrWarning(list(current_alarms.keys())[i], "System").to_dict())
+
+    if any(changed_warnings.values()):
+        for i, changed_warning in enumerate(changed_warnings.values()):
+            if changed_warning and list(current_warnings.values())[i]:
+                status_message["Warnings"].append(AlarmOrWarning(list(current_warnings.keys())[i], "System").to_dict())
+
+    # overall status: 2 = alarm, 1 = warning, 0 = ok
+    if any(current_alarms.values()):
+        status_message["Status"] = 2
+    elif any(current_warnings.values()):
+        status_message["Status"] = 1
+    else:
+        status_message["Status"] = 0
+
+    if status_message["Status"] != prev_status or len(status_message["Warnings"]) > 0 or len(status_message["Alarms"]) > 0:
+        prev_status = status_message["Status"]
+        status_message["Type"] = 0
+        status_message = json.dumps(status_message)
+        channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
+        print(status_message)
+        print("Message sent successfully")
+    elif heartbeat_interval >= 15 or not subscribed_to_queue_first_time:
+        print("Send heartbeat message to rabbitmq")
+        heartbeat_interval = 0
+        subscribed_to_queue_first_time = True
+        status_message = json.dumps(status_message)
+        channel.basic_publish(exchange="", routing_key="statusQueue", body=status_message)
+
+    previous_warnings = current_warnings.copy()
+    previous_alarms = current_alarms.copy()
+
+    return status_message
+
+def read_warning_and_alarm_flags():
+    return [
+        # Warnings
+        CsvSignal('/Battery/Devices/WarningFlags/TaM1', c.read_bool(register=1005, bit=1)),
+        CsvSignal('/Battery/Devices/WarningFlags/TbM1', c.read_bool(register=1005, bit=4)),
+        CsvSignal('/Battery/Devices/WarningFlags/VBm1', c.read_bool(register=1005, bit=6)),
+        CsvSignal('/Battery/Devices/WarningFlags/VBM1', c.read_bool(register=1005, bit=8)),
+        CsvSignal('/Battery/Devices/WarningFlags/IDM1', c.read_bool(register=1005, bit=10)),
+        CsvSignal('/Battery/Devices/WarningFlags/vsm1', c.read_bool(register=1005, bit=22)),
+        CsvSignal('/Battery/Devices/WarningFlags/vsM1', c.read_bool(register=1005, bit=24)),
+        CsvSignal('/Battery/Devices/WarningFlags/iCM1', c.read_bool(register=1005, bit=26)),
+        CsvSignal('/Battery/Devices/WarningFlags/iDM1', c.read_bool(register=1005, bit=28)),
+        CsvSignal('/Battery/Devices/WarningFlags/MID1', c.read_bool(register=1005, bit=30)),
+        CsvSignal('/Battery/Devices/WarningFlags/BLPW', c.read_bool(register=1005, bit=32)),
+        CsvSignal('/Battery/Devices/WarningFlags/CCBF', c.read_bool(register=1005, bit=33)),
+        CsvSignal('/Battery/Devices/WarningFlags/Ah_W', c.read_bool(register=1005, bit=35)),
+        CsvSignal('/Battery/Devices/WarningFlags/MPMM', c.read_bool(register=1005, bit=38)),
+        CsvSignal('/Battery/Devices/WarningFlags/TCdi', c.read_bool(register=1005, bit=40)),
+        CsvSignal('/Battery/Devices/WarningFlags/LMPW', c.read_bool(register=1005, bit=44)),
+        CsvSignal('/Battery/Devices/WarningFlags/TOCW', c.read_bool(register=1005, bit=47)),
+        CsvSignal('/Battery/Devices/WarningFlags/BUSL', c.read_bool(register=1005, bit=49)),
+    ], [
+        # Alarms
+        CsvSignal('/Battery/Devices/AlarmFlags/Tam', c.read_bool(register=1005, bit=0)),
+        CsvSignal('/Battery/Devices/AlarmFlags/TaM2', c.read_bool(register=1005, bit=2)),
+        CsvSignal('/Battery/Devices/AlarmFlags/Tbm', c.read_bool(register=1005, bit=3)),
+        CsvSignal('/Battery/Devices/AlarmFlags/TbM2', c.read_bool(register=1005, bit=5)),
+        CsvSignal('/Battery/Devices/AlarmFlags/VBm2', c.read_bool(register=1005, bit=7)),
+        CsvSignal('/Battery/Devices/AlarmFlags/VBM2', c.read_bool(register=1005, bit=9)),
+        CsvSignal('/Battery/Devices/AlarmFlags/IDM2', c.read_bool(register=1005, bit=11)),
+        CsvSignal('/Battery/Devices/AlarmFlags/ISOB', c.read_bool(register=1005, bit=12)),
+        CsvSignal('/Battery/Devices/AlarmFlags/MSWE', c.read_bool(register=1005, bit=13)),
+        CsvSignal('/Battery/Devices/AlarmFlags/FUSE', c.read_bool(register=1005, bit=14)),
+ CsvSignal('/Battery/Devices/AlarmFlags/HTRE', c.read_bool(register=1005, bit=15)), + CsvSignal('/Battery/Devices/AlarmFlags/TCPE', c.read_bool(register=1005, bit=16)), + CsvSignal('/Battery/Devices/AlarmFlags/STRE', c.read_bool(register=1005, bit=17)), + CsvSignal('/Battery/Devices/AlarmFlags/CME', c.read_bool(register=1005, bit=18)), + CsvSignal('/Battery/Devices/AlarmFlags/HWFL', c.read_bool(register=1005, bit=19)), + CsvSignal('/Battery/Devices/AlarmFlags/HWEM', c.read_bool(register=1005, bit=20)), + CsvSignal('/Battery/Devices/AlarmFlags/ThM', c.read_bool(register=1005, bit=21)), + CsvSignal('/Battery/Devices/AlarmFlags/vsm2', c.read_bool(register=1005, bit=23)), + CsvSignal('/Battery/Devices/AlarmFlags/vsM2', c.read_bool(register=1005, bit=25)), + CsvSignal('/Battery/Devices/AlarmFlags/iCM2', c.read_bool(register=1005, bit=27)), + CsvSignal('/Battery/Devices/AlarmFlags/iDM2', c.read_bool(register=1005, bit=29)), + CsvSignal('/Battery/Devices/AlarmFlags/MID2', c.read_bool(register=1005, bit=31)), + CsvSignal('/Battery/Devices/AlarmFlags/HTFS', c.read_bool(register=1005, bit=42)), + CsvSignal('/Battery/Devices/AlarmFlags/DATA', c.read_bool(register=1005, bit=43)), + CsvSignal('/Battery/Devices/AlarmFlags/LMPA', c.read_bool(register=1005, bit=45)), + CsvSignal('/Battery/Devices/AlarmFlags/HEBT', c.read_bool(register=1005, bit=46)), + CsvSignal('/Battery/Devices/AlarmFlags/CURM', c.read_bool(register=1005, bit=48)), + ] + +import random + +def update_for_testing(modbus, batteries, dbus, signals, csv_signals): + global ALLOW + logging.debug('starting testing update cycle') + warning_signals, alarm_signals = read_warning_and_alarm_flags() + current_warnings = {} + current_alarms = {} + statuses = [read_battery_status(modbus, battery) for battery in batteries] + node_numbers = [battery.slave_address for battery in batteries] + if ALLOW: + any_warning_active = False + any_alarm_active = False + for i, node in enumerate(node_numbers): + for s in warning_signals: + signal_name = insert_id(s.name, i+1) + value = s.get_value(statuses[i]) + current_warnings[signal_name] = value + if ALLOW and value: + any_warning_active = True + for s in alarm_signals: + signal_name = insert_id(s.name, i+1) + value = random.choice([True, False]) + current_alarms[signal_name] = value + if ALLOW and value: + any_alarm_active = True + print(update_state_from_dictionaries(current_warnings, current_alarms)) + publish_values(dbus, signals, statuses) + create_csv_files(csv_signals, statuses, node_numbers) + logging.debug('finished update cycle\n') + return True + +start_time = time.time() + +def update(modbus, batteries, dbus, signals, csv_signals): + global start_time + # type: (Modbus, Iterable[Battery], DBus, Iterable[Signal]) -> bool + """ + Main update function + + 1. requests status record each battery via modbus, + 2. parses the data using Signal.get_value + 3. aggregates the data from all batteries into one datum using Signal.aggregate + 4. 
publishes the data on the dbus
+    """
+    logging.debug('starting update cycle')
+    warnings_signals, alarm_signals = read_warning_and_alarm_flags()
+    current_warnings = {}
+    current_alarms = {}
+    statuses = [read_battery_status(modbus, battery) for battery in batteries]
+    node_numbers = [battery.slave_address for battery in batteries]
+    # Iterate over each node and signal to create rows in the new format
+    for i, node in enumerate(node_numbers):
+        for s in warnings_signals:
+            signal_name = insert_id(s.name, i+1)
+            value = s.get_value(statuses[i])
+            current_warnings[signal_name] = value
+        for s in alarm_signals:
+            signal_name = insert_id(s.name, i+1)
+            value = s.get_value(statuses[i])
+            current_alarms[signal_name] = value
+    print(update_state_from_dictionaries(current_warnings, current_alarms))
+    publish_values(dbus, signals, statuses)
+    elapsed_time = time.time() - start_time
+    # write a CSV snapshot at most once every 30 seconds
+    if elapsed_time >= 30:
+        create_csv_files(csv_signals, statuses, node_numbers)
+        start_time = time.time()
+        print("Elapsed time {:.2f} seconds".format(elapsed_time))
+    logging.debug('finished update cycle\n')
+    return True
+
+def print_usage():
+    print('Usage: ' + __file__ + ' <serial device>')
+    print('Example: ' + __file__ + ' ttyUSB0')
+
+def parse_cmdline_args(argv):
+    # type: (list[str]) -> str
+    if len(argv) == 0:
+        logging.info('missing command line argument for tty device')
+        print_usage()
+        sys.exit(1)
+    return argv[0]
+
+alive = True  # global alive flag, watchdog_task clears it, update_task sets it
+ALLOW = False
+
+def create_update_task(modbus, dbus, batteries, signals, csv_signals, main_loop):
+    # type: (Modbus, DBus, Iterable[Battery], Iterable[Signal], Iterable[CsvSignal], DBusGMainLoop) -> Callable[[],bool]
+    """
+    Creates an update task which runs the main update function
+    and resets the alive flag
+    """
+    def update_task():
+        # type: () -> bool
+        global alive, ALLOW
+        # ALLOW toggles every cycle; update_for_testing uses it to inject test values
+        if ALLOW:
+            ALLOW = False
+        else:
+            ALLOW = True
+        alive = update(modbus, batteries, dbus, signals, csv_signals)
+        #alive = update_for_testing(modbus, batteries, dbus, signals, csv_signals)
+        if not alive:
+            logging.info('update_task: quitting main loop because of error')
+            main_loop.quit()
+        return alive
+    return update_task
+
+def create_watchdog_task(main_loop):
+    # type: (DBusGMainLoop) -> Callable[[],bool]
+    """
+    Creates a Watchdog task that monitors the alive flag.
+    The watchdog kills the main loop if the alive flag is not periodically reset by the update task.
+    Who watches the watchdog?
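+    (GLib itself deschedules any timeout callback that returns False, so a
+    dead watchdog simply stops being called.)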
+    """
+    def watchdog_task():
+        # type: () -> bool
+        global alive
+        if alive:
+            logging.debug('watchdog_task: update_task is alive')
+            alive = False
+            return True
+        else:
+            logging.info('watchdog_task: killing main loop because update_task is no longer alive')
+            main_loop.quit()
+            return False
+    return watchdog_task
+
+def get_installation_name(file_path):
+    with open(file_path, 'r') as file:
+        return file.read().strip()
+
+def manage_csv_files(directory_path, max_files=20):
+    csv_files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
+    csv_files.sort(key=lambda x: os.path.getctime(os.path.join(directory_path, x)))
+    # Remove the oldest files if the count exceeds the maximum
+    while len(csv_files) > max_files:
+        file_to_delete = os.path.join(directory_path, csv_files.pop(0))
+        os.remove(file_to_delete)
+
+def serialize_for_csv(value):
+    if isinstance(value, (dict, list, tuple)):
+        return json.dumps(value, ensure_ascii=False)
+    return str(value)
+
+def insert_id(path, id_number):
+    parts = path.split("/")
+    insert_position = parts.index("Devices") + 1
+    parts.insert(insert_position, str(id_number))
+    return "/".join(parts)
+
+def create_csv_files(signals, statuses, node_numbers):
+    global s3_config
+    timestamp = int(time.time())
+    if timestamp % 2 != 0:
+        timestamp -= 1
+    # Create the CSV directory if it doesn't exist
+    if not os.path.exists(CSV_DIR):
+        os.makedirs(CSV_DIR)
+    csv_filename = f"{timestamp}.csv"
+    csv_path = os.path.join(CSV_DIR, csv_filename)
+    # Append values to the CSV file
+    with open(csv_path, 'a', newline='') as csvfile:
+        csv_writer = csv.writer(csvfile, delimiter=';')
+        # Add a special row for the nodes configuration
+        nodes_config_path = "/Config/Devices/BatteryNodes"
+        nodes_list = ",".join(str(node) for node in node_numbers)
+        config_row = [nodes_config_path, nodes_list, ""]
+        csv_writer.writerow(config_row)
+        # Iterate over each node and signal to create rows in the new format
+        for i, node in enumerate(node_numbers):
+            for s in signals:
+                signal_name = insert_id(s.name, i+1)
+                value = s.get_value(statuses[i])
+                row_values = [signal_name, value, s.get_text]
+                csv_writer.writerow(row_values)
+    # Read the CSV back as a single string
+    csv_data = read_csv_as_string(csv_path)
+
+    if csv_data is None:
+        print("error while reading csv as string")
+        return
+
+    # zip-comp additions: upload the zipped, base64-encoded CSV
+    compressed_csv = compress_csv_data(csv_data)
+    compressed_filename = f"{timestamp}.csv"
+
+    response = s3_config.create_put_request(compressed_filename, compressed_csv)
+    if response.status_code == 200:
+        os.remove(csv_path)
+        print("Success")
+    else:
+        failed_dir = os.path.join(CSV_DIR, "failed")
+        if not os.path.exists(failed_dir):
+            os.makedirs(failed_dir)
+        failed_path = os.path.join(failed_dir, csv_filename)
+        os.rename(csv_path, failed_path)
+        print("Uploading failed")
+        manage_csv_files(failed_dir, 10)
+    # Manage CSV files, keep a limited number of files
+    manage_csv_files(CSV_DIR)
+
+def main(argv):
+    # type: (list[str]) -> ()
+    logging.basicConfig(level=cfg.LOG_LEVEL)
+    logging.info('starting ' + __file__)
+    tty = parse_cmdline_args(argv)
+    modbus = init_modbus(tty)
+    batteries = identify_batteries(modbus)
+    n = len(batteries)
+    logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries'))
+    if n <= 0:
+        sys.exit(2)
+    bat = c.first(batteries)  # report hw and fw version of first battery found
+    signals = init_signals(bat.hardware_version, bat.firmware_version, n)
+    csv_signals = create_csv_signals(bat.firmware_version)
+    main_loop = init_main_loop()  # must
run before init_dbus because gobject does some global magic + dbus = init_dbus(tty, signals) + update_task = create_update_task(modbus, dbus, batteries, signals, csv_signals, main_loop) + watchdog_task = create_watchdog_task(main_loop) + GLib.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task) # add watchdog first + GLib.timeout_add(cfg.UPDATE_INTERVAL, update_task) # call update once every update_interval + logging.info('starting GLib.MainLoop') + main_loop.run() + logging.info('GLib.MainLoop was shut down') + sys.exit(0xFF) # reaches this only on error + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/python/dbus-fzsonick-48tl-nofork/ext/velib_python/ve_utils.py b/python/dbus-fzsonick-48tl-nofork/ext/velib_python/ve_utils.py new file mode 100644 index 000000000..f5a2f85a0 --- /dev/null +++ b/python/dbus-fzsonick-48tl-nofork/ext/velib_python/ve_utils.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +import sys +from traceback import print_exc +from os import _exit as os_exit +from os import statvfs +from subprocess import check_output, CalledProcessError +import logging +import dbus +logger = logging.getLogger(__name__) + +VEDBUS_INVALID = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) + +class NoVrmPortalIdError(Exception): + pass + +# Use this function to make sure the code quits on an unexpected exception. Make sure to use it +# when using GLib.idle_add and also GLib.timeout_add. +# Without this, the code will just keep running, since GLib does not stop the mainloop on an +# exception. +# Example: GLib.idle_add(exit_on_error, myfunc, arg1, arg2) +def exit_on_error(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except: + try: + print ('exit_on_error: there was an exception. Printing stacktrace will be tried and then exit') + print_exc() + except: + pass + + # sys.exit() is not used, since that throws an exception, which does not lead to a program + # halt when used in a dbus callback, see connection.py in the Python/Dbus libraries, line 230. + os_exit(1) + + +__vrm_portal_id = None +def get_vrm_portal_id(): + # The original definition of the VRM Portal ID is that it is the mac + # address of the onboard- ethernet port (eth0), stripped from its colons + # (:) and lower case. This may however differ between platforms. On Venus + # the task is therefore deferred to /sbin/get-unique-id so that a + # platform specific method can be easily defined. + # + # If /sbin/get-unique-id does not exist, then use the ethernet address + # of eth0. This also handles the case where velib_python is used as a + # package install on a Raspberry Pi. + # + # On a Linux host where the network interface may not be eth0, you can set + # the VRM_IFACE environment variable to the correct name. + + global __vrm_portal_id + + if __vrm_portal_id: + return __vrm_portal_id + + portal_id = None + + # First try the method that works if we don't have a data partition. This + # will fail when the current user is not root. + try: + portal_id = check_output("/sbin/get-unique-id").decode("utf-8", "ignore").strip() + if not portal_id: + raise NoVrmPortalIdError("get-unique-id returned blank") + __vrm_portal_id = portal_id + return portal_id + except CalledProcessError: + # get-unique-id returned non-zero + raise NoVrmPortalIdError("get-unique-id returned non-zero") + except OSError: + # File doesn't exist, use fallback + pass + + # Fall back to getting our id using a syscall. Assume we are on linux. 
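+    # (The ioctl used below, 0x8927, is SIOCGIFHWADDR: it returns the
+    # hardware (MAC) address of the given interface.)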
+ # Allow the user to override what interface is used using an environment + # variable. + import fcntl, socket, struct, os + + iface = os.environ.get('VRM_IFACE', 'eth0').encode('ascii') + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', iface[:15])) + except IOError: + raise NoVrmPortalIdError("ioctl failed for eth0") + + __vrm_portal_id = info[18:24].hex() + return __vrm_portal_id + + +# See VE.Can registers - public.docx for definition of this conversion +def convert_vreg_version_to_readable(version): + def str_to_arr(x, length): + a = [] + for i in range(0, len(x), length): + a.append(x[i:i+length]) + return a + + x = "%x" % version + x = x.upper() + + if len(x) == 5 or len(x) == 3 or len(x) == 1: + x = '0' + x + + a = str_to_arr(x, 2); + + # remove the first 00 if there are three bytes and it is 00 + if len(a) == 3 and a[0] == '00': + a.remove(0); + + # if we have two or three bytes now, and the first character is a 0, remove it + if len(a) >= 2 and a[0][0:1] == '0': + a[0] = a[0][1]; + + result = '' + for item in a: + result += ('.' if result != '' else '') + item + + + result = 'v' + result + + return result + + +def get_free_space(path): + result = -1 + + try: + s = statvfs(path) + result = s.f_frsize * s.f_bavail # Number of free bytes that ordinary users + except Exception as ex: + logger.info("Error while retrieving free space for path %s: %s" % (path, ex)) + + return result + + +def _get_sysfs_machine_name(): + try: + with open('/sys/firmware/devicetree/base/model', 'r') as f: + return f.read().rstrip('\x00') + except IOError: + pass + + return None + +# Returns None if it cannot find a machine name. Otherwise returns the string +# containing the name +def get_machine_name(): + # First try calling the venus utility script + try: + return check_output("/usr/bin/product-name").strip().decode('UTF-8') + except (CalledProcessError, OSError): + pass + + # Fall back to sysfs + name = _get_sysfs_machine_name() + if name is not None: + return name + + # Fall back to venus build machine name + try: + with open('/etc/venus/machine', 'r', encoding='UTF-8') as f: + return f.read().strip() + except IOError: + pass + + return None + + +def get_product_id(): + """ Find the machine ID and return it. """ + + # First try calling the venus utility script + try: + return check_output("/usr/bin/product-id").strip().decode('UTF-8') + except (CalledProcessError, OSError): + pass + + # Fall back machine name mechanism + name = _get_sysfs_machine_name() + return { + 'Color Control GX': 'C001', + 'Venus GX': 'C002', + 'Octo GX': 'C006', + 'EasySolar-II': 'C007', + 'MultiPlus-II': 'C008', + 'Maxi GX': 'C009', + 'Cerbo GX': 'C00A' + }.get(name, 'C003') # C003 is Generic + + +# Returns False if it cannot open the file. 
Otherwise returns its rstripped contents +def read_file(path): + content = False + + try: + with open(path, 'r') as f: + content = f.read().rstrip() + except Exception as ex: + logger.debug("Error while reading %s: %s" % (path, ex)) + + return content + + +def wrap_dbus_value(value): + if value is None: + return VEDBUS_INVALID + if isinstance(value, float): + return dbus.Double(value, variant_level=1) + if isinstance(value, bool): + return dbus.Boolean(value, variant_level=1) + if isinstance(value, int): + try: + return dbus.Int32(value, variant_level=1) + except OverflowError: + return dbus.Int64(value, variant_level=1) + if isinstance(value, str): + return dbus.String(value, variant_level=1) + if isinstance(value, list): + if len(value) == 0: + # If the list is empty we cannot infer the type of the contents. So assume unsigned integer. + # A (signed) integer is dangerous, because an empty list of signed integers is used to encode + # an invalid value. + return dbus.Array([], signature=dbus.Signature('u'), variant_level=1) + return dbus.Array([wrap_dbus_value(x) for x in value], variant_level=1) + if isinstance(value, dict): + # Wrapping the keys of the dictionary causes D-Bus errors like: + # 'arguments to dbus_message_iter_open_container() were incorrect, + # assertion "(type == DBUS_TYPE_ARRAY && contained_signature && + # *contained_signature == DBUS_DICT_ENTRY_BEGIN_CHAR) || (contained_signature == NULL || + # _dbus_check_is_valid_signature (contained_signature))" failed in file ...' + return dbus.Dictionary({(k, wrap_dbus_value(v)) for k, v in value.items()}, variant_level=1) + return value + + +dbus_int_types = (dbus.Int32, dbus.UInt32, dbus.Byte, dbus.Int16, dbus.UInt16, dbus.UInt32, dbus.Int64, dbus.UInt64) + + +def unwrap_dbus_value(val): + """Converts D-Bus values back to the original type. For example if val is of type DBus.Double, + a float will be returned.""" + if isinstance(val, dbus_int_types): + return int(val) + if isinstance(val, dbus.Double): + return float(val) + if isinstance(val, dbus.Array): + v = [unwrap_dbus_value(x) for x in val] + return None if len(v) == 0 else v + if isinstance(val, (dbus.Signature, dbus.String)): + return str(val) + # Python has no byte type, so we convert to an integer. + if isinstance(val, dbus.Byte): + return int(val) + if isinstance(val, dbus.ByteArray): + return "".join([bytes(x) for x in val]) + if isinstance(val, (list, tuple)): + return [unwrap_dbus_value(x) for x in val] + if isinstance(val, (dbus.Dictionary, dict)): + # Do not unwrap the keys, see comment in wrap_dbus_value + return dict([(x, unwrap_dbus_value(y)) for x, y in val.items()]) + if isinstance(val, dbus.Boolean): + return bool(val) + return val + +# When supported, only name owner changes for the the given namespace are reported. This +# prevents spending cpu time at irrelevant changes, like scripts accessing the bus temporarily. +def add_name_owner_changed_receiver(dbus, name_owner_changed, namespace="com.victronenergy"): + # support for arg0namespace is submitted upstream, but not included at the time of + # writing, Venus OS does support it, so try if it works. 
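+    # Usage sketch (assumption: 'callback' matches the NameOwnerChanged
+    # signature (name, old_owner, new_owner)):
+    #   add_name_owner_changed_receiver(bus, callback)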
+ if namespace is None: + dbus.add_signal_receiver(name_owner_changed, signal_name='NameOwnerChanged') + else: + try: + dbus.add_signal_receiver(name_owner_changed, + signal_name='NameOwnerChanged', arg0namespace=namespace) + except TypeError: + dbus.add_signal_receiver(name_owner_changed, signal_name='NameOwnerChanged') diff --git a/python/dbus-fzsonick-48tl-nofork/ext/velib_python/vedbus.py b/python/dbus-fzsonick-48tl-nofork/ext/velib_python/vedbus.py new file mode 100644 index 000000000..6171a2101 --- /dev/null +++ b/python/dbus-fzsonick-48tl-nofork/ext/velib_python/vedbus.py @@ -0,0 +1,614 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import dbus.service +import logging +import traceback +import os +import weakref +from collections import defaultdict +from ve_utils import wrap_dbus_value, unwrap_dbus_value + +# vedbus contains three classes: +# VeDbusItemImport -> use this to read data from the dbus, ie import +# VeDbusItemExport -> use this to export data to the dbus (one value) +# VeDbusService -> use that to create a service and export several values to the dbus + +# Code for VeDbusItemImport is copied from busitem.py and thereafter modified. +# All projects that used busitem.py need to migrate to this package. And some +# projects used to define there own equivalent of VeDbusItemExport. Better to +# use VeDbusItemExport, or even better the VeDbusService class that does it all for you. + +# TODOS +# 1 check for datatypes, it works now, but not sure if all is compliant with +# com.victronenergy.BusItem interface definition. See also the files in +# tests_and_examples. And see 'if type(v) == dbus.Byte:' on line 102. Perhaps +# something similar should also be done in VeDbusBusItemExport? +# 2 Shouldn't VeDbusBusItemExport inherit dbus.service.Object? +# 7 Make hard rules for services exporting data to the D-Bus, in order to make tracking +# changes possible. Does everybody first invalidate its data before leaving the bus? +# And what about before taking one object away from the bus, instead of taking the +# whole service offline? +# They should! And after taking one value away, do we need to know that someone left +# the bus? Or we just keep that value in invalidated for ever? Result is that we can't +# see the difference anymore between an invalidated value and a value that was first on +# the bus and later not anymore. See comments above VeDbusItemImport as well. +# 9 there are probably more todos in the code below. + +# Some thoughts with regards to the data types: +# +# Text from: http://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html#data-types +# --- +# Variants are represented by setting the variant_level keyword argument in the +# constructor of any D-Bus data type to a value greater than 0 (variant_level 1 +# means a variant containing some other data type, variant_level 2 means a variant +# containing a variant containing some other data type, and so on). If a non-variant +# is passed as an argument but introspection indicates that a variant is expected, +# it'll automatically be wrapped in a variant. +# --- +# +# Also the different dbus datatypes, such as dbus.Int32, and dbus.UInt32 are a subclass +# of Python int. dbus.String is a subclass of Python standard class unicode, etcetera +# +# So all together that explains why we don't need to explicitly convert back and forth +# between the dbus datatypes and the standard python datatypes. Note that all datatypes +# in python are objects. Even an int is an object. + +# The signature of a variant is 'v'. 
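+# For example, wrap_dbus_value(12.0) in ve_utils yields
+# dbus.Double(12.0, variant_level=1), which is marshalled as a variant ('v')
+# containing a double ('d').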
+ +# Export ourselves as a D-Bus service. +class VeDbusService(object): + def __init__(self, servicename, bus=None): + # dict containing the VeDbusItemExport objects, with their path as the key. + self._dbusobjects = {} + self._dbusnodes = {} + self._ratelimiters = [] + self._dbusname = None + + # dict containing the onchange callbacks, for each object. Object path is the key + self._onchangecallbacks = {} + + # Connect to session bus whenever present, else use the system bus + self._dbusconn = bus or (dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus()) + + # make the dbus connection available to outside, could make this a true property instead, but ach.. + self.dbusconn = self._dbusconn + + # Register ourselves on the dbus, trigger an error if already in use (do_not_queue) + self._dbusname = dbus.service.BusName(servicename, self._dbusconn, do_not_queue=True) + + # Add the root item that will return all items as a tree + self._dbusnodes['/'] = VeDbusRootExport(self._dbusconn, '/', self) + + logging.info("registered ourselves on D-Bus as %s" % servicename) + + # To force immediate deregistering of this dbus service and all its object paths, explicitly + # call __del__(). + def __del__(self): + for node in list(self._dbusnodes.values()): + node.__del__() + self._dbusnodes.clear() + for item in list(self._dbusobjects.values()): + item.__del__() + self._dbusobjects.clear() + if self._dbusname: + self._dbusname.__del__() # Forces call to self._bus.release_name(self._name), see source code + self._dbusname = None + + def get_name(self): + return self._dbusname.get_name() + + # @param callbackonchange function that will be called when this value is changed. First parameter will + # be the path of the object, second the new value. This callback should return + # True to accept the change, False to reject it. + def add_path(self, path, value, description="", writeable=False, + onchangecallback=None, gettextcallback=None, valuetype=None, itemtype=None): + + if onchangecallback is not None: + self._onchangecallbacks[path] = onchangecallback + + itemtype = itemtype or VeDbusItemExport + item = itemtype(self._dbusconn, path, value, description, writeable, + self._value_changed, gettextcallback, deletecallback=self._item_deleted, valuetype=valuetype) + + spl = path.split('/') + for i in range(2, len(spl)): + subPath = '/'.join(spl[:i]) + if subPath not in self._dbusnodes and subPath not in self._dbusobjects: + self._dbusnodes[subPath] = VeDbusTreeExport(self._dbusconn, subPath, self) + self._dbusobjects[path] = item + logging.debug('added %s with start value %s. Writeable is %s' % (path, value, writeable)) + + # Add the mandatory paths, as per victron dbus api doc + def add_mandatory_paths(self, processname, processversion, connection, + deviceinstance, productid, productname, firmwareversion, hardwareversion, connected): + self.add_path('/Mgmt/ProcessName', processname) + self.add_path('/Mgmt/ProcessVersion', processversion) + self.add_path('/Mgmt/Connection', connection) + + # Create rest of the mandatory objects + self.add_path('/DeviceInstance', deviceinstance) + self.add_path('/ProductId', productid) + self.add_path('/ProductName', productname) + self.add_path('/FirmwareVersion', firmwareversion) + self.add_path('/HardwareVersion', hardwareversion) + self.add_path('/Connected', connected) + + # Callback function that is called from the VeDbusItemExport objects when a value changes. 
This function + # maps the change-request to the onchangecallback given to us for this specific path. + def _value_changed(self, path, newvalue): + if path not in self._onchangecallbacks: + return True + + return self._onchangecallbacks[path](path, newvalue) + + def _item_deleted(self, path): + self._dbusobjects.pop(path) + for np in list(self._dbusnodes.keys()): + if np != '/': + for ip in self._dbusobjects: + if ip.startswith(np + '/'): + break + else: + self._dbusnodes[np].__del__() + self._dbusnodes.pop(np) + + def __getitem__(self, path): + return self._dbusobjects[path].local_get_value() + + def __setitem__(self, path, newvalue): + self._dbusobjects[path].local_set_value(newvalue) + + def __delitem__(self, path): + self._dbusobjects[path].__del__() # Invalidates and then removes the object path + assert path not in self._dbusobjects + + def __contains__(self, path): + return path in self._dbusobjects + + def __enter__(self): + l = ServiceContext(self) + self._ratelimiters.append(l) + return l + + def __exit__(self, *exc): + # pop off the top one and flush it. If with statements are nested + # then each exit flushes its own part. + if self._ratelimiters: + self._ratelimiters.pop().flush() + +class ServiceContext(object): + def __init__(self, parent): + self.parent = parent + self.changes = {} + + def __getitem__(self, path): + return self.parent[path] + + def __setitem__(self, path, newvalue): + c = self.parent._dbusobjects[path]._local_set_value(newvalue) + if c is not None: + self.changes[path] = c + + def flush(self): + if self.changes: + self.parent._dbusnodes['/'].ItemsChanged(self.changes) + +class TrackerDict(defaultdict): + """ Same as defaultdict, but passes the key to default_factory. """ + def __missing__(self, key): + self[key] = x = self.default_factory(key) + return x + +class VeDbusRootTracker(object): + """ This tracks the root of a dbus path and listens for PropertiesChanged + signals. When a signal arrives, parse it and unpack the key/value changes + into traditional events, then pass it to the original eventCallback + method. """ + def __init__(self, bus, serviceName): + self.importers = defaultdict(weakref.WeakSet) + self.serviceName = serviceName + self._match = bus.get_object(serviceName, '/', introspect=False).connect_to_signal( + "ItemsChanged", weak_functor(self._items_changed_handler)) + + def __del__(self): + self._match.remove() + self._match = None + + def add(self, i): + self.importers[i.path].add(i) + + def _items_changed_handler(self, items): + if not isinstance(items, dict): + return + + for path, changes in items.items(): + try: + v = changes['Value'] + except KeyError: + continue + + try: + t = changes['Text'] + except KeyError: + t = str(unwrap_dbus_value(v)) + + for i in self.importers.get(path, ()): + i._properties_changed_handler({'Value': v, 'Text': t}) + +""" +Importing basics: + - If when we power up, the D-Bus service does not exist, or it does exist and the path does not + yet exist, still subscribe to a signal: as soon as it comes online it will send a signal with its + initial value, which VeDbusItemImport will receive and use to update local cache. And, when set, + call the eventCallback. + - If when we power up, save it + - When using get_value, know that there is no difference between services (or object paths) that don't + exist and paths that are invalid (= empty array, see above). Both will return None. In case you do + really want to know ifa path exists or not, use the exists property. 
+
+class TrackerDict(defaultdict):
+	""" Same as defaultdict, but passes the key to default_factory. """
+	def __missing__(self, key):
+		self[key] = x = self.default_factory(key)
+		return x
+
+class VeDbusRootTracker(object):
+	""" This tracks the root of a dbus path and listens for PropertiesChanged
+	    signals. When a signal arrives, parse it and unpack the key/value changes
+	    into traditional events, then pass it to the original eventCallback
+	    method. """
+	def __init__(self, bus, serviceName):
+		self.importers = defaultdict(weakref.WeakSet)
+		self.serviceName = serviceName
+		self._match = bus.get_object(serviceName, '/', introspect=False).connect_to_signal(
+			"ItemsChanged", weak_functor(self._items_changed_handler))
+
+	def __del__(self):
+		self._match.remove()
+		self._match = None
+
+	def add(self, i):
+		self.importers[i.path].add(i)
+
+	def _items_changed_handler(self, items):
+		if not isinstance(items, dict):
+			return
+
+		for path, changes in items.items():
+			try:
+				v = changes['Value']
+			except KeyError:
+				continue
+
+			try:
+				t = changes['Text']
+			except KeyError:
+				t = str(unwrap_dbus_value(v))
+
+			for i in self.importers.get(path, ()):
+				i._properties_changed_handler({'Value': v, 'Text': t})
+
+"""
+Importing basics:
+	- If when we power up, the D-Bus service does not exist, or it does exist and the path does not
+	  yet exist, still subscribe to a signal: as soon as it comes online it will send a signal with its
+	  initial value, which VeDbusItemImport will receive and use to update local cache. And, when set,
+	  call the eventCallback.
+	- If when we power up, the service and the path do exist, read the current value once and store it
+	  in the local cache.
+	- When using get_value, know that there is no difference between services (or object paths) that don't
+	  exist and paths that are invalid (= empty array, see above). Both will return None. In case you do
+	  really want to know if a path exists or not, use the exists property.
+	- When a D-Bus service leaves the D-Bus, it will first invalidate all its values, and send signals
+	  with that update, and only then leave the D-Bus. (Or do we need to subscribe to the NameOwnerChanged
+	  signal? To be confirmed. Not really urgent, since all existing code that uses this class already
+	  subscribes to the NameOwnerChanged signal, and subsequently removes instances of this class.)
+
+Read this when using this class:
+Note that when a service leaves the D-Bus without invalidating all its exported objects first, for
+example because it is killed, VeDbusItemImport doesn't have a clue. So when using VeDbusItemImport,
+make sure to also subscribe to the NameOwnerChanged signal on bus-level. Or just use dbusmonitor,
+because that takes care of all of that for you.
+"""
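
A sketch of the two import patterns described above, using the VeDbusItemImport class that follows. Service name and path are again hypothetical, and the subscribed variant assumes a GLib main loop is running:

    import dbus
    from vedbus import VeDbusItemImport  # assumed module name for this file

    bus = dbus.SystemBus()

    # One-shot read: no signal subscription, the value is fetched once into the cache
    soc = VeDbusItemImport(bus, 'com.victronenergy.battery.ttyUSB0', '/Soc',
        createsignal=False).get_value()

    # Subscribed import: eventCallback fires on every change signal
    def on_soc_changed(service_name, path, changes):
        print('%s%s -> %s (%s)' % (service_name, path, changes['Value'], changes['Text']))

    soc_item = VeDbusItemImport(bus, 'com.victronenergy.battery.ttyUSB0', '/Soc',
        eventCallback=on_soc_changed)
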
+class VeDbusItemImport(object):
+	def __new__(cls, bus, serviceName, path, eventCallback=None, createsignal=True):
+		instance = object.__new__(cls)
+
+		# If signal tracking should be done, also add to root tracker
+		if createsignal:
+			if "_roots" not in cls.__dict__:
+				cls._roots = TrackerDict(lambda k: VeDbusRootTracker(bus, k))
+
+		return instance
+
+	## Constructor
+	# @param bus			the bus-object (SESSION or SYSTEM).
+	# @param serviceName	the dbus-service-name (string), for example 'com.victronenergy.battery.ttyO1'
+	# @param path			the object-path, for example '/Dc/V'
+	# @param eventCallback	function that you want to be called on a value change
+	# @param createsignal	only set this to False if you use this object to read a value a single time.
+	#						When leaving it True, make sure to also subscribe to the NameOwnerChanged
+	#						signal elsewhere. See also the notes above this class.
+	def __init__(self, bus, serviceName, path, eventCallback=None, createsignal=True):
+		# TODO: is it necessary to store _serviceName and _path? Isn't it
+		# already stored by bus.get_object somewhere?
+		self._serviceName = serviceName
+		self._path = path
+		self._match = None
+		# TODO: _proxy is being used in settingsdevice.py, make a getter for that
+		self._proxy = bus.get_object(serviceName, path, introspect=False)
+		self.eventCallback = eventCallback
+
+		assert eventCallback is None or createsignal
+		if createsignal:
+			self._match = self._proxy.connect_to_signal(
+				"PropertiesChanged", weak_functor(self._properties_changed_handler))
+			self._roots[serviceName].add(self)
+
+		# Store the current value in _cachedvalue. When it doesn't exist, set _cachedvalue to
+		# None, same as when a value is invalid
+		self._cachedvalue = None
+		try:
+			v = self._proxy.GetValue()
+		except dbus.exceptions.DBusException:
+			pass
+		else:
+			self._cachedvalue = unwrap_dbus_value(v)
+
+	def __del__(self):
+		if self._match is not None:
+			self._match.remove()
+			self._match = None
+		self._proxy = None
+
+	def _refreshcachedvalue(self):
+		self._cachedvalue = unwrap_dbus_value(self._proxy.GetValue())
+
+	## Returns the path as a string, for example '/AC/L1/V'
+	@property
+	def path(self):
+		return self._path
+
+	## Returns the dbus service name as a string, for example com.victronenergy.vebus.ttyO1
+	@property
+	def serviceName(self):
+		return self._serviceName
+
+	## Returns the value of the dbus-item.
+	# The type will be a dbus variant, for example dbus.Int32(0, variant_level=1).
+	# This is not a property, to keep the name consistent with the com.victronenergy.busitem interface.
+	# Returns None when the value is invalid.
+	def get_value(self):
+		return self._cachedvalue
+
+	## Writes a new value to the dbus-item
+	def set_value(self, newvalue):
+		r = self._proxy.SetValue(wrap_dbus_value(newvalue))
+
+		# Instead of just saving the value, go to the dbus and get it, so we have the right type etc.
+		if r == 0:
+			self._refreshcachedvalue()
+
+		return r
+
+	## Resets the item to its default value
+	def set_default(self):
+		self._proxy.SetDefault()
+		self._refreshcachedvalue()
+
+	## Returns the text representation of the value.
+	# For example when the value is an enum/int GetText might return the string
+	# belonging to that enum value. Another example: for a voltage, GetValue
+	# would return a float, such as 12.0, and GetText could return '12 VDC'.
+	#
+	# Note that this depends on how the dbus-producer has implemented this.
+	def get_text(self):
+		return self._proxy.GetText()
+
+	## Returns true if the object path exists, and false if it doesn't
+	@property
+	def exists(self):
+		# TODO: replace this with a proper existence check.
+		r = False
+		try:
+			self._proxy.GetValue()
+			r = True
+		except dbus.exceptions.DBusException:
+			pass
+
+		return r
+
+	## callback for the trigger-event.
+	# @param eventCallback the event-callback-function.
+	@property
+	def eventCallback(self):
+		return self._eventCallback
+
+	@eventCallback.setter
+	def eventCallback(self, eventCallback):
+		self._eventCallback = eventCallback
+
+	## Is called when the value of the imported bus-item changes.
+	# Stores the new value in our local cache, and calls the eventCallback, if set.
+	def _properties_changed_handler(self, changes):
+		if "Value" in changes:
+			changes['Value'] = unwrap_dbus_value(changes['Value'])
+			self._cachedvalue = changes['Value']
+			if self._eventCallback:
+				# The reason behind this try/except is to prevent errors from silently ending up in an
+				# error handler inside the dbus code.
+				try:
+					self._eventCallback(self._serviceName, self._path, changes)
+				except:
+					traceback.print_exc()
+					os._exit(1)  # sys.exit() is not used, since that also throws an exception
+
+class VeDbusTreeExport(dbus.service.Object):
+	def __init__(self, bus, objectPath, service):
+		dbus.service.Object.__init__(self, bus, objectPath)
+		self._service = service
+		logging.debug("VeDbusTreeExport %s has been created" % objectPath)
+
+	def __del__(self):
+		# self._get_path() will raise an exception when retrieved after the call to
+		# .remove_from_connection, so we need a copy.
+		path = self._get_path()
+		if path is None:
+			return
+		self.remove_from_connection()
+		logging.debug("VeDbusTreeExport %s has been removed" % path)
+
+	def _get_path(self):
+		if len(self._locations) == 0:
+			return None
+		return self._locations[0][1]
+
+	def _get_value_handler(self, path, get_text=False):
+		logging.debug("_get_value_handler called for %s" % path)
+		r = {}
+		px = path
+		if not px.endswith('/'):
+			px += '/'
+		for p, item in self._service._dbusobjects.items():
+			if p.startswith(px):
+				v = item.GetText() if get_text else wrap_dbus_value(item.local_get_value())
+				r[p[len(px):]] = v
+		logging.debug(r)
+		return r
+
+	@dbus.service.method('com.victronenergy.BusItem', out_signature='v')
+	def GetValue(self):
+		value = self._get_value_handler(self._get_path())
+		return dbus.Dictionary(value, signature=dbus.Signature('sv'), variant_level=1)
+
+	@dbus.service.method('com.victronenergy.BusItem', out_signature='v')
+	def GetText(self):
+		return self._get_value_handler(self._get_path(), True)
+
+	def local_get_value(self):
+		return self._get_value_handler(self._get_path())
+
+class VeDbusRootExport(VeDbusTreeExport):
+	@dbus.service.signal('com.victronenergy.BusItem', signature='a{sa{sv}}')
+	def ItemsChanged(self, changes):
+		pass
+
+	@dbus.service.method('com.victronenergy.BusItem', out_signature='a{sa{sv}}')
+	def GetItems(self):
+		return {
+			path: {
+				'Value': wrap_dbus_value(item.local_get_value()),
+				'Text': item.GetText() }
+			for path, item in self._service._dbusobjects.items()
+		}
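
Seen from a client, GetValue on a tree node returns a flattened dict keyed by path relative to that node, while GetItems on the root returns value and text per absolute path. A hypothetical query against the service sketched earlier:

    import dbus

    bus = dbus.SystemBus()
    root = bus.get_object('com.victronenergy.battery.ttyUSB0', '/')
    items = root.GetItems(dbus_interface='com.victronenergy.BusItem')
    # e.g. {'/Soc': {'Value': 55, 'Text': '55%'}, '/Dc/0/Voltage': {...}, ...}

    node = bus.get_object('com.victronenergy.battery.ttyUSB0', '/Dc')
    print(node.GetValue(dbus_interface='com.victronenergy.BusItem'))
    # e.g. {'0/Voltage': 52.1, '0/Current': -3.4} -- keys are relative to /Dc
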
+
+class VeDbusItemExport(dbus.service.Object):
+	## Constructor of VeDbusItemExport
+	#
+	# Use this object to export (publish) values on the dbus.
+	# Creates the dbus-object under the given dbus-service-name.
+	# @param bus			The dbus object.
+	# @param objectPath		The dbus-object-path.
+	# @param value			Value to initialize ourselves with, defaults to None which means Invalid
+	# @param description	String containing a description. Can be called over the dbus with GetDescription()
+	# @param writeable		whether other processes may change the value over the dbus via SetValue()
+	# @param onchangecallback	Function that will be called when someone else changes the value of this
+	#						VeBusItem over the dbus. First parameter passed to the callback will be our path,
+	#						second the new value. This callback should return True to accept the change,
+	#						False to reject it.
+	def __init__(self, bus, objectPath, value=None, description=None, writeable=False,
+			onchangecallback=None, gettextcallback=None, deletecallback=None,
+			valuetype=None):
+		dbus.service.Object.__init__(self, bus, objectPath)
+		self._onchangecallback = onchangecallback
+		self._gettextcallback = gettextcallback
+		self._value = value
+		self._description = description
+		self._writeable = writeable
+		self._deletecallback = deletecallback
+		self._type = valuetype
+
+	# To force immediate deregistering of this dbus object, explicitly call __del__().
+	def __del__(self):
+		# self._get_path() will raise an exception when retrieved after the
+		# call to .remove_from_connection, so we need a copy.
+		path = self._get_path()
+		if path is None:
+			return
+		if self._deletecallback is not None:
+			self._deletecallback(path)
+		self.remove_from_connection()
+		logging.debug("VeDbusItemExport %s has been removed" % path)
+
+	def _get_path(self):
+		if len(self._locations) == 0:
+			return None
+		return self._locations[0][1]
+
+	## Sets the value. And in case the value is different from what it was, a signal
+	# will be emitted to the dbus. This function is to be used in the python code that
+	# is using this class to export values to the dbus.
+	# Set the value to None to indicate that it is Invalid.
+	def local_set_value(self, newvalue):
+		changes = self._local_set_value(newvalue)
+		if changes is not None:
+			self.PropertiesChanged(changes)
+
+	def _local_set_value(self, newvalue):
+		if self._value == newvalue:
+			return None
+
+		self._value = newvalue
+		return {
+			'Value': wrap_dbus_value(newvalue),
+			'Text': self.GetText()
+		}
+
+	def local_get_value(self):
+		return self._value
+
+	# ==== ALL FUNCTIONS BELOW THIS LINE WILL BE CALLED BY OTHER PROCESSES OVER THE DBUS ====
+
+	## Dbus exported method SetValue
+	# Function is called over the D-Bus by other processes. It will first check (via the callback)
+	# whether the new value is accepted. If it is, it stores the value and emits a changed-signal.
+	# @param value The new value.
+	# @return completion-code: 0 when successful, non-zero otherwise (see the return statements below).
+	@dbus.service.method('com.victronenergy.BusItem', in_signature='v', out_signature='i')
+	def SetValue(self, newvalue):
+		if not self._writeable:
+			return 1  # NOT OK
+
+		newvalue = unwrap_dbus_value(newvalue)
+
+		# If a value type is enforced, cast to it. If the type can be coerced,
+		# python will do it for us. This allows ints to become floats,
+		# or bools to become ints. Additionally also allow None, so that
+		# a path may be invalidated.
+		if self._type is not None and newvalue is not None:
+			try:
+				newvalue = self._type(newvalue)
+			except (ValueError, TypeError):
+				return 1  # NOT OK
+
+		if newvalue == self._value:
+			return 0  # OK
+
+		# call the callback given to us, and check if the new value is OK.
+		if (self._onchangecallback is None or
+				self._onchangecallback(self.__dbus_object_path__, newvalue)):
+			self.local_set_value(newvalue)
+			return 0  # OK
+
+		return 2  # NOT OK
+
+	## Dbus exported method GetDescription
+	#
+	# Returns the description.
+	# @param language A language code (e.g. ISO 639-1 en-US).
+	# @param length Length of the language string.
+	# @return description
+	@dbus.service.method('com.victronenergy.BusItem', in_signature='si', out_signature='s')
+	def GetDescription(self, language, length):
+		return self._description if self._description is not None else 'No description given'
+
+	## Dbus exported method GetValue
+	# Returns the value.
+	# @return the value when valid, and otherwise an empty array
+	@dbus.service.method('com.victronenergy.BusItem', out_signature='v')
+	def GetValue(self):
+		return wrap_dbus_value(self._value)
+
+	## Dbus exported method GetText
+	# Returns the value as a string for this dbus-object-path.
+	# @return text A text-value. '---' when the local value is invalid
+	@dbus.service.method('com.victronenergy.BusItem', out_signature='s')
+	def GetText(self):
+		if self._value is None:
+			return '---'
+
+		# Default conversion from dbus.Byte will get you a character (so 'T' instead of '84'), so we
+		# have to convert to int first. Note that if a dbus.Byte turns up here, it must have come from
+		# the application itself, as all data from the D-Bus should have been unwrapped by now.
+		if self._gettextcallback is None and type(self._value) == dbus.Byte:
+			return str(int(self._value))
+
+		if self._gettextcallback is None and self.__dbus_object_path__ == '/ProductId':
+			return "0x%X" % self._value
+
+		if self._gettextcallback is None:
+			return str(self._value)
+
+		return self._gettextcallback(self.__dbus_object_path__, self._value)
+
+	## The signal that indicates that the value has changed.
+	# Other processes connected to this BusItem object will have subscribed to the
+	# event when they want to track our state.
+	@dbus.service.signal('com.victronenergy.BusItem', signature='a{sv}')
+	def PropertiesChanged(self, changes):
+		pass
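
The SetValue return codes above are what a remote writer sees: 0 when the write is accepted (or the value is unchanged), 1 when the path is not writeable or the value cannot be cast, 2 when the onchange callback rejects it. A hypothetical client-side write against the service sketched earlier:

    import dbus

    bus = dbus.SystemBus()
    item = bus.get_object('com.victronenergy.battery.ttyUSB0', '/Soc')
    code = item.SetValue(60, dbus_interface='com.victronenergy.BusItem')
    if code != 0:
        # 1: not writeable / uncastable value; 2: rejected by the service's callback
        raise RuntimeError('SetValue rejected with code %d' % code)
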
+
+## This class behaves like a regular reference to a class method (eg. self.foo), but keeps a weak reference
+## to the object whose method is to be called.
+## Use this object to break circular references.
+class weak_functor:
+	def __init__(self, f):
+		self._r = weakref.ref(f.__self__)
+		self._f = weakref.ref(f.__func__)
+
+	def __call__(self, *args, **kargs):
+		r = self._r()
+		f = self._f()
+		if r is None or f is None:
+			return
+		f(r, *args, **kargs)
diff --git a/python/dbus-fzsonick-48tl-nofork/start.sh b/python/dbus-fzsonick-48tl-nofork/start.sh
new file mode 100755
index 000000000..d818ffc57
--- /dev/null
+++ b/python/dbus-fzsonick-48tl-nofork/start.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+. /opt/victronenergy/serial-starter/run-service.sh
+
+app=/opt/victronenergy/dbus-fzsonick-48tl/dbus-fzsonick-48tl.py
+args="$tty"
+start $args
diff --git a/typescript/frontend-marios2/src/content/dashboards/BatteryView/BatteryView.tsx b/typescript/frontend-marios2/src/content/dashboards/BatteryView/BatteryView.tsx
index 1ad54388b..56378d8d5 100644
--- a/typescript/frontend-marios2/src/content/dashboards/BatteryView/BatteryView.tsx
+++ b/typescript/frontend-marios2/src/content/dashboards/BatteryView/BatteryView.tsx
@@ -375,6 +375,112 @@ function BatteryView(props: BatteryViewProps) {
                   )}
+
+                {props.productNum === 1 && (
+                  <>
+
+                      {Number(battery.Warnings.value) === 0 ? (
+                        'None'
+                      ) : Number(battery.Warnings.value) === 1 ? (
+
+                          New Warning
+
+                      ) : (
+
+                          Multiple Warnings
+
+                      )}
+
+
+                      {Number(battery.Alarms.value) === 0 ? (
+                        'None'
+                      ) : Number(battery.Alarms.value) === 1 ? (
+
+                          New Alarm
+
+                      ) : (
+
+                          Multiple Alarms
+
+                      )}
+
+
+                )}
               ))}
diff --git a/typescript/frontend-marios2/src/content/dashboards/BatteryView/DetailedBatteryView.tsx b/typescript/frontend-marios2/src/content/dashboards/BatteryView/DetailedBatteryView.tsx
index b5752eafa..d03f82778 100644
--- a/typescript/frontend-marios2/src/content/dashboards/BatteryView/DetailedBatteryView.tsx
+++ b/typescript/frontend-marios2/src/content/dashboards/BatteryView/DetailedBatteryView.tsx
@@ -335,7 +335,7 @@ function DetailedBatteryView(props: DetailedBatteryViewProps) {
                 ...batteryStringStyle,
                 backgroundColor:
                   props.batteryData.String1Active.value == 'True' ||
-                  props.batteryData.String4Active.value == 0
+                  Number(props.batteryData.String1Active.value) == 0
                     ? '#32CD32'
                     : '#FF033E'
               }}
@@ -345,7 +345,7 @@ function DetailedBatteryView(props: DetailedBatteryViewProps) {
                 ...batteryStringStyle,
                 backgroundColor:
                   props.batteryData.String2Active.value == 'True' ||
-                  props.batteryData.String4Active.value == 0
+                  Number(props.batteryData.String2Active.value) == 0
                     ?
'#32CD32' : '#FF033E' }} @@ -355,7 +355,7 @@ function DetailedBatteryView(props: DetailedBatteryViewProps) { ...batteryStringStyle, backgroundColor: props.batteryData.String3Active.value == 'True' || - props.batteryData.String4Active.value == 0 + Number(props.batteryData.String3Active.value) == 0 ? '#32CD32' : '#FF033E' }} @@ -365,7 +365,7 @@ function DetailedBatteryView(props: DetailedBatteryViewProps) { ...batteryStringStyle, backgroundColor: props.batteryData.String4Active.value == 'True' || - props.batteryData.String4Active.value == 0 + Number(props.batteryData.String4Active.value) == 0 ? '#32CD32' : '#FF033E' }} @@ -375,7 +375,7 @@ function DetailedBatteryView(props: DetailedBatteryViewProps) { ...batteryStringStyle, backgroundColor: props.batteryData.String5Active.value == 'True' || - props.batteryData.String4Active.value == 0 + Number(props.batteryData.String5Active.value) == 0 ? '#32CD32' : '#FF033E' }} @@ -639,6 +639,7 @@ function DetailedBatteryView(props: DetailedBatteryViewProps) { {/*----------------------------------------------------------------------------------------------------------------------------------*/} + {props.productNum === 0 && ( <> diff --git a/typescript/frontend-marios2/src/content/dashboards/Configuration/Configuration.tsx b/typescript/frontend-marios2/src/content/dashboards/Configuration/Configuration.tsx index e1eec0584..66b682adf 100644 --- a/typescript/frontend-marios2/src/content/dashboards/Configuration/Configuration.tsx +++ b/typescript/frontend-marios2/src/content/dashboards/Configuration/Configuration.tsx @@ -162,10 +162,10 @@ function Configuration(props: ConfigurationProps) { const handleConfirm = (newDate) => { //console.log('non adapted day is ', newDate); //console.log('adapted day is ', dayjs.utc(newDate).toDate()); - // setFormValues({ - // ...formValues, - // ['calibrationChargeDate']: dayjs(newDate).toDate() - // }); + setFormValues({ + ...formValues, + ['calibrationChargeDate']: dayjs(newDate).toDate() + }); }; const handleSelectedCalibrationChargeDay = (event) => { diff --git a/typescript/frontend-marios2/src/content/dashboards/Information/InformationSalidomo.tsx b/typescript/frontend-marios2/src/content/dashboards/Information/InformationSalidomo.tsx index 4e4ba1481..b10e749d9 100644 --- a/typescript/frontend-marios2/src/content/dashboards/Information/InformationSalidomo.tsx +++ b/typescript/frontend-marios2/src/content/dashboards/Information/InformationSalidomo.tsx @@ -275,6 +275,19 @@ function InformationSalidomo(props: InformationSalidomoProps) { /> + {/*
*/} + {/* */} + {/* }*/} + {/* name="vrmLink"*/} + {/* value={formValues.vrmLink}*/} + {/* onChange={handleChange}*/} + {/* variant="outlined"*/} + {/* fullWidth*/} + {/* />*/} + {/*
*/} +
diff --git a/typescript/frontend-marios2/src/content/dashboards/Overview/chartOptions.tsx b/typescript/frontend-marios2/src/content/dashboards/Overview/chartOptions.tsx index b51e29278..956a343e8 100644 --- a/typescript/frontend-marios2/src/content/dashboards/Overview/chartOptions.tsx +++ b/typescript/frontend-marios2/src/content/dashboards/Overview/chartOptions.tsx @@ -167,7 +167,7 @@ export const getChartOptions = ( style: { fontSize: '12px' }, - offsetY: -185, + offsetY: -190, offsetX: 25, rotate: 0 }, diff --git a/typescript/frontend-marios2/src/content/dashboards/SalidomoInstallations/FlatInstallationView.tsx b/typescript/frontend-marios2/src/content/dashboards/SalidomoInstallations/FlatInstallationView.tsx index 54cac80e9..ae664956e 100644 --- a/typescript/frontend-marios2/src/content/dashboards/SalidomoInstallations/FlatInstallationView.tsx +++ b/typescript/frontend-marios2/src/content/dashboards/SalidomoInstallations/FlatInstallationView.tsx @@ -207,7 +207,11 @@ const FlatInstallationView = (props: FlatInstallationViewProps) => { sx={{ marginTop: '10px', fontSize: 'small' }} > { const filtered = props.installations.filter( (item) => - item.name.toLowerCase().includes(searchTerm.toLowerCase()) || + item.installationName + .toLowerCase() + .includes(searchTerm.toLowerCase()) || item.location.toLowerCase().includes(searchTerm.toLowerCase()) || item.region.toLowerCase().includes(searchTerm.toLowerCase()) );