Compare commits
No commits in common. "c19db1519da253c9a1c453aaa0fa18a0beb810ab" and "493b76e0ed35e2301387bf0a5ba25b36698253ed" have entirely different histories.
Binary file not shown.
@@ -1,160 +0,0 @@
import struct

import config as cfg

from data import LedState, BatteryStatus


# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime
# noinspection PyUnreachableCode
if False:
    from typing import Callable, List, Iterable, Union, AnyStr, Any


def read_bool(base_register, bit):
    # type: (int, int) -> Callable[[BatteryStatus], bool]

    # TODO: explain base register offset
    register = base_register + int(bit/16)
    bit = bit % 16

    def get_value(status):
        # type: (BatteryStatus) -> bool
        value = status.modbus_data[register - cfg.BASE_ADDRESS]
        return value & (1 << bit) > 0

    return get_value


def read_float(register, scale_factor=1.0, offset=0.0):
    # type: (int, float, float) -> Callable[[BatteryStatus], float]

    def get_value(status):
        # type: (BatteryStatus) -> float
        value = status.modbus_data[register - cfg.BASE_ADDRESS]

        if value >= 0x8000:    # convert to signed int16
            value -= 0x10000   # fiamm stores their integers signed AND with sign-offset @#%^&!

        return (value + offset) * scale_factor

    return get_value


def read_registers(register, count):
    # type: (int, int) -> Callable[[BatteryStatus], List[int]]

    start = register - cfg.BASE_ADDRESS
    end = start + count

    def get_value(status):
        # type: (BatteryStatus) -> List[int]
        return [x for x in status.modbus_data[start:end]]

    return get_value


def comma_separated(values):
    # type: (Iterable[str]) -> str
    return ", ".join(set(values))


def count_bits(base_register, nb_of_registers, nb_of_bits, first_bit=0):
    # type: (int, int, int, int) -> Callable[[BatteryStatus], int]

    get_registers = read_registers(base_register, nb_of_registers)
    end_bit = first_bit + nb_of_bits

    def get_value(status):
        # type: (BatteryStatus) -> int

        registers = get_registers(status)
        bin_registers = [bin(x)[-1:1:-1] for x in registers]             # reverse the bits in each register so that bit0 is to the left
        str_registers = [str(x).ljust(16, "0") for x in bin_registers]   # add leading zeroes, so all registers are 16 chars long
        bit_string = ''.join(str_registers)                              # join them, one long string of 0s and 1s
        filtered_bits = bit_string[first_bit:end_bit]                    # take the first nb_of_bits bits starting at first_bit

        return filtered_bits.count('1')  # count 1s

    return get_value


def read_led_state(register, led):
    # type: (int, int) -> Callable[[BatteryStatus], int]

    read_lo = read_bool(register, led * 2)
    read_hi = read_bool(register, led * 2 + 1)

    def get_value(status):
        # type: (BatteryStatus) -> int

        lo = read_lo(status)
        hi = read_hi(status)

        if hi:
            if lo:
                return LedState.blinking_fast
            else:
                return LedState.blinking_slow
        else:
            if lo:
                return LedState.on
            else:
                return LedState.off

    return get_value


# noinspection PyShadowingNames
def unit(unit):
    # type: (unicode) -> Callable[[unicode], unicode]

    def get_text(v):
        # type: (unicode) -> unicode
        return "{0}{1}".format(str(v), unit)

    return get_text


def const(constant):
    # type: (any) -> Callable[[any], any]
    def get(*args):
        return constant
    return get


def mean(numbers):
    # type: (List[Union[float,int]]) -> float
    return float(sum(numbers)) / len(numbers)


def first(ts, default=None):
    return next((t for t in ts), default)


def bitfields_to_str(lists):
    # type: (List[List[int]]) -> str

    def or_lists():
        # type: () -> Iterable[int]

        length = len(first(lists))
        n_lists = len(lists)

        for i in range(0, length):
            e = 0
            for l in range(0, n_lists):
                e = e | lists[l][i]
            yield e

    hexed = [
        '{0:0>4X}'.format(x)
        for x in or_lists()
    ]

    return ' '.join(hexed)


def pack_string(string):
    # type: (AnyStr) -> Any
    data = string.encode('UTF-8')
    return struct.pack('B', len(data)) + data
Binary file not shown.
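The reader helpers above are curried: each call fixes a register layout and returns a closure that later extracts a value from a BatteryStatus snapshot. A minimal sketch of that flow, assuming it runs in the same module as the helpers and that config defines BASE_ADDRESS, NO_OF_REGISTERS and AH_PER_STRING as the driver expects; the scale factor, battery specs and zeroed modbus data are made up for illustration:

import config as cfg
from data import Battery, BatteryStatus

read_dc_voltage = read_float(register=cfg.BASE_ADDRESS, scale_factor=0.01, offset=0)  # fixes the register once
battery = Battery(slave_address=1, hardware_version='48TL200', firmware_version='0100',
                  bms_version='1.50', ampere_hours=200)                                # illustrative specs
status = BatteryStatus(battery, modbus_data=[0] * cfg.NO_OF_REGISTERS)                 # one modbus snapshot
voltage = read_dc_voltage(status)        # reads modbus_data[register - cfg.BASE_ADDRESS] and scales it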
@@ -1,125 +0,0 @@
import config as cfg


# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime
# noinspection PyUnreachableCode
if False:
    from typing import Callable, List, Optional, AnyStr, Union, Any


class LedState(object):
    """
    from page 6 of the '48TLxxx ModBus Protocol doc'
    """
    off = 0
    on = 1
    blinking_slow = 2
    blinking_fast = 3


class LedColor(object):
    green = 0
    amber = 1
    blue = 2
    red = 3


class ServiceSignal(object):

    def __init__(self, dbus_path, get_value_or_const, unit=''):
        # type: (str, Union[Callable[[],Any],Any], Optional[AnyStr]) -> None

        self.get_value_or_const = get_value_or_const
        self.dbus_path = dbus_path
        self.unit = unit

    @property
    def value(self):
        try:
            return self.get_value_or_const()  # callable
        except:
            return self.get_value_or_const    # value


class BatterySignal(object):

    def __init__(self, dbus_path, aggregate, get_value, unit=''):
        # type: (str, Callable[[List[any]],any], Callable[[BatteryStatus],any], Optional[AnyStr]) -> None
        """
        A Signal holds all information necessary for the handling of a
        certain datum (e.g. voltage) published by the battery.

        :param dbus_path: str
            object_path on DBus where the datum needs to be published

        :param aggregate: Iterable[any] -> any
            function that combines the values of multiple batteries into one,
            e.g. sum for currents, or mean for voltages

        :param get_value: (BatteryStatus) -> any
            function to extract the datum from the modbus record
        """

        self.dbus_path = dbus_path
        self.aggregate = aggregate
        self.get_value = get_value
        self.unit = unit


class Battery(object):

    """ Data record to hold hardware and firmware specs of the battery """

    def __init__(self, slave_address, hardware_version, firmware_version, bms_version, ampere_hours):
        # type: (int, str, str, str, int) -> None
        self.slave_address = slave_address
        self.hardware_version = hardware_version
        self.firmware_version = firmware_version
        self.bms_version = bms_version
        self.ampere_hours = ampere_hours
        self.n_strings = int(ampere_hours/cfg.AH_PER_STRING)
        self.i_max = self.n_strings * cfg.I_MAX_PER_STRING
        self.v_min = cfg.V_MIN
        self.v_max = cfg.V_MAX
        self.r_int_min = cfg.R_STRING_MIN / self.n_strings
        self.r_int_max = cfg.R_STRING_MAX / self.n_strings

    def __str__(self):
        return 'slave address = {0}\nhardware version = {1}\nfirmware version = {2}\nbms version = {3}\nampere hours = {4}'.format(
            self.slave_address, self.hardware_version, self.firmware_version, self.bms_version, str(self.ampere_hours))


class BatteryStatus(object):
    """
    record holding the current status of a battery
    """
    def __init__(self, battery, modbus_data):
        # type: (Battery, List[int]) -> None

        self.battery = battery
        self.modbus_data = modbus_data

    def serialize(self):
        # type: () -> str

        b = self.battery

        s = cfg.INNOVENERGY_PROTOCOL_VERSION + '\n'
        s += cfg.INSTALLATION_NAME + '\n'
        s += str(b.slave_address) + '\n'
        s += b.hardware_version + '\n'
        s += b.firmware_version + '\n'
        s += b.bms_version + '\n'
        s += str(b.ampere_hours) + '\n'

        for d in self.modbus_data:
            s += str(d) + '\n'

        return s


def read_file_one_line(file_name):

    with open(file_name, 'r') as file:
        return file.read().replace('\n', '').replace('\r', '').strip()
Binary file not shown.
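BatterySignal above is the glue between the modbus readers and the D-Bus publishing loop: get_value extracts one datum from a single BatteryStatus, and aggregate folds the per-battery values into one published value. A hedged sketch reusing mean and read_float from the helper module above; the path, register and unit are illustrative only:

dc_voltage = BatterySignal(
    '/Dc/0/Voltage',                                                   # object_path to publish on (illustrative)
    aggregate=mean,                                                    # voltages are averaged across batteries
    get_value=read_float(register=cfg.BASE_ADDRESS, scale_factor=0.01, offset=0),
    unit='V')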
@@ -1,354 +0,0 @@
#!/usr/bin/python2 -u
# coding=utf-8

import logging
import re
import socket
import sys
import gobject
import signals
import config as cfg

from dbus.mainloop.glib import DBusGMainLoop
from pymodbus.client.sync import ModbusSerialClient as Modbus
from pymodbus.exceptions import ModbusException, ModbusIOException
from pymodbus.other_message import ReportSlaveIdRequest
from pymodbus.pdu import ExceptionResponse
from pymodbus.register_read_message import ReadInputRegistersResponse
from data import BatteryStatus, BatterySignal, Battery, ServiceSignal
from python_libs.ie_dbus.dbus_service import DBusService

# trick the pycharm type-checker into thinking Callable is in scope, not used at runtime
# noinspection PyUnreachableCode
if False:
    from typing import Callable, List, Iterable, NoReturn


RESET_REGISTER = 0x2087


def init_modbus(tty):
    # type: (str) -> Modbus

    logging.debug('initializing Modbus')

    return Modbus(
        port='/dev/' + tty,
        method=cfg.MODE,
        baudrate=cfg.BAUD_RATE,
        stopbits=cfg.STOP_BITS,
        bytesize=cfg.BYTE_SIZE,
        timeout=cfg.TIMEOUT,
        parity=cfg.PARITY)


def init_udp_socket():
    # type: () -> socket

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setblocking(False)

    return s


def report_slave_id(modbus, slave_address):
    # type: (Modbus, int) -> str

    slave = str(slave_address)

    logging.debug('requesting slave id from node ' + slave)

    with modbus:

        request = ReportSlaveIdRequest(unit=slave_address)
        response = modbus.execute(request)

        if response is ExceptionResponse or issubclass(type(response), ModbusException):
            raise Exception('failed to get slave id from ' + slave + ' : ' + str(response))

        return response.identifier


def identify_battery(modbus, slave_address):
    # type: (Modbus, int) -> Battery

    logging.info('identifying battery...')

    hardware_version, bms_version, ampere_hours = parse_slave_id(modbus, slave_address)
    firmware_version = read_firmware_version(modbus, slave_address)

    specs = Battery(
        slave_address=slave_address,
        hardware_version=hardware_version,
        firmware_version=firmware_version,
        bms_version=bms_version,
        ampere_hours=ampere_hours)

    logging.info('battery identified:\n{0}'.format(str(specs)))

    return specs


def identify_batteries(modbus):
    # type: (Modbus) -> List[Battery]

    def _identify_batteries():
        slave_address = 0
        n_missing = -255

        while n_missing < 3:
            slave_address += 1
            try:
                yield identify_battery(modbus, slave_address)
                n_missing = 0
            except Exception as e:
                logging.info('failed to identify battery at {0} : {1}'.format(str(slave_address), str(e)))
                n_missing += 1

        logging.info('giving up searching for further batteries')

    batteries = list(_identify_batteries())  # dont be lazy!

    n = len(batteries)
    logging.info('found ' + str(n) + (' battery' if n == 1 else ' batteries'))

    return batteries


def parse_slave_id(modbus, slave_address):
    # type: (Modbus, int) -> (str, str, int)

    slave_id = report_slave_id(modbus, slave_address)

    sid = re.sub(r'[^\x20-\x7E]', '', slave_id)  # remove weird special chars

    match = re.match('(?P<hw>48TL(?P<ah>[0-9]+)) *(?P<bms>.*)', sid)

    if match is None:
        raise Exception('no known battery found')

    return match.group('hw').strip(), match.group('bms').strip(), int(match.group('ah').strip())


def read_firmware_version(modbus, slave_address):
    # type: (Modbus, int) -> str

    logging.debug('reading firmware version')

    with modbus:

        response = read_modbus_registers(modbus, slave_address, base_address=1054, count=1)
        register = response.registers[0]

        return '{0:0>4X}'.format(register)


def read_modbus_registers(modbus, slave_address, base_address=cfg.BASE_ADDRESS, count=cfg.NO_OF_REGISTERS):
    # type: (Modbus, int, int, int) -> ReadInputRegistersResponse

    logging.debug('requesting modbus registers {0}-{1}'.format(base_address, base_address + count))

    return modbus.read_input_registers(
        address=base_address,
        count=count,
        unit=slave_address)


def read_battery_status(modbus, battery):
    # type: (Modbus, Battery) -> BatteryStatus
    """
    Read the modbus registers containing the battery's status info.
    """

    logging.debug('reading battery status')

    with modbus:
        data = read_modbus_registers(modbus, battery.slave_address)
        return BatteryStatus(battery, data.registers)


def publish_values_on_dbus(service, battery_signals, battery_statuses):
    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()

    publish_individuals(service, battery_signals, battery_statuses)
    publish_aggregates(service, battery_signals, battery_statuses)


def publish_aggregates(service, signals, battery_statuses):
    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()

    for s in signals:
        if s.aggregate is None:
            continue
        values = [s.get_value(battery_status) for battery_status in battery_statuses]
        value = s.aggregate(values)
        service.own_properties.set(s.dbus_path, value, s.unit)


def publish_individuals(service, signals, battery_statuses):
    # type: (DBusService, Iterable[BatterySignal], Iterable[BatteryStatus]) -> ()

    for signal in signals:
        for battery_status in battery_statuses:
            address = battery_status.battery.slave_address
            dbus_path = '/_Battery/' + str(address) + signal.dbus_path
            value = signal.get_value(battery_status)
            service.own_properties.set(dbus_path, value, signal.unit)


def publish_service_signals(service, signals):
    # type: (DBusService, Iterable[ServiceSignal]) -> NoReturn

    for signal in signals:
        service.own_properties.set(signal.dbus_path, signal.value, signal.unit)


def upload_status_to_innovenergy(sock, statuses):
    # type: (socket, Iterable[BatteryStatus]) -> bool

    logging.debug('upload status')

    try:
        for s in statuses:
            sock.sendto(s.serialize(), (cfg.INNOVENERGY_SERVER_IP, cfg.INNOVENERGY_SERVER_PORT))
    except:
        logging.debug('FAILED')
        return False
    else:
        return True


def print_usage():
    print ('Usage: ' + __file__ + ' <serial device>')
    print ('Example: ' + __file__ + ' ttyUSB0')


def parse_cmdline_args(argv):
    # type: (List[str]) -> str

    if len(argv) == 0:
        logging.info('missing command line argument for tty device')
        print_usage()
        sys.exit(1)

    return argv[0]


def reset_batteries(modbus, batteries):
    # type: (Modbus, Iterable[Battery]) -> NoReturn

    logging.info('Resetting batteries...')

    for battery in batteries:

        result = modbus.write_registers(RESET_REGISTER, [1], unit=battery.slave_address)

        # expecting a ModbusIOException (timeout)
        # BMS can no longer reply because it is already reset
        success = isinstance(result, ModbusIOException)

        outcome = 'successfully' if success else 'FAILED to'
        logging.info('Battery {0} {1} reset'.format(str(battery.slave_address), outcome))

    logging.info('Shutting down fz-sonick driver')
    exit(0)


alive = True   # global alive flag, watchdog_task clears it, update_task sets it


def create_update_task(modbus, service, batteries):
    # type: (Modbus, DBusService, Iterable[Battery]) -> Callable[[],bool]
    """
    Creates an update task which runs the main update function
    and resets the alive flag
    """
    _socket = init_udp_socket()
    _signals = signals.init_battery_signals()

    def update_task():
        # type: () -> bool

        global alive

        logging.debug('starting update cycle')

        if service.own_properties.get('/ResetBatteries').value == 1:
            reset_batteries(modbus, batteries)

        statuses = [read_battery_status(modbus, battery) for battery in batteries]

        publish_values_on_dbus(service, _signals, statuses)
        upload_status_to_innovenergy(_socket, statuses)

        logging.debug('finished update cycle\n')

        alive = True

        return True

    return update_task


def create_watchdog_task(main_loop):
    # type: (DBusGMainLoop) -> Callable[[],bool]
    """
    Creates a Watchdog task that monitors the alive flag.
    The watchdog kills the main loop if the alive flag is not periodically reset by the update task.
    Who watches the watchdog?
    """
    def watchdog_task():
        # type: () -> bool

        global alive

        if alive:
            logging.debug('watchdog_task: update_task is alive')
            alive = False
            return True
        else:
            logging.info('watchdog_task: killing main loop because update_task is no longer alive')
            main_loop.quit()
            return False

    return watchdog_task


def main(argv):
    # type: (List[str]) -> ()

    logging.basicConfig(level=cfg.LOG_LEVEL)
    logging.info('starting ' + __file__)

    tty = parse_cmdline_args(argv)
    modbus = init_modbus(tty)

    batteries = identify_batteries(modbus)

    if len(batteries) <= 0:
        sys.exit(2)

    service = DBusService(service_name=cfg.SERVICE_NAME_PREFIX + tty)

    service.own_properties.set('/ResetBatteries', value=False, writable=True)  # initial value = False

    main_loop = gobject.MainLoop()

    service_signals = signals.init_service_signals(batteries)
    publish_service_signals(service, service_signals)

    update_task = create_update_task(modbus, service, batteries)
    update_task()  # run it right away, so that all props are initialized before anyone can ask
    watchdog_task = create_watchdog_task(main_loop)

    gobject.timeout_add(cfg.UPDATE_INTERVAL * 2, watchdog_task, priority=gobject.PRIORITY_LOW)  # add watchdog first
    gobject.timeout_add(cfg.UPDATE_INTERVAL, update_task, priority=gobject.PRIORITY_LOW)        # call update once every update_interval

    logging.info('starting gobject.MainLoop')
    main_loop.run()
    logging.info('gobject.MainLoop was shut down')

    sys.exit(0xFF)  # reaches this only on error


main(sys.argv[1:])
Binary file not shown.
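A worked example for parse_slave_id above; the identifier string is hypothetical (the real one is returned by the BMS via ReportSlaveIdRequest), but the regular expression is the one used in the driver:

#   sid = '48TL200 1.50'
#   match = re.match('(?P<hw>48TL(?P<ah>[0-9]+)) *(?P<bms>.*)', sid)
#   match.group('hw')   -> '48TL200'   hardware version
#   match.group('ah')   -> '200'       ampere hours (converted to int)
#   match.group('bms')  -> '1.50'      BMS version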
@@ -1,202 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from traceback import print_exc
from os import _exit as os_exit
from os import statvfs
import logging
from functools import update_wrapper
import dbus
logger = logging.getLogger(__name__)

VEDBUS_INVALID = dbus.Array([], signature=dbus.Signature('i'), variant_level=1)

# Use this function to make sure the code quits on an unexpected exception. Make sure to use it
# when using gobject.idle_add and also gobject.timeout_add.
# Without this, the code will just keep running, since gobject does not stop the mainloop on an
# exception.
# Example: gobject.idle_add(exit_on_error, myfunc, arg1, arg2)
def exit_on_error(func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except:
        try:
            print 'exit_on_error: there was an exception. Printing stacktrace will be tried and then exit'
            print_exc()
        except:
            pass

        # sys.exit() is not used, since that throws an exception, which does not lead to a program
        # halt when used in a dbus callback, see connection.py in the Python/Dbus libraries, line 230.
        os_exit(1)


__vrm_portal_id = None
def get_vrm_portal_id():
    # For the CCGX, the definition of the VRM Portal ID is that it is the mac address of the onboard-
    # ethernet port (eth0), stripped from its colons (:) and lower case.

    # nice coincidence is that this also works fine when running on your (linux) development computer.

    global __vrm_portal_id

    if __vrm_portal_id:
        return __vrm_portal_id

    # Assume we are on linux
    import fcntl, socket, struct

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', 'eth0'[:15]))
    __vrm_portal_id = ''.join(['%02x' % ord(char) for char in info[18:24]])

    return __vrm_portal_id


# See VE.Can registers - public.docx for definition of this conversion
def convert_vreg_version_to_readable(version):
    def str_to_arr(x, length):
        a = []
        for i in range(0, len(x), length):
            a.append(x[i:i+length])
        return a

    x = "%x" % version
    x = x.upper()

    if len(x) == 5 or len(x) == 3 or len(x) == 1:
        x = '0' + x

    a = str_to_arr(x, 2)

    # remove the first 00 if there are three bytes and it is 00
    if len(a) == 3 and a[0] == '00':
        a.pop(0)

    # if we have two or three bytes now, and the first character is a 0, remove it
    if len(a) >= 2 and a[0][0:1] == '0':
        a[0] = a[0][1]

    result = ''
    for item in a:
        result += ('.' if result != '' else '') + item

    result = 'v' + result

    return result


def get_free_space(path):
    result = -1

    try:
        s = statvfs(path)
        result = s.f_frsize * s.f_bavail  # Number of free bytes that ordinary users are allowed to use
    except Exception, ex:
        logger.info("Error while retrieving free space for path %s: %s" % (path, ex))

    return result


def get_load_averages():
    c = read_file('/proc/loadavg')
    return c.split(' ')[:3]


# Returns False if it cannot find a machine name. Otherwise returns the string
# containing the name
def get_machine_name():
    c = read_file('/proc/device-tree/model')

    if c != False:
        return c.strip('\x00')

    return read_file('/etc/venus/machine')


# Returns False if it cannot open the file. Otherwise returns its rstripped contents
def read_file(path):
    content = False

    try:
        with open(path, 'r') as f:
            content = f.read().rstrip()
    except Exception, ex:
        logger.debug("Error while reading %s: %s" % (path, ex))

    return content


def wrap_dbus_value(value):
    if value is None:
        return VEDBUS_INVALID
    if isinstance(value, float):
        return dbus.Double(value, variant_level=1)
    if isinstance(value, bool):
        return dbus.Boolean(value, variant_level=1)
    if isinstance(value, int):
        return dbus.Int32(value, variant_level=1)
    if isinstance(value, str):
        return dbus.String(value, variant_level=1)
    if isinstance(value, unicode):
        return dbus.String(value, variant_level=1)
    if isinstance(value, list):
        if len(value) == 0:
            # If the list is empty we cannot infer the type of the contents. So assume unsigned integer.
            # A (signed) integer is dangerous, because an empty list of signed integers is used to encode
            # an invalid value.
            return dbus.Array([], signature=dbus.Signature('u'), variant_level=1)
        return dbus.Array([wrap_dbus_value(x) for x in value], variant_level=1)
    if isinstance(value, long):
        return dbus.Int64(value, variant_level=1)
    if isinstance(value, dict):
        # Wrapping the keys of the dictionary causes D-Bus errors like:
        # 'arguments to dbus_message_iter_open_container() were incorrect,
        # assertion "(type == DBUS_TYPE_ARRAY && contained_signature &&
        # *contained_signature == DBUS_DICT_ENTRY_BEGIN_CHAR) || (contained_signature == NULL ||
        # _dbus_check_is_valid_signature (contained_signature))" failed in file ...'
        return dbus.Dictionary({(k, wrap_dbus_value(v)) for k, v in value.items()}, variant_level=1)
    return value


dbus_int_types = (dbus.Int32, dbus.UInt32, dbus.Byte, dbus.Int16, dbus.UInt16, dbus.UInt32, dbus.Int64, dbus.UInt64)


def unwrap_dbus_value(val):
    """Converts D-Bus values back to the original type. For example if val is of type DBus.Double,
    a float will be returned."""
    if isinstance(val, dbus_int_types):
        return int(val)
    if isinstance(val, dbus.Double):
        return float(val)
    if isinstance(val, dbus.Array):
        v = [unwrap_dbus_value(x) for x in val]
        return None if len(v) == 0 else v
    if isinstance(val, (dbus.Signature, dbus.String)):
        return unicode(val)
    # Python has no byte type, so we convert to an integer.
    if isinstance(val, dbus.Byte):
        return int(val)
    if isinstance(val, dbus.ByteArray):
        return "".join([str(x) for x in val])
    if isinstance(val, (list, tuple)):
        return [unwrap_dbus_value(x) for x in val]
    if isinstance(val, (dbus.Dictionary, dict)):
        # Do not unwrap the keys, see comment in wrap_dbus_value
        return dict([(x, unwrap_dbus_value(y)) for x, y in val.items()])
    if isinstance(val, dbus.Boolean):
        return bool(val)
    return val


class reify(object):
    """ Decorator to replace a property of an object with the calculated value,
    to make it concrete. """
    def __init__(self, wrapped):
        self.wrapped = wrapped
        update_wrapper(self, wrapped)

    def __get__(self, inst, objtype=None):
        if inst is None:
            return self
        v = self.wrapped(inst)
        setattr(inst, self.wrapped.__name__, v)
        return v
Binary file not shown.
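Two small illustrations for the utilities above. First, convert_vreg_version_to_readable traced by hand on hypothetical version words (not taken from any device):

#   convert_vreg_version_to_readable(0x0135)    -> 'v1.35'
#   convert_vreg_version_to_readable(0x020145)  -> 'v2.01.45'

Second, the reify decorator caches a computed attribute by overwriting the property with its value on first access; compute_once is a placeholder:

class Example(object):
    @reify
    def expensive(self):
        return compute_once()   # runs only once; the result then shadows the property on the instance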
@@ -1,496 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import dbus.service
import logging
import traceback
import os
import weakref
from ve_utils import wrap_dbus_value, unwrap_dbus_value

# vedbus contains three classes:
# VeDbusItemImport -> use this to read data from the dbus, ie import
# VeDbusItemExport -> use this to export data to the dbus (one value)
# VeDbusService -> use that to create a service and export several values to the dbus

# Code for VeDbusItemImport is copied from busitem.py and thereafter modified.
# All projects that used busitem.py need to migrate to this package. And some
# projects used to define their own equivalent of VeDbusItemExport. Better to
# use VeDbusItemExport, or even better the VeDbusService class that does it all for you.

# TODOS
# 1 check for datatypes, it works now, but not sure if all is compliant with
#   com.victronenergy.BusItem interface definition. See also the files in
#   tests_and_examples. And see 'if type(v) == dbus.Byte:' on line 102. Perhaps
#   something similar should also be done in VeDbusBusItemExport?
# 2 Shouldn't VeDbusBusItemExport inherit dbus.service.Object?
# 7 Make hard rules for services exporting data to the D-Bus, in order to make tracking
#   changes possible. Does everybody first invalidate its data before leaving the bus?
#   And what about before taking one object away from the bus, instead of taking the
#   whole service offline?
#   They should! And after taking one value away, do we need to know that someone left
#   the bus? Or we just keep that value in invalidated for ever? Result is that we can't
#   see the difference anymore between an invalidated value and a value that was first on
#   the bus and later not anymore. See comments above VeDbusItemImport as well.
# 9 there are probably more todos in the code below.

# Some thoughts with regards to the data types:
#
# Text from: http://dbus.freedesktop.org/doc/dbus-python/doc/tutorial.html#data-types
# ---
# Variants are represented by setting the variant_level keyword argument in the
# constructor of any D-Bus data type to a value greater than 0 (variant_level 1
# means a variant containing some other data type, variant_level 2 means a variant
# containing a variant containing some other data type, and so on). If a non-variant
# is passed as an argument but introspection indicates that a variant is expected,
# it'll automatically be wrapped in a variant.
# ---
#
# Also the different dbus datatypes, such as dbus.Int32, and dbus.UInt32 are a subclass
# of Python int. dbus.String is a subclass of Python standard class unicode, etcetera
#
# So all together that explains why we don't need to explicitly convert back and forth
# between the dbus datatypes and the standard python datatypes. Note that all datatypes
# in python are objects. Even an int is an object.

# The signature of a variant is 'v'.

# Export ourselves as a D-Bus service.
class VeDbusService(object):
    def __init__(self, servicename, bus=None):
        # dict containing the VeDbusItemExport objects, with their path as the key.
        self._dbusobjects = {}
        self._dbusnodes = {}

        # dict containing the onchange callbacks, for each object. Object path is the key
        self._onchangecallbacks = {}

        # Connect to session bus whenever present, else use the system bus
        self._dbusconn = bus or (dbus.SessionBus() if 'DBUS_SESSION_BUS_ADDRESS' in os.environ else dbus.SystemBus())

        # make the dbus connection available to outside, could make this a true property instead, but ach..
        self.dbusconn = self._dbusconn

        # Register ourselves on the dbus, trigger an error if already in use (do_not_queue)
        self._dbusname = dbus.service.BusName(servicename, self._dbusconn, do_not_queue=True)

        # Add the root item that will return all items as a tree
        self._dbusnodes['/'] = self._create_tree_export(self._dbusconn, '/', self._get_tree_dict)

        logging.info("registered ourselves on D-Bus as %s" % servicename)

    def _get_tree_dict(self, path, get_text=False):
        logging.debug("_get_tree_dict called for %s" % path)
        r = {}
        px = path
        if not px.endswith('/'):
            px += '/'
        for p, item in self._dbusobjects.items():
            if p.startswith(px):
                v = item.GetText() if get_text else wrap_dbus_value(item.local_get_value())
                r[p[len(px):]] = v
        logging.debug(r)
        return r

    # To force immediate deregistering of this dbus service and all its object paths, explicitly
    # call __del__().
    def __del__(self):
        for node in self._dbusnodes.values():
            node.__del__()
        self._dbusnodes.clear()
        for item in self._dbusobjects.values():
            item.__del__()
        self._dbusobjects.clear()
        if self._dbusname:
            self._dbusname.__del__()  # Forces call to self._bus.release_name(self._name), see source code
        self._dbusname = None

    # @param callbackonchange  function that will be called when this value is changed. First parameter will
    #                          be the path of the object, second the new value. This callback should return
    #                          True to accept the change, False to reject it.
    def add_path(self, path, value, description="", writeable=False,
                 onchangecallback=None, gettextcallback=None):

        if onchangecallback is not None:
            self._onchangecallbacks[path] = onchangecallback

        item = VeDbusItemExport(
            self._dbusconn, path, value, description, writeable,
            self._value_changed, gettextcallback, deletecallback=self._item_deleted)

        spl = path.split('/')
        for i in range(2, len(spl)):
            subPath = '/'.join(spl[:i])
            if subPath not in self._dbusnodes and subPath not in self._dbusobjects:
                self._dbusnodes[subPath] = self._create_tree_export(self._dbusconn, subPath, self._get_tree_dict)
        self._dbusobjects[path] = item
        logging.debug('added %s with start value %s. Writeable is %s' % (path, value, writeable))

    # Add the mandatory paths, as per victron dbus api doc
    def add_mandatory_paths(self, processname, processversion, connection,
                            deviceinstance, productid, productname, firmwareversion, hardwareversion, connected):
        self.add_path('/Mgmt/ProcessName', processname)
        self.add_path('/Mgmt/ProcessVersion', processversion)
        self.add_path('/Mgmt/Connection', connection)

        # Create rest of the mandatory objects
        self.add_path('/DeviceInstance', deviceinstance)
        self.add_path('/ProductId', productid)
        self.add_path('/ProductName', productname)
        self.add_path('/FirmwareVersion', firmwareversion)
        self.add_path('/HardwareVersion', hardwareversion)
        self.add_path('/Connected', connected)

    def _create_tree_export(self, bus, objectPath, get_value_handler):
        return VeDbusTreeExport(bus, objectPath, get_value_handler)

    # Callback function that is called from the VeDbusItemExport objects when a value changes. This function
    # maps the change-request to the onchangecallback given to us for this specific path.
    def _value_changed(self, path, newvalue):
        if path not in self._onchangecallbacks:
            return True

        return self._onchangecallbacks[path](path, newvalue)

    def _item_deleted(self, path):
        self._dbusobjects.pop(path)
        for np in self._dbusnodes.keys():
            if np != '/':
                for ip in self._dbusobjects:
                    if ip.startswith(np + '/'):
                        break
                else:
                    self._dbusnodes[np].__del__()
                    self._dbusnodes.pop(np)

    def __getitem__(self, path):
        return self._dbusobjects[path].local_get_value()

    def __setitem__(self, path, newvalue):
        self._dbusobjects[path].local_set_value(newvalue)

    def __delitem__(self, path):
        self._dbusobjects[path].__del__()  # Invalidates and then removes the object path
        assert path not in self._dbusobjects

    def __contains__(self, path):
        return path in self._dbusobjects


"""
Importing basics:
    - If when we power up, the D-Bus service does not exist, or it does exist and the path does not
      yet exist, still subscribe to a signal: as soon as it comes online it will send a signal with its
      initial value, which VeDbusItemImport will receive and use to update local cache. And, when set,
      call the eventCallback.
    - If when we power up, save it
    - When using get_value, know that there is no difference between services (or object paths) that don't
      exist and paths that are invalid (= empty array, see above). Both will return None. In case you do
      really want to know if a path exists or not, use the exists property.
    - When a D-Bus service leaves the D-Bus, it will first invalidate all its values, and send signals
      with that update, and only then leave the D-Bus. (or do we need to subscribe to the NameOwnerChanged-
      signal!?!) To be discussed and make sure. Not really urgent, since all existing code that uses this
      class already subscribes to the NameOwnerChanged signal, and subsequently removes instances of this
      class.

Read when using this class:
Note that when a service leaves the D-Bus without invalidating all its exported objects first, for
example because it is killed, VeDbusItemImport doesn't have a clue. So when using VeDbusItemImport,
make sure to also subscribe to the NameOwnerChanged signal on bus-level. Or just use dbusmonitor,
because that takes care of all of that for you.
"""
class VeDbusItemImport(object):
    ## Constructor
    # @param bus            the bus-object (SESSION or SYSTEM).
    # @param serviceName    the dbus-service-name (string), for example 'com.victronenergy.battery.ttyO1'
    # @param path           the object-path, for example '/Dc/V'
    # @param eventCallback  function that you want to be called on a value change
    # @param createSignal   only set this to False if you use this object to read a value one time only. When
    #                       leaving it to True, make sure to also subscribe to the NameOwnerChanged signal
    #                       elsewhere. See also note some 15 lines up.
    def __init__(self, bus, serviceName, path, eventCallback=None, createsignal=True):
        # TODO: is it necessary to store _serviceName and _path? Isn't it
        # stored in the bus_getobjectsomewhere?
        self._serviceName = serviceName
        self._path = path
        self._match = None
        # TODO: _proxy is being used in settingsdevice.py, make a getter for that
        self._proxy = bus.get_object(serviceName, path, introspect=False)
        self.eventCallback = eventCallback

        assert eventCallback is None or createsignal == True
        if createsignal:
            self._match = self._proxy.connect_to_signal(
                "PropertiesChanged", weak_functor(self._properties_changed_handler))

        # store the current value in _cachedvalue. When it doesn't exist set _cachedvalue to
        # None, same as when a value is invalid
        self._cachedvalue = None
        try:
            v = self._proxy.GetValue()
        except dbus.exceptions.DBusException:
            pass
        else:
            self._cachedvalue = unwrap_dbus_value(v)

    def __del__(self):
        if self._match != None:
            self._match.remove()
            self._match = None
        self._proxy = None

    def _refreshcachedvalue(self):
        self._cachedvalue = unwrap_dbus_value(self._proxy.GetValue())

    ## Returns the path as a string, for example '/AC/L1/V'
    @property
    def path(self):
        return self._path

    ## Returns the dbus service name as a string, for example com.victronenergy.vebus.ttyO1
    @property
    def serviceName(self):
        return self._serviceName

    ## Returns the value of the dbus-item.
    # the type will be a dbus variant, for example dbus.Int32(0, variant_level=1)
    # this is not a property to keep the name consistent with the com.victronenergy.busitem interface
    # returns None when the property is invalid
    def get_value(self):
        return self._cachedvalue

    ## Writes a new value to the dbus-item
    def set_value(self, newvalue):
        r = self._proxy.SetValue(wrap_dbus_value(newvalue))

        # instead of just saving the value, go to the dbus and get it. So we have the right type etc.
        if r == 0:
            self._refreshcachedvalue()

        return r

    ## Returns the text representation of the value.
    # For example when the value is an enum/int GetText might return the string
    # belonging to that enum value. Another example, for a voltage, GetValue
    # would return a float, 12.0Volt, and GetText could return 12 VDC.
    #
    # Note that this depends on how the dbus-producer has implemented this.
    def get_text(self):
        return self._proxy.GetText()

    ## Returns true if the object path exists, and false if it doesn't
    @property
    def exists(self):
        # TODO: do some real check instead of this crazy thing.
        r = False
        try:
            r = self._proxy.GetValue()
            r = True
        except dbus.exceptions.DBusException:
            pass

        return r

    ## callback for the trigger-event.
    # @param eventCallback the event-callback-function.
    @property
    def eventCallback(self):
        return self._eventCallback

    @eventCallback.setter
    def eventCallback(self, eventCallback):
        self._eventCallback = eventCallback

    ## Is called when the value of the imported bus-item changes.
    # Stores the new value in our local cache, and calls the eventCallback, if set.
    def _properties_changed_handler(self, changes):
        if "Value" in changes:
            changes['Value'] = unwrap_dbus_value(changes['Value'])
            self._cachedvalue = changes['Value']
            if self._eventCallback:
                # The reason behind this try/except is to prevent errors silently ending up in an error
                # handler in the dbus code.
                try:
                    self._eventCallback(self._serviceName, self._path, changes)
                except:
                    traceback.print_exc()
                    os._exit(1)  # sys.exit() is not used, since that also throws an exception


class VeDbusTreeExport(dbus.service.Object):
    def __init__(self, bus, objectPath, get_value_handler):
        dbus.service.Object.__init__(self, bus, objectPath)
        self._get_value_handler = get_value_handler
        logging.debug("VeDbusTreeExport %s has been created" % objectPath)

    def __del__(self):
        # self._get_path() will raise an exception when retrieved after the call to .remove_from_connection,
        # so we need a copy.
        path = self._get_path()
        if path is None:
            return
        self.remove_from_connection()
        logging.debug("VeDbusTreeExport %s has been removed" % path)

    def _get_path(self):
        if len(self._locations) == 0:
            return None
        return self._locations[0][1]

    @dbus.service.method('com.victronenergy.BusItem', out_signature='v')
    def GetValue(self):
        value = self._get_value_handler(self._get_path())
        return dbus.Dictionary(value, signature=dbus.Signature('sv'), variant_level=1)

    @dbus.service.method('com.victronenergy.BusItem', out_signature='v')
    def GetText(self):
        return self._get_value_handler(self._get_path(), True)

    def local_get_value(self):
        return self._get_value_handler(self.path)


class VeDbusItemExport(dbus.service.Object):
    ## Constructor of VeDbusItemExport
    #
    # Use this object to export (publish) values on the dbus.
    # Creates the dbus-object under the given dbus-service-name.
    # @param bus          The dbus object.
    # @param objectPath   The dbus-object-path.
    # @param value        Value to initialize ourselves with, defaults to None which means Invalid
    # @param description  String containing a description. Can be called over the dbus with GetDescription()
    # @param writeable    what would this do!? :).
    # @param callback     Function that will be called when someone else changes the value of this VeBusItem
    #                     over the dbus. First parameter passed to callback will be our path, second the new
    #                     value. This callback should return True to accept the change, False to reject it.
    def __init__(self, bus, objectPath, value=None, description=None, writeable=False,
                 onchangecallback=None, gettextcallback=None, deletecallback=None):
        dbus.service.Object.__init__(self, bus, objectPath)
        self._onchangecallback = onchangecallback
        self._gettextcallback = gettextcallback
        self._value = value
        self._description = description
        self._writeable = writeable
        self._deletecallback = deletecallback

    # To force immediate deregistering of this dbus object, explicitly call __del__().
    def __del__(self):
        # self._get_path() will raise an exception when retrieved after the
        # call to .remove_from_connection, so we need a copy.
        path = self._get_path()
        if path == None:
            return
        if self._deletecallback is not None:
            self._deletecallback(path)
        self.local_set_value(None)
        self.remove_from_connection()
        logging.debug("VeDbusItemExport %s has been removed" % path)

    def _get_path(self):
        if len(self._locations) == 0:
            return None
        return self._locations[0][1]

    ## Sets the value. And in case the value is different from what it was, a signal
    # will be emitted to the dbus. This function is to be used in the python code that
    # is using this class to export values to the dbus.
    # set value to None to indicate that it is Invalid
    def local_set_value(self, newvalue):
        if self._value == newvalue:
            return

        self._value = newvalue

        changes = {}
        changes['Value'] = wrap_dbus_value(newvalue)
        changes['Text'] = self.GetText()
        self.PropertiesChanged(changes)

    def local_get_value(self):
        return self._value

    # ==== ALL FUNCTIONS BELOW THIS LINE WILL BE CALLED BY OTHER PROCESSES OVER THE DBUS ====

    ## Dbus exported method SetValue
    # Function is called over the D-Bus by other process. It will first check (via callback) if the new
    # value is accepted. If it is, stores it and emits a changed-signal.
    # @param value The new value.
    # @return completion-code When successful a 0 is returned, and when not a -1 is returned.
    @dbus.service.method('com.victronenergy.BusItem', in_signature='v', out_signature='i')
    def SetValue(self, newvalue):
        if not self._writeable:
            return 1  # NOT OK

        newvalue = unwrap_dbus_value(newvalue)

        if newvalue == self._value:
            return 0  # OK

        # call the callback given to us, and check if new value is OK.
        if (self._onchangecallback is None or
                (self._onchangecallback is not None and self._onchangecallback(self.__dbus_object_path__, newvalue))):

            self.local_set_value(newvalue)
            return 0  # OK

        return 2  # NOT OK

    ## Dbus exported method GetDescription
    #
    # Returns the description.
    # @param language A language code (e.g. ISO 639-1 en-US).
    # @param length Length of the language string.
    # @return description
    @dbus.service.method('com.victronenergy.BusItem', in_signature='si', out_signature='s')
    def GetDescription(self, language, length):
        return self._description if self._description is not None else 'No description given'

    ## Dbus exported method GetValue
    # Returns the value.
    # @return the value when valid, and otherwise an empty array
    @dbus.service.method('com.victronenergy.BusItem', out_signature='v')
    def GetValue(self):
        return wrap_dbus_value(self._value)

    ## Dbus exported method GetText
    # Returns the value as string of the dbus-object-path.
    # @return text A text-value. '---' when local value is invalid
    @dbus.service.method('com.victronenergy.BusItem', out_signature='s')
    def GetText(self):
        if self._value is None:
            return '---'

        # Default conversion from dbus.Byte will get you a character (so 'T' instead of '84'), so we
        # have to convert to int first. Note that if a dbus.Byte turns up here, it must have come from
        # the application itself, as all data from the D-Bus should have been unwrapped by now.
        if self._gettextcallback is None and type(self._value) == dbus.Byte:
            return str(int(self._value))

        if self._gettextcallback is None and self.__dbus_object_path__ == '/ProductId':
            return "0x%X" % self._value

        if self._gettextcallback is None:
            return str(self._value)

        return self._gettextcallback(self.__dbus_object_path__, self._value)

    ## The signal that indicates that the value has changed.
    # Other processes connected to this BusItem object will have subscribed to the
    # event when they want to track our state.
    @dbus.service.signal('com.victronenergy.BusItem', signature='a{sv}')
    def PropertiesChanged(self, changes):
        pass


## This class behaves like a regular reference to a class method (eg. self.foo), but keeps a weak reference
## to the object which method is to be called.
## Use this object to break circular references.
class weak_functor:
    def __init__(self, f):
        self._r = weakref.ref(f.__self__)
        self._f = weakref.ref(f.__func__)

    def __call__(self, *args, **kargs):
        r = self._r()
        f = self._f()
        if r == None or f == None:
            return
        f(r, *args, **kargs)
Binary file not shown.
Binary file not shown.
Binary file not shown.
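A hedged usage sketch for VeDbusService above; the service name, product data and paths are illustrative, and the glib main-loop setup mirrors how the driver wires things up rather than anything prescribed by this file:

from dbus.mainloop.glib import DBusGMainLoop
import gobject

DBusGMainLoop(set_as_default=True)                                  # typically set up before creating the service
service = VeDbusService('com.victronenergy.battery.ttyUSB0')
service.add_mandatory_paths(
    processname=__file__, processversion='1.0', connection='Modbus RTU',
    deviceinstance=1, productid=0, productname='48TL battery',
    firmwareversion='0100', hardwareversion='48TL200', connected=1)
service.add_path('/Dc/0/Voltage', 0, writeable=False,
                 gettextcallback=lambda path, value: '{0}V'.format(value))
service['/Dc/0/Voltage'] = 52.3                                     # emits PropertiesChanged to subscribers

main_loop = gobject.MainLoop()
main_loop.run()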
|
@@ -1,54 +0,0 @@
from logging import getLogger

from python_libs.ie_utils.mixins import Disposable, RequiresMainLoop, Record
from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon
from python_libs.ie_dbus.private.own_properties import OwnProperties
from python_libs.ie_dbus.private.remote_properties import RemoteProperties
from python_libs.ie_dbus.private.ve_constants import SERVICE_PREFIX
from python_libs.ie_dbus.private.settings import Settings

_log = getLogger(__name__)

# noinspection PyUnreachableCode
if False:
    from typing import Union, AnyStr, NoReturn, List


def _enforce_ve_prefix(service_name_filter):
    if not service_name_filter.startswith(SERVICE_PREFIX):
        raise ValueError('service_name_filter must start with ' + SERVICE_PREFIX)


SESSION_BUS = 0
SYSTEM_BUS = 1


class DBusService(Record, Disposable, RequiresMainLoop):

    def __init__(self, service_name=None, device_instance=1, connection_type_or_address=SYSTEM_BUS):
        # type: (str, int, Union[int, AnyStr]) -> NoReturn

        # only prepend the prefix when a name was actually given; service_name defaults to None
        if service_name is not None and not service_name.startswith(SERVICE_PREFIX):
            service_name = SERVICE_PREFIX + service_name

        self._daemon = DBusDaemon(connection_type_or_address)
        self.remote_properties = RemoteProperties(self._daemon)
        self.own_properties = OwnProperties(self._daemon)
        self.own_properties.set('/DeviceInstance', device_instance)  # must be set before request_name, sigh

        self.settings = Settings(self._daemon, self.remote_properties)
        self.name = service_name

        if service_name is not None:
            self._bus_name = self._daemon.request_name(service_name)
            _log.info('service name is ' + service_name)

        _log.info('id is ' + self.bus_id)

    @property
    def available_services(self):
        # type: () -> List[unicode]
        return [s.name for s in self._daemon.services]

    @property
    def bus_id(self):
        # type: () -> unicode
        return self._daemon.bus_id
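A hedged usage sketch of DBusService (the service name, property path and import path are assumptions, since file names are not shown in this diff): the class wires the daemon, own/remote properties and settings together, and, being a Disposable, can be used as a context manager.

from python_libs.ie_dbus.dbus_service import DBusService   # import path assumed

with DBusService('testservice', device_instance=1) as service:
    # 'com.victronenergy.' is prepended automatically, so this registers com.victronenergy.testservice
    service.own_properties.set('/Ac/Power', 230.0, unit=' W')
    names = service.available_services   # names of the services currently on the bus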
Binary file not shown.
Binary file not shown.
|
@@ -1,22 +0,0 @@
from logging import getLogger

from python_libs.ie_utils.mixins import Record

_log = getLogger(__name__)

# noinspection PyUnreachableCode
if False:
    from typing import AnyStr


class ServiceInfo(Record):

    # noinspection PyShadowingBuiltins
    def __init__(self, name, id, pid, proc_name, cmd):
        # type: (AnyStr, AnyStr, int, str, str) -> ServiceInfo

        self.proc_name = proc_name
        self.name = name
        self.id = id
        self.cmd = cmd
        self.pid = pid
Binary file not shown.
|
@@ -1,185 +0,0 @@
|
||||||
from logging import getLogger
|
|
||||||
|
|
||||||
from _dbus_bindings import Connection, MethodCallMessage, SignalMessage, BUS_DAEMON_NAME, \
|
|
||||||
BUS_DAEMON_PATH, BUS_DAEMON_IFACE, NAME_FLAG_DO_NOT_QUEUE, Message, HANDLER_RESULT_HANDLED
|
|
||||||
|
|
||||||
from python_libs.ie_dbus.private.dbus_types import dbus_string, dbus_uint32
|
|
||||||
from python_libs.ie_dbus.private.message_types import DBusException
|
|
||||||
from python_libs.ie_utils.mixins import Disposable
|
|
||||||
|
|
||||||
_log = getLogger(__name__)
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import List, Optional, Iterable, Callable, Union, NoReturn, AnyStr, Any
|
|
||||||
from python_libs.ie_dbus.private.dbus_types import DbusType
|
|
||||||
|
|
||||||
|
|
||||||
class DbusConnection(Disposable):
|
|
||||||
"""
|
|
||||||
A collection of stateless functions operating on a Connection object
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, connection_type_or_address):
|
|
||||||
# type: (Union[int, AnyStr]) -> NoReturn
|
|
||||||
|
|
||||||
self._address = connection_type_or_address
|
|
||||||
# noinspection PyProtectedMember
|
|
||||||
self._connection = Connection._new_for_bus(connection_type_or_address) # it's not disposable
|
|
||||||
self.chain_disposable(self._connection.close, 'connection ' + self._connection.get_unique_name())
|
|
||||||
|
|
||||||
@property
|
|
||||||
def bus_id(self):
|
|
||||||
return self._connection.get_unique_name()
|
|
||||||
|
|
||||||
def fork(self):
|
|
||||||
return DbusConnection(self._address)
|
|
||||||
|
|
||||||
def get_ids_and_service_names(self):
|
|
||||||
# type: () -> Iterable[unicode]
|
|
||||||
|
|
||||||
# noinspection PyTypeChecker
|
|
||||||
return map(unicode, self.call_daemon_method('ListNames')[0])
|
|
||||||
|
|
||||||
def get_service_names(self):
|
|
||||||
# type: () -> Iterable[AnyStr]
|
|
||||||
|
|
||||||
return (
|
|
||||||
unicode(name)
|
|
||||||
for name
|
|
||||||
in self.get_ids_and_service_names()
|
|
||||||
if not name.startswith(':')
|
|
||||||
)
|
|
||||||
|
|
||||||
def get_service_ids(self):
|
|
||||||
# type: () -> Iterable[AnyStr]
|
|
||||||
|
|
||||||
return (
|
|
||||||
name
|
|
||||||
for name in self.get_ids_and_service_names() if name.startswith(':'))
|
|
||||||
|
|
||||||
# noinspection PyBroadException
|
|
||||||
def get_pid_of_service(self, service_name):
|
|
||||||
# type: (AnyStr) -> Optional[int]
|
|
||||||
try:
|
|
||||||
reply = self.call_daemon_method('GetConnectionUnixProcessID', dbus_string(service_name))
|
|
||||||
return int(reply[0])
|
|
||||||
except:
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_id_of_service(self, service_name):
|
|
||||||
# type: (AnyStr) -> AnyStr
|
|
||||||
reply = self.call_daemon_method('GetNameOwner', dbus_string(service_name))
|
|
||||||
return unicode(reply[0])
|
|
||||||
|
|
||||||
def call_method(self, service_name, object_path, interface, member, *args):
|
|
||||||
# type: (AnyStr, AnyStr, Optional[str], str, List[Any]) -> List[Any]
|
|
||||||
|
|
||||||
msg = MethodCallMessage(service_name, object_path, interface, member)
|
|
||||||
|
|
||||||
for arg in args:
|
|
||||||
msg.append(arg)
|
|
||||||
|
|
||||||
reply = self._connection.send_message_with_reply_and_block(msg) # with py3 we could use asyncio here
|
|
||||||
DBusException.raise_if_error_reply(reply)
|
|
||||||
|
|
||||||
return reply.get_args_list() # TODO: utf8_strings=True ?
|
|
||||||
|
|
||||||
def send_message(self, msg):
|
|
||||||
# type: (Message) -> NoReturn
|
|
||||||
|
|
||||||
self._connection.send_message(msg)
|
|
||||||
|
|
||||||
def call_daemon_method(self, method_name, *args):
|
|
||||||
# type: (AnyStr, Iterable[DbusType])-> List[any]
|
|
||||||
|
|
||||||
return self.call_method(BUS_DAEMON_NAME, BUS_DAEMON_PATH, BUS_DAEMON_IFACE, method_name, *args)
|
|
||||||
|
|
||||||
def request_name(self, service_name):
|
|
||||||
# type: (AnyStr) -> Disposable
|
|
||||||
|
|
||||||
_log.debug('requesting bus name ' + service_name)
|
|
||||||
|
|
||||||
self.call_daemon_method('RequestName', dbus_string(service_name), dbus_uint32(NAME_FLAG_DO_NOT_QUEUE))
|
|
||||||
|
|
||||||
def dispose():
|
|
||||||
self.call_daemon_method('ReleaseName', dbus_string(service_name))
|
|
||||||
|
|
||||||
return self.create_dependent_disposable(dispose, 'bus name ' + service_name)
|
|
||||||
|
|
||||||
def broadcast_signal(self, object_path, interface, member, *args):
|
|
||||||
# type: (AnyStr, AnyStr, AnyStr, List[Any]) -> NoReturn
|
|
||||||
|
|
||||||
msg = SignalMessage(object_path, interface, member)
|
|
||||||
for arg in args:
|
|
||||||
msg.append(arg)
|
|
||||||
|
|
||||||
self._connection.send_message(msg)
|
|
||||||
|
|
||||||
def add_message_callback(self, callback, filter_rule, fork=True):
|
|
||||||
# type: (Callable[[Message], NoReturn], AnyStr, Optional[bool]) -> Disposable
|
|
||||||
if fork:
|
|
||||||
return self._add_message_callback_fork(callback, filter_rule)
|
|
||||||
else:
|
|
||||||
return self._add_message_callback_no_fork(callback, filter_rule)
|
|
||||||
|
|
||||||
def _add_message_callback_no_fork(self, callback, filter_rule): # TODO: forking for incoming method calls
|
|
||||||
# type: (Callable[[Message], NoReturn], AnyStr) -> Disposable
|
|
||||||
|
|
||||||
def dispatch(_, msg):
|
|
||||||
# type: (Connection, Message) -> int
|
|
||||||
|
|
||||||
#_log.info(' ####### got message type=' + str(msg.get_type()) + ' ' + msg.get_path() + '/' + msg.get_member())
|
|
||||||
callback(msg)
|
|
||||||
#_log.debug('DONE')
|
|
||||||
return HANDLER_RESULT_HANDLED
|
|
||||||
|
|
||||||
msg_filter = self._add_message_filter(dispatch)
|
|
||||||
match = self._add_match(filter_rule)
|
|
||||||
|
|
||||||
def dispose():
|
|
||||||
match.dispose()
|
|
||||||
msg_filter.dispose()
|
|
||||||
|
|
||||||
return self.create_dependent_disposable(dispose)
|
|
||||||
|
|
||||||
def _add_message_callback_fork(self, callback, filter_rule):
|
|
||||||
# type: (Callable[[Message], NoReturn], AnyStr) -> Disposable
|
|
||||||
|
|
||||||
forked = self.fork()
|
|
||||||
_log.debug('forked connection ' + forked.bus_id)
|
|
||||||
|
|
||||||
def dispatch(_, msg):
|
|
||||||
# type: (Connection, Message) -> int
|
|
||||||
|
|
||||||
# _log.debug('got message type=' + str(msg.get_type()) + ' ' + msg.get_path() + '/' + msg.get_member())
|
|
||||||
callback(msg)
|
|
||||||
return HANDLER_RESULT_HANDLED
|
|
||||||
|
|
||||||
forked._add_message_filter(dispatch)
|
|
||||||
forked._add_match(filter_rule)
|
|
||||||
|
|
||||||
return self.create_dependent_disposable(forked)
|
|
||||||
|
|
||||||
def _add_message_filter(self, callback):
|
|
||||||
# type: (Callable[[Connection, Message], int]) -> Disposable
|
|
||||||
|
|
||||||
_log.debug('added filter on ' + self.bus_id)
|
|
||||||
self._connection.add_message_filter(callback)
|
|
||||||
|
|
||||||
def dispose():
|
|
||||||
self._connection.remove_message_filter(callback)
|
|
||||||
|
|
||||||
return self.create_dependent_disposable(dispose, 'message filter on ' + self.bus_id)
|
|
||||||
|
|
||||||
def _add_match(self, filter_rule):
|
|
||||||
# type: (AnyStr) -> Disposable
|
|
||||||
|
|
||||||
self.call_daemon_method('AddMatch', dbus_string(filter_rule))
|
|
||||||
|
|
||||||
_log.debug('added match_rule: ' + filter_rule)
|
|
||||||
|
|
||||||
def dispose():
|
|
||||||
self.call_daemon_method('RemoveMatch', dbus_string(filter_rule))
|
|
||||||
|
|
||||||
return self.create_dependent_disposable(dispose, 'Match ' + filter_rule)
|
|
Binary file not shown.
|
@@ -1,273 +0,0 @@
|
||||||
from logging import getLogger
|
|
||||||
|
|
||||||
from _dbus_bindings import Message, ErrorMessage, BUS_DAEMON_NAME, BUS_DAEMON_PATH, BUS_DAEMON_IFACE
|
|
||||||
from python_libs.ie_dbus.private.datatypes import ServiceInfo
|
|
||||||
from python_libs.ie_dbus.private.dbus_connection import DbusConnection
|
|
||||||
from python_libs.ie_dbus.private.message_types import MatchedMessage, MessageFilter, ResolvedMessage
|
|
||||||
from python_libs.ie_utils.mixins import Disposable, RequiresMainLoop
|
|
||||||
|
|
||||||
_log = getLogger(__name__)
|
|
||||||
|
|
||||||
NONE = '<none>'
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import Callable, List, Optional, Iterable, Union, AnyStr, NoReturn, Any, Dict
|
|
||||||
from python_libs.ie_dbus.private.dbus_types import DbusType
|
|
||||||
|
|
||||||
|
|
||||||
class DBusDaemon(Disposable, RequiresMainLoop):
|
|
||||||
|
|
||||||
_services = None # type: Dict[str, ServiceInfo]
|
|
||||||
|
|
||||||
def __init__(self, connection_type_or_address):
|
|
||||||
# type: (Union[int, AnyStr]) -> NoReturn
|
|
||||||
|
|
||||||
self._dbus = DbusConnection(connection_type_or_address)
|
|
||||||
# self._dbus.add_message_callback(lambda _: None, 'type=method_call', fork=False) # sink method calls, TODO
|
|
||||||
|
|
||||||
self._name_changed = self.subscribe_to_signal_message(
|
|
||||||
self._on_name_owner_changed,
|
|
||||||
sender_id=BUS_DAEMON_NAME,
|
|
||||||
object_path=BUS_DAEMON_PATH,
|
|
||||||
interface=BUS_DAEMON_IFACE,
|
|
||||||
member='NameOwnerChanged')
|
|
||||||
|
|
||||||
self._services = self._init_services()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def bus_id(self):
|
|
||||||
# type: () -> AnyStr
|
|
||||||
return self._dbus.bus_id
|
|
||||||
|
|
||||||
@property
|
|
||||||
def services(self):
|
|
||||||
# type: () -> Iterable[ServiceInfo]
|
|
||||||
return self._services.itervalues()
|
|
||||||
|
|
||||||
def subscribe_to_signal_message(
|
|
||||||
self,
|
|
||||||
callback,
|
|
||||||
sender_id='*',
|
|
||||||
sender_name='*',
|
|
||||||
object_path='*',
|
|
||||||
interface='*',
|
|
||||||
member='*',
|
|
||||||
signature='*'):
|
|
||||||
# type: (Callable[[MatchedMessage], None], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr]) -> Disposable
|
|
||||||
|
|
||||||
message_filter = MessageFilter(
|
|
||||||
message_type='signal',
|
|
||||||
sender_id=sender_id,
|
|
||||||
sender_name=sender_name,
|
|
||||||
object_path=object_path,
|
|
||||||
interface=interface,
|
|
||||||
member=member,
|
|
||||||
signature=signature)
|
|
||||||
|
|
||||||
def dispatch(msg):
|
|
||||||
# type: (Message) -> NoReturn
|
|
||||||
|
|
||||||
resolved_msg = self._resolve_message(msg)
|
|
||||||
matched = message_filter.match_message(resolved_msg)
|
|
||||||
|
|
||||||
if matched is not None:
|
|
||||||
callback(matched)
|
|
||||||
|
|
||||||
return self._dbus.add_message_callback(dispatch, message_filter.filter_rule)
|
|
||||||
|
|
||||||
def subscribe_to_method_call_message(
|
|
||||||
self,
|
|
||||||
callback,
|
|
||||||
sender_id='*',
|
|
||||||
sender_name='*',
|
|
||||||
object_path='*',
|
|
||||||
interface='*',
|
|
||||||
member='*',
|
|
||||||
signature='*',
|
|
||||||
destination_id='*',
|
|
||||||
destination_name='*'):
|
|
||||||
# type: (Callable[[MatchedMessage], Any], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[AnyStr], Optional[bool]) -> Disposable
|
|
||||||
|
|
||||||
message_filter = MessageFilter(
|
|
||||||
message_type='method_call',
|
|
||||||
sender_id=sender_id,
|
|
||||||
sender_name=sender_name,
|
|
||||||
object_path=object_path,
|
|
||||||
interface=interface,
|
|
||||||
member=member,
|
|
||||||
signature=signature,
|
|
||||||
destination_id=destination_id,
|
|
||||||
destination_name=destination_name) # TODO: eavesdrop logic
|
|
||||||
|
|
||||||
def dispatch(msg):
|
|
||||||
# type: (Message) -> NoReturn
|
|
||||||
|
|
||||||
if msg.get_type() != 1:
|
|
||||||
return
|
|
||||||
|
|
||||||
resolved_msg = self._resolve_message(msg)
|
|
||||||
matched = message_filter.match_message(resolved_msg)
|
|
||||||
|
|
||||||
if matched is None:
|
|
||||||
reply = ErrorMessage(msg, 'com.victronenergy.method_call_refused', 'refused')
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
result = callback(matched)
|
|
||||||
except Exception as e:
|
|
||||||
# _log.debug('method_call threw an exception ' + str(e))
|
|
||||||
# traceback.print_exc()
|
|
||||||
reply = matched.create_error_reply(e)
|
|
||||||
else:
|
|
||||||
reply = matched.create_method_reply(result)
|
|
||||||
|
|
||||||
self._dbus.send_message(reply)
|
|
||||||
|
|
||||||
return self._dbus.add_message_callback(dispatch, message_filter.filter_rule, fork=False)
|
|
||||||
|
|
||||||
def request_name(self, service_name):
|
|
||||||
# type: (AnyStr) -> Disposable
|
|
||||||
|
|
||||||
return self._dbus.request_name(service_name)
|
|
||||||
|
|
||||||
def call_method(self, service_name, object_path, interface, member, *args):
|
|
||||||
# type: (AnyStr, AnyStr, AnyStr, AnyStr, Iterable[DbusType]) -> List[Any]
|
|
||||||
|
|
||||||
return self._dbus.call_method(service_name, object_path, interface, member, *args)
|
|
||||||
|
|
||||||
def broadcast_signal(self, object_path, interface, member, *args):
|
|
||||||
# type: (AnyStr, AnyStr, AnyStr, List[DbusType]) -> NoReturn
|
|
||||||
|
|
||||||
self._dbus.broadcast_signal(object_path, interface, member, *args)
|
|
||||||
|
|
||||||
def get_service_names_of_id(self, service_id):
|
|
||||||
# type: (str) -> List[AnyStr]
|
|
||||||
|
|
||||||
if service_id is None:
|
|
||||||
return []
|
|
||||||
|
|
||||||
return [
|
|
||||||
s.name
|
|
||||||
for s in self.services
|
|
||||||
if s.id == service_id
|
|
||||||
]
|
|
||||||
|
|
||||||
def get_id_for_service_name(self, service_name):
|
|
||||||
# type: (AnyStr) -> Optional[AnyStr]
|
|
||||||
|
|
||||||
return next((s.id for s in self.services if s.name == service_name), None)
|
|
||||||
|
|
||||||
def exists_service_with_name(self, service_name):
|
|
||||||
# type: (AnyStr) -> bool
|
|
||||||
|
|
||||||
return self.get_id_for_service_name(service_name) is not None
|
|
||||||
|
|
||||||
def _resolve_message(self, msg):
|
|
||||||
# type: (Message) -> ResolvedMessage
|
|
||||||
|
|
||||||
sender_id, sender_names = self._resolve_name(msg.get_sender())
|
|
||||||
destination_id, destination_names = self._resolve_name(msg.get_destination())
|
|
||||||
|
|
||||||
return ResolvedMessage(msg, sender_id, sender_names, destination_id, destination_names)
|
|
||||||
|
|
||||||
# noinspection PyShadowingBuiltins
|
|
||||||
def _resolve_name(self, name):
|
|
||||||
# type: (str) -> (str, List[str])
|
|
||||||
|
|
||||||
if name is None:
|
|
||||||
id = NONE
|
|
||||||
names = []
|
|
||||||
elif name.startswith(':'):
|
|
||||||
id = name
|
|
||||||
names = self.get_service_names_of_id(name)
|
|
||||||
else:
|
|
||||||
id = self.get_id_for_service_name(name)
|
|
||||||
names = [name]
|
|
||||||
|
|
||||||
return id, names
|
|
||||||
|
|
||||||
def _on_name_owner_changed(self, msg):
|
|
||||||
# type: (MatchedMessage) -> NoReturn
|
|
||||||
|
|
||||||
(name, old_id, new_id) = msg.arguments
|
|
||||||
|
|
||||||
old_id = old_id.strip()
|
|
||||||
new_id = new_id.strip()
|
|
||||||
name = name.strip()
|
|
||||||
|
|
||||||
if name.startswith(':'):
|
|
||||||
name = None
|
|
||||||
|
|
||||||
added = old_id == '' and new_id != ''
|
|
||||||
changed = old_id != '' and new_id != ''
|
|
||||||
removed = old_id != '' and new_id == ''
|
|
||||||
|
|
||||||
# 'changed' is dispatched as 'removed' followed by 'added'
|
|
||||||
|
|
||||||
if removed or changed:
|
|
||||||
self._services.pop(old_id, None)
|
|
||||||
|
|
||||||
if added or changed:
|
|
||||||
service = self._create_service(name, new_id)
|
|
||||||
self._services[new_id] = service
|
|
||||||
|
|
||||||
# noinspection PyShadowingBuiltins
|
|
||||||
def _init_services(self):
|
|
||||||
# type: () -> Dict[str, ServiceInfo]
|
|
||||||
|
|
||||||
services = dict()
|
|
||||||
|
|
||||||
names_and_ids = self._dbus.get_ids_and_service_names()
|
|
||||||
|
|
||||||
ids = set([i for i in names_and_ids if i.startswith(':')])
|
|
||||||
names = [n for n in names_and_ids if not n.startswith(':')]
|
|
||||||
|
|
||||||
for service_name in names:
|
|
||||||
service = self._create_service(service_name)
|
|
||||||
services[service.id] = service
|
|
||||||
ids.discard(service.id)
|
|
||||||
|
|
||||||
self._services = services # UGLY, because _create_service below references it.
|
|
||||||
|
|
||||||
for id in ids:
|
|
||||||
services[id] = self._create_service(id=id)
|
|
||||||
|
|
||||||
return services
|
|
||||||
|
|
||||||
def _search_service_name_by_pid(self, pid):
|
|
||||||
# type: (int) -> Optional[AnyStr]
|
|
||||||
return next((s.name for s in self.services if s.pid == pid and s.name != NONE), NONE)
|
|
||||||
|
|
||||||
# noinspection PyShadowingBuiltins
|
|
||||||
def _create_service(self, name=None, id=None):
|
|
||||||
# type: (Optional[AnyStr], Optional[AnyStr]) -> ServiceInfo
|
|
||||||
|
|
||||||
id = id or self._dbus.get_id_of_service(name)
|
|
||||||
pid = self._dbus.get_pid_of_service(id)
|
|
||||||
proc = self._get_process_name_of_pid(pid)
|
|
||||||
cmd = self._get_commandline_of_pid(pid)
|
|
||||||
name = name or self._search_service_name_by_pid(pid)
|
|
||||||
|
|
||||||
return ServiceInfo(name, id, pid, proc, cmd)
|
|
||||||
|
|
||||||
# noinspection PyBroadException
|
|
||||||
@staticmethod
|
|
||||||
def _get_process_name_of_pid(service_pid):
|
|
||||||
# type: (int) -> str
|
|
||||||
|
|
||||||
try:
|
|
||||||
with open('/proc/{0}/comm'.format(service_pid)) as proc:
|
|
||||||
return proc.read().replace('\0', ' ').rstrip()
|
|
||||||
except Exception as _:
|
|
||||||
return '<unknown>'
|
|
||||||
|
|
||||||
# noinspection PyBroadException
|
|
||||||
@staticmethod
|
|
||||||
def _get_commandline_of_pid(service_pid):
|
|
||||||
# type: (int) -> str
|
|
||||||
|
|
||||||
try:
|
|
||||||
with open('/proc/{0}/cmdline'.format(service_pid)) as proc:
|
|
||||||
return proc.read().replace('\0', ' ').rstrip()
|
|
||||||
except Exception as _:
|
|
||||||
return '<unknown>'
|
|
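A short usage sketch of DBusDaemon (the service name and handler body are examples only): because the daemon resolves sender and destination ids to well-known names before matching, a subscription can be expressed against service names instead of unique bus ids.

daemon = DBusDaemon(1)   # 1 = system bus, the same value as SYSTEM_BUS above

def on_properties_changed(msg):
    # msg is a MatchedMessage; for PropertiesChanged, arguments[0] is the {'Value': .., 'Text': ..} dict
    print(msg.sender_name + msg.object_path + ' changed')

subscription = daemon.subscribe_to_signal_message(
    on_properties_changed,
    sender_name='com.victronenergy.settings',
    member='PropertiesChanged')

# later: detach the match rule and the message filter again
subscription.dispose()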
Binary file not shown.
|
@@ -1,139 +0,0 @@
|
||||||
from logging import getLogger
|
|
||||||
|
|
||||||
import dbus
|
|
||||||
|
|
||||||
|
|
||||||
_log = getLogger(__name__)
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import Any, Union, Dict
|
|
||||||
DbusString = Union[dbus.String, dbus.UTF8String, dbus.ObjectPath, dbus.Signature]
|
|
||||||
DbusInt = Union[dbus.Int16, dbus.Int32, dbus.Int64]
|
|
||||||
DbusDouble = dbus.Double
|
|
||||||
DbusBool = dbus.Boolean
|
|
||||||
|
|
||||||
DbusStringVariant = DbusString # TODO: variant_level constraint ?
|
|
||||||
DbusIntVariant = DbusInt
|
|
||||||
DbusDoubleVariant = DbusDouble
|
|
||||||
DbusBoolVariant = DbusBool
|
|
||||||
|
|
||||||
DbusValue = Union[DbusString, DbusInt, DbusDouble, DbusBool, DBUS_NONE]
|
|
||||||
DbusVariant = Union[DbusStringVariant, DbusIntVariant, DbusDoubleVariant, DbusBoolVariant, DBUS_NONE]
|
|
||||||
|
|
||||||
DbusTextDict = dbus.Dictionary
|
|
||||||
DbusVariantDict = dbus.Dictionary
|
|
||||||
|
|
||||||
DbusType = Union[DbusValue, DbusVariant, DbusVariantDict, DbusTextDict]
|
|
||||||
|
|
||||||
DBUS_NONE = dbus.Array([], signature=dbus.Signature('i'), variant_level=1) # DEFINED by victron
|
|
||||||
|
|
||||||
MAX_INT16 = 2 ** 15 - 1
|
|
||||||
MAX_INT32 = 2 ** 31 - 1
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_uint32(value):
|
|
||||||
# type: (int) -> dbus.UInt32
|
|
||||||
if value < 0:
|
|
||||||
raise Exception('cannot convert negative value to UInt32')
|
|
||||||
|
|
||||||
return dbus.UInt32(value)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_int(value):
|
|
||||||
# type: (Union[int, long]) -> Union[dbus.Int16, dbus.Int32, dbus.Int64]
|
|
||||||
abs_value = abs(value)
|
|
||||||
if abs_value < MAX_INT16:
|
|
||||||
return dbus.Int16(value)
|
|
||||||
elif abs_value < MAX_INT32:
|
|
||||||
return dbus.Int32(value)
|
|
||||||
else:
|
|
||||||
return dbus.Int64(value)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_string(value):
|
|
||||||
# type: (Union[str, unicode]) -> DbusString
|
|
||||||
if isinstance(value, unicode):
|
|
||||||
return dbus.UTF8String(value)
|
|
||||||
else:
|
|
||||||
return dbus.String(value)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_double(value):
|
|
||||||
# type: (float) -> DbusDouble
|
|
||||||
return dbus.Double(value)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_bool(value):
|
|
||||||
# type: (bool) -> DbusBool
|
|
||||||
return dbus.Boolean(value)
|
|
||||||
|
|
||||||
|
|
||||||
# VARIANTS
|
|
||||||
|
|
||||||
def dbus_int_variant(value):
|
|
||||||
# type: (Union[int, long]) -> DbusIntVariant
|
|
||||||
abs_value = abs(value)
|
|
||||||
if abs_value < MAX_INT16:
|
|
||||||
return dbus.Int16(value, variant_level=1)
|
|
||||||
elif abs_value < MAX_INT32:
|
|
||||||
return dbus.Int32(value, variant_level=1)
|
|
||||||
else:
|
|
||||||
return dbus.Int64(value, variant_level=1)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_string_variant(value):
|
|
||||||
# type: (Union[str, unicode]) -> DbusStringVariant
|
|
||||||
if isinstance(value, unicode):
|
|
||||||
return dbus.UTF8String(value, variant_level=1)
|
|
||||||
else:
|
|
||||||
return dbus.String(value, variant_level=1)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_double_variant(value):
|
|
||||||
# type: (float) -> DbusDoubleVariant
|
|
||||||
return dbus.Double(value, variant_level=1)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_bool_variant(value):
|
|
||||||
# type: (bool) -> DbusBoolVariant
|
|
||||||
return dbus.Boolean(value, variant_level=1)
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_variant(value):
|
|
||||||
# type: (Any) -> DbusVariant
|
|
||||||
|
|
||||||
if value is None:
|
|
||||||
return DBUS_NONE
|
|
||||||
if isinstance(value, float):
|
|
||||||
return dbus_double_variant(value)
|
|
||||||
if isinstance(value, bool):
|
|
||||||
return dbus_bool_variant(value)
|
|
||||||
if isinstance(value, (int, long)):
|
|
||||||
return dbus_int_variant(value)
|
|
||||||
if isinstance(value, (str, unicode)):
|
|
||||||
return dbus_string_variant(value)
|
|
||||||
# TODO: container types
|
|
||||||
|
|
||||||
raise TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value))
|
|
||||||
|
|
||||||
|
|
||||||
def dbus_value(value):
|
|
||||||
# type: (Any) -> DbusVariant
|
|
||||||
|
|
||||||
if value is None:
|
|
||||||
return DBUS_NONE
|
|
||||||
if isinstance(value, float):
|
|
||||||
return dbus_double(value)
|
|
||||||
if isinstance(value, bool):
|
|
||||||
return dbus_bool(value)
|
|
||||||
if isinstance(value, (int, long)):
|
|
||||||
return dbus_int(value)
|
|
||||||
if isinstance(value, (str, unicode)):
|
|
||||||
return dbus_string(value)  # dbus_value returns plain (non-variant) values; the variant call here looked like a copy-paste slip from dbus_variant
|
|
||||||
# TODO: container types
|
|
||||||
|
|
||||||
raise TypeError('unsupported python type: ' + str(type(value)) + ' ' + str(value))
|
|
||||||
|
|
||||||
|
|
||||||
|
|
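A small sketch of the mapping implemented by dbus_variant above (the sample values are arbitrary): plain Python values are boxed into the corresponding dbus type with variant_level=1, and None maps to the Victron "invalid" marker DBUS_NONE.

dbus_variant(None)      # -> DBUS_NONE, an empty dbus.Array with signature 'i' and variant_level=1
dbus_variant(1.5)       # -> dbus.Double(1.5, variant_level=1)
dbus_variant(True)      # -> dbus.Boolean(True, variant_level=1)  (bool is checked before int)
dbus_variant(42)        # -> dbus.Int16(42, variant_level=1); Int32/Int64 for larger magnitudes
dbus_variant(u'hello')  # -> dbus.UTF8String(u'hello', variant_level=1)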
Binary file not shown.
|
@@ -1,259 +0,0 @@
|
||||||
from fnmatch import fnmatch as glob
|
|
||||||
from logging import getLogger
|
|
||||||
|
|
||||||
from _dbus_bindings import ErrorMessage, Message, MethodReturnMessage
|
|
||||||
from python_libs.ie_utils.mixins import Record
|
|
||||||
|
|
||||||
_log = getLogger(__name__)
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import List, Optional, Iterable, AnyStr, NoReturn, Any
|
|
||||||
|
|
||||||
|
|
||||||
class MessageType(object):
|
|
||||||
|
|
||||||
invalid = 0
|
|
||||||
method_call = 1
|
|
||||||
method_return = 2
|
|
||||||
error = 3
|
|
||||||
signal = 4
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def parse(message_type):
|
|
||||||
# type: (int) -> str
|
|
||||||
|
|
||||||
if message_type == 1:
|
|
||||||
return 'method_call'
|
|
||||||
if message_type == 2:
|
|
||||||
return 'method_return'
|
|
||||||
if message_type == 3:
|
|
||||||
return 'error'
|
|
||||||
if message_type == 4:
|
|
||||||
return 'signal'
|
|
||||||
|
|
||||||
return 'invalid'
|
|
||||||
|
|
||||||
|
|
||||||
class DBusMessage(Record):
|
|
||||||
|
|
||||||
def __init__(self, msg, sender_id, destination_id):
|
|
||||||
# type: (Message, str, str) -> NoReturn
|
|
||||||
|
|
||||||
self.sender_id = sender_id
|
|
||||||
self.destination_id = destination_id
|
|
||||||
self._msg = msg
|
|
||||||
|
|
||||||
@property
|
|
||||||
def expects_reply(self):
|
|
||||||
# type: () -> bool
|
|
||||||
return not self._msg.get_no_reply()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def message_type(self):
|
|
||||||
# type: () -> int
|
|
||||||
return int(self._msg.get_type())
|
|
||||||
|
|
||||||
@property
|
|
||||||
def reply_serial(self):
|
|
||||||
# type: () -> int
|
|
||||||
return int(self._msg.get_reply_serial())
|
|
||||||
|
|
||||||
@property
|
|
||||||
def object_path(self):
|
|
||||||
# type: () -> str
|
|
||||||
return str(self._msg.get_path())
|
|
||||||
|
|
||||||
@property
|
|
||||||
def interface(self):
|
|
||||||
# type: () -> str
|
|
||||||
return str(self._msg.get_interface())
|
|
||||||
|
|
||||||
@property
|
|
||||||
def arguments(self):
|
|
||||||
# type: () -> List[Any]
|
|
||||||
return self._msg.get_args_list(utf8_strings=True)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def signature(self):
|
|
||||||
# type: () -> str
|
|
||||||
return str(self._msg.get_signature())
|
|
||||||
|
|
||||||
@property
|
|
||||||
def serial(self):
|
|
||||||
# type: () -> int
|
|
||||||
return int(self._msg.get_serial())
|
|
||||||
|
|
||||||
@property
|
|
||||||
def member(self):
|
|
||||||
# type: () -> str
|
|
||||||
return str(self._msg.get_member())
|
|
||||||
|
|
||||||
def create_method_reply(self, *args):
|
|
||||||
# type: (List[any]) -> MethodReturnMessage
|
|
||||||
|
|
||||||
if self.message_type != MessageType.method_call:
|
|
||||||
raise Exception('cannot create a reply for a message that is not a method call')
|
|
||||||
|
|
||||||
reply = MethodReturnMessage(self._msg)
|
|
||||||
|
|
||||||
for arg in args:
|
|
||||||
reply.append(arg)
|
|
||||||
|
|
||||||
return reply
|
|
||||||
|
|
||||||
def create_error_reply(self, exception):
|
|
||||||
# type: (Exception) -> ErrorMessage
|
|
||||||
|
|
||||||
if self.message_type != MessageType.method_call:
|
|
||||||
raise Exception('cannot create an error reply for a message that is not a method call')
|
|
||||||
|
|
||||||
return ErrorMessage(self._msg, 'com.victronenergy.' + exception.__class__.__name__, exception.message) # TODO prefix
|
|
||||||
|
|
||||||
|
|
||||||
class ResolvedMessage(DBusMessage):
|
|
||||||
|
|
||||||
def __init__(self, msg, sender_id, sender_names, destination_id, destination_names):
|
|
||||||
# type: (Message, str, List[str], str, List[str]) -> NoReturn
|
|
||||||
|
|
||||||
super(ResolvedMessage, self).__init__(msg, sender_id, destination_id)
|
|
||||||
|
|
||||||
self.sender_names = sender_names
|
|
||||||
self.destination_names = destination_names
|
|
||||||
|
|
||||||
|
|
||||||
class MatchedMessage(DBusMessage):
|
|
||||||
|
|
||||||
def __init__(self, resolved_msg, sender_name, destination_name):
|
|
||||||
# type: (ResolvedMessage, str, str) -> NoReturn
|
|
||||||
|
|
||||||
super(MatchedMessage, self).__init__(resolved_msg._msg, resolved_msg.sender_id, resolved_msg.destination_id)
|
|
||||||
|
|
||||||
self.sender_name = sender_name
|
|
||||||
self.destination_name = destination_name
|
|
||||||
|
|
||||||
|
|
||||||
class MessageFilter(Record):
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
message_type='*',
|
|
||||||
sender_id='*',
|
|
||||||
sender_name='*',
|
|
||||||
object_path='*',
|
|
||||||
interface='*',
|
|
||||||
member='*',
|
|
||||||
signature='*',
|
|
||||||
destination_id='*',
|
|
||||||
destination_name='*',
|
|
||||||
eavesdrop=False):
|
|
||||||
|
|
||||||
# type: (Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[bool]) -> NoReturn
|
|
||||||
|
|
||||||
self.signature = signature
|
|
||||||
self.message_type = message_type
|
|
||||||
|
|
||||||
self.member = member
|
|
||||||
self.interface = interface
|
|
||||||
self.object_path = object_path
|
|
||||||
|
|
||||||
self.sender_id = sender_id
|
|
||||||
self.sender_name = sender_name
|
|
||||||
self.destination_id = destination_id
|
|
||||||
self.destination_name = destination_name
|
|
||||||
|
|
||||||
self.eavesdrop = eavesdrop
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def create_filter_rule(
|
|
||||||
message_type='*',
|
|
||||||
sender_id='*',
|
|
||||||
sender_name='*',
|
|
||||||
object_path='*',
|
|
||||||
interface='*',
|
|
||||||
member='*',
|
|
||||||
destination_id='*',
|
|
||||||
eavesdrop=False):
|
|
||||||
# type: (Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],Optional[AnyStr],bool) -> AnyStr
|
|
||||||
|
|
||||||
rules = []
|
|
||||||
|
|
||||||
def rule(key, value):
|
|
||||||
if '*' not in value and '?' not in value:
|
|
||||||
rules.append("%s='%s'" % (key, value))
|
|
||||||
|
|
||||||
rule('type', message_type)
|
|
||||||
rule('sender', sender_id if sender_name == '*' and sender_id != '*' else sender_name)
|
|
||||||
rule('destination', destination_id)
|
|
||||||
rule('eavesdrop', 'true' if eavesdrop else 'false')
|
|
||||||
rule('path', object_path) # TODO: endswith *, object namespace
|
|
||||||
rule('interface', interface)
|
|
||||||
rule('member', member)
|
|
||||||
|
|
||||||
return ','.join(rules)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def filter_rule(self):
|
|
||||||
# type: () -> AnyStr
|
|
||||||
|
|
||||||
return self.create_filter_rule(
|
|
||||||
message_type=self.message_type,
|
|
||||||
sender_id=self.sender_id,
|
|
||||||
sender_name=self.sender_name,
|
|
||||||
object_path=self.object_path,
|
|
||||||
interface=self.interface,
|
|
||||||
member=self.member,
|
|
||||||
destination_id=self.destination_id,
|
|
||||||
eavesdrop=self.eavesdrop)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _get_matching_name(names, name_filter):
|
|
||||||
# type: (Iterable[AnyStr], AnyStr) -> Optional[AnyStr]
|
|
||||||
|
|
||||||
matching_names = (
|
|
||||||
name
|
|
||||||
for name
|
|
||||||
in names
|
|
||||||
if glob(name, name_filter)
|
|
||||||
)
|
|
||||||
|
|
||||||
return next(matching_names, None)
|
|
||||||
|
|
||||||
def match_message(self, msg):
|
|
||||||
# type: (ResolvedMessage) -> Optional[MatchedMessage]
|
|
||||||
|
|
||||||
match = \
|
|
||||||
glob(msg.object_path, self.object_path) and \
|
|
||||||
glob(msg.interface or '<none>', self.interface) and \
|
|
||||||
glob(msg.member, self.member) and \
|
|
||||||
glob(msg.signature, self.signature) and \
|
|
||||||
glob(msg.sender_id, self.sender_id) and \
|
|
||||||
glob(msg.destination_id or '<none>', self.destination_id)
|
|
||||||
|
|
||||||
if not match:
|
|
||||||
return None
|
|
||||||
|
|
||||||
sender_name = self._get_matching_name(msg.sender_names, self.sender_name)
|
|
||||||
if sender_name is None and self.sender_name != '*': # sender might not have a well known name
|
|
||||||
return None
|
|
||||||
|
|
||||||
destination_name = self._get_matching_name(msg.destination_names, self.destination_name)
|
|
||||||
if destination_name is None and self.destination_name != '*':
|
|
||||||
return None
|
|
||||||
|
|
||||||
return MatchedMessage(msg, sender_name, destination_name)
|
|
||||||
|
|
||||||
|
|
||||||
class DBusException(Exception):
|
|
||||||
|
|
||||||
def __init__(self, message):
|
|
||||||
super(Exception, self).__init__(message)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def raise_if_error_reply(cls, reply):
|
|
||||||
# type: (Message) -> Message
|
|
||||||
|
|
||||||
if isinstance(reply, ErrorMessage):
|
|
||||||
raise DBusException(reply.get_error_name())
|
|
||||||
else:
|
|
||||||
return reply
|
|
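A sketch of the match rule produced by MessageFilter.create_filter_rule (the service name and object path are invented): fields that still contain a glob wildcard are left out of the rule, so the bus-side filter stays broad and the precise matching happens later in match_message.

rule = MessageFilter.create_filter_rule(
    message_type='signal',
    sender_name='com.victronenergy.settings',
    object_path='/Settings/Gui/Brightness',
    member='PropertiesChanged')

# rule == "type='signal',sender='com.victronenergy.settings',eavesdrop='false'," \
#         "path='/Settings/Gui/Brightness',member='PropertiesChanged'"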
Binary file not shown.
|
@@ -1,177 +0,0 @@
|
||||||
|
|
||||||
from logging import getLogger
|
|
||||||
|
|
||||||
import dbus
|
|
||||||
|
|
||||||
from python_libs.ie_dbus.private.dbus_types import dbus_variant, dbus_string
|
|
||||||
from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon
|
|
||||||
from python_libs.ie_dbus.private.message_types import MatchedMessage
|
|
||||||
from python_libs.ie_dbus.private.ve_constants import GET_TEXT, INTERFACE_BUS_ITEM, PROPERTIES_CHANGED, GET_VALUE, SET_VALUE
|
|
||||||
from python_libs.ie_utils.mixins import Disposable, Record
|
|
||||||
|
|
||||||
_log = getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import Optional, AnyStr, NoReturn, Dict, Any
|
|
||||||
from python_libs.ie_dbus.private.dbus_types import DbusVariant, DbusString, DbusVariantDict, DbusType
|
|
||||||
|
|
||||||
|
|
||||||
class OwnProperty(Record):
|
|
||||||
|
|
||||||
def __init__(self, value, unit='', writable=False):
|
|
||||||
|
|
||||||
str_value = round(value, 2) if isinstance(value, float) else value
|
|
||||||
|
|
||||||
self.text = unicode(str_value) + unit
|
|
||||||
self.value = value
|
|
||||||
self.unit = unit
|
|
||||||
self.writable = writable
|
|
||||||
|
|
||||||
@property
|
|
||||||
def dbus_dict(self):
|
|
||||||
# type: () -> dbus.Dictionary
|
|
||||||
d = {
|
|
||||||
dbus.String('Text'): dbus_variant(self.text),
|
|
||||||
dbus.String('Value'): dbus_variant(self.value)
|
|
||||||
}
|
|
||||||
return dbus.Dictionary(d, signature='sv')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def dbus_value(self):
|
|
||||||
# type: () -> DbusVariant
|
|
||||||
return dbus_variant(self.value)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def dbus_text(self):
|
|
||||||
# type: () -> DbusString
|
|
||||||
return dbus_string(self.text)
|
|
||||||
|
|
||||||
def update_value(self, value):
|
|
||||||
# type: (any) -> OwnProperty
|
|
||||||
return OwnProperty(value, self.unit, self.writable)
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
yield self.value
|
|
||||||
yield self.text
|
|
||||||
|
|
||||||
|
|
||||||
class OwnProperties(Disposable):
|
|
||||||
|
|
||||||
_own_properties = None # type: Dict[AnyStr, OwnProperty]
|
|
||||||
|
|
||||||
# noinspection PyProtectedMember
|
|
||||||
def __init__(self, daemon):
|
|
||||||
# type: (DBusDaemon) -> NoReturn
|
|
||||||
|
|
||||||
self._daemon = daemon
|
|
||||||
self._own_properties = dict()
|
|
||||||
self._method_call_subs = self._daemon.subscribe_to_method_call_message(self._on_method_called) # no filter whatsoever
|
|
||||||
|
|
||||||
def get(self, object_path):
|
|
||||||
# type: (AnyStr) -> OwnProperty
|
|
||||||
return self._own_properties[object_path]
|
|
||||||
|
|
||||||
def set(self, object_path, value, unit='', writable=False):
|
|
||||||
# type: (AnyStr, any, Optional[AnyStr], Optional[bool]) -> bool
|
|
||||||
|
|
||||||
prop = OwnProperty(value, unit, writable)
|
|
||||||
|
|
||||||
if object_path in self._own_properties:
|
|
||||||
if self._own_properties[object_path] == prop:
|
|
||||||
return False
|
|
||||||
|
|
||||||
self._own_properties[object_path] = prop
|
|
||||||
# object_path, interface, member, *args):
|
|
||||||
self._daemon.broadcast_signal(
|
|
||||||
object_path,
|
|
||||||
INTERFACE_BUS_ITEM,
|
|
||||||
PROPERTIES_CHANGED,
|
|
||||||
prop.dbus_dict)
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def _on_method_called(self, message):
|
|
||||||
# type: (MatchedMessage) -> Any
|
|
||||||
|
|
||||||
# _log.info(str(message.sender_name) + '(' + str(message.sender_id) + ') asked ' + message.member + ' ' + message.object_path)
|
|
||||||
|
|
||||||
if message.member == GET_VALUE:
|
|
||||||
return self._on_get_value_called(message)
|
|
||||||
elif message.member == GET_TEXT:
|
|
||||||
return self._on_get_text_called(message)
|
|
||||||
elif message.member == SET_VALUE:
|
|
||||||
return self._on_set_value_called(message)
|
|
||||||
|
|
||||||
def _on_set_value_called(self, message):
|
|
||||||
# type: (MatchedMessage) -> bool
|
|
||||||
|
|
||||||
path = message.object_path
|
|
||||||
|
|
||||||
if path not in self._own_properties:
|
|
||||||
raise Exception('property ' + path + ' does not exist')
|
|
||||||
|
|
||||||
prop = self._own_properties[path]
|
|
||||||
if not prop.writable:
|
|
||||||
raise Exception('property ' + path + ' is read-only')
|
|
||||||
|
|
||||||
value = message.arguments[0]
|
|
||||||
|
|
||||||
if prop.value == value:
|
|
||||||
return False
|
|
||||||
|
|
||||||
prop = prop.update_value(value)
|
|
||||||
self._own_properties[path] = prop
|
|
||||||
|
|
||||||
# object_path, interface, member, *args):
|
|
||||||
self._daemon.broadcast_signal(
|
|
||||||
path,
|
|
||||||
INTERFACE_BUS_ITEM,
|
|
||||||
PROPERTIES_CHANGED,
|
|
||||||
prop.dbus_dict)
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def _on_get_value_called(self, message):
|
|
||||||
# type: (MatchedMessage) -> DbusType
|
|
||||||
|
|
||||||
path = message.object_path
|
|
||||||
|
|
||||||
if path in self._own_properties:
|
|
||||||
return self._own_properties[path].dbus_value
|
|
||||||
|
|
||||||
if path.endswith('/'): # "Tree Export"
|
|
||||||
values = {
|
|
||||||
dbus.String(k.lstrip('/')): dbus_variant(p.value)
|
|
||||||
for (k, p)
|
|
||||||
in self._own_properties.iteritems()
|
|
||||||
if k.startswith(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
return dbus.Dictionary(values, signature='sv', variant_level=1) # variant for tree export !!
|
|
||||||
|
|
||||||
raise Exception('property ' + path + ' does not exist')
|
|
||||||
|
|
||||||
def _on_get_text_called(self, message):
|
|
||||||
# type: (MatchedMessage) -> DbusType
|
|
||||||
|
|
||||||
path = message.object_path
|
|
||||||
|
|
||||||
if path in self._own_properties:
|
|
||||||
return self._own_properties[message.object_path].dbus_text
|
|
||||||
|
|
||||||
if path.endswith('/'): # "Tree Export"
|
|
||||||
values = {
|
|
||||||
dbus.String(k.lstrip('/')): dbus.String(p.text)
|
|
||||||
for (k, p)
|
|
||||||
in self._own_properties.iteritems()
|
|
||||||
if k.startswith(path)
|
|
||||||
}
|
|
||||||
return dbus.Dictionary(values, signature='ss', variant_level=1) # variant for tree export !!
|
|
||||||
|
|
||||||
raise Exception('property ' + path + ' does not exist')
|
|
||||||
|
|
||||||
def __contains__(self, object_path):
|
|
||||||
# type: (AnyStr) -> bool
|
|
||||||
return object_path in self._own_properties
|
|
Binary file not shown.
|
@@ -1,166 +0,0 @@
|
||||||
from logging import getLogger
|
|
||||||
|
|
||||||
from python_libs.ie_dbus.private.dbus_types import dbus_variant
|
|
||||||
from python_libs.ie_utils.mixins import Disposable, Record
|
|
||||||
from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon
|
|
||||||
from python_libs.ie_dbus.private.message_types import MatchedMessage
|
|
||||||
from python_libs.ie_dbus.private.ve_constants import GET_TEXT, INTERFACE_BUS_ITEM, PROPERTIES_CHANGED, GET_VALUE, SERVICE_PREFIX, SET_VALUE
|
|
||||||
|
|
||||||
_log = getLogger(__name__)
|
|
||||||
|
|
||||||
_UNKNOWN_TEXT = '<UNKNOWN_TEXT>'
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import List, AnyStr, NoReturn, Dict, Any
|
|
||||||
|
|
||||||
|
|
||||||
class RemoteProperty(Record):
|
|
||||||
|
|
||||||
def __init__(self, value, text):
|
|
||||||
|
|
||||||
self.text = text
|
|
||||||
self.value = value
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def from_dbus_dict(dbus_dict):
|
|
||||||
value = dbus_dict['Value']
|
|
||||||
text = dbus_dict['Text']
|
|
||||||
return RemoteProperty(value, text)
|
|
||||||
|
|
||||||
|
|
||||||
class RemoteProperties(Disposable):
|
|
||||||
|
|
||||||
_remote_properties = None # type: Dict[AnyStr, RemoteProperty]
|
|
||||||
|
|
||||||
def __init__(self, daemon):
|
|
||||||
# type: (DBusDaemon) -> NoReturn
|
|
||||||
|
|
||||||
self._daemon = daemon
|
|
||||||
self._remote_properties = dict()
|
|
||||||
|
|
||||||
# noinspection PyBroadException
|
|
||||||
def available_properties(self, service_name):
|
|
||||||
# type: (unicode) -> List[unicode]
|
|
||||||
|
|
||||||
if not self._daemon.exists_service_with_name(service_name):
|
|
||||||
return []
|
|
||||||
|
|
||||||
try:
|
|
||||||
paths = self._call_remote(service_name=service_name, object_path='/', member=GET_TEXT)[0].keys()
|
|
||||||
except Exception as _:
|
|
||||||
return []
|
|
||||||
else:
|
|
||||||
return ['/' + str(path) for path in paths]
|
|
||||||
|
|
||||||
def exists(self, combined_path):
|
|
||||||
# type: (AnyStr) -> bool
|
|
||||||
|
|
||||||
service_name, object_path, combined_path = self._parse_combined_path(combined_path)
|
|
||||||
return object_path in self.available_properties(service_name)
|
|
||||||
|
|
||||||
def get(self, combined_path):
|
|
||||||
# type: (AnyStr) -> RemoteProperty
|
|
||||||
|
|
||||||
service_name, object_path, combined_path = self._parse_combined_path(combined_path)
|
|
||||||
|
|
||||||
if combined_path in self._remote_properties:
|
|
||||||
cached = self._remote_properties[combined_path]
|
|
||||||
|
|
||||||
# a cached prop might have an unknown text, because its value has been written before,
|
|
||||||
# but it has never read or updated via property-changed
|
|
||||||
|
|
||||||
if cached.text != _UNKNOWN_TEXT:
|
|
||||||
return cached
|
|
||||||
|
|
||||||
text = self._get_text(service_name, object_path)
|
|
||||||
self._remote_properties[combined_path] = RemoteProperty(cached.value, text)
|
|
||||||
|
|
||||||
return self._remote_properties[combined_path]
|
|
||||||
|
|
||||||
prop = self._get_property(service_name, object_path)
|
|
||||||
self._remote_properties[combined_path] = prop
|
|
||||||
self._subscribe_to_property_changed(service_name, object_path)
|
|
||||||
|
|
||||||
return prop
|
|
||||||
|
|
||||||
def set(self, combined_path, value):
|
|
||||||
# type: (AnyStr, any) -> bool
|
|
||||||
|
|
||||||
service_name, object_path, combined_path = self._parse_combined_path(combined_path)
|
|
||||||
|
|
||||||
if combined_path in self._remote_properties:
|
|
||||||
if self._remote_properties[combined_path].value == value:
|
|
||||||
return False # property already has the requested value => nothing to do
|
|
||||||
else:
|
|
||||||
self._subscribe_to_property_changed(service_name, object_path)
|
|
||||||
|
|
||||||
result = self._call_remote(service_name, object_path, SET_VALUE, dbus_variant(value))[0]
|
|
||||||
|
|
||||||
if result != 0:
|
|
||||||
raise Exception(service_name + ' refused to set value of ' + object_path + ' to ' + str(value))
|
|
||||||
|
|
||||||
self._remote_properties[combined_path] = RemoteProperty(value, _UNKNOWN_TEXT)
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def _subscribe_to_property_changed(self, service_name, object_path):
|
|
||||||
# type: (unicode, unicode) -> NoReturn
|
|
||||||
|
|
||||||
def callback(msg):
|
|
||||||
# type: (MatchedMessage) -> NoReturn
|
|
||||||
prop = RemoteProperty.from_dbus_dict(msg.arguments[0])
|
|
||||||
key = msg.sender_name+msg.object_path
|
|
||||||
self._remote_properties[key] = prop
|
|
||||||
|
|
||||||
signal = self._daemon.subscribe_to_signal_message(
|
|
||||||
callback=callback,
|
|
||||||
sender_name=service_name,
|
|
||||||
object_path=object_path,
|
|
||||||
interface=INTERFACE_BUS_ITEM, # TODO: <- this could be removed to make it more robust, in theory
|
|
||||||
member=PROPERTIES_CHANGED) # TODO: OTOH, don't fix if it is not broken
|
|
||||||
|
|
||||||
self.chain_disposable(signal, 'signal subscription on ' + self._daemon.bus_id + ' ' + service_name + object_path)
|
|
||||||
|
|
||||||
def _get_value(self, service_name, object_path):
|
|
||||||
# type: (unicode, unicode) -> any
|
|
||||||
|
|
||||||
return self._call_remote(service_name, object_path, GET_VALUE)[0]
|
|
||||||
|
|
||||||
def _get_text(self, service_name, object_path):
|
|
||||||
# type: (unicode, unicode) -> unicode
|
|
||||||
|
|
||||||
result = self._call_remote(service_name, object_path, GET_TEXT)[0]
|
|
||||||
return unicode(result)
|
|
||||||
|
|
||||||
def _get_property(self, service_name, object_path):
|
|
||||||
# type: (unicode, unicode) -> RemoteProperty
|
|
||||||
|
|
||||||
value = self._get_value(service_name, object_path)
|
|
||||||
text = self._get_text(service_name, object_path)
|
|
||||||
|
|
||||||
return RemoteProperty(value, text)
|
|
||||||
|
|
||||||
def _call_remote(self, service_name, object_path, member, *args):
|
|
||||||
# type: (unicode, unicode, unicode, List[Any]) -> List[Any]
|
|
||||||
|
|
||||||
return self._daemon.call_method(service_name, object_path, INTERFACE_BUS_ITEM, member, *args)
|
|
||||||
|
|
||||||
def _parse_combined_path(self, combined_path):
|
|
||||||
# type: (str) -> (unicode,unicode,unicode)
|
|
||||||
|
|
||||||
service_name, object_path = combined_path.lstrip('/').split('/', 1)
|
|
||||||
|
|
||||||
if service_name == '':
|
|
||||||
raise Exception('Failed to parse service name. \ncombined_path must be of the form "service_name/path/to/property"')
|
|
||||||
if object_path == '':
|
|
||||||
raise Exception('Failed to parse object path. \ncombined_path must be of the form "service_name/path/to/property"')
|
|
||||||
|
|
||||||
service_name = service_name if service_name.startswith(SERVICE_PREFIX) else SERVICE_PREFIX + service_name
|
|
||||||
|
|
||||||
if not self._daemon.exists_service_with_name(service_name):
|
|
||||||
raise Exception('there is no service with the name "' + service_name + '" on the bus')
|
|
||||||
|
|
||||||
object_path = '/' + object_path
|
|
||||||
|
|
||||||
return unicode(service_name), unicode(object_path), unicode(service_name + object_path)
|
|
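A hedged sketch of the combined-path convention used by RemoteProperties (the service and property are examples): paths have the form "service_name/path/to/property", the 'com.victronenergy.' prefix is added when missing, and results are cached and kept fresh via PropertiesChanged subscriptions.

props = RemoteProperties(daemon)   # daemon: an existing DBusDaemon instance, assumed to be set up already

brightness = props.get('settings/Settings/Gui/Brightness')   # -> RemoteProperty(value, text)
print(brightness.text)

props.set('settings/Settings/Gui/Brightness', 50)   # raises if the remote service refuses the write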
Binary file not shown.
|
@@ -1,89 +0,0 @@
|
||||||
from logging import getLogger
|
|
||||||
|
|
||||||
from python_libs.ie_dbus.private.dbus_types import dbus_string, dbus_int_variant, dbus_string_variant, dbus_double_variant, dbus_variant
|
|
||||||
from python_libs.ie_utils.mixins import Record
|
|
||||||
from python_libs.ie_dbus.private.dbus_daemon import DBusDaemon
|
|
||||||
from python_libs.ie_dbus.private.remote_properties import RemoteProperties
|
|
||||||
from python_libs.ie_dbus.private.ve_constants import SETTINGS_SERVICE, SETTINGS_INTERFACE, SETTINGS_PREFIX
|
|
||||||
|
|
||||||
_log = getLogger(__name__)
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import Union, NoReturn, Optional, AnyStr
|
|
||||||
|
|
||||||
|
|
||||||
def prepend_settings_prefix(path):
|
|
||||||
# type: (AnyStr) -> any
|
|
||||||
|
|
||||||
path = '/' + path.lstrip('/')
|
|
||||||
path = path if path.startswith(SETTINGS_PREFIX) else SETTINGS_PREFIX + path
|
|
||||||
return path
|
|
||||||
|
|
||||||
|
|
||||||
class Settings(Record):
|
|
||||||
|
|
||||||
# noinspection PyProtectedMember
|
|
||||||
def __init__(self, daemon, remote_properties):
|
|
||||||
# type: (DBusDaemon, RemoteProperties) -> NoReturn
|
|
||||||
|
|
||||||
self._daemon = daemon
|
|
||||||
self._remote_properties = remote_properties
|
|
||||||
|
|
||||||
# noinspection PyShadowingBuiltins
|
|
||||||
|
|
||||||
def add_setting(self, path, default_value, min=None, max=None, silent=False):
|
|
||||||
# type: (AnyStr, Union[unicode, int, float], Union[int, float, None], Union[int, float, None], Optional[bool]) -> NoReturn
|
|
||||||
|
|
||||||
path = prepend_settings_prefix(path)
|
|
||||||
|
|
||||||
if isinstance(default_value, int):
|
|
||||||
item_type = 'i'
|
|
||||||
elif isinstance(default_value, float):
|
|
||||||
item_type = 'f'
|
|
||||||
elif isinstance(default_value, (str, unicode)):
|
|
||||||
item_type = 's'
|
|
||||||
else:
|
|
||||||
raise Exception('Unsupported Settings Type')
|
|
||||||
|
|
||||||
reply = self._daemon.call_method(
|
|
||||||
SETTINGS_SERVICE, # service_name
|
|
||||||
'/', # object_path
|
|
||||||
SETTINGS_INTERFACE, # interface
|
|
||||||
'AddSilentSetting' if silent else 'AddSetting', # member,
|
|
||||||
dbus_string(''), # "group", not used
|
|
||||||
dbus_string(path),
|
|
||||||
dbus_variant(default_value),
|
|
||||||
dbus_string(item_type),
|
|
||||||
dbus_int_variant(min or 0),
|
|
||||||
dbus_int_variant(max or 0))
|
|
||||||
|
|
||||||
if reply[0] != 0:
|
|
||||||
raise Exception('failed to add setting ' + path)
|
|
||||||
|
|
||||||
def exists(self, path):
|
|
||||||
# type: (unicode) -> bool
|
|
||||||
|
|
||||||
path = prepend_settings_prefix(path)
|
|
||||||
return path in self.available_settings
|
|
||||||
|
|
||||||
def get(self, path):
|
|
||||||
# type: (unicode) -> Union[unicode, int, float]
|
|
||||||
|
|
||||||
path = prepend_settings_prefix(path)
|
|
||||||
return self._remote_properties.get(SETTINGS_SERVICE + path).value
|
|
||||||
|
|
||||||
def set(self, path, value):
|
|
||||||
# type: (unicode, Union[unicode, int, float]) -> NoReturn
|
|
||||||
|
|
||||||
path = prepend_settings_prefix(path)
|
|
||||||
self._remote_properties.set(SETTINGS_SERVICE + path, value)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def available_settings(self):
|
|
||||||
# type: () -> [unicode]
|
|
||||||
return self._remote_properties.available_properties(SETTINGS_SERVICE)
|
|
||||||
|
|
||||||
def __contains__(self, path):
|
|
||||||
# type: (unicode) -> bool
|
|
||||||
return self.exists(path)
|
|
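A usage sketch for Settings (the setting path and values are made up): add_setting creates the entry on com.victronenergy.settings if it does not exist yet, while get and set go through RemoteProperties under the '/Settings' prefix.

settings = Settings(daemon, remote_properties)   # both assumed to exist already

settings.add_setting('/MyService/PollInterval', default_value=10, min=1, max=3600)
interval = settings.get('/MyService/PollInterval')   # '/Settings' is prepended automatically
settings.set('/MyService/PollInterval', 30)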
Binary file not shown.
|
@@ -1,11 +0,0 @@
SERVICE_PREFIX = 'com.victronenergy.'
VE_SERVICE_FILTER = SERVICE_PREFIX + '*'
INTERFACE_BUS_ITEM = SERVICE_PREFIX + 'BusItem'
PROPERTIES_CHANGED = 'PropertiesChanged'
GET_VALUE = 'GetValue'
SET_VALUE = 'SetValue'
GET_TEXT = 'GetText'
SETTINGS_SERVICE = 'com.victronenergy.settings'
SETTINGS_INTERFACE = 'com.victronenergy.Settings'
SETTINGS_PREFIX = '/Settings'
Binary file not shown.
Binary file not shown.
|
@@ -1,73 +0,0 @@
from logging import getLogger

# noinspection PyUnreachableCode
if False:
    from typing import NoReturn, Optional

_log = getLogger(__name__)


class MovingAverageFilter(object):

    def __init__(self, length=30, initial_value=0):
        # type: (int, float) -> NoReturn

        self.value = initial_value
        self.length = length

    def update(self, value, length=None):
        # type: (float, int) -> float

        if length is not None:
            self.length = length

        self.value = (self.value * self.length + value) / (self.length + 1)

        _log.debug('real value: ' + str(value) + ', filtered value: ' + str(self.value))

        return self.value


class DebounceFilter(object):

    def __init__(self, initial_state=None, max_inertia=10):
        # type: (Optional[bool], Optional[int]) -> NoReturn

        self._max_inertia = max_inertia
        self._inertia = max_inertia
        self._state = initial_state

    def reset(self, state=None, max_inertia=None):
        # type: (Optional[bool], Optional[int]) -> bool

        self._max_inertia = max_inertia or self._max_inertia
        self._inertia = self._max_inertia
        self._state = state or self._state

        _log.debug('debounce filter reset: state={0}, inertia={1}'.format(self._state, self._inertia))

        return self._state

    def flip(self):
        # type: () -> bool
        self._state = not self._state
        self._inertia = self._max_inertia
        return self._state

    def update(self, new_state, max_inertia=None):
        # type: (bool, int) -> bool

        if max_inertia is not None and max_inertia != self._max_inertia:
            return self.reset(new_state, max_inertia)

        if new_state != self._state:
            if self._inertia > 0:
                self._inertia = self._inertia - 1
            else:
                self.flip()
        else:
            self._inertia = min(self._inertia + 1, self._max_inertia)

        _log.debug('debounce filter update: state={0}, inertia={1}'.format(self._state, self._inertia))

        return self._state
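A short usage sketch (the sample readings are invented): MovingAverageFilter smooths a noisy measurement, while DebounceFilter suppresses short glitches on a boolean signal by requiring several consecutive contradicting samples before it flips.

avg = MovingAverageFilter(length=10, initial_value=0)
for reading in [12.0, 12.4, 11.8]:
    smoothed = avg.update(reading)

debounce = DebounceFilter(initial_state=False, max_inertia=3)
for raw in [True, False, True, True, True, True]:
    stable = debounce.update(raw)
# stable only becomes True once the True readings have persisted long enough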
Binary file not shown.
|
@@ -1,30 +0,0 @@
from logging import getLogger
import traceback
import gobject

# noinspection PyUnreachableCode
if False:
    from typing import Callable, NoReturn

_log = getLogger(__name__)


def run_on_main_loop(update_action, update_period):
    # type: (Callable[[],NoReturn], int) -> NoReturn

    main_loop = gobject.MainLoop()

    def update(*args, **kwargs):
        try:
            update_action()
            return True

        except Exception as e:
            _log.error(e.message)
            traceback.print_exc()
            main_loop.quit()
            return False

    gobject.timeout_add(update_period, update)
    main_loop.run()
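A usage sketch (the poll body and period are placeholders): run_on_main_loop schedules the supplied action every update_period milliseconds on a glib main loop and quits the loop as soon as the action raises.

def poll():
    pass   # e.g. read the hardware and publish dbus properties

run_on_main_loop(poll, update_period=1000)   # period in milliseconds, as expected by gobject.timeout_add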
Binary file not shown.
|
@ -1,115 +0,0 @@
|
||||||
from logging import getLogger
from _dbus_glib_bindings import DBusGMainLoop

# noinspection PyUnreachableCode
if False:
    from typing import Callable, NoReturn, List, AnyStr, Optional, Union

_log = getLogger(__name__)


def nop(*_args):
    pass


def memoize(fn):

    attr_name = '_memoized_' + fn.__name__

    def _memoized(self):
        if not hasattr(self, attr_name):
            setattr(self, attr_name, fn(self))
        return getattr(self, attr_name)

    return _memoized


# noinspection PyAttributeOutsideInit
class Disposable(object):

    _dispose_actions = None  # type: List[Callable[[],NoReturn]]

    def __enter__(self):
        return self

    def __exit__(self, typ, value, tb):
        self.dispose()

    def dispose(self):
        # type: () -> NoReturn

        while self._dispose_actions:
            dispose = self._dispose_actions.pop()
            dispose()

        for k, v in self.__dict__.iteritems():
            if isinstance(v, Disposable) and v._dispose_actions:
                _log.debug('disposing ' + type(self).__name__ + '.' + k)
                v.dispose()

    def chain_disposable(self, dispose, message=None):
        # type: (Union[Callable[[],None],Disposable], Optional[AnyStr]) -> NoReturn

        if self._dispose_actions is None:
            self._dispose_actions = []

        if isinstance(dispose, Disposable):
            dispose = dispose.dispose

        if message is None:
            self._dispose_actions.append(dispose)
            return

        def dispose_with_log_msg():
            _log.debug('disposing ' + message)
            dispose()

        # _log.debug('new disposable ' + message)
        self._dispose_actions.append(dispose_with_log_msg)

    @classmethod
    def create(cls, dispose_action, message=None):
        # type: (Union[Callable[[],None],Disposable], Optional[AnyStr]) -> Disposable

        disposable = Disposable()
        disposable.chain_disposable(dispose_action, message)
        return disposable

    def create_dependent_disposable(self, dispose_action, message=None):
        # type: (Union[Callable[[],None],Disposable], Optional[AnyStr]) -> Disposable

        disposable = Disposable.create(dispose_action, message)
        self.chain_disposable(disposable)
        return disposable


class Record(object):

    @memoize
    def __str__(self):
        return self.__class__.__name__ + ' ' + unicode(vars(self))

    def __repr__(self):
        return self.__str__()

    @memoize
    def __hash__(self):
        return self.__str__().__hash__()

    def __eq__(self, other):
        # TODO: improve, iterable vars are not correctly handled
        return str(other) == str(self)

    # make readonly
    def __setattr__(self, key, value):
        # type: (str, any) -> NoReturn

        if not key.startswith('_') and hasattr(self, key):  # disallow redefining
            raise ValueError(key + ' is read-only' + str(dir()))

        super(Record, self).__setattr__(key, value)


class RequiresMainLoop(object):

    main_loop = DBusGMainLoop(set_as_default=True)  # initialized only once for all subclasses that need it
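
# Illustrative usage sketch for Disposable (not part of the original file);
# `bus` and its `subscribe` method are hypothetical:
#
#     class Service(Disposable):
#         def __init__(self, bus):
#             handle = bus.subscribe('/some/topic')
#             # tear the subscription down again when dispose() runs
#             self.chain_disposable(handle.remove, 'bus subscription')
#
#     with Service(bus) as service:
#         ...   # dispose() is called automatically when the block exits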
Binary file not shown.
|
@ -1,44 +0,0 @@
from logging import getLogger
import re

# noinspection PyUnreachableCode
if False:
    from typing import Dict

_log = getLogger(__name__)


def make2way(dic):
    # type: (Dict) -> Dict
    for k, v in dic.items():
        dic[v] = k

    return dic


def invert_dict(src_dic):
    # type: (Dict) -> Dict
    dic = dict()

    for k, v in src_dic.items():
        dic[v] = k

    return dic


def enum_file_name_of(path):
    # type: (str) -> Dict[int,str]

    """
    Return a {value: name} dict of the enum members defined in the Python
    source file at `path`.

    This is kinda hacky, but it works :)
    The enum file must contain a single enum however!
    """

    path = path[0:-1] if path.endswith('.pyc') else path
    pattern = re.compile(r"^\s*(\w+)\s*=\s*(\d+)", re.M)
    with open(path, "r") as f:
        return {
            int(m[1]): m[0]
            for m
            in pattern.findall(f.read())
        }
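
# Illustrative usage sketch (not part of the original file). Given a
# hypothetical module led_state.py containing only
#
#     off = 0
#     on = 1
#     blinking_slow = 2
#
# the call below returns {0: 'off', 1: 'on', 2: 'blinking_slow'}:
#
#     names = enum_file_name_of('/path/to/led_state.py')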
Binary file not shown.
|
@ -1,30 +0,0 @@
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
'''
PySnooper - Never use print for debugging again

Usage:

import pysnooper

@pysnooper.snoop()
def your_function(x):
    ...

A log will be written to stderr showing the lines executed and variables
changed in the decorated function.

For more information, see https://github.com/cool-RR/PySnooper
'''

from .tracer import Tracer as snoop
from .variables import Attrs, Exploding, Indices, Keys
import collections

__VersionInfo = collections.namedtuple('VersionInfo',
                                       ('major', 'minor', 'micro'))

__version__ = '0.4.0'
__version_info__ = __VersionInfo(*(map(int, __version__.split('.'))))

del collections, __VersionInfo  # Avoid polluting the namespace
@ -1,95 +0,0 @@
|
||||||
# Copyright 2019 Ram Rachum and collaborators.
|
|
||||||
# This program is distributed under the MIT license.
|
|
||||||
'''Python 2/3 compatibility'''
|
|
||||||
|
|
||||||
import abc
|
|
||||||
import os
|
|
||||||
import inspect
|
|
||||||
import sys
|
|
||||||
import datetime as datetime_module
|
|
||||||
|
|
||||||
PY3 = (sys.version_info[0] == 3)
|
|
||||||
PY2 = not PY3
|
|
||||||
|
|
||||||
if hasattr(abc, 'ABC'):
|
|
||||||
ABC = abc.ABC
|
|
||||||
else:
|
|
||||||
class ABC(object):
|
|
||||||
"""Helper class that provides a standard way to create an ABC using
|
|
||||||
inheritance.
|
|
||||||
"""
|
|
||||||
__metaclass__ = abc.ABCMeta
|
|
||||||
__slots__ = ()
|
|
||||||
|
|
||||||
|
|
||||||
if hasattr(os, 'PathLike'):
|
|
||||||
PathLike = os.PathLike
|
|
||||||
else:
|
|
||||||
class PathLike(ABC):
|
|
||||||
"""Abstract base class for implementing the file system path protocol."""
|
|
||||||
|
|
||||||
@abc.abstractmethod
|
|
||||||
def __fspath__(self):
|
|
||||||
"""Return the file system path representation of the object."""
|
|
||||||
raise NotImplementedError
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def __subclasshook__(cls, subclass):
|
|
||||||
return (
|
|
||||||
hasattr(subclass, '__fspath__') or
|
|
||||||
# Make a concession for older `pathlib` versions:
|
|
||||||
(hasattr(subclass, 'open') and
|
|
||||||
'path' in subclass.__name__.lower())
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
iscoroutinefunction = inspect.iscoroutinefunction
|
|
||||||
except AttributeError:
|
|
||||||
iscoroutinefunction = lambda whatever: False # Lolz
|
|
||||||
|
|
||||||
try:
|
|
||||||
isasyncgenfunction = inspect.isasyncgenfunction
|
|
||||||
except AttributeError:
|
|
||||||
isasyncgenfunction = lambda whatever: False # Lolz
|
|
||||||
|
|
||||||
|
|
||||||
if PY3:
|
|
||||||
string_types = (str,)
|
|
||||||
text_type = str
|
|
||||||
else:
|
|
||||||
string_types = (basestring,)
|
|
||||||
text_type = unicode
|
|
||||||
|
|
||||||
try:
|
|
||||||
from collections import abc as collections_abc
|
|
||||||
except ImportError: # Python 2.7
|
|
||||||
import collections as collections_abc
|
|
||||||
|
|
||||||
if sys.version_info[:2] >= (3, 6):
|
|
||||||
time_isoformat = datetime_module.time.isoformat
|
|
||||||
else:
|
|
||||||
def time_isoformat(time, timespec='microseconds'):
|
|
||||||
assert isinstance(time, datetime_module.time)
|
|
||||||
if timespec != 'microseconds':
|
|
||||||
raise NotImplementedError
|
|
||||||
result = '{:02d}:{:02d}:{:02d}.{:06d}'.format(
|
|
||||||
time.hour, time.minute, time.second, time.microsecond
|
|
||||||
)
|
|
||||||
assert len(result) == 15
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def timedelta_format(timedelta):
|
|
||||||
time = (datetime_module.datetime.min + timedelta).time()
|
|
||||||
return time_isoformat(time, timespec='microseconds')
|
|
||||||
|
|
||||||
def timedelta_parse(s):
|
|
||||||
hours, minutes, seconds, microseconds = map(
|
|
||||||
int,
|
|
||||||
s.replace('.', ':').split(':')
|
|
||||||
)
|
|
||||||
return datetime_module.timedelta(hours=hours, minutes=minutes,
|
|
||||||
seconds=seconds,
|
|
||||||
microseconds=microseconds)
|
|
||||||
|
|
|
@ -1,498 +0,0 @@
|
||||||
# Copyright 2019 Ram Rachum and collaborators.
|
|
||||||
# This program is distributed under the MIT license.
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import inspect
|
|
||||||
import opcode
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import re
|
|
||||||
import collections
|
|
||||||
import datetime as datetime_module
|
|
||||||
import itertools
|
|
||||||
import threading
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
from .variables import CommonVariable, Exploding, BaseVariable
|
|
||||||
from . import utils, pycompat
|
|
||||||
if pycompat.PY2:
|
|
||||||
from io import open
|
|
||||||
|
|
||||||
|
|
||||||
ipython_filename_pattern = re.compile('^<ipython-input-([0-9]+)-.*>$')
|
|
||||||
|
|
||||||
|
|
||||||
def get_local_reprs(frame, watch=(), custom_repr=(), max_length=None, normalize=False):
|
|
||||||
code = frame.f_code
|
|
||||||
vars_order = (code.co_varnames + code.co_cellvars + code.co_freevars +
|
|
||||||
tuple(frame.f_locals.keys()))
|
|
||||||
|
|
||||||
result_items = [(key, utils.get_shortish_repr(value, custom_repr,
|
|
||||||
max_length, normalize))
|
|
||||||
for key, value in frame.f_locals.items()]
|
|
||||||
result_items.sort(key=lambda key_value: vars_order.index(key_value[0]))
|
|
||||||
result = collections.OrderedDict(result_items)
|
|
||||||
|
|
||||||
for variable in watch:
|
|
||||||
result.update(sorted(variable.items(frame, normalize)))
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
class UnavailableSource(object):
|
|
||||||
def __getitem__(self, i):
|
|
||||||
return u'SOURCE IS UNAVAILABLE'
|
|
||||||
|
|
||||||
|
|
||||||
source_and_path_cache = {}
|
|
||||||
|
|
||||||
|
|
||||||
def get_path_and_source_from_frame(frame):
|
|
||||||
globs = frame.f_globals or {}
|
|
||||||
module_name = globs.get('__name__')
|
|
||||||
file_name = frame.f_code.co_filename
|
|
||||||
cache_key = (module_name, file_name)
|
|
||||||
try:
|
|
||||||
return source_and_path_cache[cache_key]
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
loader = globs.get('__loader__')
|
|
||||||
|
|
||||||
source = None
|
|
||||||
if hasattr(loader, 'get_source'):
|
|
||||||
try:
|
|
||||||
source = loader.get_source(module_name)
|
|
||||||
except ImportError:
|
|
||||||
pass
|
|
||||||
if source is not None:
|
|
||||||
source = source.splitlines()
|
|
||||||
if source is None:
|
|
||||||
ipython_filename_match = ipython_filename_pattern.match(file_name)
|
|
||||||
if ipython_filename_match:
|
|
||||||
entry_number = int(ipython_filename_match.group(1))
|
|
||||||
try:
|
|
||||||
import IPython
|
|
||||||
ipython_shell = IPython.get_ipython()
|
|
||||||
((_, _, source_chunk),) = ipython_shell.history_manager. \
|
|
||||||
get_range(0, entry_number, entry_number + 1)
|
|
||||||
source = source_chunk.splitlines()
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
try:
|
|
||||||
with open(file_name, 'rb') as fp:
|
|
||||||
source = fp.read().splitlines()
|
|
||||||
except utils.file_reading_errors:
|
|
||||||
pass
|
|
||||||
if not source:
|
|
||||||
# We used to check `if source is None` but I found a rare bug where it
|
|
||||||
# was empty, but not `None`, so now we check `if not source`.
|
|
||||||
source = UnavailableSource()
|
|
||||||
|
|
||||||
# If we just read the source from a file, or if the loader did not
|
|
||||||
# apply tokenize.detect_encoding to decode the source into a
|
|
||||||
# string, then we should do that ourselves.
|
|
||||||
if isinstance(source[0], bytes):
|
|
||||||
encoding = 'utf-8'
|
|
||||||
for line in source[:2]:
|
|
||||||
# File coding may be specified. Match pattern from PEP-263
|
|
||||||
# (https://www.python.org/dev/peps/pep-0263/)
|
|
||||||
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
|
|
||||||
if match:
|
|
||||||
encoding = match.group(1).decode('ascii')
|
|
||||||
break
|
|
||||||
source = [pycompat.text_type(sline, encoding, 'replace') for sline in
|
|
||||||
source]
|
|
||||||
|
|
||||||
result = (file_name, source)
|
|
||||||
source_and_path_cache[cache_key] = result
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def get_write_function(output, overwrite):
|
|
||||||
is_path = isinstance(output, (pycompat.PathLike, str))
|
|
||||||
if overwrite and not is_path:
|
|
||||||
raise Exception('`overwrite=True` can only be used when writing '
|
|
||||||
'content to file.')
|
|
||||||
if output is None:
|
|
||||||
def write(s):
|
|
||||||
stderr = sys.stderr
|
|
||||||
try:
|
|
||||||
stderr.write(s)
|
|
||||||
except UnicodeEncodeError:
|
|
||||||
# God damn Python 2
|
|
||||||
stderr.write(utils.shitcode(s))
|
|
||||||
elif is_path:
|
|
||||||
return FileWriter(output, overwrite).write
|
|
||||||
elif callable(output):
|
|
||||||
write = output
|
|
||||||
else:
|
|
||||||
assert isinstance(output, utils.WritableStream)
|
|
||||||
|
|
||||||
def write(s):
|
|
||||||
output.write(s)
|
|
||||||
return write
|
|
||||||
|
|
||||||
|
|
||||||
class FileWriter(object):
|
|
||||||
def __init__(self, path, overwrite):
|
|
||||||
self.path = pycompat.text_type(path)
|
|
||||||
self.overwrite = overwrite
|
|
||||||
|
|
||||||
def write(self, s):
|
|
||||||
with open(self.path, 'w' if self.overwrite else 'a',
|
|
||||||
encoding='utf-8') as output_file:
|
|
||||||
output_file.write(s)
|
|
||||||
self.overwrite = False
|
|
||||||
|
|
||||||
|
|
||||||
thread_global = threading.local()
|
|
||||||
DISABLED = bool(os.getenv('PYSNOOPER_DISABLED', ''))
|
|
||||||
|
|
||||||
class Tracer:
|
|
||||||
'''
|
|
||||||
Snoop on the function, writing everything it's doing to stderr.
|
|
||||||
|
|
||||||
This is useful for debugging.
|
|
||||||
|
|
||||||
When you decorate a function with `@pysnooper.snoop()`
|
|
||||||
or wrap a block of code in `with pysnooper.snoop():`, you'll get a log of
|
|
||||||
every line that ran in the function and a play-by-play of every local
|
|
||||||
variable that changed.
|
|
||||||
|
|
||||||
If stderr is not easily accessible for you, you can redirect the output to
|
|
||||||
a file::
|
|
||||||
|
|
||||||
@pysnooper.snoop('/my/log/file.log')
|
|
||||||
|
|
||||||
See values of some expressions that aren't local variables::
|
|
||||||
|
|
||||||
@pysnooper.snoop(watch=('foo.bar', 'self.x["whatever"]'))
|
|
||||||
|
|
||||||
Expand values to see all their attributes or items of lists/dictionaries:
|
|
||||||
|
|
||||||
@pysnooper.snoop(watch_explode=('foo', 'self'))
|
|
||||||
|
|
||||||
(see Advanced Usage in the README for more control)
|
|
||||||
|
|
||||||
Show snoop lines for functions that your function calls::
|
|
||||||
|
|
||||||
@pysnooper.snoop(depth=2)
|
|
||||||
|
|
||||||
Start all snoop lines with a prefix, to grep for them easily::
|
|
||||||
|
|
||||||
@pysnooper.snoop(prefix='ZZZ ')
|
|
||||||
|
|
||||||
On multi-threaded apps, identify which threads are snooped in the output::
|
|
||||||
|
|
||||||
@pysnooper.snoop(thread_info=True)
|
|
||||||
|
|
||||||
Customize how values are represented as strings::
|
|
||||||
|
|
||||||
@pysnooper.snoop(custom_repr=((type1, custom_repr_func1),
|
|
||||||
(condition2, custom_repr_func2), ...))
|
|
||||||
|
|
||||||
Variables and exceptions get truncated to 100 characters by default. You
|
|
||||||
can customize that:
|
|
||||||
|
|
||||||
@pysnooper.snoop(max_variable_length=200)
|
|
||||||
|
|
||||||
You can also use `max_variable_length=None` to never truncate them.
|
|
||||||
|
|
||||||
Show timestamps relative to start time rather than wall time::
|
|
||||||
|
|
||||||
@pysnooper.snoop(relative_time=True)
|
|
||||||
|
|
||||||
'''
|
|
||||||
def __init__(self, output=None, watch=(), watch_explode=(), depth=1,
|
|
||||||
prefix='', overwrite=False, thread_info=False, custom_repr=(),
|
|
||||||
max_variable_length=100, normalize=False, relative_time=False):
|
|
||||||
self._write = get_write_function(output, overwrite)
|
|
||||||
|
|
||||||
self.watch = [
|
|
||||||
v if isinstance(v, BaseVariable) else CommonVariable(v)
|
|
||||||
for v in utils.ensure_tuple(watch)
|
|
||||||
] + [
|
|
||||||
v if isinstance(v, BaseVariable) else Exploding(v)
|
|
||||||
for v in utils.ensure_tuple(watch_explode)
|
|
||||||
]
|
|
||||||
self.frame_to_local_reprs = {}
|
|
||||||
self.start_times = {}
|
|
||||||
self.depth = depth
|
|
||||||
self.prefix = prefix
|
|
||||||
self.thread_info = thread_info
|
|
||||||
self.thread_info_padding = 0
|
|
||||||
assert self.depth >= 1
|
|
||||||
self.target_codes = set()
|
|
||||||
self.target_frames = set()
|
|
||||||
self.thread_local = threading.local()
|
|
||||||
if len(custom_repr) == 2 and not all(isinstance(x,
|
|
||||||
pycompat.collections_abc.Iterable) for x in custom_repr):
|
|
||||||
custom_repr = (custom_repr,)
|
|
||||||
self.custom_repr = custom_repr
|
|
||||||
self.last_source_path = None
|
|
||||||
self.max_variable_length = max_variable_length
|
|
||||||
self.normalize = normalize
|
|
||||||
self.relative_time = relative_time
|
|
||||||
|
|
||||||
def __call__(self, function_or_class):
|
|
||||||
if DISABLED:
|
|
||||||
return function_or_class
|
|
||||||
|
|
||||||
if inspect.isclass(function_or_class):
|
|
||||||
return self._wrap_class(function_or_class)
|
|
||||||
else:
|
|
||||||
return self._wrap_function(function_or_class)
|
|
||||||
|
|
||||||
def _wrap_class(self, cls):
|
|
||||||
for attr_name, attr in cls.__dict__.items():
|
|
||||||
# Coroutines are functions, but snooping them is not supported
|
|
||||||
# at the moment
|
|
||||||
if pycompat.iscoroutinefunction(attr):
|
|
||||||
continue
|
|
||||||
|
|
||||||
if inspect.isfunction(attr):
|
|
||||||
setattr(cls, attr_name, self._wrap_function(attr))
|
|
||||||
return cls
|
|
||||||
|
|
||||||
def _wrap_function(self, function):
|
|
||||||
self.target_codes.add(function.__code__)
|
|
||||||
|
|
||||||
@functools.wraps(function)
|
|
||||||
def simple_wrapper(*args, **kwargs):
|
|
||||||
with self:
|
|
||||||
return function(*args, **kwargs)
|
|
||||||
|
|
||||||
@functools.wraps(function)
|
|
||||||
def generator_wrapper(*args, **kwargs):
|
|
||||||
gen = function(*args, **kwargs)
|
|
||||||
method, incoming = gen.send, None
|
|
||||||
while True:
|
|
||||||
with self:
|
|
||||||
try:
|
|
||||||
outgoing = method(incoming)
|
|
||||||
except StopIteration:
|
|
||||||
return
|
|
||||||
try:
|
|
||||||
method, incoming = gen.send, (yield outgoing)
|
|
||||||
except Exception as e:
|
|
||||||
method, incoming = gen.throw, e
|
|
||||||
|
|
||||||
if pycompat.iscoroutinefunction(function):
|
|
||||||
raise NotImplementedError
|
|
||||||
if pycompat.isasyncgenfunction(function):
|
|
||||||
raise NotImplementedError
|
|
||||||
elif inspect.isgeneratorfunction(function):
|
|
||||||
return generator_wrapper
|
|
||||||
else:
|
|
||||||
return simple_wrapper
|
|
||||||
|
|
||||||
def write(self, s):
|
|
||||||
s = u'{self.prefix}{s}\n'.format(**locals())
|
|
||||||
self._write(s)
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
if DISABLED:
|
|
||||||
return
|
|
||||||
calling_frame = inspect.currentframe().f_back
|
|
||||||
if not self._is_internal_frame(calling_frame):
|
|
||||||
calling_frame.f_trace = self.trace
|
|
||||||
self.target_frames.add(calling_frame)
|
|
||||||
|
|
||||||
stack = self.thread_local.__dict__.setdefault(
|
|
||||||
'original_trace_functions', []
|
|
||||||
)
|
|
||||||
stack.append(sys.gettrace())
|
|
||||||
self.start_times[calling_frame] = datetime_module.datetime.now()
|
|
||||||
sys.settrace(self.trace)
|
|
||||||
|
|
||||||
def __exit__(self, exc_type, exc_value, exc_traceback):
|
|
||||||
if DISABLED:
|
|
||||||
return
|
|
||||||
stack = self.thread_local.original_trace_functions
|
|
||||||
sys.settrace(stack.pop())
|
|
||||||
calling_frame = inspect.currentframe().f_back
|
|
||||||
self.target_frames.discard(calling_frame)
|
|
||||||
self.frame_to_local_reprs.pop(calling_frame, None)
|
|
||||||
|
|
||||||
### Writing elapsed time: #############################################
|
|
||||||
# #
|
|
||||||
start_time = self.start_times.pop(calling_frame)
|
|
||||||
duration = datetime_module.datetime.now() - start_time
|
|
||||||
elapsed_time_string = pycompat.timedelta_format(duration)
|
|
||||||
indent = ' ' * 4 * (thread_global.depth + 1)
|
|
||||||
self.write(
|
|
||||||
'{indent}Elapsed time: {elapsed_time_string}'.format(**locals())
|
|
||||||
)
|
|
||||||
# #
|
|
||||||
### Finished writing elapsed time. ####################################
|
|
||||||
|
|
||||||
def _is_internal_frame(self, frame):
|
|
||||||
return frame.f_code.co_filename == Tracer.__enter__.__code__.co_filename
|
|
||||||
|
|
||||||
def set_thread_info_padding(self, thread_info):
|
|
||||||
current_thread_len = len(thread_info)
|
|
||||||
self.thread_info_padding = max(self.thread_info_padding,
|
|
||||||
current_thread_len)
|
|
||||||
return thread_info.ljust(self.thread_info_padding)
|
|
||||||
|
|
||||||
def trace(self, frame, event, arg):
|
|
||||||
|
|
||||||
### Checking whether we should trace this line: #######################
|
|
||||||
# #
|
|
||||||
# We should trace this line either if it's in the decorated function,
|
|
||||||
# or the user asked to go a few levels deeper and we're within that
|
|
||||||
# number of levels deeper.
|
|
||||||
|
|
||||||
if not (frame.f_code in self.target_codes or frame in self.target_frames):
|
|
||||||
if self.depth == 1:
|
|
||||||
# We did the most common and quickest check above, because the
|
|
||||||
# trace function runs so incredibly often, therefore it's
|
|
||||||
# crucial to hyper-optimize it for the common case.
|
|
||||||
return None
|
|
||||||
elif self._is_internal_frame(frame):
|
|
||||||
return None
|
|
||||||
else:
|
|
||||||
_frame_candidate = frame
|
|
||||||
for i in range(1, self.depth):
|
|
||||||
_frame_candidate = _frame_candidate.f_back
|
|
||||||
if _frame_candidate is None:
|
|
||||||
return None
|
|
||||||
elif _frame_candidate.f_code in self.target_codes or _frame_candidate in self.target_frames:
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
thread_global.__dict__.setdefault('depth', -1)
|
|
||||||
if event == 'call':
|
|
||||||
thread_global.depth += 1
|
|
||||||
indent = ' ' * 4 * thread_global.depth
|
|
||||||
|
|
||||||
# #
|
|
||||||
### Finished checking whether we should trace this line. ##############
|
|
||||||
|
|
||||||
### Making timestamp: #################################################
|
|
||||||
# #
|
|
||||||
if self.normalize:
|
|
||||||
timestamp = ' ' * 15
|
|
||||||
elif self.relative_time:
|
|
||||||
try:
|
|
||||||
start_time = self.start_times[frame]
|
|
||||||
except KeyError:
|
|
||||||
start_time = self.start_times[frame] = \
|
|
||||||
datetime_module.datetime.now()
|
|
||||||
duration = datetime_module.datetime.now() - start_time
|
|
||||||
timestamp = pycompat.timedelta_format(duration)
|
|
||||||
else:
|
|
||||||
timestamp = pycompat.time_isoformat(
|
|
||||||
datetime_module.datetime.now().time(),
|
|
||||||
timespec='microseconds'
|
|
||||||
)
|
|
||||||
# #
|
|
||||||
### Finished making timestamp. ########################################
|
|
||||||
|
|
||||||
line_no = frame.f_lineno
|
|
||||||
source_path, source = get_path_and_source_from_frame(frame)
|
|
||||||
source_path = source_path if not self.normalize else os.path.basename(source_path)
|
|
||||||
if self.last_source_path != source_path:
|
|
||||||
self.write(u'{indent}Source path:... {source_path}'.
|
|
||||||
format(**locals()))
|
|
||||||
self.last_source_path = source_path
|
|
||||||
source_line = source[line_no - 1]
|
|
||||||
thread_info = ""
|
|
||||||
if self.thread_info:
|
|
||||||
if self.normalize:
|
|
||||||
raise NotImplementedError("normalize is not supported with "
|
|
||||||
"thread_info")
|
|
||||||
current_thread = threading.current_thread()
|
|
||||||
thread_info = "{ident}-{name} ".format(
|
|
||||||
ident=current_thread.ident, name=current_thread.getName())
|
|
||||||
thread_info = self.set_thread_info_padding(thread_info)
|
|
||||||
|
|
||||||
### Reporting newish and modified variables: ##########################
|
|
||||||
# #
|
|
||||||
old_local_reprs = self.frame_to_local_reprs.get(frame, {})
|
|
||||||
self.frame_to_local_reprs[frame] = local_reprs = \
|
|
||||||
get_local_reprs(frame,
|
|
||||||
watch=self.watch, custom_repr=self.custom_repr,
|
|
||||||
max_length=self.max_variable_length,
|
|
||||||
normalize=self.normalize,
|
|
||||||
)
|
|
||||||
|
|
||||||
newish_string = ('Starting var:.. ' if event == 'call' else
|
|
||||||
'New var:....... ')
|
|
||||||
|
|
||||||
for name, value_repr in local_reprs.items():
|
|
||||||
if name not in old_local_reprs:
|
|
||||||
self.write('{indent}{newish_string}{name} = {value_repr}'.format(
|
|
||||||
**locals()))
|
|
||||||
elif old_local_reprs[name] != value_repr:
|
|
||||||
self.write('{indent}Modified var:.. {name} = {value_repr}'.format(
|
|
||||||
**locals()))
|
|
||||||
|
|
||||||
# #
|
|
||||||
### Finished newish and modified variables. ###########################
|
|
||||||
|
|
||||||
|
|
||||||
### Dealing with misplaced function definition: #######################
|
|
||||||
# #
|
|
||||||
if event == 'call' and source_line.lstrip().startswith('@'):
|
|
||||||
# If a function decorator is found, skip lines until an actual
|
|
||||||
# function definition is found.
|
|
||||||
for candidate_line_no in itertools.count(line_no):
|
|
||||||
try:
|
|
||||||
candidate_source_line = source[candidate_line_no - 1]
|
|
||||||
except IndexError:
|
|
||||||
# End of source file reached without finding a function
|
|
||||||
# definition. Fall back to original source line.
|
|
||||||
break
|
|
||||||
|
|
||||||
if candidate_source_line.lstrip().startswith('def'):
|
|
||||||
# Found the def line!
|
|
||||||
line_no = candidate_line_no
|
|
||||||
source_line = candidate_source_line
|
|
||||||
break
|
|
||||||
# #
|
|
||||||
### Finished dealing with misplaced function definition. ##############
|
|
||||||
|
|
||||||
# If a call ends due to an exception, we still get a 'return' event
|
|
||||||
# with arg = None. This seems to be the only way to tell the difference
|
|
||||||
# https://stackoverflow.com/a/12800909/2482744
|
|
||||||
code_byte = frame.f_code.co_code[frame.f_lasti]
|
|
||||||
if not isinstance(code_byte, int):
|
|
||||||
code_byte = ord(code_byte)
|
|
||||||
ended_by_exception = (
|
|
||||||
event == 'return'
|
|
||||||
and arg is None
|
|
||||||
and (opcode.opname[code_byte]
|
|
||||||
not in ('RETURN_VALUE', 'YIELD_VALUE'))
|
|
||||||
)
|
|
||||||
|
|
||||||
if ended_by_exception:
|
|
||||||
self.write('{indent}Call ended by exception'.
|
|
||||||
format(**locals()))
|
|
||||||
else:
|
|
||||||
self.write(u'{indent}{timestamp} {thread_info}{event:9} '
|
|
||||||
u'{line_no:4} {source_line}'.format(**locals()))
|
|
||||||
|
|
||||||
if event == 'return':
|
|
||||||
self.frame_to_local_reprs.pop(frame, None)
|
|
||||||
self.start_times.pop(frame, None)
|
|
||||||
thread_global.depth -= 1
|
|
||||||
|
|
||||||
if not ended_by_exception:
|
|
||||||
return_value_repr = utils.get_shortish_repr(arg,
|
|
||||||
custom_repr=self.custom_repr,
|
|
||||||
max_length=self.max_variable_length,
|
|
||||||
normalize=self.normalize,
|
|
||||||
)
|
|
||||||
self.write('{indent}Return value:.. {return_value_repr}'.
|
|
||||||
format(**locals()))
|
|
||||||
|
|
||||||
if event == 'exception':
|
|
||||||
exception = '\n'.join(traceback.format_exception_only(*arg[:2])).strip()
|
|
||||||
if self.max_variable_length:
|
|
||||||
exception = utils.truncate(exception, self.max_variable_length)
|
|
||||||
self.write('{indent}{exception}'.
|
|
||||||
format(**locals()))
|
|
||||||
|
|
||||||
return self.trace
|
|
|
@ -1,98 +0,0 @@
|
||||||
# Copyright 2019 Ram Rachum and collaborators.
|
|
||||||
# This program is distributed under the MIT license.
|
|
||||||
|
|
||||||
import abc
|
|
||||||
import re
|
|
||||||
|
|
||||||
import sys
|
|
||||||
from .pycompat import ABC, string_types, collections_abc
|
|
||||||
|
|
||||||
def _check_methods(C, *methods):
|
|
||||||
mro = C.__mro__
|
|
||||||
for method in methods:
|
|
||||||
for B in mro:
|
|
||||||
if method in B.__dict__:
|
|
||||||
if B.__dict__[method] is None:
|
|
||||||
return NotImplemented
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
return NotImplemented
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
class WritableStream(ABC):
|
|
||||||
@abc.abstractmethod
|
|
||||||
def write(self, s):
|
|
||||||
pass
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def __subclasshook__(cls, C):
|
|
||||||
if cls is WritableStream:
|
|
||||||
return _check_methods(C, 'write')
|
|
||||||
return NotImplemented
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
file_reading_errors = (
|
|
||||||
IOError,
|
|
||||||
OSError,
|
|
||||||
ValueError # IronPython weirdness.
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def shitcode(s):
|
|
||||||
return ''.join(
|
|
||||||
(c if (0 < ord(c) < 256) else '?') for c in s
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_repr_function(item, custom_repr):
|
|
||||||
for condition, action in custom_repr:
|
|
||||||
if isinstance(condition, type):
|
|
||||||
condition = lambda x, y=condition: isinstance(x, y)
|
|
||||||
if condition(item):
|
|
||||||
return action
|
|
||||||
return repr
|
|
||||||
|
|
||||||
|
|
||||||
DEFAULT_REPR_RE = re.compile(r' at 0x[a-f0-9A-F]{4,}')
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_repr(item_repr):
|
|
||||||
"""Remove memory address (0x...) from a default python repr"""
|
|
||||||
return DEFAULT_REPR_RE.sub('', item_repr)
|
|
||||||
|
|
||||||
|
|
||||||
def get_shortish_repr(item, custom_repr=(), max_length=None, normalize=False):
|
|
||||||
repr_function = get_repr_function(item, custom_repr)
|
|
||||||
try:
|
|
||||||
r = repr_function(item)
|
|
||||||
except Exception:
|
|
||||||
r = 'REPR FAILED'
|
|
||||||
r = r.replace('\r', '').replace('\n', '')
|
|
||||||
if normalize:
|
|
||||||
r = normalize_repr(r)
|
|
||||||
if max_length:
|
|
||||||
r = truncate(r, max_length)
|
|
||||||
return r
|
|
||||||
|
|
||||||
|
|
||||||
def truncate(string, max_length):
|
|
||||||
if (max_length is None) or (len(string) <= max_length):
|
|
||||||
return string
|
|
||||||
else:
|
|
||||||
left = (max_length - 3) // 2
|
|
||||||
right = max_length - 3 - left
|
|
||||||
return u'{}...{}'.format(string[:left], string[-right:])
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_tuple(x):
|
|
||||||
if isinstance(x, collections_abc.Iterable) and \
|
|
||||||
not isinstance(x, string_types):
|
|
||||||
return tuple(x)
|
|
||||||
else:
|
|
||||||
return (x,)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,133 +0,0 @@
|
||||||
import itertools
|
|
||||||
import abc
|
|
||||||
try:
|
|
||||||
from collections.abc import Mapping, Sequence
|
|
||||||
except ImportError:
|
|
||||||
from collections import Mapping, Sequence
|
|
||||||
from copy import deepcopy
|
|
||||||
|
|
||||||
from . import utils
|
|
||||||
from . import pycompat
|
|
||||||
|
|
||||||
|
|
||||||
def needs_parentheses(source):
|
|
||||||
def code(s):
|
|
||||||
return compile(s, '<variable>', 'eval').co_code
|
|
||||||
|
|
||||||
return code('{}.x'.format(source)) != code('({}).x'.format(source))
|
|
||||||
|
|
||||||
|
|
||||||
class BaseVariable(pycompat.ABC):
|
|
||||||
def __init__(self, source, exclude=()):
|
|
||||||
self.source = source
|
|
||||||
self.exclude = utils.ensure_tuple(exclude)
|
|
||||||
self.code = compile(source, '<variable>', 'eval')
|
|
||||||
if needs_parentheses(source):
|
|
||||||
self.unambiguous_source = '({})'.format(source)
|
|
||||||
else:
|
|
||||||
self.unambiguous_source = source
|
|
||||||
|
|
||||||
def items(self, frame, normalize=False):
|
|
||||||
try:
|
|
||||||
main_value = eval(self.code, frame.f_globals or {}, frame.f_locals)
|
|
||||||
except Exception:
|
|
||||||
return ()
|
|
||||||
return self._items(main_value, normalize)
|
|
||||||
|
|
||||||
@abc.abstractmethod
|
|
||||||
def _items(self, key, normalize=False):
|
|
||||||
raise NotImplementedError
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _fingerprint(self):
|
|
||||||
return (type(self), self.source, self.exclude)
|
|
||||||
|
|
||||||
def __hash__(self):
|
|
||||||
return hash(self._fingerprint)
|
|
||||||
|
|
||||||
def __eq__(self, other):
|
|
||||||
return (isinstance(other, BaseVariable) and
|
|
||||||
self._fingerprint == other._fingerprint)
|
|
||||||
|
|
||||||
|
|
||||||
class CommonVariable(BaseVariable):
|
|
||||||
def _items(self, main_value, normalize=False):
|
|
||||||
result = [(self.source, utils.get_shortish_repr(main_value, normalize=normalize))]
|
|
||||||
for key in self._safe_keys(main_value):
|
|
||||||
try:
|
|
||||||
if key in self.exclude:
|
|
||||||
continue
|
|
||||||
value = self._get_value(main_value, key)
|
|
||||||
except Exception:
|
|
||||||
continue
|
|
||||||
result.append((
|
|
||||||
'{}{}'.format(self.unambiguous_source, self._format_key(key)),
|
|
||||||
utils.get_shortish_repr(value)
|
|
||||||
))
|
|
||||||
return result
|
|
||||||
|
|
||||||
def _safe_keys(self, main_value):
|
|
||||||
try:
|
|
||||||
for key in self._keys(main_value):
|
|
||||||
yield key
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
def _keys(self, main_value):
|
|
||||||
return ()
|
|
||||||
|
|
||||||
def _format_key(self, key):
|
|
||||||
raise NotImplementedError
|
|
||||||
|
|
||||||
def _get_value(self, main_value, key):
|
|
||||||
raise NotImplementedError
|
|
||||||
|
|
||||||
|
|
||||||
class Attrs(CommonVariable):
|
|
||||||
def _keys(self, main_value):
|
|
||||||
return itertools.chain(
|
|
||||||
getattr(main_value, '__dict__', ()),
|
|
||||||
getattr(main_value, '__slots__', ())
|
|
||||||
)
|
|
||||||
|
|
||||||
def _format_key(self, key):
|
|
||||||
return '.' + key
|
|
||||||
|
|
||||||
def _get_value(self, main_value, key):
|
|
||||||
return getattr(main_value, key)
|
|
||||||
|
|
||||||
|
|
||||||
class Keys(CommonVariable):
|
|
||||||
def _keys(self, main_value):
|
|
||||||
return main_value.keys()
|
|
||||||
|
|
||||||
def _format_key(self, key):
|
|
||||||
return '[{}]'.format(utils.get_shortish_repr(key))
|
|
||||||
|
|
||||||
def _get_value(self, main_value, key):
|
|
||||||
return main_value[key]
|
|
||||||
|
|
||||||
|
|
||||||
class Indices(Keys):
|
|
||||||
_slice = slice(None)
|
|
||||||
|
|
||||||
def _keys(self, main_value):
|
|
||||||
return range(len(main_value))[self._slice]
|
|
||||||
|
|
||||||
def __getitem__(self, item):
|
|
||||||
assert isinstance(item, slice)
|
|
||||||
result = deepcopy(self)
|
|
||||||
result._slice = item
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
class Exploding(BaseVariable):
|
|
||||||
def _items(self, main_value, normalize=False):
|
|
||||||
if isinstance(main_value, Mapping):
|
|
||||||
cls = Keys
|
|
||||||
elif isinstance(main_value, Sequence):
|
|
||||||
cls = Indices
|
|
||||||
else:
|
|
||||||
cls = Attrs
|
|
||||||
|
|
||||||
return cls(self.source, self.exclude)._items(main_value, normalize)
|
|
|
@ -1,3 +0,0 @@
#!/bin/sh
exec 2>&1
exec multilog t s25000 n4 /var/log/dbus-fzsonick-48tl.TTY
@ -1,4 +0,0 @@
#!/bin/sh
exec 2>&1

exec softlimit -d 100000000 -s 1000000 -a 100000000 /opt/innovenergy/dbus-fzsonick-48tl/start.sh TTY
@ -1,214 +0,0 @@
|
||||||
# coding=utf-8
|
|
||||||
|
|
||||||
import config as cfg
|
|
||||||
from convert import mean, read_float, read_led_state, read_bool, count_bits, comma_separated
|
|
||||||
from data import BatterySignal, Battery, LedColor, ServiceSignal, BatteryStatus, LedState
|
|
||||||
|
|
||||||
# noinspection PyUnreachableCode
|
|
||||||
if False:
|
|
||||||
from typing import List, Iterable
|
|
||||||
|
|
||||||
|
|
||||||
def init_service_signals(batteries):
|
|
||||||
# type: (List[Battery]) -> Iterable[ServiceSignal]
|
|
||||||
|
|
||||||
n_batteries = len(batteries)
|
|
||||||
product_name = cfg.PRODUCT_NAME + ' x' + str(n_batteries)
|
|
||||||
|
|
||||||
return [
|
|
||||||
ServiceSignal('/NbOfBatteries', n_batteries), # TODO: nb of operational batteries
|
|
||||||
ServiceSignal('/Mgmt/ProcessName', __file__),
|
|
||||||
ServiceSignal('/Mgmt/ProcessVersion', cfg.SOFTWARE_VERSION),
|
|
||||||
ServiceSignal('/Mgmt/Connection', cfg.CONNECTION),
|
|
||||||
ServiceSignal('/DeviceInstance', cfg.DEVICE_INSTANCE),
|
|
||||||
ServiceSignal('/ProductName', product_name),
|
|
||||||
ServiceSignal('/ProductId', cfg.PRODUCT_ID),
|
|
||||||
ServiceSignal('/Connected', 1)
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def init_battery_signals():
|
|
||||||
# type: () -> Iterable[BatterySignal]
|
|
||||||
|
|
||||||
read_voltage = read_float(register=999, scale_factor=0.01, offset=0)
|
|
||||||
read_current = read_float(register=1000, scale_factor=0.01, offset=-10000)
|
|
||||||
|
|
||||||
read_led_amber = read_led_state(register=1004, led=LedColor.amber)
|
|
||||||
read_led_green = read_led_state(register=1004, led=LedColor.green)
|
|
||||||
read_led_blue = read_led_state(register=1004, led=LedColor.blue)
|
|
||||||
read_led_red = read_led_state(register=1004, led=LedColor.red)
|
|
||||||
|
|
||||||
def read_power(status):
|
|
||||||
# type: (BatteryStatus) -> int
|
|
||||||
return int(read_current(status) * read_voltage(status))
|
|
||||||
|
|
||||||
def calc_power_limit_imposed_by_voltage_limit(v, i, v_limit, r_int):
|
|
||||||
# type: (float, float, float, float) -> float
|
|
||||||
|
|
||||||
dv = v_limit - v
|
|
||||||
di = dv / r_int
|
|
||||||
p_limit = v_limit * (i + di)
|
|
||||||
|
|
||||||
return p_limit
|
|
||||||
|
|
||||||
def calc_power_limit_imposed_by_current_limit(v, i, i_limit, r_int):
|
|
||||||
# type: (float, float, float, float) -> float
|
|
||||||
|
|
||||||
di = i_limit - i
|
|
||||||
dv = di * r_int
|
|
||||||
p_limit = i_limit * (v + dv)
|
|
||||||
|
|
||||||
return p_limit
|
|
||||||
|
|
||||||
def calc_max_charge_power(bs):
|
|
||||||
# type: (BatteryStatus) -> int
|
|
||||||
|
|
||||||
b = bs.battery
|
|
||||||
v = read_voltage(bs)
|
|
||||||
i = read_current(bs)
|
|
||||||
|
|
||||||
p_limits = [
|
|
||||||
calc_power_limit_imposed_by_voltage_limit(v, i, b.v_max, b.r_int_min),
|
|
||||||
calc_power_limit_imposed_by_voltage_limit(v, i, b.v_max, b.r_int_max),
|
|
||||||
calc_power_limit_imposed_by_current_limit(v, i, b.i_max, b.r_int_min),
|
|
||||||
calc_power_limit_imposed_by_current_limit(v, i, b.i_max, b.r_int_max),
|
|
||||||
]
|
|
||||||
|
|
||||||
p_limit = min(p_limits) # p_limit is normally positive here (signed)
|
|
||||||
p_limit = max(p_limit, 0) # charge power must not become negative
|
|
||||||
|
|
||||||
return int(p_limit)
|
|
||||||
|
|
||||||
def calc_max_discharge_power(bs):
|
|
||||||
# type: (BatteryStatus) -> float
|
|
||||||
|
|
||||||
b = bs.battery
|
|
||||||
v = read_voltage(bs)
|
|
||||||
i = read_current(bs)
|
|
||||||
|
|
||||||
p_limits = [
|
|
||||||
calc_power_limit_imposed_by_voltage_limit(v, i, b.v_min, b.r_int_min),
|
|
||||||
calc_power_limit_imposed_by_voltage_limit(v, i, b.v_min, b.r_int_max),
|
|
||||||
calc_power_limit_imposed_by_current_limit(v, i, -b.i_max, b.r_int_min),
|
|
||||||
calc_power_limit_imposed_by_current_limit(v, i, -b.i_max, b.r_int_max),
|
|
||||||
]
|
|
||||||
|
|
||||||
p_limit = max(p_limits) # p_limit is normally negative here (signed)
|
|
||||||
p_limit = min(p_limit, 0) # discharge power must not become positive
|
|
||||||
|
|
||||||
return int(-p_limit) # make unsigned!
|
|
||||||
|
|
||||||
def read_battery_cold(status):
|
|
||||||
return \
|
|
||||||
read_led_green(status) >= LedState.blinking_slow and \
|
|
||||||
read_led_blue(status) >= LedState.blinking_slow
|
|
||||||
|
|
||||||
def read_soc(status):
|
|
||||||
soc = read_float(register=1053, scale_factor=0.1, offset=0)(status)
|
|
||||||
|
|
||||||
# if the SOC is 100 but EOC is not yet reached, report 99.9 instead of 100
|
|
||||||
if soc > 99.9 and not read_eoc_reached(status):
|
|
||||||
return 99.9
|
|
||||||
if soc >= 99.9 and read_eoc_reached(status):
|
|
||||||
return 100
|
|
||||||
|
|
||||||
return soc
|
|
||||||
|
|
||||||
def read_eoc_reached(status):
|
|
||||||
return \
|
|
||||||
read_led_green(status) == LedState.on and \
|
|
||||||
read_led_amber(status) == LedState.off and \
|
|
||||||
read_led_blue(status) == LedState.off
|
|
||||||
|
|
||||||
return [
|
|
||||||
BatterySignal('/Dc/0/Voltage', mean, get_value=read_voltage, unit='V'),
|
|
||||||
BatterySignal('/Dc/0/Current', sum, get_value=read_current, unit='A'),
|
|
||||||
BatterySignal('/Dc/0/Power', sum, get_value=read_power, unit='W'),
|
|
||||||
|
|
||||||
BatterySignal('/BussVoltage', mean, read_float(register=1001, scale_factor=0.01, offset=0), unit='V'),
|
|
||||||
BatterySignal('/Soc', mean, read_soc, unit='%'),
|
|
||||||
BatterySignal('/Dc/0/Temperature', mean, read_float(register=1003, scale_factor=0.1, offset=-400), unit='C'),
|
|
||||||
|
|
||||||
BatterySignal('/NumberOfWarningFlags', sum, count_bits(base_register=1005, nb_of_registers=3, nb_of_bits=47)),
|
|
||||||
BatterySignal('/WarningFlags/TaM1', any, read_bool(base_register=1005, bit=1)),
|
|
||||||
BatterySignal('/WarningFlags/TbM1', any, read_bool(base_register=1005, bit=4)),
|
|
||||||
BatterySignal('/WarningFlags/VBm1', any, read_bool(base_register=1005, bit=6)),
|
|
||||||
BatterySignal('/WarningFlags/VBM1', any, read_bool(base_register=1005, bit=8)),
|
|
||||||
BatterySignal('/WarningFlags/IDM1', any, read_bool(base_register=1005, bit=10)),
|
|
||||||
BatterySignal('/WarningFlags/vsM1', any, read_bool(base_register=1005, bit=24)),
|
|
||||||
BatterySignal('/WarningFlags/iCM1', any, read_bool(base_register=1005, bit=26)),
|
|
||||||
BatterySignal('/WarningFlags/iDM1', any, read_bool(base_register=1005, bit=28)),
|
|
||||||
BatterySignal('/WarningFlags/MID1', any, read_bool(base_register=1005, bit=30)),
|
|
||||||
BatterySignal('/WarningFlags/BLPW', any, read_bool(base_register=1005, bit=32)),
|
|
||||||
BatterySignal('/WarningFlags/Ah_W', any, read_bool(base_register=1005, bit=35)),
|
|
||||||
BatterySignal('/WarningFlags/MPMM', any, read_bool(base_register=1005, bit=38)),
|
|
||||||
BatterySignal('/WarningFlags/TCMM', any, read_bool(base_register=1005, bit=39)),
|
|
||||||
BatterySignal('/WarningFlags/TCdi', any, read_bool(base_register=1005, bit=40)),
|
|
||||||
BatterySignal('/WarningFlags/WMTO', any, read_bool(base_register=1005, bit=41)),
|
|
||||||
BatterySignal('/WarningFlags/bit44', any, read_bool(base_register=1005, bit=44)),
|
|
||||||
BatterySignal('/WarningFlags/CELL1', any, read_bool(base_register=1005, bit=46)),
|
|
||||||
BatterySignal('/WarningFlags/bit47WarningDummy', any, read_bool(base_register=1005, bit=47)),
|
|
||||||
|
|
||||||
BatterySignal('/NumberOfAlarmFlags', sum, count_bits(base_register=1009, nb_of_registers=3, nb_of_bits=47)),
|
|
||||||
BatterySignal('/AlarmFlags/Tam', any, read_bool(base_register=1009, bit=0)),
|
|
||||||
BatterySignal('/AlarmFlags/TaM2', any, read_bool(base_register=1009, bit=2)),
|
|
||||||
BatterySignal('/AlarmFlags/Tbm', any, read_bool(base_register=1009, bit=3)),
|
|
||||||
BatterySignal('/AlarmFlags/TbM2', any, read_bool(base_register=1009, bit=5)),
|
|
||||||
BatterySignal('/AlarmFlags/VBm2', any, read_bool(base_register=1009, bit=7)),
|
|
||||||
BatterySignal('/AlarmFlags/IDM2', any, read_bool(base_register=1009, bit=11)),
|
|
||||||
BatterySignal('/AlarmFlags/ISOB', any, read_bool(base_register=1009, bit=12)),
|
|
||||||
BatterySignal('/AlarmFlags/MSWE', any, read_bool(base_register=1009, bit=13)),
|
|
||||||
BatterySignal('/AlarmFlags/FUSE', any, read_bool(base_register=1009, bit=14)),
|
|
||||||
BatterySignal('/AlarmFlags/HTRE', any, read_bool(base_register=1009, bit=15)),
|
|
||||||
BatterySignal('/AlarmFlags/TCPE', any, read_bool(base_register=1009, bit=16)),
|
|
||||||
BatterySignal('/AlarmFlags/STRE', any, read_bool(base_register=1009, bit=17)),
|
|
||||||
BatterySignal('/AlarmFlags/CME', any, read_bool(base_register=1009, bit=18)),
|
|
||||||
BatterySignal('/AlarmFlags/HWFL', any, read_bool(base_register=1009, bit=19)),
|
|
||||||
BatterySignal('/AlarmFlags/HWEM', any, read_bool(base_register=1009, bit=20)),
|
|
||||||
BatterySignal('/AlarmFlags/ThM', any, read_bool(base_register=1009, bit=21)),
|
|
||||||
BatterySignal('/AlarmFlags/vsm1', any, read_bool(base_register=1009, bit=22)),
|
|
||||||
BatterySignal('/AlarmFlags/vsm2', any, read_bool(base_register=1009, bit=23)),
|
|
||||||
BatterySignal('/AlarmFlags/vsM2', any, read_bool(base_register=1009, bit=25)),
|
|
||||||
BatterySignal('/AlarmFlags/iCM2', any, read_bool(base_register=1009, bit=27)),
|
|
||||||
BatterySignal('/AlarmFlags/iDM2', any, read_bool(base_register=1009, bit=29)),
|
|
||||||
BatterySignal('/AlarmFlags/MID2', any, read_bool(base_register=1009, bit=31)),
|
|
||||||
BatterySignal('/AlarmFlags/CCBF', any, read_bool(base_register=1009, bit=33)),
|
|
||||||
BatterySignal('/AlarmFlags/AhFL', any, read_bool(base_register=1009, bit=34)),
|
|
||||||
BatterySignal('/AlarmFlags/TbCM', any, read_bool(base_register=1009, bit=36)),
|
|
||||||
BatterySignal('/AlarmFlags/BRNF', any, read_bool(base_register=1009, bit=37)),
|
|
||||||
BatterySignal('/AlarmFlags/HTFS', any, read_bool(base_register=1009, bit=42)),
|
|
||||||
BatterySignal('/AlarmFlags/DATA', any, read_bool(base_register=1009, bit=43)),
|
|
||||||
BatterySignal('/AlarmFlags/CELL2', any, read_bool(base_register=1009, bit=45)),
|
|
||||||
BatterySignal('/AlarmFlags/bit47AlarmDummy', any, read_bool(base_register=1009, bit=47)),
|
|
||||||
|
|
||||||
BatterySignal('/LedStatus/Red', max, read_led_red),
|
|
||||||
BatterySignal('/LedStatus/Blue', max, read_led_blue),
|
|
||||||
BatterySignal('/LedStatus/Green', max, read_led_green),
|
|
||||||
BatterySignal('/LedStatus/Amber', max, read_led_amber),
|
|
||||||
|
|
||||||
BatterySignal('/IoStatus/MainSwitchClosed', any, read_bool(base_register=1013, bit=0)),
|
|
||||||
BatterySignal('/IoStatus/AlarmOutActive', any, read_bool(base_register=1013, bit=1)),
|
|
||||||
BatterySignal('/IoStatus/InternalFanActive', any, read_bool(base_register=1013, bit=2)),
|
|
||||||
BatterySignal('/IoStatus/VoltMeasurementAllowed', any, read_bool(base_register=1013, bit=3)),
|
|
||||||
BatterySignal('/IoStatus/AuxRelay', any, read_bool(base_register=1013, bit=4)),
|
|
||||||
BatterySignal('/IoStatus/RemoteState', any, read_bool(base_register=1013, bit=5)),
|
|
||||||
BatterySignal('/IoStatus/HeaterOn', any, read_bool(base_register=1013, bit=6)),
|
|
||||||
BatterySignal('/IoStatus/EocReached', min, read_eoc_reached),
|
|
||||||
BatterySignal('/IoStatus/BatteryCold', any, read_battery_cold),
|
|
||||||
|
|
||||||
# see protocol doc page 7
|
|
||||||
BatterySignal('/Info/MaxDischargeCurrent', sum, lambda bs: bs.battery.i_max, unit='A'),
|
|
||||||
BatterySignal('/Info/MaxChargeCurrent', sum, lambda bs: bs.battery.i_max, unit='A'),
|
|
||||||
BatterySignal('/Info/MaxChargeVoltage', min, lambda bs: bs.battery.v_max, unit='V'),
|
|
||||||
BatterySignal('/Info/MinDischargeVoltage', max, lambda bs: bs.battery.v_min, unit='V'),
|
|
||||||
BatterySignal('/Info/BatteryLowVoltage' , max, lambda bs: bs.battery.v_min-2, unit='V'),
|
|
||||||
BatterySignal('/Info/NumberOfStrings', sum, lambda bs: bs.battery.n_strings),
|
|
||||||
|
|
||||||
BatterySignal('/Info/MaxChargePower', sum, calc_max_charge_power),
|
|
||||||
BatterySignal('/Info/MaxDischargePower', sum, calc_max_discharge_power),
|
|
||||||
|
|
||||||
BatterySignal('/FirmwareVersion', comma_separated, lambda bs: bs.battery.firmware_version),
|
|
||||||
BatterySignal('/HardwareVersion', comma_separated, lambda bs: bs.battery.hardware_version),
|
|
||||||
BatterySignal('/BmsVersion', comma_separated, lambda bs: bs.battery.bms_version)
|
|
||||||
|
|
||||||
]
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -1,5 +0,0 @@
**/docs
**/examples
**/test
**/utils
setup.py
@ -1,8 +0,0 @@
languages:
- python
exclude_paths:
- docs/*
- tests/*
- utils/*
- pika/examples/*
- pika/spec.py
@ -1,2 +0,0 @@
[run]
omit = pika/spec.py
@ -1,15 +0,0 @@
Thank you for using Pika.

GitHub issues are **strictly** used for actionable work and pull requests.

Pika's maintainers do NOT use GitHub issues for questions, root cause
analysis, conversations, code reviews, etc.

Please direct all non-work issues to either the `pika-python` or
`rabbitmq-users` mailing list:

* https://groups.google.com/forum/#!forum/pika-python
* https://groups.google.com/forum/#!forum/rabbitmq-users

Thank you
@ -1,43 +0,0 @@
|
||||||
## Proposed Changes
|
|
||||||
|
|
||||||
Please describe the big picture of your changes here to communicate to
|
|
||||||
the Pika team why we should accept this pull request. If it fixes a bug
|
|
||||||
or resolves a feature request, be sure to link to that issue.
|
|
||||||
|
|
||||||
A pull request that doesn't explain **why** the change was made has a
|
|
||||||
much lower chance of being accepted.
|
|
||||||
|
|
||||||
If English isn't your first language, don't worry about it and try to
|
|
||||||
communicate the problem you are trying to solve to the best of your
|
|
||||||
abilities. As long as we can understand the intent, it's all good.
|
|
||||||
|
|
||||||
## Types of Changes
|
|
||||||
|
|
||||||
What types of changes does your code introduce to this project?
|
|
||||||
_Put an `x` in the boxes that apply_
|
|
||||||
|
|
||||||
- [ ] Bugfix (non-breaking change which fixes issue #NNNN)
|
|
||||||
- [ ] New feature (non-breaking change which adds functionality)
|
|
||||||
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
|
|
||||||
- [ ] Documentation (correction or otherwise)
|
|
||||||
- [ ] Cosmetics (whitespace, appearance)
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
_Put an `x` in the boxes that apply. You can also fill these out after
|
|
||||||
creating the PR. If you're unsure about any of them, don't hesitate to
|
|
||||||
ask on the
|
|
||||||
[`pika-python`](https://groups.google.com/forum/#!forum/pika-python)
|
|
||||||
mailing list. We're here to help! This is simply a reminder of what we
|
|
||||||
are going to look for before merging your code._
|
|
||||||
|
|
||||||
- [ ] I have read the `CONTRIBUTING.md` document
|
|
||||||
- [ ] All tests pass locally with my changes
|
|
||||||
- [ ] I have added tests that prove my fix is effective or that my feature works
|
|
||||||
- [ ] I have added necessary documentation (if appropriate)
|
|
||||||
|
|
||||||
## Further Comments
|
|
||||||
|
|
||||||
If this is a relatively large or complex change, kick off the discussion
|
|
||||||
by explaining why you chose the solution you did and what alternatives
|
|
||||||
you considered, etc.
|
|
|
@ -1,20 +0,0 @@
|
||||||
*.pyc
|
|
||||||
*~
|
|
||||||
.idea
|
|
||||||
.coverage
|
|
||||||
.tox
|
|
||||||
.DS_Store
|
|
||||||
.python-version
|
|
||||||
pika.iml
|
|
||||||
codegen
|
|
||||||
pika.egg-info
|
|
||||||
debug/
|
|
||||||
examples/pika
|
|
||||||
examples/blocking/pika
|
|
||||||
atlassian*xml
|
|
||||||
build
|
|
||||||
dist
|
|
||||||
docs/_build
|
|
||||||
venv*/
|
|
||||||
env/
|
|
||||||
testdata/*.conf
|
|
|
@ -1,103 +0,0 @@
|
||||||
language: python
|
|
||||||
|
|
||||||
sudo: false
|
|
||||||
|
|
||||||
addons:
|
|
||||||
apt:
|
|
||||||
sources:
|
|
||||||
- sourceline: deb https://packages.erlang-solutions.com/ubuntu trusty contrib
|
|
||||||
key_url: https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc
|
|
||||||
packages:
|
|
||||||
# apt-cache show erlang-nox=1:20.3-1 | grep Depends | tr ' ' '\n' | grep erlang | grep -v erlang-base-hipe | tr -d ',' | sed 's/$/=1:20.3-1/'
|
|
||||||
- erlang-nox
|
|
||||||
|
|
||||||
env:
|
|
||||||
global:
|
|
||||||
- RABBITMQ_VERSION=3.7.8
|
|
||||||
- RABBITMQ_DOWNLOAD_URL="https://github.com/rabbitmq/rabbitmq-server/releases/download/v$RABBITMQ_VERSION/rabbitmq-server-generic-unix-$RABBITMQ_VERSION.tar.xz"
|
|
||||||
- RABBITMQ_TAR="rabbitmq-$RABBITMQ_VERSION.tar.xz"
|
|
||||||
- PATH=$HOME/.local/bin:$PATH
|
|
||||||
- AWS_DEFAULT_REGION=us-east-1
|
|
||||||
- secure: "Eghft2UgJmWuCgnqz6O+KV5F9AERzUbKIeXkcw7vsFAVdkB9z01XgqVLhQ6N+n6i8mkiRDkc0Jes6htVtO4Hi6lTTFeDhu661YCXXTFdRdsx+D9v5bgw8Q2bP41xFy0iao7otYqkzFKIo32Q2cUYzMUqXlS661Yai5DXldr3mjM="
|
|
||||||
- secure: "LjieH/Yh0ng5gwT6+Pl3rL7RMxxb/wOlogoLG7cS99XKdX6N4WRVFvWbHWwCxoVr0be2AcyQynu4VOn+0jC8iGfQjkJZ7UrJjZCDGWbNjAWrNcY0F9VdretFDy8Vn2sHfBXq8fINqszJkgTnmbQk8dZWUtj0m/RNVnOBeBcsIOU="
|
|
||||||
|
|
||||||
stages:
|
|
||||||
- test
|
|
||||||
- name: coverage
|
|
||||||
if: repo = pika/pika
|
|
||||||
- name: deploy
|
|
||||||
if: tag IS present
|
|
||||||
|
|
||||||
cache:
|
|
||||||
apt: true
|
|
||||||
directories:
|
|
||||||
- $HOME/.cache
|
|
||||||
|
|
||||||
install:
|
|
||||||
- pip install -r test-requirements.txt
|
|
||||||
- pip install awscli==1.11.18
|
|
||||||
- if [ ! -d "$HOME/.cache" ]; then mkdir "$HOME/.cache"; fi
|
|
||||||
- if [ -s "$HOME/.cache/$RABBITMQ_TAR" ]; then echo "[INFO] found cached $RABBITMQ_TAR file"; else wget -O "$HOME/.cache/$RABBITMQ_TAR" "$RABBITMQ_DOWNLOAD_URL"; fi
|
|
||||||
- tar -C "$TRAVIS_BUILD_DIR" -xvf "$HOME/.cache/$RABBITMQ_TAR"
|
|
||||||
- sed -e "s#PIKA_DIR#$TRAVIS_BUILD_DIR#g" "$TRAVIS_BUILD_DIR/testdata/rabbitmq.conf.in" > "$TRAVIS_BUILD_DIR/testdata/rabbitmq.conf"
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- pip freeze
|
|
||||||
- /bin/sh -c "RABBITMQ_PID_FILE=$TRAVIS_BUILD_DIR/rabbitmq.pid RABBITMQ_CONFIG_FILE=$TRAVIS_BUILD_DIR/testdata/rabbitmq $TRAVIS_BUILD_DIR/rabbitmq_server-$RABBITMQ_VERSION/sbin/rabbitmq-server &"
|
|
||||||
- /bin/sh "$TRAVIS_BUILD_DIR/rabbitmq_server-$RABBITMQ_VERSION/sbin/rabbitmqctl" wait "$TRAVIS_BUILD_DIR/rabbitmq.pid"
|
|
||||||
- /bin/sh "$TRAVIS_BUILD_DIR/rabbitmq_server-$RABBITMQ_VERSION/sbin/rabbitmqctl" status
|
|
||||||
|
|
||||||
script:
|
|
||||||
# See https://github.com/travis-ci/travis-ci/issues/1066 and https://github.com/pika/pika/pull/984#issuecomment-370565220
|
|
||||||
# as to why 'set -e' and 'set +e' are added here
|
|
||||||
- set -e
|
|
||||||
- nosetests
|
|
||||||
- PIKA_TEST_TLS=true nosetests
|
|
||||||
- set +e
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- aws s3 cp .coverage "s3://com-gavinroy-travis/pika/$TRAVIS_BUILD_NUMBER/.coverage.${TRAVIS_PYTHON_VERSION}"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
include:
|
|
||||||
- python: pypy3
|
|
||||||
- python: pypy
|
|
||||||
- python: 2.7
|
|
||||||
- python: 3.4
|
|
||||||
- python: 3.5
|
|
||||||
- python: 3.6
|
|
||||||
- python: 3.7
|
|
||||||
dist: xenial # required for Python 3.7 (travis-ci/travis-ci#9069)
|
|
||||||
- stage: coverage
|
|
||||||
if: fork = false OR type != pull_request
|
|
||||||
python: 3.6
|
|
||||||
services: []
|
|
||||||
install:
|
|
||||||
- pip install awscli coverage codecov
|
|
||||||
before_script: []
|
|
||||||
script:
|
|
||||||
- mkdir coverage
|
|
||||||
- aws s3 cp --recursive s3://com-gavinroy-travis/pika/$TRAVIS_BUILD_NUMBER/ coverage
|
|
||||||
- cd coverage
|
|
||||||
- coverage combine
|
|
||||||
- cd ..
|
|
||||||
- mv coverage/.coverage .
|
|
||||||
- coverage report
|
|
||||||
after_success: codecov
|
|
||||||
- stage: deploy
|
|
||||||
if: repo = pika/pika
|
|
||||||
python: 3.6
|
|
||||||
services: []
|
|
||||||
install: true
|
|
||||||
before_script: []
|
|
||||||
script: true
|
|
||||||
after_success: []
|
|
||||||
deploy:
|
|
||||||
distributions: sdist bdist_wheel
|
|
||||||
provider: pypi
|
|
||||||
user: crad
|
|
||||||
on:
|
|
||||||
tags: true
|
|
||||||
all_branches: true
|
|
||||||
password:
|
|
||||||
secure: "V/JTU/X9C6uUUVGEAWmWWbmKW7NzVVlC/JWYpo05Ha9c0YV0vX4jOfov2EUAphM0WwkD/MRhz4dq3kCU5+cjHxR3aTSb+sbiElsCpaciaPkyrns+0wT5MCMO29Lpnq2qBLc1ePR1ey5aTWC/VibgFJOL7H/3wyvukL6ZaCnktYk="
|
|
|
@ -1,760 +0,0 @@
|
||||||
Version History
|
|
||||||
===============
|
|
||||||
|
|
||||||
0.13.1 2019-03-07
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
`GitHub milestone <https://github.com/pika/pika/milestone/14>`_
|
|
||||||
|
|
||||||
0.13.0 2019-01-17
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
`GitHub milestone <https://github.com/pika/pika/milestone/13>`_
|
|
||||||
|
|
||||||
- `AsyncioConnection`, `TornadoConnection` and `TwistedProtocolConnection` are no longer auto-imported (`PR <https://github.com/pika/pika/pull/1129>`_)
|
|
||||||
- Python `3.7` support (`Issue <https://github.com/pika/pika/issues/1107>`_)
|
|
||||||
|
|
||||||
0.12.0 2018-06-19
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
`GitHub milestone <https://github.com/pika/pika/milestone/12>`_
|
|
||||||
|
|
||||||
This is an interim release prior to version `1.0.0`. It includes the following backported pull requests and commits from the `master` branch:
|
|
||||||
|
|
||||||
- `PR #908 <https://github.com/pika/pika/pull/908>`_
|
|
||||||
- `PR #910 <https://github.com/pika/pika/pull/910>`_
|
|
||||||
- `PR #918 <https://github.com/pika/pika/pull/918>`_
|
|
||||||
- `PR #920 <https://github.com/pika/pika/pull/920>`_
|
|
||||||
- `PR #924 <https://github.com/pika/pika/pull/924>`_
|
|
||||||
- `PR #937 <https://github.com/pika/pika/pull/937>`_
|
|
||||||
- `PR #938 <https://github.com/pika/pika/pull/938>`_
|
|
||||||
- `PR #933 <https://github.com/pika/pika/pull/933>`_
|
|
||||||
- `PR #940 <https://github.com/pika/pika/pull/940>`_
|
|
||||||
- `PR #932 <https://github.com/pika/pika/pull/932>`_
|
|
||||||
- `PR #928 <https://github.com/pika/pika/pull/928>`_
|
|
||||||
- `PR #934 <https://github.com/pika/pika/pull/934>`_
|
|
||||||
- `PR #915 <https://github.com/pika/pika/pull/915>`_
|
|
||||||
- `PR #946 <https://github.com/pika/pika/pull/946>`_
|
|
||||||
- `PR #947 <https://github.com/pika/pika/pull/947>`_
|
|
||||||
- `PR #952 <https://github.com/pika/pika/pull/952>`_
|
|
||||||
- `PR #956 <https://github.com/pika/pika/pull/956>`_
|
|
||||||
- `PR #966 <https://github.com/pika/pika/pull/966>`_
|
|
||||||
- `PR #975 <https://github.com/pika/pika/pull/975>`_
|
|
||||||
- `PR #978 <https://github.com/pika/pika/pull/978>`_
|
|
||||||
- `PR #981 <https://github.com/pika/pika/pull/981>`_
|
|
||||||
- `PR #994 <https://github.com/pika/pika/pull/994>`_
|
|
||||||
- `PR #1007 <https://github.com/pika/pika/pull/1007>`_
|
|
||||||
- `PR #1045 <https://github.com/pika/pika/pull/1045>`_ (manually backported)
|
|
||||||
- `PR #1011 <https://github.com/pika/pika/pull/1011>`_
|
|
||||||
|
|
||||||
Commits:

Travis CI fail fast - 3f0e739

New features:

`BlockingConnection` now supports the `add_callback_threadsafe` method which allows a function to be executed correctly on the IO loop thread. The main use-case for this is as follows:

- Application sets up a thread for `BlockingConnection` and calls `basic_consume` on it
- When a message is received, work is done on another thread
- When the work is done, the worker uses `connection.add_callback_threadsafe` to call the `basic_ack` method on the channel instance.

Please see `examples/basic_consumer_threaded.py` for an example; a minimal sketch of the pattern is shown below. As always, `SelectConnection` and a fully async consumer/publisher are the preferred way of using Pika.
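
A minimal sketch of that pattern (assuming the pre-1.0 `basic_consume(callback, queue)` signature used by 0.12/0.13; the queue name is illustrative):

.. code :: python

    import functools
    import threading

    import pika

    def on_message(channel, method, properties, body, connection):
        # Hand the message off to a worker thread so the IO loop is not blocked.
        def work():
            # ... long-running processing of `body` happens here ...
            # basic_ack must run on the connection's thread; schedule it there.
            ack = functools.partial(channel.basic_ack,
                                    delivery_tag=method.delivery_tag)
            connection.add_callback_threadsafe(ack)
        threading.Thread(target=work).start()

    connection = pika.BlockingConnection()
    channel = connection.channel()
    channel.queue_declare(queue='test')
    callback = functools.partial(on_message, connection=connection)
    channel.basic_consume(callback, queue='test')
    channel.start_consuming()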

Heartbeats are now sent at an interval equal to 1/2 of the negotiated idle connection timeout. RabbitMQ's default timeout value is 60 seconds, so heartbeats will be sent at a 30 second interval. In addition, Pika's check for an idle connection will be done at an interval equal to the timeout value plus 5 seconds to allow for delays. This results in an interval of 65 seconds by default.
|
|
||||||
|
|
||||||
0.11.2 2017-11-30
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
`GitHub milestone <https://github.com/pika/pika/milestone/11>`_
|
|
||||||
|
|
||||||
`0.11.2 <https://github.com/pika/pika/compare/0.11.1...0.11.2>`_
|
|
||||||
|
|
||||||
- Remove `+` character from platform releases string (`PR <https://github.com/pika/pika/pull/895>`_)
|
|
||||||
|
|
||||||
0.11.1 2017-11-27
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
`GitHub milestone <https://github.com/pika/pika/milestone/10>`_
|
|
||||||
|
|
||||||
`0.11.1 <https://github.com/pika/pika/compare/0.11.0...0.11.1>`_
|
|
||||||
|
|
||||||
- Fix `BlockingConnection` to ensure event loop exits (`PR <https://github.com/pika/pika/pull/887>`_)
|
|
||||||
- Heartbeat timeouts will use the client value if specified (`PR <https://github.com/pika/pika/pull/874>`_)
|
|
||||||
- Allow setting some common TCP options (`PR <https://github.com/pika/pika/pull/880>`_)
|
|
||||||
- Errors when decoding Unicode are ignored (`PR <https://github.com/pika/pika/pull/890>`_)
|
|
||||||
- Fix large number encoding (`PR <https://github.com/pika/pika/pull/888>`_)
|
|
||||||
|
|
||||||
0.11.0 2017-07-29
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
`GitHub milestone <https://github.com/pika/pika/milestone/9>`_
|
|
||||||
|
|
||||||
`0.11.0 <https://github.com/pika/pika/compare/0.10.0...0.11.0>`_
|
|
||||||
|
|
||||||
- Simplify Travis CI configuration for OS X.
|
|
||||||
- Add `asyncio` connection adapter for Python 3.4 and newer.
|
|
||||||
- Connection failures that occur after the socket is opened and before the
|
|
||||||
AMQP connection is ready to go are now reported by calling the connection
|
|
||||||
error callback. Previously these were not consistently reported.
|
|
||||||
- In BaseConnection.close, call _handle_ioloop_stop only if the connection is
|
|
||||||
already closed to allow the asynchronous close operation to complete
|
|
||||||
gracefully.
|
|
||||||
- Pass error information from failed socket connection to user callbacks
|
|
||||||
on_open_error_callback and on_close_callback with result_code=-1.
|
|
||||||
- ValueError is raised when a completion callback is passed to an asynchronous
|
|
||||||
(nowait) Channel operation. It's an application error to pass a non-None
|
|
||||||
completion callback with an asynchronous request, because this callback can
|
|
||||||
never be serviced in the asynchronous scenario.
|
|
||||||
- `Channel.basic_reject` fixed to allow `delivery_tag` to be of type `long`
|
|
||||||
as well as `int`. (by quantum5)
|
|
||||||
- Implemented support for blocked connection timeouts in
  `pika.connection.Connection`. This feature is available to all pika adapters.
  See `pika.connection.ConnectionParameters` docstring to learn more about
  `blocked_connection_timeout` configuration (see the sketch after this list).
- Deprecated the `heartbeat_interval` arg in `pika.ConnectionParameters` in
  favor of the `heartbeat` arg for consistency with the other connection
  parameters classes `pika.connection.Parameters` and `pika.URLParameters`.
- When the `port` arg is not set explicitly in `ConnectionParameters`
  constructor, but the `ssl` arg is set explicitly, then set the port value to
  the default AMQP SSL port if SSL is enabled, otherwise to the default
  AMQP plaintext port.
- `URLParameters` will raise ValueError if a non-empty URL scheme other than
  {amqp | amqps | http | https} is specified.
|
|
||||||
- `InvalidMinimumFrameSize` and `InvalidMaximumFrameSize` exceptions are
|
|
||||||
deprecated. pika.connection.Parameters.frame_max property setter now raises
|
|
||||||
the standard `ValueError` exception when the value is out of bounds.
|
|
||||||
- Removed deprecated parameter `type` in `Channel.exchange_declare` and
|
|
||||||
`BlockingChannel.exchange_declare` in favor of the `exchange_type` arg that
|
|
||||||
doesn't overshadow the builtin `type` keyword.
|
|
||||||
- Channel.close() on OPENING channel transitions it to CLOSING instead of
|
|
||||||
raising ChannelClosed.
|
|
||||||
- Channel.close() on CLOSING channel raises `ChannelAlreadyClosing`; used to
|
|
||||||
raise `ChannelClosed`.
|
|
||||||
- Connection.channel() raises `ConnectionClosed` if connection is not in OPEN
|
|
||||||
state.
|
|
||||||
- When performing graceful close on a channel and `Channel.Close` from broker
|
|
||||||
arrives while waiting for CloseOk, don't release the channel number until
|
|
||||||
CloseOk arrives to avoid race condition that may lead to a new channel
|
|
||||||
receiving the CloseOk that was destined for the closing channel.
|
|
||||||
- The `backpressure_detection` option of `ConnectionParameters` and
|
|
||||||
`URLParameters` property is DEPRECATED in favor of `Connection.Blocked` and
|
|
||||||
`Connection.Unblocked`. See `Connection.add_on_connection_blocked_callback`.
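
A minimal sketch of the connection-parameter changes noted above (the host, URL and timeout values are illustrative only):

.. code :: python

    import pika

    # `heartbeat` replaces the deprecated `heartbeat_interval` argument;
    # `blocked_connection_timeout` bounds how long the connection may stay
    # in the Connection.Blocked state before Pika abandons it.
    params = pika.ConnectionParameters(host='localhost',
                                       heartbeat=600,
                                       blocked_connection_timeout=300)
    connection = pika.BlockingConnection(params)

    # URLParameters now rejects unsupported schemes with ValueError;
    # amqp, amqps, http and https are accepted.
    url_params = pika.URLParameters('amqps://user:password@broker.example.com:5671/%2F')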
|
|
||||||
|
|
||||||
0.10.0 2015-09-02
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
`0.10.0 <https://github.com/pika/pika/compare/0.9.14...0.10.0>`_
|
|
||||||
|
|
||||||
- a9bf96d - LibevConnection: Fixed dict chgd size during iteration (Michael Laing)
|
|
||||||
- 388c55d - SelectConnection: Fixed KeyError exceptions in IOLoop timeout executions (Shinji Suzuki)
|
|
||||||
- 4780de3 - BlockingConnection: Add support to make BlockingConnection a Context Manager (@reddec)
|
|
||||||
|
|
||||||
0.10.0b2 2015-07-15
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
- f72b58f - Fixed failure to purge _ConsumerCancellationEvt from BlockingChannel._pending_events during basic_cancel. (Vitaly Kruglikov)
|
|
||||||
|
|
||||||
0.10.0b1 2015-07-10
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
High-level summary of notable changes:
|
|
||||||
|
|
||||||
- Change to 3-Clause BSD License
|
|
||||||
- Python 3.x support
|
|
||||||
- Over 150 commits from 19 contributors
|
|
||||||
- Refactoring of SelectConnection ioloop
|
|
||||||
- This major release contains certain non-backward-compatible API changes as
|
|
||||||
well as significant performance improvements in the `BlockingConnection`
|
|
||||||
adapter.
|
|
||||||
- Non-backward-compatible changes in `Channel.add_on_return_callback` callback's
|
|
||||||
signature.
|
|
||||||
- The `AsyncoreConnection` adapter was retired
|
|
||||||
|
|
||||||
**Details**
|
|
||||||
|
|
||||||
Python 3.x: this release introduces python 3.x support. Tested on Python 3.3
|
|
||||||
and 3.4.
|
|
||||||
|
|
||||||
`AsyncoreConnection`: Retired this legacy adapter to reduce maintenance burden;
|
|
||||||
the recommended replacement is the `SelectConnection` adapter.
|
|
||||||
|
|
||||||
`SelectConnection`: ioloop was refactored for compatibility with other ioloops.
|
|
||||||
|
|
||||||
`Channel.add_on_return_callback`: The callback is now passed the individual
|
|
||||||
parameters channel, method, properties, and body instead of a tuple of those
|
|
||||||
values for congruence with other similar callbacks.
|
|
||||||
|
|
||||||
`BlockingConnection`: This adapter underwent a makeover under the hood and
|
|
||||||
gained significant performance improvements as well as enhanced timer
|
|
||||||
resolution. It is now implemented as a client of the `SelectConnection` adapter.
|
|
||||||
|
|
||||||
Below is an overview of the `BlockingConnection` and `BlockingChannel` API
|
|
||||||
changes:
|
|
||||||
|
|
||||||
- Recursion: the new implementation eliminates callback recursion that
|
|
||||||
sometimes blew out the stack in the legacy implementation (e.g.,
|
|
||||||
publish -> consumer_callback -> publish -> consumer_callback, etc.). While
|
|
||||||
`BlockingConnection.process_data_events` and `BlockingConnection.sleep` may
|
|
||||||
still be called from the scope of the blocking adapter's callbacks in order
|
|
||||||
to process pending I/O, additional callbacks will be suppressed whenever
|
|
||||||
`BlockingConnection.process_data_events` and `BlockingConnection.sleep` are
|
|
||||||
nested in any combination; in that case, the callback information will be
|
|
||||||
buffered and dispatched once nesting unwinds and control returns to the
|
|
||||||
level-zero dispatcher.
|
|
||||||
- `BlockingConnection.connect`: this method was removed in favor of the
|
|
||||||
constructor as the only way to establish connections; this reduces
|
|
||||||
maintenance burden, while improving reliability of the adapter.
|
|
||||||
- `BlockingConnection.process_data_events`: added the optional parameter
|
|
||||||
`time_limit`.
|
|
||||||
- `BlockingConnection.add_on_close_callback`: removed; legacy raised
|
|
||||||
`NotImplementedError`.
|
|
||||||
- `BlockingConnection.add_on_open_callback`: removed; legacy raised
|
|
||||||
`NotImplementedError`.
|
|
||||||
- `BlockingConnection.add_on_open_error_callback`: removed; legacy raised
|
|
||||||
`NotImplementedError`.
|
|
||||||
- `BlockingConnection.add_backpressure_callback`: not supported
|
|
||||||
- `BlockingConnection.set_backpressure_multiplier`: not supported
|
|
||||||
- `BlockingChannel.add_on_flow_callback`: not supported; per docstring in
|
|
||||||
channel.py: "Note that newer versions of RabbitMQ will not issue this but
|
|
||||||
instead use TCP backpressure".
|
|
||||||
- `BlockingChannel.flow`: not supported
|
|
||||||
- `BlockingChannel.force_data_events`: removed as it is no longer necessary
|
|
||||||
following redesign of the adapter.
|
|
||||||
- Removed the `nowait` parameter from `BlockingChannel` methods, forcing
|
|
||||||
`nowait=False` (former API default) in the implementation; this is more
|
|
||||||
suitable for the blocking nature of the adapter and its error-reporting
|
|
||||||
strategy; this concerns the following methods: `basic_cancel`,
|
|
||||||
`confirm_delivery`, `exchange_bind`, `exchange_declare`, `exchange_delete`,
|
|
||||||
`exchange_unbind`, `queue_bind`, `queue_declare`, `queue_delete`, and
|
|
||||||
`queue_purge`.
|
|
||||||
- `BlockingChannel.basic_cancel`: returns a sequence instead of None; for a
|
|
||||||
`no_ack=True` consumer, `basic_cancel` returns a sequence of pending
|
|
||||||
messages that arrived before broker confirmed the cancellation.
|
|
||||||
- `BlockingChannel.consume`: added new optional kwargs `arguments` and
|
|
||||||
`inactivity_timeout`. Also, raises ValueError if the consumer creation
|
|
||||||
parameters don't match those used to create the existing queue consumer
|
|
||||||
generator, if any; this happens when you break out of the consume loop, then
|
|
||||||
call `BlockingChannel.consume` again with different consumer-creation args
|
|
||||||
without first cancelling the previous queue consumer generator via
|
|
||||||
`BlockingChannel.cancel`. The legacy implementation would silently resume
|
|
||||||
consuming from the existing queue consumer generator even if the subsequent
|
|
||||||
`BlockingChannel.consume` was invoked with a different queue name, etc.
|
|
||||||
- `BlockingChannel.cancel`: returns 0; the legacy implementation tried to
|
|
||||||
return the number of requeued messages, but this number was not accurate
|
|
||||||
as it didn't include the messages returned by the Channel class; this count
|
|
||||||
is not generally useful, so returning 0 is a reasonable replacement.
|
|
||||||
- `BlockingChannel.open`: removed in favor of having a single mechanism for
|
|
||||||
creating a channel (`BlockingConnection.channel`); this reduces maintenance
|
|
||||||
burden, while improving reliability of the adapter.
|
|
||||||
- `BlockingChannel.confirm_delivery`: raises UnroutableError when unroutable
|
|
||||||
messages that were sent prior to this call are returned before we receive
|
|
||||||
Confirm.Select-ok.
|
|
||||||
- `BlockingChannel.basic_publish`: always returns True when delivery
  confirmation is not enabled (publisher-acks = off); the legacy implementation
  returned a bool in this case if `mandatory=True` to indicate whether the
  message was delivered; however, this was non-deterministic, because
  Basic.Return is asynchronous and there is no way to know how long to wait
  for it or its absence. The legacy implementation returned None when
  publishing with publisher-acks = off and `mandatory=False`. The new
  implementation always returns True when publishing while
  publisher-acks = off.
- `BlockingChannel.publish`: a new alternate method (vs. `basic_publish`) for
  publishing a message with more detailed error reporting via UnroutableError
  and NackError exceptions (see the sketch after this list).
|
|
||||||
- `BlockingChannel.start_consuming`: raises pika.exceptions.RecursionError if
|
|
||||||
called from the scope of a `BlockingConnection` or `BlockingChannel`
|
|
||||||
callback.
|
|
||||||
- `BlockingChannel.get_waiting_message_count`: new method; returns the number
|
|
||||||
of messages that may be retrieved from the current queue consumer generator
|
|
||||||
via `BlockingChannel.consume` without blocking.
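
A minimal sketch of the error-reporting publish path described above (hedged: this uses the 0.10-era `BlockingChannel.publish` API; the exchange and routing key are illustrative):

.. code :: python

    import pika
    from pika.exceptions import NackError, UnroutableError

    connection = pika.BlockingConnection()
    channel = connection.channel()
    channel.confirm_delivery()  # enable publisher acknowledgements

    try:
        # publish() raises on failure instead of returning a bool like
        # basic_publish().
        channel.publish(exchange='',
                        routing_key='no-such-queue',
                        body='test message',
                        mandatory=True)
    except UnroutableError:
        print('message was returned as unroutable')
    except NackError:
        print('broker nacked the message')
    finally:
        connection.close()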
|
|
||||||
|
|
||||||
**Commits**
|
|
||||||
|
|
||||||
- 5aaa753 - Fixed SSL import and removed no_ack=True in favor of explicit AMQP message handling based on deferreds (skftn)
|
|
||||||
- 7f222c2 - Add checkignore for codeclimate (Gavin M. Roy)
|
|
||||||
- 4dec370 - Implemented BlockingChannel.flow; Implemented BlockingConnection.add_on_connection_blocked_callback; Implemented BlockingConnection.add_on_connection_unblocked_callback. (Vitaly Kruglikov)
|
|
||||||
- 4804200 - Implemented blocking adapter acceptance test for exchange-to-exchange binding. Added rudimentary validation of BasicProperties passthru in blocking adapter publish tests. Updated CHANGELOG. (Vitaly Kruglikov)
|
|
||||||
- 4ec07fd - Fixed sending of data in TwistedProtocolConnection (Vitaly Kruglikov)
|
|
||||||
- a747fb3 - Remove my copyright from forward_server.py test utility. (Vitaly Kruglikov)
|
|
||||||
- 94246d2 - Return True from basic_publish when pubacks is off. Implemented more blocking adapter accceptance tests. (Vitaly Kruglikov)
|
|
||||||
- 3ce013d - PIKA-609 Wait for broker to dispatch all messages to client before cancelling consumer in TestBasicCancelWithNonAckableConsumer and TestBasicCancelWithAckableConsumer (Vitaly Kruglikov)
|
|
||||||
- 293f778 - Created CHANGELOG entry for release 0.10.0. Fixed up callback documentation for basic_get, basic_consume, and add_on_return_callback. (Vitaly Kruglikov)
|
|
||||||
- 16d360a - Removed the legacy AsyncoreConnection adapter in favor of the recommended SelectConnection adapter. (Vitaly Kruglikov)
|
|
||||||
- 240a82c - Defer creation of poller's event loop interrupt socket pair until start is called, because some SelectConnection users (e.g., BlockingConnection adapter) don't use the event loop, and these sockets would just get reported as resource leaks. (Vitaly Kruglikov)
|
|
||||||
- aed5cae - Added EINTR loops in select_connection pollers. Addressed some pylint findings, including an error or two. Wrap socket.send and socket.recv calls in EINTR loops Use the correct exception for socket.error and select.error and get errno depending on python version. (Vitaly Kruglikov)
|
|
||||||
- 498f1be - Allow passing exchange, queue and routing_key as text, handle short strings as text in python3 (saarni)
|
|
||||||
- 9f7f243 - Restored basic_consume, basic_cancel, and add_on_cancel_callback (Vitaly Kruglikov)
|
|
||||||
- 18c9909 - Reintroduced BlockingConnection.process_data_events. (Vitaly Kruglikov)
|
|
||||||
- 4b25cb6 - Fixed BlockingConnection/BlockingChannel acceptance and unit tests (Vitaly Kruglikov)
|
|
||||||
- bfa932f - Facilitate proper connection state after BasicConnection._adapter_disconnect (Vitaly Kruglikov)
|
|
||||||
- 9a09268 - Fixed BlockingConnection test that was failing with ConnectionClosed error. (Vitaly Kruglikov)
|
|
||||||
- 5a36934 - Copied synchronous_connection.py from pika-synchronous branch Fixed pylint findings Integrated SynchronousConnection with the new ioloop in SelectConnection Defined dedicated message classes PolledMessage and ConsumerMessage and moved from BlockingChannel to module-global scope. Got rid of nowait args from BlockingChannel public API methods Signal unroutable messages via UnroutableError exception. Signal Nack'ed messages via NackError exception. These expose more information about the failure than legacy basic_publich API. Removed set_timeout and backpressure callback methods Restored legacy `is_open`, etc. property names (Vitaly Kruglikov)
|
|
||||||
- 6226dc0 - Remove deprecated --use-mirrors (Gavin M. Roy)
|
|
||||||
- 1a7112f - Raise ConnectionClosed when sending a frame with no connection (#439) (Gavin M. Roy)
|
|
||||||
- 9040a14 - Make delivery_tag non-optional (#498) (Gavin M. Roy)
|
|
||||||
- 86aabc2 - Bump version (Gavin M. Roy)
|
|
||||||
- 562075a - Update a few testing things (Gavin M. Roy)
|
|
||||||
- 4954d38 - use unicode_type in blocking_connection.py (Antti Haapala)
|
|
||||||
- 133d6bc - Let Travis install ordereddict for Python 2.6, and ttest 3.3, 3.4 too. (Antti Haapala)
|
|
||||||
- 0d2287d - Pika Python 3 support (Antti Haapala)
|
|
||||||
- 3125c79 - SSLWantRead is not supported before python 2.7.9 and 3.3 (Will)
|
|
||||||
- 9a9c46c - Fixed TestDisconnectDuringConnectionStart: it turns out that depending on callback order, it might get either ProbableAuthenticationError or ProbableAccessDeniedError. (Vitaly Kruglikov)
|
|
||||||
- cd8c9b0 - A fix the write starvation problem that we see with tornado and pika (Will)
|
|
||||||
- 8654fbc - SelectConnection - make interrupt socketpair non-blocking (Will)
|
|
||||||
- 4f3666d - Added copyright in forward_server.py and fixed NameError bug (Vitaly Kruglikov)
|
|
||||||
- f8ebbbc - ignore docs (Gavin M. Roy)
|
|
||||||
- a344f78 - Updated codeclimate config (Gavin M. Roy)
|
|
||||||
- 373c970 - Try and fix pathing issues in codeclimate (Gavin M. Roy)
|
|
||||||
- 228340d - Ignore codegen (Gavin M. Roy)
|
|
||||||
- 4db0740 - Add a codeclimate config (Gavin M. Roy)
|
|
||||||
- 7e989f9 - Slight code re-org, usage comment and better naming of test file. (Will)
|
|
||||||
- 287be36 - Set up _kqueue member of KQueuePoller before calling super constructor to avoid exception due to missing _kqueue member. Call `self._map_event(event)` instead of `self._map_event(event.filter)`, because `KQueuePoller._map_event()` assumes it's getting an event, not an event filter. (Vitaly Kruglikov)
|
|
||||||
- 62810fb - Fix issue #412: reset BlockingConnection._read_poller in BlockingConnection._adapter_disconnect() to guard against accidental access to old file descriptor. (Vitaly Kruglikov)
|
|
||||||
- 03400ce - Rationalise adapter acceptance tests (Will)
|
|
||||||
- 9414153 - Fix bug selecting non epoll poller (Will)
|
|
||||||
- 4f063df - Use user heartbeat setting if server proposes none (Pau Gargallo)
|
|
||||||
- 9d04d6e - Deactivate heartbeats when heartbeat_interval is 0 (Pau Gargallo)
|
|
||||||
- a52a608 - Bug fix and review comments. (Will)
|
|
||||||
- e3ebb6f - Fix incorrect x-expires argument in acceptance tests (Will)
|
|
||||||
- 294904e - Get BlockingConnection into consistent state upon loss of TCP/IP connection with broker and implement acceptance tests for those cases. (Vitaly Kruglikov)
|
|
||||||
- 7f91a68 - Make SelectConnection behave like an ioloop (Will)
|
|
||||||
- dc9db2b - Perhaps 5 seconds is too agressive for travis (Gavin M. Roy)
|
|
||||||
- c23e532 - Lower the stuck test timeout (Gavin M. Roy)
|
|
||||||
- 1053ebc - Late night bug (Gavin M. Roy)
|
|
||||||
- cd6c1bf - More BaseConnection._handle_error cleanup (Gavin M. Roy)
|
|
||||||
- a0ff21c - Fix the test to work with Python 2.6 (Gavin M. Roy)
|
|
||||||
- 748e8aa - Remove pypy for now (Gavin M. Roy)
|
|
||||||
- 1c921c1 - Socket close/shutdown cleanup (Gavin M. Roy)
|
|
||||||
- 5289125 - Formatting update from PR (Gavin M. Roy)
|
|
||||||
- d235989 - Be more specific when calling getaddrinfo (Gavin M. Roy)
|
|
||||||
- b5d1b31 - Reflect the method name change in pika.callback (Gavin M. Roy)
|
|
||||||
- df7d3b7 - Cleanup BlockingConnection in a few places (Gavin M. Roy)
|
|
||||||
- cd99e1c - Rename method due to use in BlockingConnection (Gavin M. Roy)
|
|
||||||
- 7e0d1b3 - Use google style with yapf instead of pep8 (Gavin M. Roy)
|
|
||||||
- 7dc9bab - Refactor socket writing to not use sendall #481 (Gavin M. Roy)
|
|
||||||
- 4838789 - Dont log the fd #521 (Gavin M. Roy)
|
|
||||||
- 765107d - Add Connection.Blocked callback registration methods #476 (Gavin M. Roy)
|
|
||||||
- c15b5c1 - Fix _blocking typo pointed out in #513 (Gavin M. Roy)
|
|
||||||
- 759ac2c - yapf of codegen (Gavin M. Roy)
|
|
||||||
- 9dadd77 - yapf cleanup of codegen and spec (Gavin M. Roy)
|
|
||||||
- ddba7ce - Do not reject consumers with no_ack=True #486 #530 (Gavin M. Roy)
|
|
||||||
- 4528a1a - yapf reformatting of tests (Gavin M. Roy)
|
|
||||||
- e7b6d73 - Remove catching AttributError (#531) (Gavin M. Roy)
|
|
||||||
- 41ea5ea - Update README badges [skip ci] (Gavin M. Roy)
|
|
||||||
- 6af987b - Add note on contributing (Gavin M. Roy)
|
|
||||||
- 161fc0d - yapf formatting cleanup (Gavin M. Roy)
|
|
||||||
- edcb619 - Add PYPY to travis testing (Gavin M. Roy)
|
|
||||||
- 2225771 - Change the coverage badge (Gavin M. Roy)
|
|
||||||
- 8f7d451 - Move to codecov from coveralls (Gavin M. Roy)
|
|
||||||
- b80407e - Add confirm_delivery to example (Andrew Smith)
|
|
||||||
- 6637212 - Update base_connection.py (bstemshorn)
|
|
||||||
- 1583537 - #544 get_waiting_message_count() (markcf)
|
|
||||||
- 0c9be99 - Fix #535: pass expected reply_code and reply_text from method frame to Connection._on_disconnect from Connection._on_connection_closed (Vitaly Kruglikov)
|
|
||||||
- d11e73f - Propagate ConnectionClosed exception out of BlockingChannel._send_method() and log ConnectionClosed in BlockingConnection._on_connection_closed() (Vitaly Kruglikov)
|
|
||||||
- 63d2951 - Fix #541 - make sure connection state is properly reset when BlockingConnection._check_state_on_disconnect raises ConnectionClosed. This supplements the previously-merged PR #450 by getting the connection into consistent state. (Vitaly Kruglikov)
|
|
||||||
- 71bc0eb - Remove unused self.fd attribute from BaseConnection (Vitaly Kruglikov)
|
|
||||||
- 8c08f93 - PIKA-532 Removed unnecessary params (Vitaly Kruglikov)
|
|
||||||
- 6052ecf - PIKA-532 Fix bug in BlockingConnection._handle_timeout that was preventing _on_connection_closed from being called when not closing. (Vitaly Kruglikov)
|
|
||||||
- 562aa15 - pika: callback: Display exception message when callback fails. (Stuart Longland)
|
|
||||||
- 452995c - Typo fix in connection.py (Andrew)
|
|
||||||
- 361c0ad - Added some missing yields (Robert Weidlich)
|
|
||||||
- 0ab5a60 - Added complete example for python twisted service (Robert Weidlich)
|
|
||||||
- 4429110 - Add deployment and webhooks (Gavin M. Roy)
|
|
||||||
- 7e50302 - Fix has_content style in codegen (Andrew Grigorev)
|
|
||||||
- 28c2214 - Fix the trove categorization (Gavin M. Roy)
|
|
||||||
- de8b545 - Ensure frames can not be interspersed on send (Gavin M. Roy)
|
|
||||||
- 8fe6bdd - Fix heartbeat behaviour after connection failure. (Kyösti Herrala)
|
|
||||||
- c123472 - Updating BlockingChannel.basic_get doc (it does not receive a callback like the rest of the adapters) (Roberto Decurnex)
|
|
||||||
- b5f52fb - Fix number of arguments passed to _on_return callback (Axel Eirola)
|
|
||||||
- 765139e - Lower default TIMEOUT to 0.01 (bra-fsn)
|
|
||||||
- 6cc22a5 - Fix confirmation on reconnects (bra-fsn)
|
|
||||||
- f4faf0a - asynchronous publisher and subscriber examples refactored to follow the StepDown rule (Riccardo Cirimelli)
|
|
||||||
|
|
||||||
0.9.14 - 2014-07-11
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
`0.9.14 <https://github.com/pika/pika/compare/0.9.13...0.9.14>`_
|
|
||||||
|
|
||||||
- 57fe43e - fix test to generate a correct range of random ints (ml)
|
|
||||||
- 0d68dee - fix async watcher for libev_connection (ml)
|
|
||||||
- 01710ad - Use default username and password if not specified in URLParameters (Sean Dwyer)
|
|
||||||
- fae328e - documentation typo (Jeff Fein-Worton)
|
|
||||||
- afbc9e0 - libev_connection: reset_io_watcher (ml)
|
|
||||||
- 24332a2 - Fix the manifest (Gavin M. Roy)
|
|
||||||
- acdfdef - Remove useless test (Gavin M. Roy)
|
|
||||||
- 7918e1a - Skip libev tests if pyev is not installed or if they are being run in pypy (Gavin M. Roy)
|
|
||||||
- bb583bf - Remove the deprecated test (Gavin M. Roy)
|
|
||||||
- aecf3f2 - Don't reject a message if the channel is not open (Gavin M. Roy)
|
|
||||||
- e37f336 - Remove UTF-8 decoding in spec (Gavin M. Roy)
|
|
||||||
- ddc35a9 - Update the unittest to reflect removal of force binary (Gavin M. Roy)
|
|
||||||
- fea2476 - PEP8 cleanup (Gavin M. Roy)
|
|
||||||
- 9b97956 - Remove force_binary (Gavin M. Roy)
|
|
||||||
- a42dd90 - Whitespace required (Gavin M. Roy)
|
|
||||||
- 85867ea - Update the content_frame_dispatcher tests to reflect removal of auto-cast utf-8 (Gavin M. Roy)
|
|
||||||
- 5a4bd5d - Remove unicode casting (Gavin M. Roy)
|
|
||||||
- efea53d - Remove force binary and unicode casting (Gavin M. Roy)
|
|
||||||
- e918d15 - Add methods to remove deprecation warnings from asyncore (Gavin M. Roy)
|
|
||||||
- 117f62d - Add a coveragerc to ignore the auto generated pika.spec (Gavin M. Roy)
|
|
||||||
- 52f4485 - Remove pypy tests from travis for now (Gavin M. Roy)
|
|
||||||
- c3aa958 - Update README.rst (Gavin M. Roy)
|
|
||||||
- 3e2319f - Delete README.md (Gavin M. Roy)
|
|
||||||
- c12b0f1 - Move to RST (Gavin M. Roy)
|
|
||||||
- 704f5be - Badging updates (Gavin M. Roy)
|
|
||||||
- 7ae33ca - Update for coverage info (Gavin M. Roy)
|
|
||||||
- ae7ca86 - add libev_adapter_tests.py; modify .travis.yml to install libev and pyev (ml)
|
|
||||||
- f86aba5 - libev_connection: add **kwargs to _handle_event; suppress default_ioloop reuse warning (ml)
|
|
||||||
- 603f1cf - async_test_base: add necessary args to _on_cconn_closed (ml)
|
|
||||||
- 3422007 - add libev_adapter_tests.py (ml)
|
|
||||||
- 6cbab0c - removed relative imports and importing urlparse from urllib.parse for py3+ (a-tal)
|
|
||||||
- f808464 - libev_connection: add async watcher; add optional parameters to add_timeout (ml)
|
|
||||||
- c041c80 - Remove ev all together for now (Gavin M. Roy)
|
|
||||||
- 9408388 - Update the test descriptions and timeout (Gavin M. Roy)
|
|
||||||
- 1b552e0 - Increase timeout (Gavin M. Roy)
|
|
||||||
- 69a1f46 - Remove the pyev requirement for 2.6 testing (Gavin M. Roy)
|
|
||||||
- fe062d2 - Update package name (Gavin M. Roy)
|
|
||||||
- 611ad0e - Distribute the LICENSE and README.md (#350) (Gavin M. Roy)
|
|
||||||
- df5e1d8 - Ensure that the entire frame is written using socket.sendall (#349) (Gavin M. Roy)
|
|
||||||
- 69ec8cf - Move the libev install to before_install (Gavin M. Roy)
|
|
||||||
- a75f693 - Update test structure (Gavin M. Roy)
|
|
||||||
- 636b424 - Update things to ignore (Gavin M. Roy)
|
|
||||||
- b538c68 - Add tox, nose.cfg, update testing config (Gavin M. Roy)
|
|
||||||
- a0e7063 - add some tests to increase coverage of pika.connection (Charles Law)
|
|
||||||
- c76d9eb - Address issue #459 (Gavin M. Roy)
|
|
||||||
- 86ad2db - Raise exception if positional arg for parameters isn't an instance of Parameters (Gavin M. Roy)
|
|
||||||
- 14d08e1 - Fix for python 2.6 (Gavin M. Roy)
|
|
||||||
- bd388a3 - Use the first unused channel number addressing #404, #460 (Gavin M. Roy)
|
|
||||||
- e7676e6 - removing a debug that was left in last commit (James Mutton)
|
|
||||||
- 6c93b38 - Fixing connection-closed behavior to detect on attempt to publish (James Mutton)
|
|
||||||
- c3f0356 - Initialize bytes_written in _handle_write() (Jonathan Kirsch)
|
|
||||||
- 4510e95 - Fix _handle_write() may not send full frame (Jonathan Kirsch)
|
|
||||||
- 12b793f - fixed Tornado Consumer example to successfully reconnect (Yang Yang)
|
|
||||||
- f074444 - remove forgotten import of ordereddict (Pedro Abranches)
|
|
||||||
- 1ba0aea - fix last merge (Pedro Abranches)
|
|
||||||
- 10490a6 - change timeouts structure to list to maintain scheduling order (Pedro Abranches)
|
|
||||||
- 7958394 - save timeouts in ordered dict instead of dict (Pedro Abranches)
|
|
||||||
- d2746bf - URLParameters and ConnectionParameters accept unicode strings (Allard Hoeve)
|
|
||||||
- 596d145 - previous fix for AttributeError made parent and child class methods identical, remove duplication (James Mutton)
|
|
||||||
- 42940dd - UrlParameters Docs: fixed amqps scheme examples (Riccardo Cirimelli)
|
|
||||||
- 43904ff - Dont test this in PyPy due to sort order issue (Gavin M. Roy)
|
|
||||||
- d7d293e - Don't leave __repr__ sorting up to chance (Gavin M. Roy)
|
|
||||||
- 848c594 - Add integration test to travis and fix invocation (Gavin M. Roy)
|
|
||||||
- 2678275 - Add pypy to travis tests (Gavin M. Roy)
|
|
||||||
- 1877f3d - Also addresses issue #419 (Gavin M. Roy)
|
|
||||||
- 470c245 - Address issue #419 (Gavin M. Roy)
|
|
||||||
- ca3cb59 - Address issue #432 (Gavin M. Roy)
|
|
||||||
- a3ff6f2 - Default frame max should be AMQP FRAME_MAX (Gavin M. Roy)
|
|
||||||
- ff3d5cb - Remove max consumer tag test due to change in code. (Gavin M. Roy)
|
|
||||||
- 6045dda - Catch KeyError (#437) to ensure that an exception is not raised in a race condition (Gavin M. Roy)
|
|
||||||
- 0b4d53a - Address issue #441 (Gavin M. Roy)
|
|
||||||
- 180e7c4 - Update license and related files (Gavin M. Roy)
|
|
||||||
- 256ed3d - Added Jython support. (Erik Olof Gunnar Andersson)
|
|
||||||
- f73c141 - experimental work around for recursion issue. (Erik Olof Gunnar Andersson)
|
|
||||||
- a623f69 - Prevent #436 by iterating the keys and not the dict (Gavin M. Roy)
|
|
||||||
- 755fcae - Add support for authentication_failure_close, connection.blocked (Gavin M. Roy)
|
|
||||||
- c121243 - merge upstream master (Michael Laing)
|
|
||||||
- a08dc0d - add arg to channel.basic_consume (Pedro Abranches)
|
|
||||||
- 10b136d - Documentation fix (Anton Ryzhov)
|
|
||||||
- 9313307 - Fixed minor markup errors. (Jorge Puente Sarrín)
|
|
||||||
- fb3e3cf - Fix the spelling of UnsupportedAMQPFieldException (Garrett Cooper)
|
|
||||||
- 03d5da3 - connection.py: Propagate the force_channel keyword parameter to methods involved in channel creation (Michael Laing)
|
|
||||||
- 7bbcff5 - Documentation fix for basic_publish (JuhaS)
|
|
||||||
- 01dcea7 - Expose no_ack and exclusive to BlockingChannel.consume (Jeff Tang)
|
|
||||||
- d39b6aa - Fix BlockingChannel.basic_consume does not block on non-empty queues (Juhyeong Park)
|
|
||||||
- 6e1d295 - fix for issue 391 and issue 307 (Qi Fan)
|
|
||||||
- d9ffce9 - Update parameters.rst (cacovsky)
|
|
||||||
- 6afa41e - Add additional badges (Gavin M. Roy)
|
|
||||||
- a255925 - Fix return value on dns resolution issue (Laurent Eschenauer)
|
|
||||||
- 3f7466c - libev_connection: tweak docs (Michael Laing)
|
|
||||||
- 0aaed93 - libev_connection: Fix varable naming (Michael Laing)
|
|
||||||
- 0562d08 - libev_connection: Fix globals warning (Michael Laing)
|
|
||||||
- 22ada59 - libev_connection: use globals to track sigint and sigterm watchers as they are created globally within libev (Michael Laing)
|
|
||||||
- 2649b31 - Move badge [skip ci] (Gavin M. Roy)
|
|
||||||
- f70eea1 - Remove pypy and installation attempt of pyev (Gavin M. Roy)
|
|
||||||
- f32e522 - Conditionally skip external connection adapters if lib is not installed (Gavin M. Roy)
|
|
||||||
- cce97c5 - Only install pyev on python 2.7 (Gavin M. Roy)
|
|
||||||
- ff84462 - Add travis ci support (Gavin M. Roy)
|
|
||||||
- cf971da - lib_evconnection: improve signal handling; add callback (Michael Laing)
|
|
||||||
- 9adb269 - bugfix in returning a list in Py3k (Alex Chandel)
|
|
||||||
- c41d5b9 - update exception syntax for Py3k (Alex Chandel)
|
|
||||||
- c8506f1 - fix _adapter_connect (Michael Laing)
|
|
||||||
- 67cb660 - Add LibevConnection to README (Michael Laing)
|
|
||||||
- 1f9e72b - Propagate low-level connection errors to the AMQPConnectionError. (Bjorn Sandberg)
|
|
||||||
- e1da447 - Avoid race condition in _on_getok on successive basic_get() when clearing out callbacks (Jeff)
|
|
||||||
- 7a09979 - Add support for upcoming Connection.Blocked/Unblocked (Gavin M. Roy)
|
|
||||||
- 53cce88 - TwistedChannel correctly handles multi-argument deferreds. (eivanov)
|
|
||||||
- 66f8ace - Use uuid when creating unique consumer tag (Perttu Ranta-aho)
|
|
||||||
- 4ee2738 - Limit the growth of Channel._cancelled, use deque instead of list. (Perttu Ranta-aho)
|
|
||||||
- 0369aed - fix adapter references and tweak docs (Michael Laing)
|
|
||||||
- 1738c23 - retry select.select() on EINTR (Cenk Alti)
|
|
||||||
- 1e55357 - libev_connection: reset internal state on reconnect (Michael Laing)
|
|
||||||
- 708559e - libev adapter (Michael Laing)
|
|
||||||
- a6b7c8b - Prioritize EPollPoller and KQueuePoller over PollPoller and SelectPoller (Anton Ryzhov)
|
|
||||||
- 53400d3 - Handle socket errors in PollPoller and EPollPoller Correctly check 'select.poll' availability (Anton Ryzhov)
|
|
||||||
- a6dc969 - Use dict.keys & items instead of iterkeys & iteritems (Alex Chandel)
|
|
||||||
- 5c1b0d0 - Use print function syntax, in examples (Alex Chandel)
|
|
||||||
- ac9f87a - Fixed a typo in the name of the Asyncore Connection adapter (Guruprasad)
|
|
||||||
- dfbba50 - Fixed bug mentioned in Issue #357 (Erik Andersson)
|
|
||||||
- c906a2d - Drop additional flags when getting info for the hostnames, log errors (#352) (Gavin M. Roy)
|
|
||||||
- baf23dd - retry poll() on EINTR (Cenk Alti)
|
|
||||||
- 7cd8762 - Address ticket #352 catching an error when socket.getprotobyname fails (Gavin M. Roy)
|
|
||||||
- 6c3ec75 - Prep for 0.9.14 (Gavin M. Roy)
|
|
||||||
- dae7a99 - Bump to 0.9.14p0 (Gavin M. Roy)
|
|
||||||
- 620edc7 - Use default port and virtual host if omitted in URLParameters (Issue #342) (Gavin M. Roy)
|
|
||||||
- 42a8787 - Move the exception handling inside the while loop (Gavin M. Roy)
|
|
||||||
- 10e0264 - Fix connection back pressure detection issue #347 (Gavin M. Roy)
|
|
||||||
- 0bfd670 - Fixed mistake in commit 3a19d65. (Erik Andersson)
|
|
||||||
- da04bc0 - Fixed Unknown state on disconnect error message generated when closing connections. (Erik Andersson)
|
|
||||||
- 3a19d65 - Alternative solution to fix #345. (Erik Andersson)
|
|
||||||
- abf9fa8 - switch to sendall to send entire frame (Dustin Koupal)
|
|
||||||
- 9ce8ce4 - Fixed the async publisher example to work with reconnections (Raphaël De Giusti)
|
|
||||||
- 511028a - Fix typo in TwistedChannel docstring (cacovsky)
|
|
||||||
- 8b69e5a - calls self._adapter_disconnect() instead of self.disconnect() which doesn't actually exist #294 (Mark Unsworth)
|
|
||||||
- 06a5cf8 - add NullHandler to prevent logging warnings (Cenk Alti)
|
|
||||||
- f404a9a - Fix #337 cannot start ioloop after stop (Ralf Nyren)
|
|
||||||
|
|
||||||
0.9.13 - 2013-05-15
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
`0.9.13 <https://github.com/pika/pika/compare/0.9.12...0.9.13>`_
|
|
||||||
|
|
||||||
**Major Changes**
|
|
||||||
|
|
||||||
- IPv6 Support with thanks to Alessandro Tagliapietra for initial prototype
|
|
||||||
- Officially remove support for <= Python 2.5 even though it was broken already
|
|
||||||
- Drop pika.simplebuffer.SimpleBuffer in favor of the Python stdlib collections.deque object
|
|
||||||
- New default object for receiving content is a "bytes" object which is a str wrapper in Python 2, but paves the way for Python 3 support
|
|
||||||
- New "Raw" mode for frame decoding content frames (#334) addresses issues #331, #229 added by Garth Williamson
|
|
||||||
- Connection and Disconnection logic refactored, allowing for cleaner separation of protocol logic and socket handling logic as well as connection state management
|
|
||||||
- New "on_open_error_callback" argument in creating connection objects and new Connection.add_on_open_error_callback method
|
|
||||||
- New Connection.connect method to cleanly allow for reconnection code
|
|
||||||
- Support for all AMQP field types, using protocol specified signed/unsigned unpacking
|
|
||||||
|
|
||||||
**Backwards Incompatible Changes**
|
|
||||||
|
|
||||||
- Method signature for creating connection objects has new argument "on_open_error_callback" which is positionally before "on_close_callback"
|
|
||||||
- Internal callback variable names in connection.Connection have been renamed and constants used. If you relied on any of these callbacks outside of their internal use, make sure to check out the new constants.
|
|
||||||
- Connection._connect method, which was an internal only method is now deprecated and will raise a DeprecationWarning. If you relied on this method, your code needs to change.
|
|
||||||
- pika.simplebuffer has been removed
|
|
||||||
|
|
||||||
**Bugfixes**
|
|
||||||
|
|
||||||
- BlockingConnection consumer generator does not free buffer when exited (#328)
|
|
||||||
- Unicode body payloads in the blocking adapter raises exception (#333)
|
|
||||||
- Support "b" short-short-int AMQP data type (#318)
|
|
||||||
- Docstring type fix in adapters/select_connection (#316) fix by Rikard Hultén
|
|
||||||
- IPv6 not supported (#309)
|
|
||||||
- Stop the HeartbeatChecker when connection is closed (#307)
|
|
||||||
- Unittest fix for SelectConnection (#336) fix by Erik Andersson
|
|
||||||
- Handle condition where no connection or socket exists but SelectConnection needs a timeout for retrying a connection (#322)
|
|
||||||
- TwistedAdapter lagging behind BaseConnection changes (#321) fix by Jan Urbański
|
|
||||||
|
|
||||||
**Other**
|
|
||||||
|
|
||||||
- Refactored documentation
|
|
||||||
- Added Twisted Adapter example (#314) by nolinksoft
|
|
||||||
|
|
||||||
0.9.12 - 2013-03-18
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
`0.9.12 <https://github.com/pika/pika/compare/0.9.11...0.9.12>`_
|
|
||||||
|
|
||||||
**Bugfixes**
|
|
||||||
|
|
||||||
- New timeout id hashing was not unique
|
|
||||||
|
|
||||||
0.9.11 - 2013-03-17
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
`0.9.11 <https://github.com/pika/pika/compare/0.9.10...0.9.11>`_
|
|
||||||
|
|
||||||
**Bugfixes**
|
|
||||||
|
|
||||||
- Address inconsistent channel close callback documentation and add the signature
|
|
||||||
change to the TwistedChannel class (#305)
|
|
||||||
- Address a missed timeout related internal data structure name change
|
|
||||||
introduced in the SelectConnection 0.9.10 release. Update all connection
|
|
||||||
adapters to use same signature and docstring (#306).
|
|
||||||
|
|
||||||
0.9.10 - 2013-03-16
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
`0.9.10 <https://github.com/pika/pika/compare/0.9.9...0.9.10>`_
|
|
||||||
|
|
||||||
**Bugfixes**
|
|
||||||
|
|
||||||
- Fix timeout in twisted adapter (Submitted by cellscape)
|
|
||||||
- Fix blocking_connection poll timer resolution to milliseconds (Submitted by cellscape)
|
|
||||||
- Fix channel._on_close() without a method frame (Submitted by Richard Boulton)
|
|
||||||
- Addressed exception on close (Issue #279 - fix by patcpsc)
|
|
||||||
- 'messages' not initialized in BlockingConnection.cancel() (Issue #289 - fix by Mik Kocikowski)
|
|
||||||
- Make queue_unbind behave like queue_bind (Issue #277)
|
|
||||||
- Address closing behavioral issues for connections and channels (Issue #275)
|
|
||||||
- Pass a Method frame to Channel._on_close in Connection._on_disconnect (Submitted by Jan Urbański)
|
|
||||||
- Fix channel closed callback signature in the Twisted adapter (Submitted by Jan Urbański)
|
|
||||||
- Don't stop the IOLoop on connection close for in the Twisted adapter (Submitted by Jan Urbański)
|
|
||||||
- Update the asynchronous examples to fix reconnecting and have it work
|
|
||||||
- Warn if the socket was closed such as if RabbitMQ dies without a Close frame
|
|
||||||
- Fix URLParameters ssl_options (Issue #296)
|
|
||||||
- Add state to BlockingConnection addressing (Issue #301)
|
|
||||||
- Encode unicode body content prior to publishing (Issue #282)
|
|
||||||
- Fix an issue with unicode keys in BasicProperties headers key (Issue #280)
|
|
||||||
- Change how timeout ids are generated (Issue #254)
|
|
||||||
- Address post close state issues in Channel (Issue #302)
|
|
||||||
|
|
||||||
**Behavior changes**
|
|
||||||
|
|
||||||
- Change core connection communication behavior to prefer outbound writes over reads, addressing a recursion issue
|
|
||||||
- Update connection on close callbacks, changing callback method signature
|
|
||||||
- Update channel on close callbacks, changing callback method signature
|
|
||||||
- Give more info in the ChannelClosed exception
|
|
||||||
- Change the constructor signature for BlockingConnection, block open/close callbacks
|
|
||||||
- Disable the use of add_on_open_callback/add_on_close_callback methods in BlockingConnection
|
|
||||||
|
|
||||||
|
|
||||||
0.9.9 - 2013-01-29
|
|
||||||
------------------
|
|
||||||
|
|
||||||
`0.9.9 <https://github.com/pika/pika/compare/0.9.8...0.9.9>`_
|
|
||||||
|
|
||||||
**Bugfixes**
|
|
||||||
|
|
||||||
- Only remove the tornado_connection.TornadoConnection file descriptor from the IOLoop if it's still open (Issue #221)
|
|
||||||
- Allow messages with no body (Issue #227)
|
|
||||||
- Allow for empty routing keys (Issue #224)
|
|
||||||
- Don't raise an exception when trying to send a frame to a closed connection (Issue #229)
|
|
||||||
- Only send a Connection.CloseOk if the connection is still open. (Issue #236 - Fix by noleaf)
|
|
||||||
- Fix timeout threshold in blocking connection - (Issue #232 - Fix by Adam Flynn)
|
|
||||||
- Fix closing connection while a channel is still open (Issue #230 - Fix by Adam Flynn)
|
|
||||||
- Fixed misleading warning and exception messages in BaseConnection (Issue #237 - Fix by Tristan Penman)
|
|
||||||
- Pluralised and altered the wording of the AMQPConnectionError exception (Issue #237 - Fix by Tristan Penman)
|
|
||||||
- Fixed _adapter_disconnect in TornadoConnection class (Issue #237 - Fix by Tristan Penman)
|
|
||||||
- Fixing hang when closing connection without any channel in BlockingConnection (Issue #244 - Fix by Ales Teska)
|
|
||||||
- Remove the process_timeouts() call in SelectConnection (Issue #239)
|
|
||||||
- Change the string validation to basestring for host connection parameters (Issue #231)
|
|
||||||
- Add a poller to the BlockingConnection to address latency issues introduced in Pika 0.9.8 (Issue #242)
|
|
||||||
- reply_code and reply_text is not set in ChannelException (Issue #250)
|
|
||||||
- Add the missing constraint parameter for Channel._on_return callback processing (Issue #257 - Fix by patcpsc)
|
|
||||||
- Channel callbacks not being removed from callback manager when channel is closed or deleted (Issue #261)
|
|
||||||
|
|
||||||
0.9.8 - 2012-11-18
|
|
||||||
------------------
|
|
||||||
|
|
||||||
`0.9.8 <https://github.com/pika/pika/compare/0.9.7...0.9.8>`_
|
|
||||||
|
|
||||||
**Bugfixes**
|
|
||||||
|
|
||||||
- Channel.queue_declare/BlockingChannel.queue_declare not setting up callbacks properly for an empty queue name (Issue #218)
|
|
||||||
- Channel.queue_bind/BlockingChannel.queue_bind not allowing empty routing key
|
|
||||||
- Connection._on_connection_closed calling wrong method in Channel (Issue #219)
|
|
||||||
- Fix tx_commit and tx_rollback bugs in BlockingChannel (Issue #217)
|
|
||||||
|
|
||||||
0.9.7 - 2012-11-11
|
|
||||||
------------------
|
|
||||||
|
|
||||||
`0.9.7 <https://github.com/pika/pika/compare/0.9.6...0.9.7>`_
|
|
||||||
|
|
||||||
**New features**
|
|
||||||
|
|
||||||
- generator based consumer in BlockingChannel (See :doc:`examples/blocking_consumer_generator` for example)
|
|
||||||
|
|
||||||
**Changes**
|
|
||||||
|
|
||||||
- BlockingChannel._send_method will only wait if explicitly told to
|
|
||||||
|
|
||||||
**Bugfixes**
|
|
||||||
|
|
||||||
- Added the exchange "type" parameter back but issue a DeprecationWarning
|
|
||||||
- Don't require a queue name in Channel.queue_declare()
|
|
||||||
- Fixed KeyError when processing timeouts (Issue # 215 - Fix by Raphael De Giusti)
|
|
||||||
- Don't try and close channels when the connection is closed (Issue #216 - Fix by Charles Law)
|
|
||||||
- Don't raise UnexpectedFrame exceptions; log them instead
|
|
||||||
- Handle multiple synchronous RPC calls made without waiting for the call result (Issues #192, #204, #211)
|
|
||||||
- Typo in docs (Issue #207 Fix by Luca Wehrstedt)
|
|
||||||
- Only sleep on connection failure when retry attempts are > 0 (Issue #200)
|
|
||||||
- Bypass _rpc method and just send frames for Basic.Ack, Basic.Nack, Basic.Reject (Issue #205)
|
|
||||||
|
|
||||||
0.9.6 - 2012-10-29
|
|
||||||
------------------
|
|
||||||
|
|
||||||
`0.9.6 <https://github.com/pika/pika/compare/0.9.5...0.9.6>`_
|
|
||||||
|
|
||||||
**New features**
|
|
||||||
|
|
||||||
- URLParameters
|
|
||||||
- BlockingChannel.start_consuming() and BlockingChannel.stop_consuming()
|
|
||||||
- Delivery Confirmations
|
|
||||||
- Improved unittests
|
|
||||||
|
|
||||||
**Major bugfix areas**
|
|
||||||
|
|
||||||
- Connection handling
|
|
||||||
- Blocking functionality in the BlockingConnection
|
|
||||||
- SSL
|
|
||||||
- UTF-8 Handling
|
|
||||||
|
|
||||||
**Removals**
|
|
||||||
|
|
||||||
- pika.reconnection_strategies
|
|
||||||
- pika.channel.ChannelTransport
|
|
||||||
- pika.log
|
|
||||||
- pika.template
|
|
||||||
- examples directory
|
|
||||||
|
|
||||||
0.9.5 - 2011-03-29
|
|
||||||
------------------
|
|
||||||
|
|
||||||
`0.9.5 <https://github.com/pika/pika/compare/0.9.4...0.9.5>`_
|
|
||||||
|
|
||||||
**Changelog**
|
|
||||||
|
|
||||||
- Scope changes with adapter IOLoops and CallbackManager allowing for cleaner, multi-threaded operation
|
|
||||||
- Add support for Confirm.Select with channel.Channel.confirm_delivery()
|
|
||||||
- Add examples of delivery confirmation to examples (demo_send_confirmed.py)
|
|
||||||
- Update uses of log.warn with warning.warn for TCP Back-pressure alerting
|
|
||||||
- License boilerplate updated to simplify license text in source files
|
|
||||||
- Increment the timeout in select_connection.SelectPoller reducing CPU utilization
|
|
||||||
- Bug fix in Heartbeat frame delivery addressing issue #35
|
|
||||||
- Remove abuse of pika.log.method_call through a majority of the code
|
|
||||||
- Rename of key modules: table to data, frames to frame
|
|
||||||
- Cleanup of frame module and related classes
|
|
||||||
- Restructure of tests and test runner
|
|
||||||
- Update functional tests to respect RABBITMQ_HOST, RABBITMQ_PORT environment variables
|
|
||||||
- Bug fixes to reconnection_strategies module
|
|
||||||
- Fix the scale of timeout for PollPoller to be specified in milliseconds
|
|
||||||
- Remove mutable default arguments in RPC calls
|
|
||||||
- Add data type validation to RPC calls
|
|
||||||
- Move optional credentials erasing out of connection.Connection into credentials module
|
|
||||||
- Add support to allow for additional external credential types
|
|
||||||
- Add a NullHandler to prevent the 'No handlers could be found for logger "pika"' error message when not using pika.log in a client app at all.
|
|
||||||
- Clean up all examples to make them easier to read and use
|
|
||||||
- Move documentation into its own repository https://github.com/pika/documentation
|
|
||||||
|
|
||||||
- channel.py
|
|
||||||
|
|
||||||
- Move channel.MAX_CHANNELS constant from connection.CHANNEL_MAX
|
|
||||||
- Add default value of None to ChannelTransport.rpc
|
|
||||||
- Validate callback and acceptable replies parameters in ChannelTransport.RPC
|
|
||||||
- Remove unused connection attribute from Channel
|
|
||||||
|
|
||||||
- connection.py
|
|
||||||
|
|
||||||
- Remove unused import of struct
|
|
||||||
- Remove direct import of pika.credentials.PlainCredentials
|
|
||||||
- Change to import pika.credentials
|
|
||||||
- Move CHANNEL_MAX to channel.MAX_CHANNELS
|
|
||||||
- Change ConnectionParameters initialization parameter heartbeat to boolean
|
|
||||||
- Validate all inbound parameter types in ConnectionParameters
|
|
||||||
- Remove the Connection._erase_credentials stub method in favor of letting the Credentials object deal with that itself.
|
|
||||||
- Warn if the credentials object intends on erasing the credentials and a reconnection strategy other than NullReconnectionStrategy is specified.
|
|
||||||
- Change the default types for callback and acceptable_replies in Connection._rpc
|
|
||||||
- Validate the callback and acceptable_replies data types in Connection._rpc
|
|
||||||
|
|
||||||
- adapters.blocking_connection.BlockingConnection
|
|
||||||
|
|
||||||
- Addition of _adapter_disconnect to blocking_connection.BlockingConnection
|
|
||||||
- Add timeout methods to BlockingConnection addressing issue #41
|
|
||||||
- BlockingConnection didn't allow you to register more than one consumer callback because basic_consume was overridden to block immediately. New behavior allows you to do so.
|
|
||||||
- Removed overriding of base basic_consume and basic_cancel methods. Now uses underlying Channel versions of those methods.
|
|
||||||
- Added start_consuming() method to BlockingChannel to start the consumption loop.
|
|
||||||
- Updated stop_consuming() to iterate through all the registered consumers in self._consumers and issue a basic_cancel.
|
|
|
@ -1,68 +0,0 @@
# Contributing

## Test Coverage

To contribute to Pika, please make sure that any new features or changes
to existing functionality **include test coverage**.

*Pull requests that add or change code without coverage have a much lower chance
of being accepted.*


## Prerequisites

The Pika test suite has a couple of requirements:

* Dependencies from `test-requirements.txt` are installed
* A RabbitMQ node with all defaults is running on `localhost:5672`


## Installing Dependencies

To install the dependencies needed to run Pika tests, use

    pip install -r test-requirements.txt

which on Python 3 might look like this:

    pip3 install -r test-requirements.txt


## Running Tests

To run all test suites, use

    nosetests

Note that some tests are OS-specific (e.g. epoll on Linux
or kqueue on MacOS and BSD). Those will be skipped
automatically.

If you would like to run TLS/SSL tests, use the following procedure:

* Create a `rabbitmq.conf` file:

```
sed -e "s#PIKA_DIR#$PWD#g" ./testdata/rabbitmq.conf.in > ./testdata/rabbitmq.conf
```

* Start RabbitMQ and use the configuration file you just created. An example command
  that works with the `generic-unix` package is as follows:

```
$ RABBITMQ_CONFIG_FILE=/path/to/pika/testdata/rabbitmq.conf ./sbin/rabbitmq-server
```

* Run the tests indicating that TLS/SSL connections should be used:

```
PIKA_TEST_TLS=true nosetests
```


## Code Formatting

Please format your code using [yapf](http://pypi.python.org/pypi/yapf)
with ``google`` style prior to issuing your pull request. *Note: only format those
lines that you have changed in your pull request. If you format an entire file and
change code outside of the scope of your PR, it will likely be rejected.*
|
|
@ -1,25 +0,0 @@
Copyright (c) 2009-2017, Tony Garnock-Jones, Gavin M. Roy, Pivotal and others.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of the Pika project nor the names of its contributors may be used
   to endorse or promote products derived from this software without specific
   prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
@ -1,2 +0,0 @@
|
||||||
include LICENSE
|
|
||||||
include README.rst
|
|
|
@ -1,157 +0,0 @@
Pika
====
Pika is a RabbitMQ (AMQP 0-9-1) client library for Python.

|Version| |Python versions| |Status| |Coverage| |License| |Docs|

Introduction
------------
Pika is a pure-Python implementation of the AMQP 0-9-1 protocol including RabbitMQ's
extensions.

- Python 2.7 and 3.4+ are supported.

- Since threads aren't appropriate to every situation, it doesn't
  require threads. It takes care not to forbid them, either. The same
  goes for greenlets, callbacks, continuations and generators. It is
  not necessarily thread-safe, however, and your mileage will vary.

- People may be using direct sockets, plain old ``select()``,
  or any of the wide variety of ways of getting network events to and from a
  Python application. Pika tries to stay compatible with all of these, and to
  make adapting it to a new environment as simple as possible.
Documentation
-------------
Pika's documentation can be found at `https://pika.readthedocs.io <https://pika.readthedocs.io>`_

Example
-------
Here is the simplest example of use, sending a message with the BlockingConnection adapter:

.. code:: python

    import pika

    connection = pika.BlockingConnection()
    channel = connection.channel()
    channel.basic_publish(exchange='example',
                          routing_key='test',
                          body='Test Message')
    connection.close()

And an example of writing a blocking consumer:

.. code:: python

    import pika

    connection = pika.BlockingConnection()
    channel = connection.channel()

    for method_frame, properties, body in channel.consume('test'):

        # Display the message parts and ack the message
        print(method_frame, properties, body)
        channel.basic_ack(method_frame.delivery_tag)

        # Escape out of the loop after 10 messages
        if method_frame.delivery_tag == 10:
            break

    # Cancel the consumer and return any pending messages
    requeued_messages = channel.cancel()
    print('Requeued %i messages' % requeued_messages)
    connection.close()
Pika provides the following adapters
------------------------------------

- AsyncioConnection - adapter for the Python 3 asyncio event loop
- BlockingConnection - enables blocking, synchronous operation on top of the library for simple uses
- SelectConnection - fast asynchronous adapter
- TornadoConnection - adapter for use with the Tornado IO Loop http://tornadoweb.org
- TwistedConnection - adapter for use with the Twisted asynchronous package http://twistedmatrix.com/
Requesting message ACKs from another thread
-------------------------------------------
The single-threaded usage constraint of an individual Pika connection adapter
instance may result in a dropped AMQP/stream connection due to AMQP heartbeat
timeout in consumers that take a long time to process an incoming message. A
common solution is to delegate processing of the incoming messages to another
thread, while the connection adapter's thread continues to service its ioloop's
message pump, permitting AMQP heartbeats and other I/O to be serviced in a
timely fashion.

Messages processed in another thread may not be ACK'ed directly from that thread,
since all accesses to the connection adapter instance must be from a single
thread - the thread that is running the adapter's ioloop. However, this may be
accomplished by requesting a callback to be executed in the adapter's ioloop
thread. For example, the callback function's implementation might look like this:

.. code:: python

    def ack_message(channel, delivery_tag):
        """Note that `channel` must be the same pika channel instance via which
        the message being ACKed was retrieved (AMQP protocol constraint).
        """
        if channel.is_open:
            channel.basic_ack(delivery_tag)
        else:
            # Channel is already closed, so we can't ACK this message;
            # log and/or do something that makes sense for your app in this case.
            pass

The code running in the other thread may request the ``ack_message()`` function
to be executed in the connection adapter's ioloop thread using an
adapter-specific mechanism:

- :py:class:`pika.BlockingConnection` abstracts its ioloop from the application
  and thus exposes :py:meth:`pika.BlockingConnection.add_callback_threadsafe()`.
  Refer to this method's docstring for additional information. For example:

  .. code:: python

      connection.add_callback_threadsafe(functools.partial(ack_message, channel, delivery_tag))

- When using a non-blocking connection adapter, such as
  :py:class:`pika.adapters.asyncio_connection.AsyncioConnection` or
  :py:class:`pika.SelectConnection`, you use the underlying asynchronous
  framework's native API for requesting an ioloop-bound callback from
  another thread. For example, ``SelectConnection``'s ``IOLoop`` provides
  ``add_callback_threadsafe()``, Tornado's ``IOLoop`` has
  ``add_callback()``, while asyncio's event loop exposes
  ``call_soon_threadsafe()``.

This threadsafe callback request mechanism may also be used to delegate
publishing of messages, etc., from a background thread to the connection
adapter's thread.
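For a fuller picture, here is a minimal sketch of the delegation pattern described above, assuming a ``BlockingConnection`` whose ioloop runs in the main thread and reusing the ``ack_message()`` helper defined earlier; the queue name and the ``process()`` call are placeholders, not part of Pika's API:

.. code:: python

    import functools
    import threading

    import pika


    def do_work(connection, channel, delivery_tag, body):
        # Runs in the worker thread; long message processing happens here.
        process(body)  # placeholder for your application's own work
        # The ACK itself must be performed on the connection's thread:
        connection.add_callback_threadsafe(
            functools.partial(ack_message, channel, delivery_tag))


    def on_message(channel, method_frame, properties, body, connection):
        # Hand the message off so the ioloop thread stays responsive.
        worker = threading.Thread(
            target=do_work,
            args=(connection, channel, method_frame.delivery_tag, body))
        worker.start()


    connection = pika.BlockingConnection()
    channel = connection.channel()
    channel.basic_consume(
        functools.partial(on_message, connection=connection), 'test')
    channel.start_consuming()

With a non-blocking adapter the same structure applies, except the worker thread would call the framework's own primitive (for example asyncio's ``loop.call_soon_threadsafe()``) instead of ``add_callback_threadsafe()``.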
Contributing
------------
To contribute to pika, please make sure that any new features or changes
to existing functionality **include test coverage**.

*Pull requests that add or change code without coverage will most likely be rejected.*

Additionally, please format your code using `yapf <http://pypi.python.org/pypi/yapf>`_
with ``google`` style prior to issuing your pull request. *Note: only format those
lines that you have changed in your pull request. If you format an entire file and
change code outside of the scope of your PR, it will likely be rejected.*
.. |Version| image:: https://img.shields.io/pypi/v/pika.svg?
   :target: http://badge.fury.io/py/pika

.. |Python versions| image:: https://img.shields.io/pypi/pyversions/pika.svg
   :target: https://pypi.python.org/pypi/pika

.. |Status| image:: https://img.shields.io/travis/pika/pika.svg?
   :target: https://travis-ci.org/pika/pika

.. |Coverage| image:: https://img.shields.io/codecov/c/github/pika/pika.svg?
   :target: https://codecov.io/github/pika/pika?branch=master

.. |License| image:: https://img.shields.io/pypi/l/pika.svg?
   :target: https://pika.readthedocs.io

.. |Docs| image:: https://readthedocs.org/projects/pika/badge/?version=stable
   :target: https://pika.readthedocs.io
   :alt: Documentation Status
@ -1,107 +0,0 @@
|
||||||
# Windows build and test of Pika
|
|
||||||
|
|
||||||
environment:
|
|
||||||
erlang_download_url: "http://erlang.org/download/otp_win64_19.3.exe"
|
|
||||||
erlang_exe_path: "C:\\Users\\appveyor\\erlang_19.3.exe"
|
|
||||||
erlang_home_dir: "C:\\Users\\appveyor\\erlang"
|
|
||||||
erlang_erts_version: "erts-8.3"
|
|
||||||
|
|
||||||
rabbitmq_version: 3.7.4
|
|
||||||
rabbitmq_installer_download_url: "https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.7.4/rabbitmq-server-3.7.4.exe"
|
|
||||||
rabbitmq_installer_path: "C:\\Users\\appveyor\\rabbitmq-server-3.7.4.exe"
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
- PYTHON_ARCH: "32"
|
|
||||||
PYTHONHOME: "C:\\Python27"
|
|
||||||
PIKA_TEST_TLS: false
|
|
||||||
- PYTHON_ARCH: "32"
|
|
||||||
PYTHONHOME: "C:\\Python27"
|
|
||||||
PIKA_TEST_TLS: true
|
|
||||||
|
|
||||||
|
|
||||||
cache:
|
|
||||||
# RabbitMQ is a pretty big package, so caching it in hopes of expediting the
|
|
||||||
# runtime
|
|
||||||
- "%erlang_exe_path%"
|
|
||||||
- "%rabbitmq_installer_path%"
|
|
||||||
|
|
||||||
|
|
||||||
install:
|
|
||||||
- SET PYTHONPATH=%PYTHONHOME%
|
|
||||||
- SET PATH=%PYTHONHOME%\Scripts;%PYTHONHOME%;%PATH%
|
|
||||||
|
|
||||||
# For diagnostics
|
|
||||||
- ECHO %PYTHONPATH%
|
|
||||||
- ECHO %PATH%
|
|
||||||
- python --version
|
|
||||||
|
|
||||||
- ECHO Upgrading pip...
|
|
||||||
- python -m pip install --upgrade pip setuptools
|
|
||||||
- pip --version
|
|
||||||
|
|
||||||
- ECHO Installing wheel...
|
|
||||||
- pip install wheel
|
|
||||||
|
|
||||||
|
|
||||||
build_script:
|
|
||||||
- ECHO Building distributions...
|
|
||||||
- python setup.py sdist bdist bdist_wheel
|
|
||||||
- DIR /s *.whl
|
|
||||||
|
|
||||||
|
|
||||||
artifacts:
|
|
||||||
- path: 'dist\*.whl'
|
|
||||||
name: pika wheel
|
|
||||||
|
|
||||||
|
|
||||||
before_test:
|
|
||||||
# Install test requirements
|
|
||||||
- ECHO Installing pika...
|
|
||||||
- python setup.py install
|
|
||||||
|
|
||||||
- ECHO Installing pika test requirements...
|
|
||||||
- pip install -r test-requirements.txt
|
|
||||||
|
|
||||||
# List contents of C:\ to help debug caching of rabbitmq artifacts
|
|
||||||
# - DIR C:\
|
|
||||||
|
|
||||||
- ps: $webclient=New-Object System.Net.WebClient
|
|
||||||
|
|
||||||
- ECHO Downloading Erlang...
|
|
||||||
- ps: if (-Not (Test-Path "$env:erlang_exe_path")) { $webclient.DownloadFile("$env:erlang_download_url", "$env:erlang_exe_path") } else { Write-Host "Found" $env:erlang_exe_path "in cache." }
|
|
||||||
|
|
||||||
- ECHO Installing Erlang...
|
|
||||||
- start /B /WAIT %erlang_exe_path% /S /D=%erlang_home_dir%
|
|
||||||
- set ERLANG_HOME=%erlang_home_dir%
|
|
||||||
|
|
||||||
- ECHO Downloading RabbitMQ...
|
|
||||||
- ps: if (-Not (Test-Path "$env:rabbitmq_installer_path")) { $webclient.DownloadFile("$env:rabbitmq_installer_download_url", "$env:rabbitmq_installer_path") } else { Write-Host "Found" $env:rabbitmq_installer_path "in cache." }
|
|
||||||
|
|
||||||
- ECHO Creating directory %AppData%\RabbitMQ...
|
|
||||||
- ps: New-Item -ItemType Directory -ErrorAction Continue -Path "$env:AppData/RabbitMQ"
|
|
||||||
|
|
||||||
- ECHO Creating RabbitMQ configuration file in %AppData%\RabbitMQ...
|
|
||||||
- ps: Get-Content C:/Projects/pika/testdata/rabbitmq.conf.in | %{ $_ -replace 'PIKA_DIR', 'C:/projects/pika' } | Set-Content -Path "$env:AppData/RabbitMQ/rabbitmq.conf"
|
|
||||||
- ps: Get-Content "$env:AppData/RabbitMQ/rabbitmq.conf"
|
|
||||||
|
|
||||||
- ECHO Creating Erlang cookie files...
|
|
||||||
- ps: '[System.IO.File]::WriteAllText("C:\Users\appveyor\.erlang.cookie", "PIKAISTHEBEST", [System.Text.Encoding]::ASCII)'
|
|
||||||
- ps: '[System.IO.File]::WriteAllText("C:\Windows\System32\config\systemprofile\.erlang.cookie", "PIKAISTHEBEST", [System.Text.Encoding]::ASCII)'
|
|
||||||
|
|
||||||
- ECHO Installing and starting RabbitMQ with default config...
|
|
||||||
- start /B /WAIT %rabbitmq_installer_path% /S
|
|
||||||
- ps: (Get-Service -Name RabbitMQ).Status
|
|
||||||
|
|
||||||
- ECHO Waiting for epmd to report that RabbitMQ has started...
|
|
||||||
- ps: 'C:\projects\pika\testdata\wait-epmd.ps1'
|
|
||||||
- ps: 'C:\projects\pika\testdata\wait-rabbitmq.ps1'
|
|
||||||
|
|
||||||
- ECHO Getting RabbitMQ status...
|
|
||||||
- cmd /c "C:\Program Files\RabbitMQ Server\rabbitmq_server-%rabbitmq_version%\sbin\rabbitmqctl.bat" status
|
|
||||||
|
|
||||||
|
|
||||||
test_script:
|
|
||||||
- nosetests
|
|
||||||
|
|
||||||
# Since Pika is source-only there's no need to deploy from Windows
|
|
||||||
deploy: false
|
|
|
@ -1,153 +0,0 @@
|
||||||
# Makefile for Sphinx documentation
|
|
||||||
#
|
|
||||||
|
|
||||||
# You can set these variables from the command line.
|
|
||||||
SPHINXOPTS =
|
|
||||||
SPHINXBUILD = sphinx-build
|
|
||||||
PAPER =
|
|
||||||
BUILDDIR = _build
|
|
||||||
|
|
||||||
# Internal variables.
|
|
||||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
|
||||||
PAPEROPT_letter = -D latex_paper_size=letter
|
|
||||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
|
||||||
# the i18n builder cannot share the environment and doctrees with the others
|
|
||||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
|
||||||
|
|
||||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
|
|
||||||
|
|
||||||
help:
|
|
||||||
@echo "Please use \`make <target>' where <target> is one of"
|
|
||||||
@echo " html to make standalone HTML files"
|
|
||||||
@echo " dirhtml to make HTML files named index.html in directories"
|
|
||||||
@echo " singlehtml to make a single large HTML file"
|
|
||||||
@echo " pickle to make pickle files"
|
|
||||||
@echo " json to make JSON files"
|
|
||||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
|
||||||
@echo " qthelp to make HTML files and a qthelp project"
|
|
||||||
@echo " devhelp to make HTML files and a Devhelp project"
|
|
||||||
@echo " epub to make an epub"
|
|
||||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
|
||||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
|
||||||
@echo " text to make text files"
|
|
||||||
@echo " man to make manual pages"
|
|
||||||
@echo " texinfo to make Texinfo files"
|
|
||||||
@echo " info to make Texinfo files and run them through makeinfo"
|
|
||||||
@echo " gettext to make PO message catalogs"
|
|
||||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
|
||||||
@echo " linkcheck to check all external links for integrity"
|
|
||||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
|
||||||
|
|
||||||
clean:
|
|
||||||
-rm -rf $(BUILDDIR)/*
|
|
||||||
|
|
||||||
html:
|
|
||||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
|
||||||
|
|
||||||
dirhtml:
|
|
||||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
|
||||||
|
|
||||||
singlehtml:
|
|
||||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
|
||||||
|
|
||||||
pickle:
|
|
||||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can process the pickle files."
|
|
||||||
|
|
||||||
json:
|
|
||||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can process the JSON files."
|
|
||||||
|
|
||||||
htmlhelp:
|
|
||||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
|
||||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
|
||||||
|
|
||||||
qthelp:
|
|
||||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
|
||||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
|
||||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pika.qhcp"
|
|
||||||
@echo "To view the help file:"
|
|
||||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pika.qhc"
|
|
||||||
|
|
||||||
devhelp:
|
|
||||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
|
||||||
@echo
|
|
||||||
@echo "Build finished."
|
|
||||||
@echo "To view the help file:"
|
|
||||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/pika"
|
|
||||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pika"
|
|
||||||
@echo "# devhelp"
|
|
||||||
|
|
||||||
epub:
|
|
||||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
|
||||||
|
|
||||||
latex:
|
|
||||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
|
||||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
|
||||||
"(use \`make latexpdf' here to do that automatically)."
|
|
||||||
|
|
||||||
latexpdf:
|
|
||||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
|
||||||
@echo "Running LaTeX files through pdflatex..."
|
|
||||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
|
||||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
|
||||||
|
|
||||||
text:
|
|
||||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
|
||||||
|
|
||||||
man:
|
|
||||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
|
||||||
|
|
||||||
texinfo:
|
|
||||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
|
||||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
|
||||||
"(use \`make info' here to do that automatically)."
|
|
||||||
|
|
||||||
info:
|
|
||||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
|
||||||
@echo "Running Texinfo files through makeinfo..."
|
|
||||||
make -C $(BUILDDIR)/texinfo info
|
|
||||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
|
||||||
|
|
||||||
gettext:
|
|
||||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
|
||||||
|
|
||||||
changes:
|
|
||||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
|
||||||
@echo
|
|
||||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
|
||||||
|
|
||||||
linkcheck:
|
|
||||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
|
||||||
@echo
|
|
||||||
@echo "Link check complete; look for any errors in the above output " \
|
|
||||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
|
||||||
|
|
||||||
doctest:
|
|
||||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
|
||||||
@echo "Testing of doctests in the sources finished, look at the " \
|
|
||||||
"results in $(BUILDDIR)/doctest/output.txt."
|
|
|
@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, '../')

# needs_sphinx = '1.0'

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
              'sphinx.ext.intersphinx']

intersphinx_mapping = {'python': ('https://docs.python.org/3/',
                                  'https://docs.python.org/3/objects.inv'),
                       'tornado': ('http://www.tornadoweb.org/en/stable/',
                                   'http://www.tornadoweb.org/en/stable/objects.inv')}

templates_path = ['_templates']

source_suffix = '.rst'
master_doc = 'index'

project = 'pika'
copyright = '2009-2017, Tony Garnock-Jones, Gavin M. Roy, Pivotal Software, Inc and contributors.'

import pika
release = pika.__version__
version = '.'.join(release.split('.')[0:1])

exclude_patterns = ['_build']
add_function_parentheses = True
add_module_names = True
show_authors = True
pygments_style = 'sphinx'
modindex_common_prefix = ['pika']
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'pikadoc'
@ -1,104 +0,0 @@
|
||||||
Contributors
|
|
||||||
============
|
|
||||||
The following people have directly contributed code by way of new features and/or bug fixes to Pika:
|
|
||||||
|
|
||||||
- Gavin M. Roy
|
|
||||||
- Tony Garnock-Jones
|
|
||||||
- Vitaly Kruglikov
|
|
||||||
- Michael Laing
|
|
||||||
- Marek Majkowski
|
|
||||||
- Jan Urbański
|
|
||||||
- Brian K. Jones
|
|
||||||
- Ask Solem
|
|
||||||
- ml
|
|
||||||
- Will
|
|
||||||
- atatsu
|
|
||||||
- Fredrik Svensson
|
|
||||||
- Pedro Abranches
|
|
||||||
- Kyösti Herrala
|
|
||||||
- Erik Andersson
|
|
||||||
- Charles Law
|
|
||||||
- Alex Chandel
|
|
||||||
- Tristan Penman
|
|
||||||
- Raphaël De Giusti
|
|
||||||
- Jozef Van Eenbergen
|
|
||||||
- Josh Braegger
|
|
||||||
- Jason J. W. Williams
|
|
||||||
- James Mutton
|
|
||||||
- Cenk Alti
|
|
||||||
- Asko Soukka
|
|
||||||
- Antti Haapala
|
|
||||||
- Anton Ryzhov
|
|
||||||
- cellscape
|
|
||||||
- cacovsky
|
|
||||||
- bra-fsn
|
|
||||||
- ateska
|
|
||||||
- Roey Berman
|
|
||||||
- Robert Weidlich
|
|
||||||
- Riccardo Cirimelli
|
|
||||||
- Perttu Ranta-aho
|
|
||||||
- Pau Gargallo
|
|
||||||
- Kane
|
|
||||||
- Kamil Kisiel
|
|
||||||
- Jonty Wareing
|
|
||||||
- Jonathan Kirsch
|
|
||||||
- Jacek 'Forger' Całusiński
|
|
||||||
- Garth Williamson
|
|
||||||
- Erik Olof Gunnar Andersson
|
|
||||||
- David Strauss
|
|
||||||
- Anton V. Yanchenko
|
|
||||||
- Alexey Myasnikov
|
|
||||||
- Alessandro Tagliapietra
|
|
||||||
- Adam Flynn
|
|
||||||
- skftn
|
|
||||||
- saarni
|
|
||||||
- pavlobaron
|
|
||||||
- nonleaf
|
|
||||||
- markcf
|
|
||||||
- george y
|
|
||||||
- eivanov
|
|
||||||
- bstemshorn
|
|
||||||
- a-tal
|
|
||||||
- Yang Yang
|
|
||||||
- Stuart Longland
|
|
||||||
- Sigurd Høgsbro
|
|
||||||
- Sean Dwyer
|
|
||||||
- Samuel Stauffer
|
|
||||||
- Roberto Decurnex
|
|
||||||
- Rikard Hultén
|
|
||||||
- Richard Boulton
|
|
||||||
- Ralf Nyren
|
|
||||||
- Qi Fan
|
|
||||||
- Peter Magnusson
|
|
||||||
- Pankrat
|
|
||||||
- Olivier Le Thanh Duong
|
|
||||||
- Njal Karevoll
|
|
||||||
- Milan Skuhra
|
|
||||||
- Mik Kocikowski
|
|
||||||
- Michael Kenney
|
|
||||||
- Mark Unsworth
|
|
||||||
- Luca Wehrstedt
|
|
||||||
- Laurent Eschenauer
|
|
||||||
- Lars van de Kerkhof
|
|
||||||
- Kyösti Herrala
|
|
||||||
- Juhyeong Park
|
|
||||||
- JuhaS
|
|
||||||
- Josh Hansen
|
|
||||||
- Jorge Puente Sarrín
|
|
||||||
- Jeff Tang
|
|
||||||
- Jeff Fein-Worton
|
|
||||||
- Jeff
|
|
||||||
- Hunter Morris
|
|
||||||
- Guruprasad
|
|
||||||
- Garrett Cooper
|
|
||||||
- Frank Slaughter
|
|
||||||
- Dustin Koupal
|
|
||||||
- Bjorn Sandberg
|
|
||||||
- Axel Eirola
|
|
||||||
- Andrew Smith
|
|
||||||
- Andrew Grigorev
|
|
||||||
- Andrew
|
|
||||||
- Allard Hoeve
|
|
||||||
- A.Shaposhnikov
|
|
||||||
|
|
||||||
*Contributors listed by commit count.*
|
|
|
@ -1,23 +0,0 @@
Usage Examples
==============

Pika can be used in a variety of ways, from the synchronous BlockingConnection adapter to the various asynchronous connection adapters. The following examples illustrate the ways that you can use Pika in your projects.

.. toctree::
   :glob:
   :maxdepth: 1

   examples/using_urlparameters
   examples/connecting_async
   examples/blocking_basic_get
   examples/blocking_consume
   examples/blocking_consumer_generator
   examples/comparing_publishing_sync_async
   examples/blocking_delivery_confirmations
   examples/blocking_publish_mandatory
   examples/asynchronous_consumer_example
   examples/asynchronous_publisher_example
   examples/twisted_example
   examples/tornado_consumer
   examples/tls_mutual_authentication
   examples/tls_server_authentication
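Before diving into the individual pages, the skeleton below shows the shape shared by the asynchronous examples in this section, in contrast to the synchronous ``BlockingConnection`` style: a minimal, illustrative ``SelectConnection`` publisher sketch in which the URL, queue name and message body are placeholders and error handling is omitted.

.. code:: python

    import pika

    params = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')


    def on_channel_open(channel):
        # With the channel open we can publish; real code would declare
        # exchanges and queues first, as the asynchronous examples do.
        channel.basic_publish('', 'example', 'hello from SelectConnection')


    def on_connection_open(connection):
        connection.channel(on_open_callback=on_channel_open)


    connection = pika.SelectConnection(params, on_connection_open)
    try:
        # Everything happens inside the ioloop via the callbacks above.
        connection.ioloop.start()
    except KeyboardInterrupt:
        connection.close()
        connection.ioloop.start()  # let the close handshake complete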
@ -1,357 +0,0 @@
|
||||||
Asynchronous consumer example
|
|
||||||
=============================
|
|
||||||
The following example implements a consumer that will respond to RPC commands sent from RabbitMQ. For example, it will reconnect if RabbitMQ closes the connection and will shut down if RabbitMQ cancels the consumer or closes the channel. While it may look intimidating, each method is very short and represents an individual action that a consumer can take.
|
|
||||||
|
|
||||||
consumer.py::
|
|
||||||
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import pika
|
|
||||||
|
|
||||||
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
|
|
||||||
'-35s %(lineno) -5d: %(message)s')
|
|
||||||
LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class ExampleConsumer(object):
|
|
||||||
"""This is an example consumer that will handle unexpected interactions
|
|
||||||
with RabbitMQ such as channel and connection closures.
|
|
||||||
|
|
||||||
If RabbitMQ closes the connection, it will reopen it. You should
|
|
||||||
look at the output, as there are limited reasons why the connection may
|
|
||||||
be closed, which usually are tied to permission related issues or
|
|
||||||
socket timeouts.
|
|
||||||
|
|
||||||
If the channel is closed, it will indicate a problem with one of the
|
|
||||||
commands that were issued and that should surface in the output as well.
|
|
||||||
|
|
||||||
"""
|
|
||||||
EXCHANGE = 'message'
|
|
||||||
EXCHANGE_TYPE = 'topic'
|
|
||||||
QUEUE = 'text'
|
|
||||||
ROUTING_KEY = 'example.text'
|
|
||||||
|
|
||||||
def __init__(self, amqp_url):
|
|
||||||
"""Create a new instance of the consumer class, passing in the AMQP
|
|
||||||
URL used to connect to RabbitMQ.
|
|
||||||
|
|
||||||
:param str amqp_url: The AMQP url to connect with
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._connection = None
|
|
||||||
self._channel = None
|
|
||||||
self._closing = False
|
|
||||||
self._consumer_tag = None
|
|
||||||
self._url = amqp_url
|
|
||||||
|
|
||||||
def connect(self):
|
|
||||||
"""This method connects to RabbitMQ, returning the connection handle.
|
|
||||||
When the connection is established, the on_connection_open method
|
|
||||||
will be invoked by pika.
|
|
||||||
|
|
||||||
:rtype: pika.SelectConnection
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Connecting to %s', self._url)
|
|
||||||
return pika.SelectConnection(pika.URLParameters(self._url),
|
|
||||||
self.on_connection_open,
|
|
||||||
stop_ioloop_on_close=False)
|
|
||||||
|
|
||||||
def on_connection_open(self, unused_connection):
|
|
||||||
"""This method is called by pika once the connection to RabbitMQ has
|
|
||||||
been established. It passes the handle to the connection object in
|
|
||||||
case we need it, but in this case, we'll just mark it unused.
|
|
||||||
|
|
||||||
:type unused_connection: pika.SelectConnection
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Connection opened')
|
|
||||||
self.add_on_connection_close_callback()
|
|
||||||
self.open_channel()
|
|
||||||
|
|
||||||
def add_on_connection_close_callback(self):
|
|
||||||
"""This method adds an on close callback that will be invoked by pika
|
|
||||||
when RabbitMQ closes the connection to the publisher unexpectedly.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Adding connection close callback')
|
|
||||||
self._connection.add_on_close_callback(self.on_connection_closed)
|
|
||||||
|
|
||||||
def on_connection_closed(self, connection, reply_code, reply_text):
|
|
||||||
"""This method is invoked by pika when the connection to RabbitMQ is
|
|
||||||
closed unexpectedly. Since it is unexpected, we will reconnect to
|
|
||||||
RabbitMQ if it disconnects.
|
|
||||||
|
|
||||||
:param pika.connection.Connection connection: The closed connection obj
|
|
||||||
:param int reply_code: The server provided reply_code if given
|
|
||||||
:param str reply_text: The server provided reply_text if given
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._channel = None
|
|
||||||
if self._closing:
|
|
||||||
self._connection.ioloop.stop()
|
|
||||||
else:
|
|
||||||
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
|
|
||||||
reply_code, reply_text)
|
|
||||||
self._connection.add_timeout(5, self.reconnect)
|
|
||||||
|
|
||||||
def reconnect(self):
|
|
||||||
"""Will be invoked by the IOLoop timer if the connection is
|
|
||||||
closed. See the on_connection_closed method.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# This is the old connection IOLoop instance, stop its ioloop
|
|
||||||
self._connection.ioloop.stop()
|
|
||||||
|
|
||||||
if not self._closing:
|
|
||||||
|
|
||||||
# Create a new connection
|
|
||||||
self._connection = self.connect()
|
|
||||||
|
|
||||||
# There is now a new connection, needs a new ioloop to run
|
|
||||||
self._connection.ioloop.start()
|
|
||||||
|
|
||||||
def open_channel(self):
|
|
||||||
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
|
|
||||||
command. When RabbitMQ responds that the channel is open, the
|
|
||||||
on_channel_open callback will be invoked by pika.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Creating a new channel')
|
|
||||||
self._connection.channel(on_open_callback=self.on_channel_open)
|
|
||||||
|
|
||||||
def on_channel_open(self, channel):
|
|
||||||
"""This method is invoked by pika when the channel has been opened.
|
|
||||||
The channel object is passed in so we can make use of it.
|
|
||||||
|
|
||||||
Since the channel is now open, we'll declare the exchange to use.
|
|
||||||
|
|
||||||
:param pika.channel.Channel channel: The channel object
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Channel opened')
|
|
||||||
self._channel = channel
|
|
||||||
self.add_on_channel_close_callback()
|
|
||||||
self.setup_exchange(self.EXCHANGE)
|
|
||||||
|
|
||||||
def add_on_channel_close_callback(self):
|
|
||||||
"""This method tells pika to call the on_channel_closed method if
|
|
||||||
RabbitMQ unexpectedly closes the channel.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Adding channel close callback')
|
|
||||||
self._channel.add_on_close_callback(self.on_channel_closed)
|
|
||||||
|
|
||||||
def on_channel_closed(self, channel, reply_code, reply_text):
|
|
||||||
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
|
|
||||||
Channels are usually closed if you attempt to do something that
|
|
||||||
violates the protocol, such as re-declare an exchange or queue with
|
|
||||||
different parameters. In this case, we'll close the connection
|
|
||||||
to shutdown the object.
|
|
||||||
|
|
||||||
:param pika.channel.Channel: The closed channel
|
|
||||||
:param int reply_code: The numeric reason the channel was closed
|
|
||||||
:param str reply_text: The text reason the channel was closed
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.warning('Channel %i was closed: (%s) %s',
|
|
||||||
channel, reply_code, reply_text)
|
|
||||||
self._connection.close()
|
|
||||||
|
|
||||||
def setup_exchange(self, exchange_name):
|
|
||||||
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
|
|
||||||
command. When it is complete, the on_exchange_declareok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param str|unicode exchange_name: The name of the exchange to declare
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Declaring exchange %s', exchange_name)
|
|
||||||
self._channel.exchange_declare(self.on_exchange_declareok,
|
|
||||||
exchange_name,
|
|
||||||
self.EXCHANGE_TYPE)
|
|
||||||
|
|
||||||
def on_exchange_declareok(self, unused_frame):
|
|
||||||
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
|
|
||||||
command.
|
|
||||||
|
|
||||||
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Exchange declared')
|
|
||||||
self.setup_queue(self.QUEUE)
|
|
||||||
|
|
||||||
def setup_queue(self, queue_name):
|
|
||||||
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
|
|
||||||
command. When it is complete, the on_queue_declareok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param str|unicode queue_name: The name of the queue to declare.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Declaring queue %s', queue_name)
|
|
||||||
self._channel.queue_declare(self.on_queue_declareok, queue_name)
|
|
||||||
|
|
||||||
def on_queue_declareok(self, method_frame):
|
|
||||||
"""Method invoked by pika when the Queue.Declare RPC call made in
|
|
||||||
setup_queue has completed. In this method we will bind the queue
|
|
||||||
and exchange together with the routing key by issuing the Queue.Bind
|
|
||||||
RPC command. When this command is complete, the on_bindok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Binding %s to %s with %s',
|
|
||||||
self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)
|
|
||||||
self._channel.queue_bind(self.on_bindok, self.QUEUE,
|
|
||||||
self.EXCHANGE, self.ROUTING_KEY)
|
|
||||||
|
|
||||||
def on_bindok(self, unused_frame):
|
|
||||||
"""Invoked by pika when the Queue.Bind method has completed. At this
|
|
||||||
point we will start consuming messages by calling start_consuming
|
|
||||||
which will invoke the needed RPC commands to start the process.
|
|
||||||
|
|
||||||
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Queue bound')
|
|
||||||
self.start_consuming()
|
|
||||||
|
|
||||||
def start_consuming(self):
|
|
||||||
"""This method sets up the consumer by first calling
|
|
||||||
add_on_cancel_callback so that the object is notified if RabbitMQ
|
|
||||||
cancels the consumer. It then issues the Basic.Consume RPC command
|
|
||||||
which returns the consumer tag that is used to uniquely identify the
|
|
||||||
consumer with RabbitMQ. We keep the value to use it when we want to
|
|
||||||
cancel consuming. The on_message method is passed in as a callback pika
|
|
||||||
will invoke when a message is fully received.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Issuing consumer related RPC commands')
|
|
||||||
self.add_on_cancel_callback()
|
|
||||||
self._consumer_tag = self._channel.basic_consume(self.on_message,
|
|
||||||
self.QUEUE)
|
|
||||||
|
|
||||||
def add_on_cancel_callback(self):
|
|
||||||
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
|
|
||||||
for some reason. If RabbitMQ does cancel the consumer,
|
|
||||||
on_consumer_cancelled will be invoked by pika.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Adding consumer cancellation callback')
|
|
||||||
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
|
|
||||||
|
|
||||||
def on_consumer_cancelled(self, method_frame):
|
|
||||||
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
|
|
||||||
receiving messages.
|
|
||||||
|
|
||||||
:param pika.frame.Method method_frame: The Basic.Cancel frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
|
|
||||||
method_frame)
|
|
||||||
if self._channel:
|
|
||||||
self._channel.close()
|
|
||||||
|
|
||||||
def on_message(self, unused_channel, basic_deliver, properties, body):
|
|
||||||
"""Invoked by pika when a message is delivered from RabbitMQ. The
|
|
||||||
channel is passed for your convenience. The basic_deliver object that
|
|
||||||
is passed in carries the exchange, routing key, delivery tag and
|
|
||||||
a redelivered flag for the message. The properties passed in is an
|
|
||||||
instance of BasicProperties with the message properties and the body
|
|
||||||
is the message that was sent.
|
|
||||||
|
|
||||||
:param pika.channel.Channel unused_channel: The channel object
|
|
||||||
:param pika.Spec.Basic.Deliver: basic_deliver method
|
|
||||||
:param pika.Spec.BasicProperties: properties
|
|
||||||
:param str|unicode body: The message body
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Received message # %s from %s: %s',
|
|
||||||
basic_deliver.delivery_tag, properties.app_id, body)
|
|
||||||
self.acknowledge_message(basic_deliver.delivery_tag)
|
|
||||||
|
|
||||||
def acknowledge_message(self, delivery_tag):
|
|
||||||
"""Acknowledge the message delivery from RabbitMQ by sending a
|
|
||||||
Basic.Ack RPC method for the delivery tag.
|
|
||||||
|
|
||||||
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Acknowledging message %s', delivery_tag)
|
|
||||||
self._channel.basic_ack(delivery_tag)
|
|
||||||
|
|
||||||
def stop_consuming(self):
|
|
||||||
"""Tell RabbitMQ that you would like to stop consuming by sending the
|
|
||||||
Basic.Cancel RPC command.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if self._channel:
|
|
||||||
LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
|
|
||||||
self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
|
|
||||||
|
|
||||||
def on_cancelok(self, unused_frame):
|
|
||||||
"""This method is invoked by pika when RabbitMQ acknowledges the
|
|
||||||
cancellation of a consumer. At this point we will close the channel.
|
|
||||||
This will invoke the on_channel_closed method once the channel has been
|
|
||||||
closed, which will in-turn close the connection.
|
|
||||||
|
|
||||||
:param pika.frame.Method unused_frame: The Basic.CancelOk frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
|
|
||||||
self.close_channel()
|
|
||||||
|
|
||||||
def close_channel(self):
|
|
||||||
"""Call to close the channel with RabbitMQ cleanly by issuing the
|
|
||||||
Channel.Close RPC command.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Closing the channel')
|
|
||||||
self._channel.close()
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
"""Run the example consumer by connecting to RabbitMQ and then
|
|
||||||
starting the IOLoop to block and allow the SelectConnection to operate.
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._connection = self.connect()
|
|
||||||
self._connection.ioloop.start()
|
|
||||||
|
|
||||||
def stop(self):
|
|
||||||
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
|
|
||||||
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
|
|
||||||
will be invoked by pika, which will then close the channel and
|
|
||||||
connection. The IOLoop is started again because this method is invoked
|
|
||||||
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
|
|
||||||
exception stops the IOLoop which needs to be running for pika to
|
|
||||||
communicate with RabbitMQ. All of the commands issued prior to starting
|
|
||||||
the IOLoop will be buffered but not processed.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Stopping')
|
|
||||||
self._closing = True
|
|
||||||
self.stop_consuming()
|
|
||||||
self._connection.ioloop.start()
|
|
||||||
LOGGER.info('Stopped')
|
|
||||||
|
|
||||||
def close_connection(self):
|
|
||||||
"""This method closes the connection to RabbitMQ."""
|
|
||||||
LOGGER.info('Closing connection')
|
|
||||||
self._connection.close()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
|
|
||||||
example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F')
|
|
||||||
try:
|
|
||||||
example.run()
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
example.stop()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
|
|
|
@ -1,359 +0,0 @@
|
||||||
Asynchronous publisher example
|
|
||||||
==============================
|
|
||||||
The following example implements a publisher that will respond to RPC commands sent from RabbitMQ and uses delivery confirmations. It will reconnect if RabbitMQ closes the connection and will shut down if RabbitMQ closes the channel. While it may look intimidating, each method is very short and represents an individual action that a publisher can take.
|
|
||||||
|
|
||||||
publisher.py::
|
|
||||||
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import pika
|
|
||||||
import json
|
|
||||||
|
|
||||||
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
|
|
||||||
'-35s %(lineno) -5d: %(message)s')
|
|
||||||
LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class ExamplePublisher(object):
|
|
||||||
"""This is an example publisher that will handle unexpected interactions
|
|
||||||
with RabbitMQ such as channel and connection closures.
|
|
||||||
|
|
||||||
If RabbitMQ closes the connection, it will reopen it. You should
|
|
||||||
look at the output, as there are limited reasons why the connection may
|
|
||||||
be closed, which usually are tied to permission related issues or
|
|
||||||
socket timeouts.
|
|
||||||
|
|
||||||
It uses delivery confirmations and illustrates one way to keep track of
|
|
||||||
messages that have been sent and if they've been confirmed by RabbitMQ.
|
|
||||||
|
|
||||||
"""
|
|
||||||
EXCHANGE = 'message'
|
|
||||||
EXCHANGE_TYPE = 'topic'
|
|
||||||
PUBLISH_INTERVAL = 1
|
|
||||||
QUEUE = 'text'
|
|
||||||
ROUTING_KEY = 'example.text'
|
|
||||||
|
|
||||||
def __init__(self, amqp_url):
|
|
||||||
"""Setup the example publisher object, passing in the URL we will use
|
|
||||||
to connect to RabbitMQ.
|
|
||||||
|
|
||||||
:param str amqp_url: The URL for connecting to RabbitMQ
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._connection = None
|
|
||||||
self._channel = None
|
|
||||||
|
|
||||||
self._deliveries = None
|
|
||||||
self._acked = None
|
|
||||||
self._nacked = None
|
|
||||||
self._message_number = None
|
|
||||||
|
|
||||||
self._stopping = False
|
|
||||||
self._url = amqp_url
|
|
||||||
|
|
||||||
def connect(self):
|
|
||||||
"""This method connects to RabbitMQ, returning the connection handle.
|
|
||||||
When the connection is established, the on_connection_open method
|
|
||||||
will be invoked by pika. If you want the reconnection to work, make
|
|
||||||
sure you set stop_ioloop_on_close to False, which is not the default
|
|
||||||
behavior of this adapter.
|
|
||||||
|
|
||||||
:rtype: pika.SelectConnection
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Connecting to %s', self._url)
|
|
||||||
return pika.SelectConnection(pika.URLParameters(self._url),
|
|
||||||
on_open_callback=self.on_connection_open,
|
|
||||||
on_close_callback=self.on_connection_closed,
|
|
||||||
stop_ioloop_on_close=False)
|
|
||||||
|
|
||||||
def on_connection_open(self, unused_connection):
|
|
||||||
"""This method is called by pika once the connection to RabbitMQ has
|
|
||||||
been established. It passes the handle to the connection object in
|
|
||||||
case we need it, but in this case, we'll just mark it unused.
|
|
||||||
|
|
||||||
:type unused_connection: pika.SelectConnection
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Connection opened')
|
|
||||||
self.open_channel()
|
|
||||||
|
|
||||||
def on_connection_closed(self, connection, reply_code, reply_text):
|
|
||||||
"""This method is invoked by pika when the connection to RabbitMQ is
|
|
||||||
closed unexpectedly. Since it is unexpected, we will reconnect to
|
|
||||||
RabbitMQ if it disconnects.
|
|
||||||
|
|
||||||
:param pika.connection.Connection connection: The closed connection obj
|
|
||||||
:param int reply_code: The server provided reply_code if given
|
|
||||||
:param str reply_text: The server provided reply_text if given
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._channel = None
|
|
||||||
if self._stopping:
|
|
||||||
self._connection.ioloop.stop()
|
|
||||||
else:
|
|
||||||
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
|
|
||||||
reply_code, reply_text)
|
|
||||||
self._connection.add_timeout(5, self._connection.ioloop.stop)
|
|
||||||
|
|
||||||
def open_channel(self):
|
|
||||||
"""This method will open a new channel with RabbitMQ by issuing the
|
|
||||||
Channel.Open RPC command. When RabbitMQ confirms the channel is open
|
|
||||||
by sending the Channel.OpenOK RPC reply, the on_channel_open method
|
|
||||||
will be invoked.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Creating a new channel')
|
|
||||||
self._connection.channel(on_open_callback=self.on_channel_open)
|
|
||||||
|
|
||||||
def on_channel_open(self, channel):
|
|
||||||
"""This method is invoked by pika when the channel has been opened.
|
|
||||||
The channel object is passed in so we can make use of it.
|
|
||||||
|
|
||||||
Since the channel is now open, we'll declare the exchange to use.
|
|
||||||
|
|
||||||
:param pika.channel.Channel channel: The channel object
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Channel opened')
|
|
||||||
self._channel = channel
|
|
||||||
self.add_on_channel_close_callback()
|
|
||||||
self.setup_exchange(self.EXCHANGE)
|
|
||||||
|
|
||||||
def add_on_channel_close_callback(self):
|
|
||||||
"""This method tells pika to call the on_channel_closed method if
|
|
||||||
RabbitMQ unexpectedly closes the channel.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Adding channel close callback')
|
|
||||||
self._channel.add_on_close_callback(self.on_channel_closed)
|
|
||||||
|
|
||||||
def on_channel_closed(self, channel, reply_code, reply_text):
|
|
||||||
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
|
|
||||||
Channels are usually closed if you attempt to do something that
|
|
||||||
violates the protocol, such as re-declare an exchange or queue with
|
|
||||||
different parameters. In this case, we'll close the connection
|
|
||||||
to shutdown the object.
|
|
||||||
|
|
||||||
:param pika.channel.Channel channel: The closed channel
|
|
||||||
:param int reply_code: The numeric reason the channel was closed
|
|
||||||
:param str reply_text: The text reason the channel was closed
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text)
|
|
||||||
self._channel = None
|
|
||||||
if not self._stopping:
|
|
||||||
self._connection.close()
|
|
||||||
|
|
||||||
def setup_exchange(self, exchange_name):
|
|
||||||
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
|
|
||||||
command. When it is complete, the on_exchange_declareok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param str|unicode exchange_name: The name of the exchange to declare
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Declaring exchange %s', exchange_name)
|
|
||||||
self._channel.exchange_declare(self.on_exchange_declareok,
|
|
||||||
exchange_name,
|
|
||||||
self.EXCHANGE_TYPE)
|
|
||||||
|
|
||||||
def on_exchange_declareok(self, unused_frame):
|
|
||||||
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
|
|
||||||
command.
|
|
||||||
|
|
||||||
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Exchange declared')
|
|
||||||
self.setup_queue(self.QUEUE)
|
|
||||||
|
|
||||||
def setup_queue(self, queue_name):
|
|
||||||
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
|
|
||||||
command. When it is complete, the on_queue_declareok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param str|unicode queue_name: The name of the queue to declare.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Declaring queue %s', queue_name)
|
|
||||||
self._channel.queue_declare(self.on_queue_declareok, queue_name)
|
|
||||||
|
|
||||||
def on_queue_declareok(self, method_frame):
|
|
||||||
"""Method invoked by pika when the Queue.Declare RPC call made in
|
|
||||||
setup_queue has completed. In this method we will bind the queue
|
|
||||||
and exchange together with the routing key by issuing the Queue.Bind
|
|
||||||
RPC command. When this command is complete, the on_bindok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Binding %s to %s with %s',
|
|
||||||
self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)
|
|
||||||
self._channel.queue_bind(self.on_bindok, self.QUEUE,
|
|
||||||
self.EXCHANGE, self.ROUTING_KEY)
|
|
||||||
|
|
||||||
def on_bindok(self, unused_frame):
|
|
||||||
"""This method is invoked by pika when it receives the Queue.BindOk
|
|
||||||
response from RabbitMQ. Since we know we're now setup and bound, it's
|
|
||||||
time to start publishing."""
|
|
||||||
LOGGER.info('Queue bound')
|
|
||||||
self.start_publishing()
|
|
||||||
|
|
||||||
def start_publishing(self):
|
|
||||||
"""This method will enable delivery confirmations and schedule the
|
|
||||||
first message to be sent to RabbitMQ
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Issuing consumer related RPC commands')
|
|
||||||
self.enable_delivery_confirmations()
|
|
||||||
self.schedule_next_message()
|
|
||||||
|
|
||||||
def enable_delivery_confirmations(self):
|
|
||||||
"""Send the Confirm.Select RPC method to RabbitMQ to enable delivery
|
|
||||||
confirmations on the channel. The only way to turn this off is to close
|
|
||||||
the channel and create a new one.
|
|
||||||
|
|
||||||
When the message is confirmed from RabbitMQ, the
|
|
||||||
on_delivery_confirmation method will be invoked passing in a Basic.Ack
|
|
||||||
or Basic.Nack method from RabbitMQ that will indicate which messages it
|
|
||||||
is confirming or rejecting.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Issuing Confirm.Select RPC command')
|
|
||||||
self._channel.confirm_delivery(self.on_delivery_confirmation)
|
|
||||||
|
|
||||||
def on_delivery_confirmation(self, method_frame):
|
|
||||||
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
|
|
||||||
command, passing in either a Basic.Ack or Basic.Nack frame with
|
|
||||||
the delivery tag of the message that was published. The delivery tag
|
|
||||||
is an integer counter indicating the message number that was sent
|
|
||||||
on the channel via Basic.Publish. Here we're just doing housekeeping
|
|
||||||
to keep track of stats and remove message numbers that we expect
|
|
||||||
a delivery confirmation of from the list used to keep track of messages
|
|
||||||
that are pending confirmation.
|
|
||||||
|
|
||||||
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
|
|
||||||
LOGGER.info('Received %s for delivery tag: %i',
|
|
||||||
confirmation_type,
|
|
||||||
method_frame.method.delivery_tag)
|
|
||||||
if confirmation_type == 'ack':
|
|
||||||
self._acked += 1
|
|
||||||
elif confirmation_type == 'nack':
|
|
||||||
self._nacked += 1
|
|
||||||
self._deliveries.remove(method_frame.method.delivery_tag)
|
|
||||||
LOGGER.info('Published %i messages, %i have yet to be confirmed, '
|
|
||||||
'%i were acked and %i were nacked',
|
|
||||||
self._message_number, len(self._deliveries),
|
|
||||||
self._acked, self._nacked)
|
|
||||||
|
|
||||||
def schedule_next_message(self):
|
|
||||||
"""If we are not closing our connection to RabbitMQ, schedule another
|
|
||||||
message to be delivered in PUBLISH_INTERVAL seconds.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Scheduling next message for %0.1f seconds',
|
|
||||||
self.PUBLISH_INTERVAL)
|
|
||||||
self._connection.add_timeout(self.PUBLISH_INTERVAL,
|
|
||||||
self.publish_message)
|
|
||||||
|
|
||||||
def publish_message(self):
|
|
||||||
"""If the class is not stopping, publish a message to RabbitMQ,
|
|
||||||
appending the message number that was sent to a list of deliveries.
|
|
||||||
This list will be used to check for delivery confirmations in the
|
|
||||||
on_delivery_confirmations method.
|
|
||||||
|
|
||||||
Once the message has been sent, schedule another message to be sent.
|
|
||||||
The main reason I put scheduling in was just so you can get a good idea
|
|
||||||
of how the process is flowing by slowing down and speeding up the
|
|
||||||
delivery intervals by changing the PUBLISH_INTERVAL constant in the
|
|
||||||
class.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if self._channel is None or not self._channel.is_open:
|
|
||||||
return
|
|
||||||
|
|
||||||
hdrs = {u'مفتاح': u' قيمة',
|
|
||||||
u'键': u'值',
|
|
||||||
u'キー': u'値'}
|
|
||||||
properties = pika.BasicProperties(app_id='example-publisher',
|
|
||||||
content_type='application/json',
|
|
||||||
headers=hdrs)
|
|
||||||
|
|
||||||
message = u'مفتاح قيمة 键 值 キー 値'
|
|
||||||
self._channel.basic_publish(self.EXCHANGE, self.ROUTING_KEY,
|
|
||||||
json.dumps(message, ensure_ascii=False),
|
|
||||||
properties)
|
|
||||||
self._message_number += 1
|
|
||||||
self._deliveries.append(self._message_number)
|
|
||||||
LOGGER.info('Published message # %i', self._message_number)
|
|
||||||
self.schedule_next_message()
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
"""Run the example code by connecting and then starting the IOLoop.
|
|
||||||
|
|
||||||
"""
|
|
||||||
while not self._stopping:
|
|
||||||
self._connection = None
|
|
||||||
self._deliveries = []
|
|
||||||
self._acked = 0
|
|
||||||
self._nacked = 0
|
|
||||||
self._message_number = 0
|
|
||||||
|
|
||||||
try:
|
|
||||||
self._connection = self.connect()
|
|
||||||
self._connection.ioloop.start()
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
self.stop()
|
|
||||||
if (self._connection is not None and
|
|
||||||
not self._connection.is_closed):
|
|
||||||
# Finish closing
|
|
||||||
self._connection.ioloop.start()
|
|
||||||
|
|
||||||
LOGGER.info('Stopped')
|
|
||||||
|
|
||||||
def stop(self):
|
|
||||||
"""Stop the example by closing the channel and connection. We
|
|
||||||
set a flag here so that we stop scheduling new messages to be
|
|
||||||
published. The IOLoop is started because this method is
|
|
||||||
invoked by the Try/Catch below when KeyboardInterrupt is caught.
|
|
||||||
Starting the IOLoop again will allow the publisher to cleanly
|
|
||||||
disconnect from RabbitMQ.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Stopping')
|
|
||||||
self._stopping = True
|
|
||||||
self.close_channel()
|
|
||||||
self.close_connection()
|
|
||||||
|
|
||||||
def close_channel(self):
|
|
||||||
"""Invoke this command to close the channel with RabbitMQ by sending
|
|
||||||
the Channel.Close RPC command.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if self._channel is not None:
|
|
||||||
LOGGER.info('Closing the channel')
|
|
||||||
self._channel.close()
|
|
||||||
|
|
||||||
def close_connection(self):
|
|
||||||
"""This method closes the connection to RabbitMQ."""
|
|
||||||
if self._connection is not None:
|
|
||||||
LOGGER.info('Closing connection')
|
|
||||||
self._connection.close()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
|
|
||||||
|
|
||||||
# Connect to localhost:5672 as guest with the password guest and virtual host "/" (%2F)
|
|
||||||
example = ExamplePublisher('amqp://guest:guest@localhost:5672/%2F?connection_attempts=3&heartbeat_interval=3600')
|
|
||||||
example.run()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
|
@ -1,355 +0,0 @@
|
||||||
Asyncio Consumer
|
|
||||||
================
|
|
||||||
The following example implements a consumer using the
|
|
||||||
:class:`Asyncio adapter <pika.adapters.asyncio_connection.AsyncioConnection>` for the
|
|
||||||
`Asyncio library <https://docs.python.org/3/library/asyncio.html>`_ that will respond to RPC commands sent
|
|
||||||
from RabbitMQ. For example, it will reconnect if RabbitMQ closes the connection and will shut down if
RabbitMQ cancels the consumer or closes the channel. While it may look intimidating, each method is
very short and represents an individual action that a consumer can take.
|
|
||||||
|
|
||||||
consumer.py::
|
|
||||||
|
|
||||||
from pika import adapters
|
|
||||||
import pika
|
|
||||||
import logging
|
|
||||||
|
|
||||||
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
|
|
||||||
'-35s %(lineno) -5d: %(message)s')
|
|
||||||
|
|
||||||
LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class ExampleConsumer(object):
|
|
||||||
"""This is an example consumer that will handle unexpected interactions
|
|
||||||
with RabbitMQ such as channel and connection closures.
|
|
||||||
|
|
||||||
If RabbitMQ closes the connection, it will reopen it. You should
|
|
||||||
look at the output, as there are limited reasons why the connection may
|
|
||||||
be closed, which usually are tied to permission related issues or
|
|
||||||
socket timeouts.
|
|
||||||
|
|
||||||
If the channel is closed, it will indicate a problem with one of the
|
|
||||||
commands that were issued and that should surface in the output as well.
|
|
||||||
|
|
||||||
"""
|
|
||||||
EXCHANGE = 'message'
|
|
||||||
EXCHANGE_TYPE = 'topic'
|
|
||||||
QUEUE = 'text'
|
|
||||||
ROUTING_KEY = 'example.text'
|
|
||||||
|
|
||||||
def __init__(self, amqp_url):
|
|
||||||
"""Create a new instance of the consumer class, passing in the AMQP
|
|
||||||
URL used to connect to RabbitMQ.
|
|
||||||
|
|
||||||
:param str amqp_url: The AMQP url to connect with
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._connection = None
|
|
||||||
self._channel = None
|
|
||||||
self._closing = False
|
|
||||||
self._consumer_tag = None
|
|
||||||
self._url = amqp_url
|
|
||||||
|
|
||||||
def connect(self):
|
|
||||||
"""This method connects to RabbitMQ, returning the connection handle.
|
|
||||||
When the connection is established, the on_connection_open method
|
|
||||||
will be invoked by pika.
|
|
||||||
|
|
||||||
:rtype: pika.SelectConnection
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Connecting to %s', self._url)
|
|
||||||
return adapters.asyncio_connection.AsyncioConnection(pika.URLParameters(self._url),
|
|
||||||
self.on_connection_open)
|
|
||||||
|
|
||||||
def close_connection(self):
|
|
||||||
"""This method closes the connection to RabbitMQ."""
|
|
||||||
LOGGER.info('Closing connection')
|
|
||||||
self._connection.close()
|
|
||||||
|
|
||||||
def add_on_connection_close_callback(self):
|
|
||||||
"""This method adds an on close callback that will be invoked by pika
|
|
||||||
when RabbitMQ closes the connection to the publisher unexpectedly.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Adding connection close callback')
|
|
||||||
self._connection.add_on_close_callback(self.on_connection_closed)
|
|
||||||
|
|
||||||
def on_connection_closed(self, connection, reply_code, reply_text):
|
|
||||||
"""This method is invoked by pika when the connection to RabbitMQ is
|
|
||||||
closed unexpectedly. Since it is unexpected, we will reconnect to
|
|
||||||
RabbitMQ if it disconnects.
|
|
||||||
|
|
||||||
:param pika.connection.Connection connection: The closed connection obj
|
|
||||||
:param int reply_code: The server provided reply_code if given
|
|
||||||
:param str reply_text: The server provided reply_text if given
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._channel = None
|
|
||||||
if self._closing:
|
|
||||||
self._connection.ioloop.stop()
|
|
||||||
else:
|
|
||||||
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
|
|
||||||
reply_code, reply_text)
|
|
||||||
self._connection.add_timeout(5, self.reconnect)
|
|
||||||
|
|
||||||
def on_connection_open(self, unused_connection):
|
|
||||||
"""This method is called by pika once the connection to RabbitMQ has
|
|
||||||
been established. It passes the handle to the connection object in
|
|
||||||
case we need it, but in this case, we'll just mark it unused.
|
|
||||||
|
|
||||||
:type unused_connection: pika.SelectConnection
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Connection opened')
|
|
||||||
self.add_on_connection_close_callback()
|
|
||||||
self.open_channel()
|
|
||||||
|
|
||||||
def reconnect(self):
|
|
||||||
"""Will be invoked by the IOLoop timer if the connection is
|
|
||||||
closed. See the on_connection_closed method.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not self._closing:
|
|
||||||
|
|
||||||
# Create a new connection
|
|
||||||
self._connection = self.connect()
|
|
||||||
|
|
||||||
def add_on_channel_close_callback(self):
|
|
||||||
"""This method tells pika to call the on_channel_closed method if
|
|
||||||
RabbitMQ unexpectedly closes the channel.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Adding channel close callback')
|
|
||||||
self._channel.add_on_close_callback(self.on_channel_closed)
|
|
||||||
|
|
||||||
def on_channel_closed(self, channel, reply_code, reply_text):
|
|
||||||
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
|
|
||||||
Channels are usually closed if you attempt to do something that
|
|
||||||
violates the protocol, such as re-declare an exchange or queue with
|
|
||||||
different parameters. In this case, we'll close the connection
|
|
||||||
to shutdown the object.
|
|
||||||
|
|
||||||
:param pika.channel.Channel: The closed channel
|
|
||||||
:param int reply_code: The numeric reason the channel was closed
|
|
||||||
:param str reply_text: The text reason the channel was closed
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.warning('Channel %i was closed: (%s) %s',
|
|
||||||
channel, reply_code, reply_text)
|
|
||||||
self._connection.close()
|
|
||||||
|
|
||||||
def on_channel_open(self, channel):
|
|
||||||
"""This method is invoked by pika when the channel has been opened.
|
|
||||||
The channel object is passed in so we can make use of it.
|
|
||||||
|
|
||||||
Since the channel is now open, we'll declare the exchange to use.
|
|
||||||
|
|
||||||
:param pika.channel.Channel channel: The channel object
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Channel opened')
|
|
||||||
self._channel = channel
|
|
||||||
self.add_on_channel_close_callback()
|
|
||||||
self.setup_exchange(self.EXCHANGE)
|
|
||||||
|
|
||||||
def setup_exchange(self, exchange_name):
|
|
||||||
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
|
|
||||||
command. When it is complete, the on_exchange_declareok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param str|unicode exchange_name: The name of the exchange to declare
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Declaring exchange %s', exchange_name)
|
|
||||||
self._channel.exchange_declare(self.on_exchange_declareok,
|
|
||||||
exchange_name,
|
|
||||||
self.EXCHANGE_TYPE)
|
|
||||||
|
|
||||||
def on_exchange_declareok(self, unused_frame):
|
|
||||||
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
|
|
||||||
command.
|
|
||||||
|
|
||||||
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Exchange declared')
|
|
||||||
self.setup_queue(self.QUEUE)
|
|
||||||
|
|
||||||
def setup_queue(self, queue_name):
|
|
||||||
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
|
|
||||||
command. When it is complete, the on_queue_declareok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param str|unicode queue_name: The name of the queue to declare.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Declaring queue %s', queue_name)
|
|
||||||
self._channel.queue_declare(self.on_queue_declareok, queue_name)
|
|
||||||
|
|
||||||
def on_queue_declareok(self, method_frame):
|
|
||||||
"""Method invoked by pika when the Queue.Declare RPC call made in
|
|
||||||
setup_queue has completed. In this method we will bind the queue
|
|
||||||
and exchange together with the routing key by issuing the Queue.Bind
|
|
||||||
RPC command. When this command is complete, the on_bindok method will
|
|
||||||
be invoked by pika.
|
|
||||||
|
|
||||||
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Binding %s to %s with %s',
|
|
||||||
self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)
|
|
||||||
self._channel.queue_bind(self.on_bindok, self.QUEUE,
|
|
||||||
self.EXCHANGE, self.ROUTING_KEY)
|
|
||||||
|
|
||||||
def add_on_cancel_callback(self):
|
|
||||||
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
|
|
||||||
for some reason. If RabbitMQ does cancel the consumer,
|
|
||||||
on_consumer_cancelled will be invoked by pika.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Adding consumer cancellation callback')
|
|
||||||
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
|
|
||||||
|
|
||||||
def on_consumer_cancelled(self, method_frame):
|
|
||||||
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
|
|
||||||
receiving messages.
|
|
||||||
|
|
||||||
:param pika.frame.Method method_frame: The Basic.Cancel frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
|
|
||||||
method_frame)
|
|
||||||
if self._channel:
|
|
||||||
self._channel.close()
|
|
||||||
|
|
||||||
def acknowledge_message(self, delivery_tag):
|
|
||||||
"""Acknowledge the message delivery from RabbitMQ by sending a
|
|
||||||
Basic.Ack RPC method for the delivery tag.
|
|
||||||
|
|
||||||
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Acknowledging message %s', delivery_tag)
|
|
||||||
self._channel.basic_ack(delivery_tag)
|
|
||||||
|
|
||||||
def on_message(self, unused_channel, basic_deliver, properties, body):
|
|
||||||
"""Invoked by pika when a message is delivered from RabbitMQ. The
|
|
||||||
channel is passed for your convenience. The basic_deliver object that
|
|
||||||
is passed in carries the exchange, routing key, delivery tag and
|
|
||||||
a redelivered flag for the message. The properties passed in is an
|
|
||||||
instance of BasicProperties with the message properties and the body
|
|
||||||
is the message that was sent.
|
|
||||||
|
|
||||||
:param pika.channel.Channel unused_channel: The channel object
|
|
||||||
:param pika.Spec.Basic.Deliver: basic_deliver method
|
|
||||||
:param pika.Spec.BasicProperties: properties
|
|
||||||
:param str|unicode body: The message body
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Received message # %s from %s: %s',
|
|
||||||
basic_deliver.delivery_tag, properties.app_id, body)
|
|
||||||
self.acknowledge_message(basic_deliver.delivery_tag)
|
|
||||||
|
|
||||||
def on_cancelok(self, unused_frame):
|
|
||||||
"""This method is invoked by pika when RabbitMQ acknowledges the
|
|
||||||
cancellation of a consumer. At this point we will close the channel.
|
|
||||||
This will invoke the on_channel_closed method once the channel has been
|
|
||||||
closed, which will in-turn close the connection.
|
|
||||||
|
|
||||||
:param pika.frame.Method unused_frame: The Basic.CancelOk frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
|
|
||||||
self.close_channel()
|
|
||||||
|
|
||||||
def stop_consuming(self):
|
|
||||||
"""Tell RabbitMQ that you would like to stop consuming by sending the
|
|
||||||
Basic.Cancel RPC command.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if self._channel:
|
|
||||||
LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
|
|
||||||
self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
|
|
||||||
|
|
||||||
def start_consuming(self):
|
|
||||||
"""This method sets up the consumer by first calling
|
|
||||||
add_on_cancel_callback so that the object is notified if RabbitMQ
|
|
||||||
cancels the consumer. It then issues the Basic.Consume RPC command
|
|
||||||
which returns the consumer tag that is used to uniquely identify the
|
|
||||||
consumer with RabbitMQ. We keep the value to use it when we want to
|
|
||||||
cancel consuming. The on_message method is passed in as a callback pika
|
|
||||||
will invoke when a message is fully received.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Issuing consumer related RPC commands')
|
|
||||||
self.add_on_cancel_callback()
|
|
||||||
self._consumer_tag = self._channel.basic_consume(self.on_message,
|
|
||||||
self.QUEUE)
|
|
||||||
|
|
||||||
def on_bindok(self, unused_frame):
|
|
||||||
"""Invoked by pika when the Queue.Bind method has completed. At this
|
|
||||||
point we will start consuming messages by calling start_consuming
|
|
||||||
which will invoke the needed RPC commands to start the process.
|
|
||||||
|
|
||||||
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Queue bound')
|
|
||||||
self.start_consuming()
|
|
||||||
|
|
||||||
def close_channel(self):
|
|
||||||
"""Call to close the channel with RabbitMQ cleanly by issuing the
|
|
||||||
Channel.Close RPC command.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Closing the channel')
|
|
||||||
self._channel.close()
|
|
||||||
|
|
||||||
def open_channel(self):
|
|
||||||
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
|
|
||||||
command. When RabbitMQ responds that the channel is open, the
|
|
||||||
on_channel_open callback will be invoked by pika.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Creating a new channel')
|
|
||||||
self._connection.channel(on_open_callback=self.on_channel_open)
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
"""Run the example consumer by connecting to RabbitMQ and then
|
|
||||||
starting the IOLoop to block and allow the SelectConnection to operate.
|
|
||||||
|
|
||||||
"""
|
|
||||||
self._connection = self.connect()
|
|
||||||
self._connection.ioloop.start()
|
|
||||||
|
|
||||||
def stop(self):
|
|
||||||
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
|
|
||||||
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
|
|
||||||
will be invoked by pika, which will then closing the channel and
|
|
||||||
connection. The IOLoop is started again because this method is invoked
|
|
||||||
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
|
|
||||||
exception stops the IOLoop which needs to be running for pika to
|
|
||||||
communicate with RabbitMQ. All of the commands issued prior to starting
|
|
||||||
the IOLoop will be buffered but not processed.
|
|
||||||
|
|
||||||
"""
|
|
||||||
LOGGER.info('Stopping')
|
|
||||||
self._closing = True
|
|
||||||
self.stop_consuming()
|
|
||||||
self._connection.ioloop.start()
|
|
||||||
LOGGER.info('Stopped')
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
|
|
||||||
example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F')
|
|
||||||
try:
|
|
||||||
example.run()
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
example.stop()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
|
|
|
Using the Blocking Connection to get a message from RabbitMQ
============================================================

.. _example_blocking_basic_get:

The :py:meth:`BlockingChannel.basic_get <pika.adapters.blocking_connection.BlockingChannel.basic_get>` method will return a tuple with three members.

If the server returns a message, the first item in the tuple will be a :class:`pika.spec.Basic.GetOk` object with the current message count, the redelivered flag, the routing key that was used to put the message in the queue, and the exchange the message was published to. The second item will be a :py:class:`~pika.spec.BasicProperties` object and the third will be the message body.

If the server did not return a message, a tuple of None, None, None will be returned.

Example of getting a message and acknowledging it::

    import pika

    connection = pika.BlockingConnection()
    channel = connection.channel()
    method_frame, header_frame, body = channel.basic_get('test')
    if method_frame:
        print(method_frame, header_frame, body)
        channel.basic_ack(method_frame.delivery_tag)
    else:
        print('No message returned')

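The fields described above are available as attributes on the objects in the returned tuple. A minimal sketch of inspecting them, assuming the same ``test`` queue as above already exists and holds at least one message::

    import pika

    connection = pika.BlockingConnection()
    channel = connection.channel()
    method_frame, header_frame, body = channel.basic_get('test')
    if method_frame:
        # Basic.GetOk carries queue and routing metadata for the message
        print('Messages remaining in queue:', method_frame.message_count)
        print('Redelivered:', method_frame.redelivered)
        print('Routing key:', method_frame.routing_key)
        print('Exchange:', method_frame.exchange)
        # BasicProperties holds the message properties, e.g. the content type
        print('Content type:', header_frame.content_type)
        channel.basic_ack(method_frame.delivery_tag)
    connection.close()
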
Using the Blocking Connection to consume messages from RabbitMQ
===============================================================

.. _example_blocking_basic_consume:

The :py:meth:`BlockingChannel.basic_consume <pika.adapters.blocking_connection.BlockingChannel.basic_consume>` method assigns a callback method to be called every time that RabbitMQ delivers messages to your consuming application.

When pika calls your method, it will pass in the channel, a :py:class:`pika.spec.Basic.Deliver` object with the delivery tag, the redelivered flag, the routing key that was used to put the message in the queue, and the exchange the message was published to. The third argument will be a :py:class:`pika.spec.BasicProperties` object and the last will be the message body.

Example of consuming messages and acknowledging them::

    import pika


    def on_message(channel, method_frame, header_frame, body):
        print(method_frame.delivery_tag)
        print(body)
        print()
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)


    connection = pika.BlockingConnection()
    channel = connection.channel()
    channel.basic_consume(on_message, 'test')
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()
    connection.close()

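If the callback does non-trivial work per message, it can help to limit how many unacknowledged messages RabbitMQ delivers to this consumer at once. A minimal sketch of the same consumer with a prefetch limit of one, set via ``basic_qos`` (the ``test`` queue name is simply carried over from the example above)::

    import pika


    def on_message(channel, method_frame, header_frame, body):
        print(method_frame.delivery_tag)
        print(body)
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)


    connection = pika.BlockingConnection()
    channel = connection.channel()
    # Deliver at most one unacknowledged message at a time to this consumer
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(on_message, 'test')
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()
    connection.close()
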
Using the BlockingChannel.consume generator to consume messages
===============================================================

.. _example_blocking_basic_get:

The :py:meth:`BlockingChannel.consume <pika.adapters.blocking_connection.BlockingChannel.consume>` method is a generator that will return a tuple of method, properties and body.

When you escape out of the loop, be sure to call channel.cancel() to return any unprocessed messages.

Example of consuming messages and acknowledging them::

    import pika

    connection = pika.BlockingConnection()
    channel = connection.channel()

    # Get ten messages and break out
    for method_frame, properties, body in channel.consume('test'):

        # Display the message parts
        print(method_frame)
        print(properties)
        print(body)

        # Acknowledge the message
        channel.basic_ack(method_frame.delivery_tag)

        # Escape out of the loop after 10 messages
        if method_frame.delivery_tag == 10:
            break

    # Cancel the consumer and return any pending messages
    requeued_messages = channel.cancel()
    print('Requeued %i messages' % requeued_messages)

    # Close the channel and the connection
    channel.close()
    connection.close()

If you have pending messages in the test queue, your output should look something like::

    (pika)gmr-0x02:pika gmr$ python blocking_nack.py
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=1', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=2', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=3', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=4', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=5', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=6', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=7', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=8', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=9', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    <Basic.Deliver(['consumer_tag=ctag1.0', 'redelivered=True', 'routing_key=test', 'delivery_tag=10', 'exchange=test'])>
    <BasicProperties(['delivery_mode=1', 'content_type=text/plain'])>
    Hello World!
    Requeued 1894 messages

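If you do not want the generator to block indefinitely while the queue is idle, recent Pika versions also accept an ``inactivity_timeout`` argument to ``BlockingChannel.consume``; when no message arrives within that many seconds the generator yields a placeholder instead of a message. A hedged sketch, assuming that argument is available in your Pika version and that the placeholder may be either ``None`` or a tuple of ``None`` values::

    import pika

    connection = pika.BlockingConnection()
    channel = connection.channel()

    # Yield control roughly once per second even when the queue is empty
    for item in channel.consume('test', inactivity_timeout=1.0):
        if item is None or item[0] is None:
            # Inactivity placeholder: no message arrived within the timeout
            break
        method_frame, properties, body = item
        print(body)
        channel.basic_ack(method_frame.delivery_tag)

    # Cancel the consumer and return any pending messages
    requeued_messages = channel.cancel()
    print('Requeued %i messages' % requeued_messages)
    connection.close()
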
Using Delivery Confirmations with the BlockingConnection
========================================================

The following code demonstrates how to turn on delivery confirmations with the BlockingConnection and how to check for confirmation from RabbitMQ::

    import pika

    # Open a connection to RabbitMQ on localhost using all default parameters
    connection = pika.BlockingConnection()

    # Open the channel
    channel = connection.channel()

    # Declare the queue
    channel.queue_declare(queue="test", durable=True, exclusive=False, auto_delete=False)

    # Turn on delivery confirmations
    channel.confirm_delivery()

    # Send a message
    if channel.basic_publish(exchange='test',
                             routing_key='test',
                             body='Hello World!',
                             properties=pika.BasicProperties(content_type='text/plain',
                                                             delivery_mode=1)):
        print('Message publish was confirmed')
    else:
        print('Message could not be confirmed')

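Because ``basic_publish`` returns a boolean once delivery confirmations are enabled, the same pattern extends naturally to a batch of messages. A small sketch building on the example above; it publishes through the default exchange straight to the ``test`` queue, which is an assumption made for this sketch rather than part of the original example::

    import pika

    connection = pika.BlockingConnection()
    channel = connection.channel()
    channel.queue_declare(queue="test", durable=True, exclusive=False, auto_delete=False)
    channel.confirm_delivery()

    confirmed = 0
    for number in range(10):
        # Each publish blocks until the broker confirms (or rejects) the message
        if channel.basic_publish(exchange='',
                                 routing_key='test',
                                 body='Message %i' % number,
                                 properties=pika.BasicProperties(content_type='text/plain',
                                                                 delivery_mode=1)):
            confirmed += 1
    print('%i of 10 messages confirmed' % confirmed)
    connection.close()
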
Ensuring message delivery with the mandatory flag
=================================================

The following example demonstrates how to check if a message is delivered by setting the mandatory flag and checking the return result when using the BlockingConnection::

    import pika

    # Open a connection to RabbitMQ on localhost using all default parameters
    connection = pika.BlockingConnection()

    # Open the channel
    channel = connection.channel()

    # Declare the queue
    channel.queue_declare(queue="test", durable=True, exclusive=False, auto_delete=False)

    # Enable delivery confirmations
    channel.confirm_delivery()

    # Send a message
    if channel.basic_publish(exchange='test',
                             routing_key='test',
                             body='Hello World!',
                             properties=pika.BasicProperties(content_type='text/plain',
                                                             delivery_mode=1),
                             mandatory=True):
        print('Message was published')
    else:
        print('Message was returned')

Comparing Message Publishing with BlockingConnection and SelectConnection
=========================================================================

For those doing simple, non-asynchronous programming, :py:meth:`pika.adapters.blocking_connection.BlockingConnection` proves to be the easiest way to get up and running with Pika to publish messages.

In the following example, a connection is made to RabbitMQ listening to port *5672* on *localhost* using the username *guest* and password *guest* and virtual host */*. Once connected, a channel is opened and a message is published to the *test_exchange* exchange using the *test_routing_key* routing key. The BasicProperties value passed in sets the message to delivery mode *1* (non-persisted) with a content-type of *text/plain*. Once the message is published, the connection is closed::

    import pika

    parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')

    connection = pika.BlockingConnection(parameters)

    channel = connection.channel()

    channel.basic_publish('test_exchange',
                          'test_routing_key',
                          'message body value',
                          pika.BasicProperties(content_type='text/plain',
                                               delivery_mode=1))

    connection.close()


In contrast, using :py:meth:`pika.adapters.select_connection.SelectConnection` and the other asynchronous adapters is more complicated and less pythonic, but when used with other asynchronous services can have tremendous performance improvements. In the following code example, all of the same parameters and values are used as were used in the previous example::

    import pika

    # Step #3
    def on_open(connection):

        connection.channel(on_channel_open)

    # Step #4
    def on_channel_open(channel):

        channel.basic_publish('test_exchange',
                              'test_routing_key',
                              'message body value',
                              pika.BasicProperties(content_type='text/plain',
                                                   delivery_mode=1))

        connection.close()

    # Step #1: Connect to RabbitMQ
    parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')

    connection = pika.SelectConnection(parameters=parameters,
                                       on_open_callback=on_open)

    try:

        # Step #2 - Block on the IOLoop
        connection.ioloop.start()

    # Catch a Keyboard Interrupt to make sure that the connection is closed cleanly
    except KeyboardInterrupt:

        # Gracefully close the connection
        connection.close()

        # Start the IOLoop again so Pika can communicate, it will stop on its own when the connection is closed
        connection.ioloop.start()

Connecting to RabbitMQ with Callback-Passing Style
==================================================

When you connect to RabbitMQ with an asynchronous adapter, you are writing event
oriented code. The connection adapter will block on the IOLoop that is watching
to see when pika should read data from and write data to RabbitMQ. Because you're
now blocking on the IOLoop, you will receive callback notifications when specific
events happen.

Example Code
------------
In the example, there are four steps that take place:

1. Setup the connection to RabbitMQ
2. Start the IOLoop
3. Once connected, the on_open method will be called by Pika with a handle to
   the connection. In this method, a new channel will be opened on the connection.
4. Once the channel is opened, you can do your other actions, whether they be
   publishing messages, consuming messages or other RabbitMQ related activities::

    import pika

    # Step #3
    def on_open(connection):
        connection.channel(on_channel_open)

    # Step #4
    def on_channel_open(channel):
        channel.basic_publish('exchange_name',
                              'routing_key',
                              'Test Message',
                              pika.BasicProperties(content_type='text/plain',
                                                   type='example'))

    # Step #1: Connect to RabbitMQ
    connection = pika.SelectConnection(on_open_callback=on_open)

    try:
        # Step #2 - Block on the IOLoop
        connection.ioloop.start()

    # Catch a Keyboard Interrupt to make sure that the connection is closed cleanly
    except KeyboardInterrupt:

        # Gracefully close the connection
        connection.close()

        # Start the IOLoop again so Pika can communicate, it will stop on its own when the connection is closed
        connection.ioloop.start()

Direct reply-to example
==============================
The following example demonstrates the use of the RabbitMQ "Direct reply-to" feature via `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html for more info about this feature.

direct_reply_to.py::

    # -*- coding: utf-8 -*-

    """
    This example demonstrates the RabbitMQ "Direct reply-to" usage via
    `pika.BlockingConnection`. See https://www.rabbitmq.com/direct-reply-to.html
    for more info about this feature.
    """
    import pika


    SERVER_QUEUE = 'rpc.server.queue'


    def main():
        """ Here, Client sends "Marco" to RPC Server, and RPC Server replies with
        "Polo".

        NOTE Normally, the server would be running separately from the client, but
        in this very simple example both are running in the same thread and sharing
        connection and channel.

        """
        with pika.BlockingConnection() as conn:
            channel = conn.channel()

            # Set up server

            channel.queue_declare(queue=SERVER_QUEUE,
                                  exclusive=True,
                                  auto_delete=True)
            channel.basic_consume(on_server_rx_rpc_request, queue=SERVER_QUEUE)


            # Set up client

            # NOTE Client must create its consumer and publish RPC requests on the
            # same channel to enable the RabbitMQ broker to make the necessary
            # associations.
            #
            # Also, client must create the consumer *before* starting to publish the
            # RPC requests.
            #
            # Client must create its consumer with no_ack=True, because the reply-to
            # queue isn't real.

            channel.basic_consume(on_client_rx_reply_from_server,
                                  queue='amq.rabbitmq.reply-to',
                                  no_ack=True)
            channel.basic_publish(
                exchange='',
                routing_key=SERVER_QUEUE,
                body='Marco',
                properties=pika.BasicProperties(reply_to='amq.rabbitmq.reply-to'))

            channel.start_consuming()


    def on_server_rx_rpc_request(ch, method_frame, properties, body):
        print 'RPC Server got request:', body

        ch.basic_publish('', routing_key=properties.reply_to, body='Polo')

        ch.basic_ack(delivery_tag=method_frame.delivery_tag)

        print 'RPC Server says good bye'


    def on_client_rx_reply_from_server(ch, method_frame, properties, body):
        print 'RPC Client got reply:', body

        # NOTE A real client might want to make additional RPC requests, but in this
        # simple example we're closing the channel after getting our first reply
        # to force control to return from channel.start_consuming()
        print 'RPC Client says bye'
        ch.close()

Ensuring well-behaved connection with heartbeat and blocked-connection timeouts
===============================================================================

This example demonstrates explicit setting of heartbeat and blocked connection timeouts.

Starting with RabbitMQ 3.5.5, the broker's default heartbeat timeout decreased from 580 seconds to 60 seconds. As a result, applications that perform lengthy processing in the same thread that also runs their Pika connection may experience unexpected dropped connections due to heartbeat timeout. Here, we specify an explicit lower bound for heartbeat timeout.

When RabbitMQ broker is running out of certain resources, such as memory and disk space, it may block connections that are performing resource-consuming operations, such as publishing messages. Once a connection is blocked, RabbitMQ stops reading from that connection's socket, so no commands from the client will get through to the broker on that connection until the broker unblocks it. A blocked connection may last for an indefinite period of time, stalling the connection and possibly resulting in a hang (e.g., in BlockingConnection) until the connection is unblocked. Blocked Connection Timeout is intended to interrupt (i.e., drop) a connection that has been blocked longer than the given timeout value.

Example of configuring heartbeat and blocked-connection timeouts::

    import pika


    def main():

        # NOTE: These parameters work with all Pika connection types
        params = pika.ConnectionParameters(heartbeat_interval=600,
                                           blocked_connection_timeout=300)

        conn = pika.BlockingConnection(params)

        chan = conn.channel()

        chan.basic_publish('', 'my-alphabet-queue', "abc")

        # If publish causes the connection to become blocked, then this conn.close()
        # would hang until the connection is unblocked, if ever. However, the
        # blocked_connection_timeout connection parameter would interrupt the wait,
        # resulting in ConnectionClosed exception from BlockingConnection (or the
        # on_connection_closed callback call in an asynchronous adapter)
        conn.close()


    if __name__ == '__main__':
        main()

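The same two timeouts can also be supplied through the AMQP URL when the connection is built from :class:`pika.URLParameters`, which is convenient when the broker address already comes from configuration. A hedged sketch; it assumes the ``blocked_connection_timeout`` query parameter is recognised by the URL parser in the same Pika versions that accept it in ``ConnectionParameters``::

    import pika

    # heartbeat_interval matches the URL-style usage shown in the publisher example above
    params = pika.URLParameters(
        'amqp://guest:guest@localhost:5672/%2F'
        '?heartbeat_interval=600&blocked_connection_timeout=300')

    conn = pika.BlockingConnection(params)
    chan = conn.channel()
    chan.basic_publish('', 'my-alphabet-queue', "abc")
    conn.close()
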
TLS parameters example
=============================
This example demonstrates a TLS session with RabbitMQ using mutual authentication.

It was tested against RabbitMQ 3.6.10, using Python 3.6.1 and pre-release Pika `0.11.0`

Note the use of `ssl_version=ssl.PROTOCOL_TLSv1`. The recent versions of RabbitMQ disable older versions of
SSL due to security vulnerabilities.

See https://www.rabbitmq.com/ssl.html for certificate creation and rabbitmq SSL configuration instructions.


tls_example.py::

    import ssl
    import pika
    import logging

    logging.basicConfig(level=logging.INFO)

    cp = pika.ConnectionParameters(
        ssl=True,
        ssl_options=dict(
            ssl_version=ssl.PROTOCOL_TLSv1,
            ca_certs="/Users/me/tls-gen/basic/testca/cacert.pem",
            keyfile="/Users/me/tls-gen/basic/client/key.pem",
            certfile="/Users/me/tls-gen/basic/client/cert.pem",
            cert_reqs=ssl.CERT_REQUIRED))

    conn = pika.BlockingConnection(cp)
    ch = conn.channel()
    print(ch.queue_declare("sslq"))
    ch.publish("", "sslq", "abc")
    print(ch.basic_get("sslq"))


rabbitmq.config::

    %% Both the client and rabbitmq server were running on the same machine, a MacBookPro laptop.
    %%
    %% rabbitmq.config was created in its default location for OS X: /usr/local/etc/rabbitmq/rabbitmq.config.
    %%
    %% The contents of the example rabbitmq.config are for demonstration purposes only. See https://www.rabbitmq.com/ssl.html for instructions about creating the test certificates and the contents of rabbitmq.config.

    [
      {rabbit,
        [
          {ssl_listeners, [{"127.0.0.1", 5671}]},

          %% Configuring SSL.
          %% See http://www.rabbitmq.com/ssl.html for full documentation.
          %%
          {ssl_options, [{cacertfile,           "/Users/me/tls-gen/basic/testca/cacert.pem"},
                         {certfile,             "/Users/me/tls-gen/basic/server/cert.pem"},
                         {keyfile,              "/Users/me/tls-gen/basic/server/key.pem"},
                         {verify,               verify_peer},
                         {fail_if_no_peer_cert, true}]}
        ]
      }
    ].

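If your broker has also been configured to refuse TLSv1, the only change needed on the client side is the protocol constant passed in ``ssl_options``. A minimal sketch, assuming the same certificate paths as above and a Python build whose ``ssl`` module exposes ``PROTOCOL_TLSv1_2``::

    import ssl
    import pika

    cp = pika.ConnectionParameters(
        ssl=True,
        ssl_options=dict(
            ssl_version=ssl.PROTOCOL_TLSv1_2,  # negotiate TLS 1.2 instead of TLS 1.0
            ca_certs="/Users/me/tls-gen/basic/testca/cacert.pem",
            keyfile="/Users/me/tls-gen/basic/client/key.pem",
            certfile="/Users/me/tls-gen/basic/client/cert.pem",
            cert_reqs=ssl.CERT_REQUIRED))

    conn = pika.BlockingConnection(cp)
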
TLS parameters example
=============================
This example demonstrates a TLS session with RabbitMQ using server authentication.

It was tested against RabbitMQ 3.6.10, using Python 3.6.1 and pre-release Pika `0.11.0`

Note the use of `ssl_version=ssl.PROTOCOL_TLSv1`. The recent versions of RabbitMQ disable older versions of
SSL due to security vulnerabilities.

See https://www.rabbitmq.com/ssl.html for certificate creation and rabbitmq SSL configuration instructions.


tls_example.py::

    import ssl
    import pika
    import logging

    logging.basicConfig(level=logging.INFO)

    cp = pika.ConnectionParameters(
        ssl=True,
        ssl_options=dict(
            ssl_version=ssl.PROTOCOL_TLSv1,
            ca_certs="/Users/me/tls-gen/basic/testca/cacert.pem",
            cert_reqs=ssl.CERT_REQUIRED))

    conn = pika.BlockingConnection(cp)
    ch = conn.channel()
    print(ch.queue_declare("sslq"))
    ch.publish("", "sslq", "abc")
    print(ch.basic_get("sslq"))


rabbitmq.config::

    %% Both the client and rabbitmq server were running on the same machine, a MacBookPro laptop.
    %%
    %% rabbitmq.config was created in its default location for OS X: /usr/local/etc/rabbitmq/rabbitmq.config.
    %%
    %% The contents of the example rabbitmq.config are for demonstration purposes only. See https://www.rabbitmq.com/ssl.html for instructions about creating the test certificates and the contents of rabbitmq.config.
    %%
    %% Note that the {fail_if_no_peer_cert,false} option states that RabbitMQ should accept clients that don't have a certificate to send to the broker, but through the {verify,verify_peer} option, we state that if the client does send a certificate to the broker, the broker must be able to establish a chain of trust to it.

    [
      {rabbit,
        [
          {ssl_listeners, [{"127.0.0.1", 5671}]},

          %% Configuring SSL.
          %% See http://www.rabbitmq.com/ssl.html for full documentation.
          %%
          {ssl_options, [{cacertfile,           "/Users/me/tls-gen/basic/testca/cacert.pem"},
                         {certfile,             "/Users/me/tls-gen/basic/server/cert.pem"},
                         {keyfile,              "/Users/me/tls-gen/basic/server/key.pem"},
                         {verify,               verify_peer},
                         {fail_if_no_peer_cert, false}]}
        ]
      }
    ].

Tornado Consumer
================
The following example implements a consumer using the :class:`Tornado adapter <pika.adapters.tornado_connection.TornadoConnection>` for the `Tornado framework <http://tornadoweb.org>`_ that will respond to RPC commands sent from RabbitMQ. For example, it will reconnect if RabbitMQ closes the connection and will shutdown if RabbitMQ cancels the consumer or closes the channel. While it may look intimidating, each method is very short and represents an individual action that a consumer can do.

consumer.py::

    from pika import adapters
    import pika
    import logging

    LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
                  '-35s %(lineno) -5d: %(message)s')
    LOGGER = logging.getLogger(__name__)


    class ExampleConsumer(object):
        """This is an example consumer that will handle unexpected interactions
        with RabbitMQ such as channel and connection closures.

        If RabbitMQ closes the connection, it will reopen it. You should
        look at the output, as there are limited reasons why the connection may
        be closed, which usually are tied to permission related issues or
        socket timeouts.

        If the channel is closed, it will indicate a problem with one of the
        commands that were issued and that should surface in the output as well.

        """
        EXCHANGE = 'message'
        EXCHANGE_TYPE = 'topic'
        QUEUE = 'text'
        ROUTING_KEY = 'example.text'

        def __init__(self, amqp_url):
            """Create a new instance of the consumer class, passing in the AMQP
            URL used to connect to RabbitMQ.

            :param str amqp_url: The AMQP url to connect with

            """
            self._connection = None
            self._channel = None
            self._closing = False
            self._consumer_tag = None
            self._url = amqp_url

        def connect(self):
            """This method connects to RabbitMQ, returning the connection handle.
            When the connection is established, the on_connection_open method
            will be invoked by pika.

            :rtype: pika.SelectConnection

            """
            LOGGER.info('Connecting to %s', self._url)
            return adapters.tornado_connection.TornadoConnection(pika.URLParameters(self._url),
                                                                 self.on_connection_open)

        def close_connection(self):
            """This method closes the connection to RabbitMQ."""
            LOGGER.info('Closing connection')
            self._connection.close()

        def add_on_connection_close_callback(self):
            """This method adds an on close callback that will be invoked by pika
            when RabbitMQ closes the connection to the publisher unexpectedly.

            """
            LOGGER.info('Adding connection close callback')
            self._connection.add_on_close_callback(self.on_connection_closed)

        def on_connection_closed(self, connection, reply_code, reply_text):
            """This method is invoked by pika when the connection to RabbitMQ is
            closed unexpectedly. Since it is unexpected, we will reconnect to
            RabbitMQ if it disconnects.

            :param pika.connection.Connection connection: The closed connection obj
            :param int reply_code: The server provided reply_code if given
            :param str reply_text: The server provided reply_text if given

            """
            self._channel = None
            if self._closing:
                self._connection.ioloop.stop()
            else:
                LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
                               reply_code, reply_text)
                self._connection.add_timeout(5, self.reconnect)

        def on_connection_open(self, unused_connection):
            """This method is called by pika once the connection to RabbitMQ has
            been established. It passes the handle to the connection object in
            case we need it, but in this case, we'll just mark it unused.

            :type unused_connection: pika.SelectConnection

            """
            LOGGER.info('Connection opened')
            self.add_on_connection_close_callback()
            self.open_channel()

        def reconnect(self):
            """Will be invoked by the IOLoop timer if the connection is
            closed. See the on_connection_closed method.

            """
            if not self._closing:

                # Create a new connection
                self._connection = self.connect()

        def add_on_channel_close_callback(self):
            """This method tells pika to call the on_channel_closed method if
            RabbitMQ unexpectedly closes the channel.

            """
            LOGGER.info('Adding channel close callback')
            self._channel.add_on_close_callback(self.on_channel_closed)

        def on_channel_closed(self, channel, reply_code, reply_text):
            """Invoked by pika when RabbitMQ unexpectedly closes the channel.
            Channels are usually closed if you attempt to do something that
            violates the protocol, such as re-declare an exchange or queue with
            different parameters. In this case, we'll close the connection
            to shutdown the object.

            :param pika.channel.Channel: The closed channel
            :param int reply_code: The numeric reason the channel was closed
            :param str reply_text: The text reason the channel was closed

            """
            LOGGER.warning('Channel %i was closed: (%s) %s',
                           channel, reply_code, reply_text)
            self._connection.close()

        def on_channel_open(self, channel):
            """This method is invoked by pika when the channel has been opened.
            The channel object is passed in so we can make use of it.

            Since the channel is now open, we'll declare the exchange to use.

            :param pika.channel.Channel channel: The channel object

            """
            LOGGER.info('Channel opened')
            self._channel = channel
            self.add_on_channel_close_callback()
            self.setup_exchange(self.EXCHANGE)

        def setup_exchange(self, exchange_name):
            """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
            command. When it is complete, the on_exchange_declareok method will
            be invoked by pika.

            :param str|unicode exchange_name: The name of the exchange to declare

            """
            LOGGER.info('Declaring exchange %s', exchange_name)
            self._channel.exchange_declare(self.on_exchange_declareok,
                                           exchange_name,
                                           self.EXCHANGE_TYPE)

        def on_exchange_declareok(self, unused_frame):
            """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
            command.

            :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame

            """
            LOGGER.info('Exchange declared')
            self.setup_queue(self.QUEUE)

        def setup_queue(self, queue_name):
            """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
            command. When it is complete, the on_queue_declareok method will
            be invoked by pika.

            :param str|unicode queue_name: The name of the queue to declare.

            """
            LOGGER.info('Declaring queue %s', queue_name)
            self._channel.queue_declare(self.on_queue_declareok, queue_name)

        def on_queue_declareok(self, method_frame):
            """Method invoked by pika when the Queue.Declare RPC call made in
            setup_queue has completed. In this method we will bind the queue
            and exchange together with the routing key by issuing the Queue.Bind
            RPC command. When this command is complete, the on_bindok method will
            be invoked by pika.

            :param pika.frame.Method method_frame: The Queue.DeclareOk frame

            """
            LOGGER.info('Binding %s to %s with %s',
                        self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)
            self._channel.queue_bind(self.on_bindok, self.QUEUE,
                                     self.EXCHANGE, self.ROUTING_KEY)

        def add_on_cancel_callback(self):
            """Add a callback that will be invoked if RabbitMQ cancels the consumer
            for some reason. If RabbitMQ does cancel the consumer,
            on_consumer_cancelled will be invoked by pika.

            """
            LOGGER.info('Adding consumer cancellation callback')
            self._channel.add_on_cancel_callback(self.on_consumer_cancelled)

        def on_consumer_cancelled(self, method_frame):
            """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
            receiving messages.

            :param pika.frame.Method method_frame: The Basic.Cancel frame

            """
            LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
                        method_frame)
            if self._channel:
                self._channel.close()

        def acknowledge_message(self, delivery_tag):
            """Acknowledge the message delivery from RabbitMQ by sending a
            Basic.Ack RPC method for the delivery tag.

            :param int delivery_tag: The delivery tag from the Basic.Deliver frame

            """
            LOGGER.info('Acknowledging message %s', delivery_tag)
            self._channel.basic_ack(delivery_tag)

        def on_message(self, unused_channel, basic_deliver, properties, body):
            """Invoked by pika when a message is delivered from RabbitMQ. The
            channel is passed for your convenience. The basic_deliver object that
            is passed in carries the exchange, routing key, delivery tag and
            a redelivered flag for the message. The properties passed in is an
            instance of BasicProperties with the message properties and the body
            is the message that was sent.

            :param pika.channel.Channel unused_channel: The channel object
            :param pika.Spec.Basic.Deliver: basic_deliver method
            :param pika.Spec.BasicProperties: properties
            :param str|unicode body: The message body

            """
            LOGGER.info('Received message # %s from %s: %s',
                        basic_deliver.delivery_tag, properties.app_id, body)
            self.acknowledge_message(basic_deliver.delivery_tag)

        def on_cancelok(self, unused_frame):
            """This method is invoked by pika when RabbitMQ acknowledges the
            cancellation of a consumer. At this point we will close the channel.
            This will invoke the on_channel_closed method once the channel has been
            closed, which will in-turn close the connection.

            :param pika.frame.Method unused_frame: The Basic.CancelOk frame

            """
            LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
            self.close_channel()

        def stop_consuming(self):
            """Tell RabbitMQ that you would like to stop consuming by sending the
            Basic.Cancel RPC command.

            """
            if self._channel:
                LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
                self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)

        def start_consuming(self):
            """This method sets up the consumer by first calling
            add_on_cancel_callback so that the object is notified if RabbitMQ
            cancels the consumer. It then issues the Basic.Consume RPC command
            which returns the consumer tag that is used to uniquely identify the
            consumer with RabbitMQ. We keep the value to use it when we want to
            cancel consuming. The on_message method is passed in as a callback pika
            will invoke when a message is fully received.

            """
            LOGGER.info('Issuing consumer related RPC commands')
            self.add_on_cancel_callback()
            self._consumer_tag = self._channel.basic_consume(self.on_message,
                                                             self.QUEUE)

        def on_bindok(self, unused_frame):
            """Invoked by pika when the Queue.Bind method has completed. At this
            point we will start consuming messages by calling start_consuming
            which will invoke the needed RPC commands to start the process.

            :param pika.frame.Method unused_frame: The Queue.BindOk response frame

            """
            LOGGER.info('Queue bound')
            self.start_consuming()

        def close_channel(self):
            """Call to close the channel with RabbitMQ cleanly by issuing the
            Channel.Close RPC command.

            """
            LOGGER.info('Closing the channel')
            self._channel.close()

        def open_channel(self):
            """Open a new channel with RabbitMQ by issuing the Channel.Open RPC
            command. When RabbitMQ responds that the channel is open, the
            on_channel_open callback will be invoked by pika.

            """
            LOGGER.info('Creating a new channel')
            self._connection.channel(on_open_callback=self.on_channel_open)

        def run(self):
            """Run the example consumer by connecting to RabbitMQ and then
            starting the IOLoop to block and allow the SelectConnection to operate.

            """
            self._connection = self.connect()
            self._connection.ioloop.start()

        def stop(self):
            """Cleanly shutdown the connection to RabbitMQ by stopping the consumer
            with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
            will be invoked by pika, which will then close the channel and
            connection. The IOLoop is started again because this method is invoked
            when CTRL-C is pressed raising a KeyboardInterrupt exception. This
            exception stops the IOLoop which needs to be running for pika to
            communicate with RabbitMQ. All of the commands issued prior to starting
            the IOLoop will be buffered but not processed.

            """
            LOGGER.info('Stopping')
            self._closing = True
            self.stop_consuming()
            self._connection.ioloop.start()
            LOGGER.info('Stopped')


    def main():
        logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
        example = ExampleConsumer('amqp://guest:guest@localhost:5672/%2F')
        try:
            example.run()
        except KeyboardInterrupt:
            example.stop()


    if __name__ == '__main__':
        main()

Twisted Consumer Example
========================
Example of writing a consumer using the :py:class:`Twisted connection adapter <pika.adapters.twisted_connection.TwistedConnection>`::

    # -*- coding:utf-8 -*-

    import pika
    from pika import exceptions
    from pika.adapters import twisted_connection
    from twisted.internet import defer, reactor, protocol, task


    @defer.inlineCallbacks
    def run(connection):

        channel = yield connection.channel()

        exchange = yield channel.exchange_declare(exchange='topic_link', exchange_type='topic')

        queue = yield channel.queue_declare(queue='hello', auto_delete=False, exclusive=False)

        yield channel.queue_bind(exchange='topic_link', queue='hello', routing_key='hello.world')

        yield channel.basic_qos(prefetch_count=1)

        queue_object, consumer_tag = yield channel.basic_consume(queue='hello', no_ack=False)

        l = task.LoopingCall(read, queue_object)

        l.start(0.01)


    @defer.inlineCallbacks
    def read(queue_object):

        ch, method, properties, body = yield queue_object.get()

        if body:
            print(body)

        yield ch.basic_ack(delivery_tag=method.delivery_tag)


    parameters = pika.ConnectionParameters()
    cc = protocol.ClientCreator(reactor, twisted_connection.TwistedProtocolConnection, parameters)
    d = cc.connectTCP('hostname', 5672)
    d.addCallback(lambda protocol: protocol.ready)
    d.addCallback(run)
    reactor.run()
