import asyncore
import os
-import select
import signal
import errno
import logging
+import sched
+import time
+import socket
+import sys
from ganeti import utils
from ganeti import constants
+from ganeti import errors
+
+
+class SchedulerBreakout(Exception):
+ """Exception used to get out of the scheduler loop
+
+ Raised by AsyncoreDelayFunction after processing asyncore events, so
+ that the current scheduler.run() invocation returns to the main loop
+ (which can then check for signals before running the scheduler again).
+
+ """
+
+
+def AsyncoreDelayFunction(timeout):
+ """Asyncore-compatible scheduler delay function.
+
+ This is a delay function for sched that, rather than actually sleeping,
+ executes asyncore events happening in the meantime.
+
+ After an event has occurred, rather than returning, it raises a
+ SchedulerBreakout exception, which will force the current scheduler.run()
+ invocation to terminate, so that we can also check for signals. The main loop
+ will then call the scheduler run again, which will allow it to actually
+ process any due events.
+
+ This is needed because scheduler.run() doesn't support a count=... argument
+ the way asyncore.loop() does, and the scheduler module documents throwing
+ exceptions from inside the delay function as an allowed usage model.
+
+ @type timeout: float
+ @param timeout: maximum time (in seconds) to wait for asyncore events
+ @raise SchedulerBreakout: always, after running at most one asyncore
+ event loop iteration
+
+ """
+ asyncore.loop(timeout=timeout, count=1, use_poll=True)
+ raise SchedulerBreakout()
+
+
+class AsyncoreScheduler(sched.scheduler):
+ """Event scheduler integrated with asyncore
+
+ Uses AsyncoreDelayFunction as the delay function, so that waiting for a
+ scheduled event also processes pending asyncore I/O events instead of
+ sleeping.
+
+ """
+ def __init__(self, timefunc):
+ """Initializes this class.
+
+ @param timefunc: function returning the current time (e.g. time.time),
+ passed through to sched.scheduler
+
+ """
+ sched.scheduler.__init__(self, timefunc, AsyncoreDelayFunction)
+
+
+class AsyncUDPSocket(asyncore.dispatcher):
+ """An improved asyncore udp socket.
+
+ Buffers outgoing datagrams in an internal queue and delivers incoming
+ ones to L{handle_datagram}, which subclasses must override.
+
+ """
+ def __init__(self):
+ """Constructor for AsyncUDPSocket
+
+ """
+ asyncore.dispatcher.__init__(self)
+ # FIFO of (ip, port, payload) tuples waiting to be sent
+ self._out_queue = []
+ self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+ # this method is overriding an asyncore.dispatcher method
+ def handle_connect(self):
+ # Python thinks that the first udp message from a source qualifies as a
+ # "connect" and further ones are part of the same connection. We beg to
+ # differ and treat all messages equally.
+ pass
+
+ def do_read(self):
+ """Read one datagram and dispatch it to L{handle_datagram}.
+
+ An EINTR from recvfrom is swallowed on purpose: handle_read will be
+ invoked again if there is still data on the socket.
+
+ """
+ try:
+ payload, address = self.recvfrom(constants.MAX_UDP_DATA_SIZE)
+ except socket.error, err:
+ if err.errno == errno.EINTR:
+ # we got a signal while trying to read. no need to do anything,
+ # handle_read will be called again if there is data on the socket.
+ return
+ else:
+ raise
+ ip, port = address
+ self.handle_datagram(payload, ip, port)
+
+ # this method is overriding an asyncore.dispatcher method
+ def handle_read(self):
+ try:
+ self.do_read()
+ except: # pylint: disable-msg=W0702
+ # we need to catch any exception here, log it, but proceed, because even
+ # if we failed handling a single request, we still want to continue.
+ logging.error("Unexpected exception", exc_info=True)
+
+ def handle_datagram(self, payload, ip, port):
+ """Handle an already read udp datagram
+
+ Must be overridden by subclasses.
+
+ @type payload: string
+ @param payload: datagram contents
+ @type ip: string
+ @param ip: source IP address
+ @type port: int
+ @param port: source port
+
+ """
+ raise NotImplementedError
+
+ # this method is overriding an asyncore.dispatcher method
+ def writable(self):
+ # We should check whether we can write to the socket only if we have
+ # something scheduled to be written
+ return bool(self._out_queue)
+
+ def handle_write(self):
+ """Send the oldest queued datagram, if any.
+
+ """
+ try:
+ if not self._out_queue:
+ logging.error("handle_write called with empty output queue")
+ return
+ (ip, port, payload) = self._out_queue[0]
+ try:
+ self.sendto(payload, 0, (ip, port))
+ except socket.error, err:
+ if err.errno == errno.EINTR:
+ # we got a signal while trying to write. no need to do anything,
+ # handle_write will be called again because we haven't emptied the
+ # _out_queue, and we'll try again
+ return
+ else:
+ raise
+ # only dequeue after a successful (non-EINTR) send
+ self._out_queue.pop(0)
+ except: # pylint: disable-msg=W0702
+ # we need to catch any exception here, log it, but proceed, because even
+ # if we failed sending a single datagram we still want to continue.
+ logging.error("Unexpected exception", exc_info=True)
+
+ def enqueue_send(self, ip, port, payload):
+ """Enqueue a datagram to be sent when possible
+
+ @type ip: string
+ @param ip: destination IP address
+ @type port: int
+ @param port: destination port
+ @type payload: string
+ @param payload: datagram contents; must not exceed
+ constants.MAX_UDP_DATA_SIZE
+ @raise errors.UdpDataSizeError: if the payload is too big
+
+ """
+ if len(payload) > constants.MAX_UDP_DATA_SIZE:
+ raise errors.UdpDataSizeError('Packet too big: %s > %s' % (len(payload),
+ constants.MAX_UDP_DATA_SIZE))
+ self._out_queue.append((ip, port, payload))
class Mainloop(object):
"""Generic mainloop for daemons
+ @ivar scheduler: A sched.scheduler object, which can be used to register
+ timed events
+
"""
def __init__(self):
"""Constructs a new Mainloop instance.
"""
self._signal_wait = []
+ self.scheduler = AsyncoreScheduler(time.time)
@utils.SignalHandled([signal.SIGCHLD])
@utils.SignalHandled([signal.SIGTERM])
- def Run(self, stop_on_empty=False, signal_handlers=None):
+ def Run(self, signal_handlers=None):
"""Runs the mainloop.
- @type stop_on_empty: bool
- @param stop_on_empty: Whether to stop mainloop once all I/O waiters
- unregistered
@type signal_handlers: dict
@param signal_handlers: signal->L{utils.SignalHandler} passed by decorator
running = True
# Start actual main loop
while running:
- # Stop if nothing is listening anymore
- if stop_on_empty and not (self._io_wait):
- break
-
- asyncore.loop(timeout=5, count=1, use_poll=True)
+ if not self.scheduler.empty():
+ try:
+ self.scheduler.run()
+ except SchedulerBreakout:
+ pass
+ else:
+ asyncore.loop(count=1, use_poll=True)
# Check whether a signal was raised
for sig in signal_handlers:
"""
for owner in self._signal_wait:
- owner.OnSignal(signal.SIGCHLD)
+ owner.OnSignal(signum)
def RegisterSignal(self, owner):
"""Registers a receiver for signal notifications
@type daemon_name: string
@param daemon_name: daemon name
- @type optionparser: L{optparse.OptionParser}
+ @type optionparser: optparse.OptionParser
@param optionparser: initialized optionparser with daemon-specific options
(common -f -d options will be handled by this module)
- @type options: object @param options: OptionParser result, should contain at
- least the fork and the debug options
@type dirs: list of strings
@param dirs: list of directories that must exist for this daemon to work
@type check_fn: function which accepts (options, args)
optionparser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
+ optionparser.add_option("--syslog", dest="syslog",
+ help="Enable logging to syslog (except debug"
+ " messages); one of 'no', 'yes' or 'only' [%s]" %
+ constants.SYSLOG_USAGE,
+ default=constants.SYSLOG_USAGE,
+ choices=["no", "yes", "only"])
if daemon_name in constants.DAEMONS_PORTS:
# for networked daemons we also allow choosing the bind port and address.
# by default we use the port provided by utils.GetDaemonPort, and bind to
utils.SetupLogging(logfile=constants.DAEMONS_LOGFILES[daemon_name],
debug=options.debug,
stderr_logging=not options.fork,
- multithreaded=multithread)
- logging.info("%s daemon startup" % daemon_name)
+ multithreaded=multithread,
+ program=daemon_name,
+ syslog=options.syslog)
+ logging.info("%s daemon startup", daemon_name)
exec_fn(options, args)
finally:
utils.RemovePidFile(daemon_name)
-