projects
/
ganeti-local
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Fix burnin error when trying to grow a file volume
[ganeti-local]
/
lib
/
daemon.py
diff --git a/lib/daemon.py b/lib/daemon.py
index 9467bb9..0fba74d 100644 (file)
--- a/lib/daemon.py
+++ b/lib/daemon.py
@@ -24,13 +24,14 @@
import asyncore
import os
import asyncore
import os
-import select
import signal
import errno
import logging
import sched
import time
import socket
import signal
import errno
import logging
import sched
import time
import socket
+import select
+import sys
from ganeti import utils
from ganeti import constants
from ganeti import utils
from ganeti import constants
@@ -91,21 +92,24 @@ class AsyncUDPSocket(asyncore.dispatcher):
# differ and treat all messages equally.
pass
# differ and treat all messages equally.
pass
+ def do_read(self):
+ try:
+ payload, address = self.recvfrom(constants.MAX_UDP_DATA_SIZE)
+ except socket.error, err:
+ if err.errno == errno.EINTR:
+ # we got a signal while trying to read. no need to do anything,
+ # handle_read will be called again if there is data on the socket.
+ return
+ else:
+ raise
+ ip, port = address
+ self.handle_datagram(payload, ip, port)
+
# this method is overriding an asyncore.dispatcher method
def handle_read(self):
try:
# this method is overriding an asyncore.dispatcher method
def handle_read(self):
try:
- try:
- payload, address = self.recvfrom(4096)
- except socket.error, err:
- if err.errno == errno.EINTR:
- # we got a signal while trying to read. no need to do anything,
- # handle_read will be called again if there is data on the socket.
- return
- else:
- raise
- ip, port = address
- self.handle_datagram(payload, ip, port)
- except:
+ self.do_read()
+ except: # pylint: disable-msg=W0702
# we need to catch any exception here, log it, but proceed, because even
# if we failed handling a single request, we still want to continue.
logging.error("Unexpected exception", exc_info=True)
# we need to catch any exception here, log it, but proceed, because even
# if we failed handling a single request, we still want to continue.
logging.error("Unexpected exception", exc_info=True)
@@ -139,7 +143,7 @@ class AsyncUDPSocket(asyncore.dispatcher):
else:
raise
self._out_queue.pop(0)
else:
raise
self._out_queue.pop(0)
- except:
+ except: # pylint: disable-msg=W0702
# we need to catch any exception here, log it, but proceed, because even
# if we failed sending a single datagram we still want to continue.
logging.error("Unexpected exception", exc_info=True)
# we need to catch any exception here, log it, but proceed, because even
# if we failed sending a single datagram we still want to continue.
logging.error("Unexpected exception", exc_info=True)
@@ -148,31 +152,46 @@ class AsyncUDPSocket(asyncore.dispatcher):
"""Enqueue a datagram to be sent when possible
"""
"""Enqueue a datagram to be sent when possible
"""
+ if len(payload) > constants.MAX_UDP_DATA_SIZE:
+ raise errors.UdpDataSizeError('Packet too big: %s > %s' % (len(payload),
+ constants.MAX_UDP_DATA_SIZE))
self._out_queue.append((ip, port, payload))
self._out_queue.append((ip, port, payload))
+ def process_next_packet(self, timeout=0):
+ """Process the next datagram, waiting for it if necessary.
+
+ @type timeout: float
+ @param timeout: how long to wait for data
+ @rtype: boolean
+ @return: True if some data has been handled, False otherwise
+
+ """
+ if utils.WaitForFdCondition(self, select.POLLIN, timeout) & select.POLLIN:
+ self.do_read()
+ return True
+ else:
+ return False
+
class Mainloop(object):
"""Generic mainloop for daemons
class Mainloop(object):
"""Generic mainloop for daemons
+ @ivar scheduler: A sched.scheduler object, which can be used to register
+ timed events
+
"""
def __init__(self):
"""Constructs a new Mainloop instance.
"""
def __init__(self):
"""Constructs a new Mainloop instance.
- @ivar scheduler: A L{sched.scheduler} object, which can be used to register
- timed events
-
"""
self._signal_wait = []
self.scheduler = AsyncoreScheduler(time.time)
@utils.SignalHandled([signal.SIGCHLD])
@utils.SignalHandled([signal.SIGTERM])
"""
self._signal_wait = []
self.scheduler = AsyncoreScheduler(time.time)
@utils.SignalHandled([signal.SIGCHLD])
@utils.SignalHandled([signal.SIGTERM])
- def Run(self, stop_on_empty=False, signal_handlers=None):
+ def Run(self, signal_handlers=None):
"""Runs the mainloop.
"""Runs the mainloop.
- @type stop_on_empty: bool
- @param stop_on_empty: Whether to stop mainloop once all I/O waiters
- unregistered
@type signal_handlers: dict
@param signal_handlers: signal->L{utils.SignalHandler} passed by decorator
@type signal_handlers: dict
@param signal_handlers: signal->L{utils.SignalHandler} passed by decorator
@@ -183,10 +202,6 @@ class Mainloop(object):
running = True
# Start actual main loop
while running:
running = True
# Start actual main loop
while running:
- # Stop if nothing is listening anymore
- if stop_on_empty and not (self._io_wait):
- break
-
if not self.scheduler.empty():
try:
self.scheduler.run()
if not self.scheduler.empty():
try:
self.scheduler.run()
@@ -230,11 +245,9 @@ def GenericMain(daemon_name, optionparser, dirs, check_fn, exec_fn):
@type daemon_name: string
@param daemon_name: daemon name
@type daemon_name: string
@param daemon_name: daemon name
- @type optionparser: L{optparse.OptionParser}
+ @type optionparser: optparse.OptionParser
@param optionparser: initialized optionparser with daemon-specific options
(common -f -d options will be handled by this module)
@param optionparser: initialized optionparser with daemon-specific options
(common -f -d options will be handled by this module)
- @type options: object @param options: OptionParser result, should contain at
- least the fork and the debug options
@type dirs: list of strings
@param dirs: list of directories that must exist for this daemon to work
@type check_fn: function which accepts (options, args)
@type dirs: list of strings
@param dirs: list of directories that must exist for this daemon to work
@type check_fn: function which accepts (options, args)
@@ -251,6 +264,12 @@ def GenericMain(daemon_name, optionparser, dirs, check_fn, exec_fn):
optionparser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
optionparser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
+ optionparser.add_option("--syslog", dest="syslog",
+ help="Enable logging to syslog (except debug"
+ " messages); one of 'no', 'yes' or 'only' [%s]" %
+ constants.SYSLOG_USAGE,
+ default=constants.SYSLOG_USAGE,
+ choices=["no", "yes", "only"])
if daemon_name in constants.DAEMONS_PORTS:
# for networked daemons we also allow choosing the bind port and address.
# by default we use the port provided by utils.GetDaemonPort, and bind to
if daemon_name in constants.DAEMONS_PORTS:
# for networked daemons we also allow choosing the bind port and address.
# by default we use the port provided by utils.GetDaemonPort, and bind to
@@ -302,9 +321,10 @@ def GenericMain(daemon_name, optionparser, dirs, check_fn, exec_fn):
utils.SetupLogging(logfile=constants.DAEMONS_LOGFILES[daemon_name],
debug=options.debug,
stderr_logging=not options.fork,
utils.SetupLogging(logfile=constants.DAEMONS_LOGFILES[daemon_name],
debug=options.debug,
stderr_logging=not options.fork,
- multithreaded=multithread)
- logging.info("%s daemon startup" % daemon_name)
+ multithreaded=multithread,
+ program=daemon_name,
+ syslog=options.syslog)
+ logging.info("%s daemon startup", daemon_name)
exec_fn(options, args)
finally:
utils.RemovePidFile(daemon_name)
exec_fn(options, args)
finally:
utils.RemovePidFile(daemon_name)
-