Statistics
| Branch: | Tag: | Revision:

root / lib / daemon.py @ c8eded0b

History | View | Annotate | Download (10.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module with helper classes and functions for daemons"""
23

    
24

    
25
import asyncore
import errno
import logging
import os
import sched
import select
import signal
import socket
import sys
import time

from ganeti import utils
from ganeti import constants
from ganeti import errors
38

    
39

    
40
class SchedulerBreakout(Exception):
  """Exception raised to break out of a scheduler.run() invocation.

  Raised by the asyncore-aware delay function so that the main loop
  regains control between scheduled events (e.g. to check for signals).

  """
44

    
45

    
46
def AsyncoreDelayFunction(timeout):
  """Asyncore-compatible scheduler delay function.

  Instead of actually sleeping, this serves asyncore events for (at most)
  the requested interval, then raises SchedulerBreakout.  The breakout
  forces the current scheduler.run() invocation to return, giving the
  main loop a chance to check for signals; the main loop then calls
  scheduler.run() again, which processes any events that have become due.

  This dance is needed because scheduler.run() has no count=... argument
  the way asyncore.loop() does, and the sched module documents raising
  an exception from the delay function as an allowed usage model.

  @type timeout: float
  @param timeout: maximum time to spend processing asyncore events

  """
  asyncore.loop(count=1, timeout=timeout, use_poll=True)
  raise SchedulerBreakout
65

    
66

    
67
class AsyncoreScheduler(sched.scheduler):
  """Event scheduler whose idle time is spent serving asyncore events.

  Plain sched.scheduler configured with L{AsyncoreDelayFunction} as its
  delay function, so waiting between timed events drives asyncore I/O.

  """
  def __init__(self, timefunc):
    """Initializes this class.

    @param timefunc: function returning the current time (e.g. time.time)

    """
    # sched.scheduler is an old-style class under Python 2, so the parent
    # initializer must be invoked directly rather than via super()
    sched.scheduler.__init__(self, timefunc, AsyncoreDelayFunction)
73

    
74

    
75
class AsyncUDPSocket(asyncore.dispatcher):
  """An improved asyncore udp socket.

  Outgoing datagrams are queued via L{enqueue_send} and flushed one per
  handle_write() call; incoming datagrams are dispatched to the
  L{handle_datagram} hook, which subclasses must override.

  """
  def __init__(self):
    """Constructor for AsyncUDPSocket

    """
    asyncore.dispatcher.__init__(self)
    # FIFO of (ip, port, payload) tuples waiting to be sent
    self._out_queue = []
    self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)

  # this method is overriding an asyncore.dispatcher method
  def handle_connect(self):
    # Python thinks that the first udp message from a source qualifies as a
    # "connect" and further ones are part of the same connection. We beg to
    # differ and treat all messages equally.
    pass

  # this method is overriding an asyncore.dispatcher method
  def handle_read(self):
    try:
      try:
        payload, address = self.recvfrom(constants.MAX_UDP_DATA_SIZE)
      except socket.error as err:
        if err.errno == errno.EINTR:
          # we got a signal while trying to read. no need to do anything,
          # handle_read will be called again if there is data on the socket.
          return
        else:
          raise
      ip, port = address
      self.handle_datagram(payload, ip, port)
    except Exception:
      # Catch (non-exiting) exceptions here, log them, but proceed, because
      # even if we failed handling a single request, we still want to
      # continue.  A bare "except:" would also swallow SystemExit and
      # KeyboardInterrupt, which must propagate.
      logging.error("Unexpected exception", exc_info=True)

  def handle_datagram(self, payload, ip, port):
    """Handle an already read udp datagram

    Hook for subclasses; called once per received datagram.

    @param payload: datagram contents
    @param ip: sender IP address
    @param port: sender port

    """
    raise NotImplementedError

  # this method is overriding an asyncore.dispatcher method
  def writable(self):
    # We should check whether we can write to the socket only if we have
    # something scheduled to be written
    return bool(self._out_queue)

  def handle_write(self):
    """Attempts to send the first queued datagram.

    On EINTR the datagram stays queued and will be retried; on success it
    is popped from the queue.  Any other error is logged and the datagram
    dropped so the dispatcher keeps running.

    """
    try:
      if not self._out_queue:
        logging.error("handle_write called with empty output queue")
        return
      (ip, port, payload) = self._out_queue[0]
      try:
        self.sendto(payload, 0, (ip, port))
      except socket.error as err:
        if err.errno == errno.EINTR:
          # we got a signal while trying to write. no need to do anything,
          # handle_write will be called again because we haven't emptied the
          # _out_queue, and we'll try again
          return
        else:
          raise
      self._out_queue.pop(0)
    except Exception:
      # Catch (non-exiting) exceptions here, log them, but proceed, because
      # even if we failed sending a single datagram we still want to
      # continue.  A bare "except:" would also swallow SystemExit and
      # KeyboardInterrupt, which must propagate.
      logging.error("Unexpected exception", exc_info=True)

  def enqueue_send(self, ip, port, payload):
    """Enqueue a datagram to be sent when possible

    @param ip: destination IP address
    @param port: destination port
    @param payload: datagram contents
    @raise errors.UdpDataSizeError: if the payload exceeds the maximum
        UDP datagram size

    """
    if len(payload) > constants.MAX_UDP_DATA_SIZE:
      raise errors.UdpDataSizeError('Packet too big: %s > %s' % (len(payload),
                                    constants.MAX_UDP_DATA_SIZE))
    self._out_queue.append((ip, port, payload))
155

    
156

    
157
class Mainloop(object):
  """Generic mainloop for daemons

  Interleaves asyncore I/O processing, timed scheduler events and signal
  delivery to registered receivers.

  """
  def __init__(self):
    """Constructs a new Mainloop instance.

    @ivar scheduler: A L{sched.scheduler} object, which can be used to register
    timed events

    """
    # receivers registered via RegisterSignal, notified on every signal
    self._signal_wait = []
    self.scheduler = AsyncoreScheduler(time.time)

  @utils.SignalHandled([signal.SIGCHLD])
  @utils.SignalHandled([signal.SIGTERM])
  def Run(self, stop_on_empty=False, signal_handlers=None):
    """Runs the mainloop.

    @type stop_on_empty: bool
    @param stop_on_empty: Whether to stop mainloop once all I/O waiters
                          unregistered
    @type signal_handlers: dict
    @param signal_handlers: signal->L{utils.SignalHandler} passed by decorator

    """
    assert isinstance(signal_handlers, dict) and \
           len(signal_handlers) > 0, \
           "Broken SignalHandled decorator"
    running = True
    # Start actual main loop
    while running:
      # Stop if nothing is listening anymore.  asyncore.socket_map holds all
      # currently registered dispatchers; the previous check used
      # self._io_wait, which is never initialized anywhere and raised
      # AttributeError as soon as stop_on_empty was true.
      if stop_on_empty and not asyncore.socket_map:
        break

      # Run due scheduler events if any; AsyncoreDelayFunction serves
      # asyncore I/O while "waiting" and breaks out via SchedulerBreakout
      # so signals can be checked below
      if not self.scheduler.empty():
        try:
          self.scheduler.run()
        except SchedulerBreakout:
          pass
      else:
        asyncore.loop(count=1, use_poll=True)

      # Check whether a signal was raised
      for sig in signal_handlers:
        handler = signal_handlers[sig]
        if handler.called:
          self._CallSignalWaiters(sig)
          # SIGTERM terminates the main loop
          running = (sig != signal.SIGTERM)
          handler.Clear()

  def _CallSignalWaiters(self, signum):
    """Calls all signal waiters for a certain signal.

    @type signum: int
    @param signum: Signal number

    """
    for owner in self._signal_wait:
      owner.OnSignal(signum)

  def RegisterSignal(self, owner):
    """Registers a receiver for signal notifications

    The receiver must support a "OnSignal(self, signum)" function.

    @type owner: instance
    @param owner: Receiver

    """
    self._signal_wait.append(owner)
229

    
230

    
231
def GenericMain(daemon_name, optionparser, dirs, check_fn, exec_fn):
  """Shared main function for daemons.

  Adds the common command line options, parses them, performs SSL and
  start-condition checks, optionally daemonizes, and finally runs the
  daemon's main function with the pid file held, removing it on exit.

  @type daemon_name: string
  @param daemon_name: daemon name
  @type optionparser: L{optparse.OptionParser}
  @param optionparser: initialized optionparser with daemon-specific options
                       (common -f -d options will be handled by this module)
  @type dirs: list of strings
  @param dirs: list of directories that must exist for this daemon to work
  @type check_fn: function which accepts (options, args)
  @param check_fn: function that checks start conditions and exits if they're
                   not met
  @type exec_fn: function which accepts (options, args)
  @param exec_fn: function that's executed with the daemon's pid file held, and
                  runs the daemon itself.

  """
  optionparser.add_option("-f", "--foreground", dest="fork",
                          help="Don't detach from the current terminal",
                          default=True, action="store_false")
  optionparser.add_option("-d", "--debug", dest="debug",
                          help="Enable some debug messages",
                          default=False, action="store_true")
  if daemon_name in constants.DAEMONS_PORTS:
    # for networked daemons we also allow choosing the bind port and address.
    # by default we use the port provided by utils.GetDaemonPort, and bind to
    # 0.0.0.0 (which is represented by an empty bind address)
    port = utils.GetDaemonPort(daemon_name)
    optionparser.add_option("-p", "--port", dest="port",
                            help="Network port (%s default)." % port,
                            default=port, type="int")
    optionparser.add_option("-b", "--bind", dest="bind_address",
                            help="Bind address",
                            default="", metavar="ADDRESS")

  if daemon_name in constants.DAEMONS_SSL:
    default_cert, default_key = constants.DAEMONS_SSL[daemon_name]
    optionparser.add_option("--no-ssl", dest="ssl",
                            help="Do not secure HTTP protocol with SSL",
                            default=True, action="store_false")
    optionparser.add_option("-K", "--ssl-key", dest="ssl_key",
                            help="SSL key",
                            default=default_key, type="string")
    optionparser.add_option("-C", "--ssl-cert", dest="ssl_cert",
                            help="SSL certificate",
                            default=default_cert, type="string")

  # NOTE(review): this deliberately also sets utils.no_fork, as the original
  # code did; multithreaded daemons additionally need special logging setup
  multithread = utils.no_fork = daemon_name in constants.MULTITHREADED_DAEMONS

  options, args = optionparser.parse_args()

  if hasattr(options, "ssl") and options.ssl:
    if not (options.ssl_cert and options.ssl_key):
      sys.stderr.write("Need key and certificate to use ssl\n")
      sys.exit(constants.EXIT_FAILURE)
    for fname in (options.ssl_cert, options.ssl_key):
      if not os.path.isfile(fname):
        sys.stderr.write("Need ssl file %s to run\n" % fname)
        sys.exit(constants.EXIT_FAILURE)

  if check_fn is not None:
    check_fn(options, args)

  utils.EnsureDirs(dirs)

  if options.fork:
    utils.CloseFDs()
    utils.Daemonize(logfile=constants.DAEMONS_LOGFILES[daemon_name])

  utils.WritePidFile(daemon_name)
  try:
    utils.SetupLogging(logfile=constants.DAEMONS_LOGFILES[daemon_name],
                       debug=options.debug,
                       stderr_logging=not options.fork,
                       multithreaded=multithread)
    # lazy %-formatting: arguments are only interpolated if the record
    # is actually emitted
    logging.info("%s daemon startup", daemon_name)
    exec_fn(options, args)
  finally:
    # always remove the pid file, even if the daemon crashed
    utils.RemovePidFile(daemon_name)
313