lib/utils/__init__.py @ 80a0546b


#
#

# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""

# Allow wildcard import in pylint: disable=W0401

import os
import re
import errno
import pwd
import time
import itertools
import select
import logging
import signal

from ganeti import errors
from ganeti import constants
from ganeti import compat
from ganeti import pathutils

from ganeti.utils.algo import *
from ganeti.utils.filelock import *
from ganeti.utils.hash import *
from ganeti.utils.io import *
from ganeti.utils.log import *
from ganeti.utils.mlock import *
from ganeti.utils.nodesetup import *
from ganeti.utils.process import *
from ganeti.utils.retry import *
from ganeti.utils.text import *
from ganeti.utils.wrapper import *
from ganeti.utils.x509 import *


_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

UUID_RE = re.compile(constants.UUID_REGEX)


def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown parameter '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ""
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
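
# Illustrative usage sketch (assumes constants.VALUE_TRUE is the string
# "true", as used by the boolean branch above):
#
#   params = {"auto_balance": "true", "port": "1811"}
#   ForceDictType(params, {"auto_balance": constants.VTYPE_BOOL,
#                          "port": constants.VTYPE_INT})
#   # params is now {"auto_balance": True, "port": 1811}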


def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = (numport >= 0 and numport < (1 << 16))

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name
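
# Illustrative usage sketch: numeric values are checked as port numbers,
# strings against _VALID_SERVICE_NAME_RE; valid names are returned unchanged.
#
#   ValidateServiceName("ganeti-noded")  # -> "ganeti-noded"
#   ValidateServiceName(1811)            # -> 1811
#   ValidateServiceName(70000)           # raises errors.OpPrereqError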


def _ComputeMissingKeys(key_path, options, defaults):
  """Helper function to compute which keys are invalid.

  @param key_path: The current key path (if any)
  @param options: The user provided options
  @param defaults: The default dictionary
  @return: A list of invalid keys

  """
  defaults_keys = frozenset(defaults.keys())
  invalid = []
  for key, value in options.items():
    if key_path:
      new_path = "%s/%s" % (key_path, key)
    else:
      new_path = key

    if key not in defaults_keys:
      invalid.append(new_path)
    elif isinstance(value, dict):
      invalid.extend(_ComputeMissingKeys(new_path, value, defaults[key]))

  return invalid


def VerifyDictOptions(options, defaults):
  """Verify that a dict has only keys which are also in the defaults dict.

  @param options: The user provided options
  @param defaults: The default dictionary
  @raise errors.OpPrereqError: If one of the keys is not supported

  """
  invalid = _ComputeMissingKeys("", options, defaults)

  if invalid:
    raise errors.OpPrereqError("Provided option keys not supported: %s" %
                               CommaJoin(invalid), errors.ECODE_INVAL)


def ListVolumeGroups():
  """List volume groups and their size.

  @rtype: dict
  @return:
       Dictionary with volume group names as keys and their
       size as values

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv
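
# Illustrative usage sketch:
#
#   TryConvert(int, "42")   # -> 42
#   TryConvert(int, "n/a")  # -> "n/a" (the ValueError is swallowed)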


def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs

  """
  if not cpu_mask:
    return []
  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      higher = int(boundaries[-1])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list
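
# Illustrative usage sketch:
#
#   ParseCpuMask("0-2,5")  # -> [0, 1, 2, 5]
#   ParseCpuMask("")       # -> []
#   ParseCpuMask("3-1")    # raises errors.ParseError (lower > higher)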


def ParseMultiCpuMask(cpu_mask):
  """Parse a multiple CPU mask definition and return the list of CPU IDs.

  CPU mask format: colon-separated list of comma-separated list of CPU IDs
  or dash-separated ID ranges, with optional "all" as CPU value
  Example: "0-2,5:all:1,5,6:2" -> [ [ 0,1,2,5 ], [ -1 ], [ 1, 5, 6 ], [ 2 ] ]

  @type cpu_mask: str
  @param cpu_mask: multiple CPU mask definition
  @rtype: list of lists of int
  @return: list of lists of CPU IDs

  """
  if not cpu_mask:
    return []
  cpu_list = []
  for range_def in cpu_mask.split(constants.CPU_PINNING_SEP):
    if range_def == constants.CPU_PINNING_ALL:
      cpu_list.append([constants.CPU_PINNING_ALL_VAL, ])
    else:
      # Uniquify and sort the list before adding
      cpu_list.append(sorted(set(ParseCpuMask(range_def))))

  return cpu_list


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  C{default} argument is returned, which defaults to C{None}.

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    return default
  return result.pw_dir


def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used
      return idx + base
  return None


def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None


class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retry and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout has actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout


def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout has expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([pathutils.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def StopDaemon(name):
  """Stop daemon.

  """
  result = RunCmd([pathutils.DAEMON_UTIL, "stop", name])
  if result.failed:
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
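
# Illustrative usage sketch, typically fed from ListVolumeGroups():
#
#   vgs = {"xenvg": 20480}
#   CheckVolumeGroupSize(vgs, "xenvg", 10240)    # -> None (check passed)
#   CheckVolumeGroupSize(vgs, "xenvg", 40960)    # -> "volume group 'xenvg' too small ..."
#   CheckVolumeGroupSize(vgs, "othervg", 10240)  # -> "volume group 'othervg' missing"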


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)
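
# Illustrative usage sketch: SplitTime and MergeTime are inverses for
# timestamps with microsecond resolution.
#
#   SplitTime(123.25)         # -> (123, 250000)
#   MergeTime((123, 250000))  # -> 123.25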


def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  if name in data:
    return (data[name], [])

  for key, value in data.items():
    # Regex objects
    if hasattr(key, "match"):
      m = key.match(name)
      if m:
        return (value, list(m.groups()))

  return None
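
# Illustrative usage sketch with a hypothetical data dictionary:
#
#   data = {"static": 1, re.compile(r"^node(\d+)$"): 2}
#   FindMatch(data, "static")   # -> (1, [])
#   FindMatch(data, "node12")   # -> (2, ["12"])
#   FindMatch(data, "unknown")  # -> None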


def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  data = []
  mountlines = ReadFile(filename).splitlines()
  for line in mountlines:
    device, mountpoint, fstype, options, _ = line.split(None, 4)
    data.append((device, mountpoint, fstype, options))

  return data


def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert "signal_handlers" not in kwargs or \
             kwargs["signal_handlers"] is None or \
             isinstance(kwargs["signal_handlers"], dict), \
             "Wrong signal_handlers parameter in original function call"
      if "signal_handlers" in kwargs and kwargs["signal_handlers"] is not None:
        signal_handlers = kwargs["signal_handlers"]
      else:
        signal_handlers = {}
        kwargs["signal_handlers"] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
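
# Illustrative usage sketch: the decorated function receives the installed
# handlers through the 'signal_handlers' keyword argument (_MainLoopBody is a
# hypothetical work function):
#
#   @SignalHandled([signal.SIGTERM])
#   def Serve(signal_handlers=None):
#     while not signal_handlers[signal.SIGTERM].called:
#       _MainLoopBody()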


def TimeoutExpired(epoch, timeout, _time_fn=time.time):
  """Checks whether a timeout has expired.

  """
  return _time_fn() > (epoch + timeout)


class SignalWakeupFd(object):
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported

    def _SetWakeupFd(self, _): # pylint: disable=R0201
      return -1
  else:

    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when the object is
  deleted or when L{Reset} is called. You can either pass your own
  handler function in or query the L{called} attribute to detect
  whether the signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
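
# Illustrative usage sketch:
#
#   fields = FieldSet("name", "snodes", "disk\.size/([0-9]+)")
#   fields.Matches("disk.size/2")            # -> match object (group "2")
#   fields.NonMatching(["name", "unknown"])  # -> ["unknown"]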