# lib/utils.py @ a744b676
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import sys
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import OpenSSL
import datetime
import calendar
import hmac
import collections

from cStringIO import StringIO

try:
  # pylint: disable-msg=F0401
  import ctypes
except ImportError:
  ctypes = None

from ganeti import errors
from ganeti import constants
from ganeti import compat
from ganeti import netutils


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2


class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]


  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")


def _BuildCmdEnvironment(env, reset):
  """Builds the environment for an external program.

  """
  if reset:
    cmd_env = {}
  else:
    cmd_env = os.environ.copy()
    cmd_env["LC_ALL"] = "C"

  if env is not None:
    cmd_env.update(env)

  return cmd_env


def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if isinstance(cmd, basestring):
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)


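# Illustrative sketch of how callers typically consume RunCmd and RunResult;
# the command and path below are arbitrary examples, not used anywhere else.
def _ExampleRunCmdUsage():
  """Minimal RunCmd usage sketch (assumes /bin/ls exists).

  """
  result = RunCmd(["/bin/ls", "-l", "/tmp"])
  if result.failed:
    # fail_reason distinguishes non-zero exit codes from fatal signals
    logging.error("Listing failed: %s; output: %s",
                  result.fail_reason, result.output)
    return None
  return result.stdout

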
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to arrive) and read
        # up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))


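# Illustrative sketch: starting a daemon with a PID file and reading the PID
# back through ReadLockedPidFile (defined further down in this module). The
# command and file paths are arbitrary examples.
def _ExampleStartDaemonUsage():
  """Minimal StartDaemon usage sketch (assumes /bin/sleep and a writable /tmp).

  """
  pid = StartDaemon(["/bin/sleep", "600"], output="/tmp/example-sleep.log",
                    pidfile="/tmp/example-sleep.pid")
  # While the daemon is alive it keeps the PID file locked, so
  # ReadLockedPidFile returns its PID rather than None
  return pid, ReadLockedPidFile("/tmp/example-sleep.pid")

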
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    os.chdir("/")
    os.umask(077)
    os.setsid()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      try:
        # TODO: Atomic replace with another locked file instead of writing into
        # it after creating
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

        # Lock the PID file (and fail if not possible to do so). Any code
        # wanting to send a signal to the daemon should try to lock the PID
        # file before reading it. If acquiring the lock succeeds, the daemon is
        # no longer running and the signal should not be sent.
        LockFile(fd_pidfile)

        os.write(fd_pidfile, "%d\n" % os.getpid())
      except Exception, err:
        raise Exception("Creating and locking PID file failed: %s" % err)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    # Open /dev/null
    fd_devnull = os.open(os.devnull, os.O_RDWR)

    assert not output or (bool(output) ^ (fd_output is not None))

    if fd_output is not None:
      pass
    elif output:
      # Open output file
      try:
        # TODO: Implement flag to set append=yes/no
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
      except EnvironmentError, err:
        raise Exception("Opening output file failed: %s" % err)
    else:
      fd_output = fd_devnull

    # Redirect standard I/O
    os.dup2(fd_devnull, 0)
    os.dup2(fd_output, 1)
    os.dup2(fd_output, 2)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      buf = str(sys.exc_info()[1])

      RetryOnSignal(os.write, errpipe_write, buf)
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  os._exit(1) # pylint: disable-msg=W0212


def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,
                           cwd=cwd)

  child.stdin.close()
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
  out = StringIO()
  err = StringIO()
  fdmap = {
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    }
  for fd in fdmap:
    SetNonblockFlag(fd, True)

  while fdmap:
    pollresult = RetryOnSignal(poller.poll)

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        if not data:
          poller.unregister(fd)
          del fdmap[fd]
          continue
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)
        del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status


def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: int
  @return: the exit status

  """
  fh = open(output, "a")
  try:
    child = subprocess.Popen(cmd, shell=via_shell,
                             stderr=subprocess.STDOUT,
                             stdout=fh,
                             stdin=subprocess.PIPE,
                             close_fds=True, env=env,
                             cwd=cwd)

    child.stdin.close()
    status = child.wait()
  finally:
    fh.close()
  return status


def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)


def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)


def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError, err:
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error), err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise


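# Illustrative sketch: wrapping an interruptible system call so that EINTR
# leads to a retry instead of an exception; the file descriptor is whatever
# the caller already has open.
def _ExampleRetryOnSignalUsage(fd):
  """Minimal RetryOnSignal usage sketch.

  """
  # Equivalent to os.read(fd, 4096), but transparently retried on EINTR
  return RetryOnSignal(os.read, fd, 4096)

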
def RunParts(dir_name, env=None, reset_env=False):
  """Run scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr


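# Illustrative sketch: running hook-like scripts from a directory and acting
# on the (name, status, result) tuples RunParts returns. The directory path is
# an arbitrary example.
def _ExampleRunPartsUsage():
  """Minimal RunParts usage sketch.

  """
  for (relname, status, result) in RunParts("/etc/example-hooks.d"):
    if status == constants.RUNPARTS_ERR:
      logging.error("Could not run %s: %s", relname, result)
    elif status == constants.RUNPARTS_RUN and result.failed:
      logging.error("%s failed: %s", relname, result.fail_reason)
    # constants.RUNPARTS_SKIP entries carry None instead of a RunResult

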
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise


def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case where the
  directory is not empty and therefore can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError, err:
    if err.errno != errno.ENOENT:
      raise


def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise


def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise


def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")


def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)

  fp = compat.sha1_hash()
  while True:
    data = f.read(4096)
    if not data:
      break

    fp.update(data)

  return fp.hexdigest()


def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filenames to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    if cksum:
      ret[filename] = cksum

  return ret


def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype == constants.VTYPE_STRING:
      if not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)


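# Illustrative sketch: coercing user-supplied values in place; the key names
# are arbitrary examples and the VTYPE_* constants are the ones checked for
# above.
def _ExampleForceDictTypeUsage():
  """Minimal ForceDictType usage sketch.

  """
  data = {"memory": "512M", "auto_balance": 1}
  ForceDictType(data, {"memory": constants.VTYPE_SIZE,
                       "auto_balance": constants.VTYPE_BOOL})
  # "512M" has been parsed via ParseUnit into the integer 512 (MiB) and the
  # truthy value has been normalised to True
  return data

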
def _GetProcStatusPath(pid):
  """Returns the path for a PID's proc status file.

  @type pid: int
  @param pid: Process ID
  @rtype: string

  """
  return "/proc/%d/status" % pid


def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    err.RaiseInner()


def _ParseSigsetT(sigset):
  """Parse a rendered sigset_t value.

  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
  function.

  @type sigset: string
  @param sigset: Rendered signal set from /proc/$pid/status
  @rtype: set
  @return: Set of all enabled signal numbers

  """
  result = set()

  signum = 0
  for ch in reversed(sigset):
    chv = int(ch, 16)

    # The following could be done in a loop, but it's easier to read and
    # understand in the unrolled form
    if chv & 1:
      result.add(signum + 1)
    if chv & 2:
      result.add(signum + 2)
    if chv & 4:
      result.add(signum + 3)
    if chv & 8:
      result.add(signum + 4)

    signum += 4

  return result


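# Illustrative sketch: the hex masks from /proc/$pid/status map bit N to
# signal N+1, so a mask with only bit 8 set corresponds to signal 9; the
# value below is an arbitrary example.
def _ExampleParseSigsetTUsage():
  """Minimal _ParseSigsetT usage sketch.

  """
  caught = _ParseSigsetT("0000000000000100")
  assert caught == set([9])
  return caught

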
def _GetProcStatusField(pstatus, field):
  """Retrieves a field from the contents of a proc status file.

  @type pstatus: string
  @param pstatus: Contents of /proc/$pid/status
  @type field: string
  @param field: Name of field whose value should be returned
  @rtype: string

  """
  for line in pstatus.splitlines():
    parts = line.split(":", 1)

    if len(parts) < 2 or parts[0] != field:
      continue

    return parts[1].strip()

  return None


def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @rtype: bool

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
      return False
    raise

  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  return None


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None


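# Illustrative sketch: expanding a shortened node name against a list of fully
# qualified names; the host names are arbitrary examples.
def _ExampleMatchNameComponentUsage():
  """Minimal MatchNameComponent usage sketch.

  """
  names = ["node1.example.com", "node2.example.com"]
  assert MatchNameComponent("node1", names) == "node1.example.com"
  # Keys that match nothing (or more than one entry) yield None
  assert MatchNameComponent("node", names) is None

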
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = (numport >= 0 and numport < (1 << 16))

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name


def ListVolumeGroups():
  """List volume groups and their size.

  @rtype: dict
  @return: dictionary with volume group names as keys and their
      sizes as values

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)


1122
  """Sort a list of strings based on digit and non-digit groupings.
1123

1124
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1125
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1126
  'a11']}.
1127

1128
  The sort algorithm breaks each name in groups of either only-digits
1129
  or no-digits. Only the first eight such groups are considered, and
1130
  after that we just use what's left of the string.
1131

1132
  @type name_list: list
1133
  @param name_list: the names to be sorted
1134
  @rtype: list
1135
  @return: a copy of the name list sorted with our algorithm
1136

1137
  """
1138
  _SORTER_BASE = "(\D+|\d+)"
1139
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1140
                                                  _SORTER_BASE, _SORTER_BASE,
1141
                                                  _SORTER_BASE, _SORTER_BASE,
1142
                                                  _SORTER_BASE, _SORTER_BASE)
1143
  _SORTER_RE = re.compile(_SORTER_FULL)
1144
  _SORTER_NODIGIT = re.compile("^\D*$")
1145
  def _TryInt(val):
1146
    """Attempts to convert a variable to integer."""
1147
    if val is None or _SORTER_NODIGIT.match(val):
1148
      return val
1149
    rval = int(val)
1150
    return rval
1151

    
1152
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1153
             for name in name_list]
1154
  to_sort.sort()
1155
  return [tup[1] for tup in to_sort]
1156

    
1157

    
1158
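# Illustrative sketch: numeric-aware sorting as described in the docstring
# above; the names are arbitrary examples.
def _ExampleNiceSortUsage():
  """Minimal NiceSort usage sketch.

  """
  assert NiceSort(["a1", "a10", "a11", "a2"]) == ["a1", "a2", "a10", "a11"]

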
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv


def IsValidShellParam(word):
  """Verifies whether the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value


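# Illustrative sketch: ParseUnit always returns MiB rounded up to a multiple
# of four, while FormatUnit renders a MiB value with a suffix; the inputs are
# arbitrary examples.
def _ExampleUnitHelpersUsage():
  """Minimal ParseUnit/FormatUnit usage sketch.

  """
  assert ParseUnit("1.5g") == 1536
  assert ParseUnit("7") == 8
  assert FormatUnit(1536, 'h') == "1.5G"

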
def AddAuthorizedKey(file_name, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  f = open(file_name, 'a+')
  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if fields and not fields[0].startswith('#') and ip == fields[0]:
            continue
          out.write(line)

        out.write("%s\t%s" % (ip, hostname))
        if aliases:
          out.write(" %s" % ' '.join(aliases))
        out.write('\n')

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  hi = netutils.HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if len(fields) > 1 and not fields[0].startswith('#'):
            names = fields[1:]
            if hostname in names:
              while hostname in names:
                names.remove(hostname)
              if names:
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
              continue

          out.write(line)

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  hi = netutils.HostInfo(name=hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")


def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    return value
  else:
    return "'%s'" % value.replace("'", "'\\''")


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join([ShellQuote(i) for i in args])


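# Illustrative sketch: quoting untrusted values before handing a command line
# to the shell (as RunCmd does with string commands); the values are arbitrary
# examples.
def _ExampleShellQuoteUsage():
  """Minimal ShellQuote/ShellQuoteArgs usage sketch.

  """
  assert ShellQuote("safe-value") == "safe-value"
  assert ShellQuote("it's") == "'it'\\''s'"
  assert ShellQuoteArgs(["echo", "a b"]) == "echo 'a b'"

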
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  return files


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning a hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: a hex representation of the pseudo-random sequence

  """
  return os.urandom(numbytes).encode('hex')


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    try:
      os.chmod(dir_name, dir_mode)
    except EnvironmentError, err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    return f.read(size)
  finally:
    f.close()


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result


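# Illustrative sketch: the two ways of feeding content to WriteFile, either as
# a string or through a callback receiving the file descriptor; the path, mode
# and contents are arbitrary examples.
def _ExampleWriteFileUsage():
  """Minimal WriteFile usage sketch.

  """
  WriteFile("/tmp/example-status", data="running\n", mode=0644)
  WriteFile("/tmp/example-status", backup=True,
            fn=lambda fd: os.write(fd, "running\n"))

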
def ReadOneLineFile(file_name, strict=False):
1754
  """Return the first non-empty line from a file.
1755

1756
  @type strict: boolean
1757
  @param strict: if True, abort if the file has more than one
1758
      non-empty line
1759

1760
  """
1761
  file_lines = ReadFile(file_name).splitlines()
1762
  full_lines = filter(bool, file_lines)
1763
  if not file_lines or not full_lines:
1764
    raise errors.GenericError("No data in one-liner file %s" % file_name)
1765
  elif strict and len(full_lines) > 1:
1766
    raise errors.GenericError("Too many lines in one-liner file %s" %
1767
                              file_name)
1768
  return full_lines[0]
1769

    
1770

    
1771
def FirstFree(seq, base=0):
1772
  """Returns the first non-existing integer from seq.
1773

1774
  The seq argument should be a sorted list of positive integers. The
1775
  first time the index of an element is smaller than the element
1776
  value, the index will be returned.
1777

1778
  The base argument is used to start at a different offset,
1779
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1780

1781
  Example: C{[0, 1, 3]} will return I{2}.
1782

1783
  @type seq: sequence
1784
  @param seq: the sequence to be analyzed.
1785
  @type base: int
1786
  @param base: use this value as the base index of the sequence
1787
  @rtype: int
1788
  @return: the first non-used index in the sequence
1789

1790
  """
1791
  for idx, elem in enumerate(seq):
1792
    assert elem >= base, "Passed element is higher than base offset"
1793
    if elem > idx + base:
1794
      # idx is not used
1795
      return idx + base
1796
  return None
1797

    
1798

    
1799
def SingleWaitForFdCondition(fdobj, event, timeout):
1800
  """Waits for a condition to occur on the socket.
1801

1802
  Immediately returns at the first interruption.
1803

1804
  @type fdobj: integer or object supporting a fileno() method
1805
  @param fdobj: entity to wait for events on
1806
  @type event: integer
1807
  @param event: ORed condition (see select module)
1808
  @type timeout: float or None
1809
  @param timeout: Timeout in seconds
1810
  @rtype: int or None
1811
  @return: None for timeout, otherwise occurred conditions
1812

1813
  """
1814
  check = (event | select.POLLPRI |
1815
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
1816

    
1817
  if timeout is not None:
1818
    # Poller object expects milliseconds
1819
    timeout *= 1000
1820

    
1821
  poller = select.poll()
1822
  poller.register(fdobj, event)
1823
  try:
1824
    # TODO: If the main thread receives a signal and we have no timeout, we
1825
    # could wait forever. This should check a global "quit" flag or something
1826
    # every so often.
1827
    io_events = poller.poll(timeout)
1828
  except select.error, err:
1829
    if err[0] != errno.EINTR:
1830
      raise
1831
    io_events = []
1832
  if io_events and io_events[0][1] & check:
1833
    return io_events[0][1]
1834
  else:
1835
    return None
1836

    
1837

    
1838
class FdConditionWaiterHelper(object):
1839
  """Retry helper for WaitForFdCondition.
1840

1841
  This class contains the retry and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout has actually
  expired.
1844

1845
  """
1846

    
1847
  def __init__(self, timeout):
1848
    self.timeout = timeout
1849

    
1850
  def Poll(self, fdobj, event):
1851
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1852
    if result is None:
1853
      raise RetryAgain()
1854
    else:
1855
      return result
1856

    
1857
  def UpdateTimeout(self, timeout):
1858
    self.timeout = timeout
1859

    
1860

    
1861
def WaitForFdCondition(fdobj, event, timeout):
1862
  """Waits for a condition to occur on the socket.
1863

1864
  Retries until the timeout is expired, even if interrupted.
1865

1866
  @type fdobj: integer or object supporting a fileno() method
1867
  @param fdobj: entity to wait for events on
1868
  @type event: integer
1869
  @param event: ORed condition (see select module)
1870
  @type timeout: float or None
1871
  @param timeout: Timeout in seconds
1872
  @rtype: int or None
1873
  @return: None for timeout, otherwise occurred conditions
1874

1875
  """
1876
  if timeout is not None:
1877
    retrywaiter = FdConditionWaiterHelper(timeout)
1878
    try:
1879
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1880
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1881
    except RetryTimeout:
1882
      result = None
1883
  else:
1884
    result = None
1885
    while result is None:
1886
      result = SingleWaitForFdCondition(fdobj, event, timeout)
1887
  return result
1888

    
1889

    
1890
def UniqueSequence(seq):
1891
  """Returns a list with unique elements.
1892

1893
  Element order is preserved.
1894

1895
  @type seq: sequence
1896
  @param seq: the sequence with the source elements
1897
  @rtype: list
1898
  @return: list of unique elements from seq
1899

1900
  """
1901
  seen = set()
1902
  return [i for i in seq if i not in seen and not seen.add(i)]
1903

    
1904

    
1905
def NormalizeAndValidateMac(mac):
  """Normalizes and checks whether a MAC address is valid.

  Checks whether the supplied MAC address is formally correct; only the
  colon-separated format is accepted, and the address is normalized to
  all-lowercase.
1910

1911
  @type mac: str
1912
  @param mac: the MAC to be validated
1913
  @rtype: str
1914
  @return: returns the normalized and validated MAC.
1915

1916
  @raise errors.OpPrereqError: If the MAC isn't valid
1917

1918
  """
1919
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
1920
  if not mac_check.match(mac):
1921
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
1922
                               mac, errors.ECODE_INVAL)
1923

    
1924
  return mac.lower()
1925

    
1926

    
1927
def TestDelay(duration):
1928
  """Sleep for a fixed amount of time.
1929

1930
  @type duration: float
1931
  @param duration: the sleep duration
1932
  @rtype: tuple of (boolean, str or None)
  @return: (False, error message) for a negative duration,
      (True, None) otherwise
1934

1935
  """
1936
  if duration < 0:
1937
    return False, "Invalid sleep duration"
1938
  time.sleep(duration)
1939
  return True, None
1940

    
1941

    
1942
def _CloseFDNoErr(fd, retries=5):
1943
  """Close a file descriptor ignoring errors.
1944

1945
  @type fd: int
1946
  @param fd: the file descriptor
1947
  @type retries: int
1948
  @param retries: how many retries to make, in case we get any
1949
      other error than EBADF
1950

1951
  """
1952
  try:
1953
    os.close(fd)
1954
  except OSError, err:
1955
    if err.errno != errno.EBADF:
1956
      if retries > 0:
1957
        _CloseFDNoErr(fd, retries - 1)
1958
    # else either it's closed already or we're out of retries, so we
1959
    # ignore this and go on
1960

    
1961

    
1962
def CloseFDs(noclose_fds=None):
1963
  """Close file descriptors.
1964

1965
  This closes all file descriptors above 2 (i.e. except
1966
  stdin/out/err).
1967

1968
  @type noclose_fds: list or None
1969
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed
1971

1972
  """
1973
  # Default maximum for the number of available file descriptors.
1974
  if 'SC_OPEN_MAX' in os.sysconf_names:
1975
    try:
1976
      MAXFD = os.sysconf('SC_OPEN_MAX')
1977
      if MAXFD < 0:
1978
        MAXFD = 1024
1979
    except OSError:
1980
      MAXFD = 1024
1981
  else:
1982
    MAXFD = 1024
1983
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
1984
  if (maxfd == resource.RLIM_INFINITY):
1985
    maxfd = MAXFD
1986

    
1987
  # Iterate through and close all file descriptors (except the standard ones)
1988
  for fd in range(3, maxfd):
1989
    if noclose_fds and fd in noclose_fds:
1990
      continue
1991
    _CloseFDNoErr(fd)
1992

    
1993

    
1994
def Mlockall(_ctypes=ctypes):
1995
  """Lock current process' virtual address space into RAM.
1996

1997
  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
1998
  see mlock(2) for more details. This function requires ctypes module.
1999

2000
  @raises errors.NoCtypesError: if ctypes module is not found
2001

2002
  """
2003
  if _ctypes is None:
2004
    raise errors.NoCtypesError()
2005

    
2006
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
2007
  if libc is None:
2008
    logging.error("Cannot set memory lock, ctypes cannot load libc")
2009
    return
2010

    
2011
  # Some older version of the ctypes module don't have built-in functionality
2012
  # to access the errno global variable, where function error codes are stored.
2013
  # By declaring this variable as a pointer to an integer we can then access
2014
  # its value correctly, should the mlockall call fail, in order to see what
2015
  # the actual error code was.
2016
  # pylint: disable-msg=W0212
2017
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)
2018

    
2019
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2020
    # pylint: disable-msg=W0212
2021
    logging.error("Cannot set memory lock: %s",
2022
                  os.strerror(libc.__errno_location().contents.value))
2023
    return
2024

    
2025
  logging.debug("Memory lock set")
2026

    
2027

    
2028
def Daemonize(logfile, run_uid, run_gid):
2029
  """Daemonize the current process.
2030

2031
  This detaches the current process from the controlling terminal and
2032
  runs it in the background as a daemon.
2033

2034
  @type logfile: str
2035
  @param logfile: the logfile to which we should redirect stdout/stderr
2036
  @type run_uid: int
2037
  @param run_uid: Run the child under this uid
2038
  @type run_gid: int
2039
  @param run_gid: Run the child under this gid
2040
  @rtype: int
2041
  @return: the value zero
2042

2043
  """
2044
  # pylint: disable-msg=W0212
2045
  # yes, we really want os._exit
2046
  UMASK = 077
2047
  WORKDIR = "/"
2048

    
2049
  # this might fail
2050
  pid = os.fork()
2051
  if (pid == 0):  # The first child.
2052
    os.setsid()
2053
    # FIXME: When removing again and moving to start-stop-daemon privilege drop
2054
    #        make sure to check for config permission and bail out when invoked
2055
    #        with wrong user.
2056
    os.setgid(run_gid)
2057
    os.setuid(run_uid)
2058
    # this might fail
2059
    pid = os.fork() # Fork a second child.
2060
    if (pid == 0):  # The second child.
2061
      os.chdir(WORKDIR)
2062
      os.umask(UMASK)
2063
    else:
2064
      # exit() or _exit()?  See below.
2065
      os._exit(0) # Exit parent (the first child) of the second child.
2066
  else:
2067
    os._exit(0) # Exit parent of the first child.
2068

    
2069
  for fd in range(3):
2070
    _CloseFDNoErr(fd)
2071
  i = os.open("/dev/null", os.O_RDONLY) # stdin
2072
  assert i == 0, "Can't close/reopen stdin"
2073
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2074
  assert i == 1, "Can't close/reopen stdout"
2075
  # Duplicate standard output to standard error.
2076
  os.dup2(1, 2)
2077
  return 0
2078

    
2079

    
2080
def DaemonPidFileName(name):
2081
  """Compute a ganeti pid file absolute path
2082

2083
  @type name: str
2084
  @param name: the daemon name
2085
  @rtype: str
2086
  @return: the full path to the pidfile corresponding to the given
2087
      daemon name
2088

2089
  """
2090
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2091

    
2092

    
2093
def EnsureDaemon(name):
2094
  """Check for and start daemon if not alive.
2095

2096
  """
2097
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2098
  if result.failed:
2099
    logging.error("Can't start daemon '%s', failure %s, output: %s",
2100
                  name, result.fail_reason, result.output)
2101
    return False
2102

    
2103
  return True
2104

    
2105

    
2106
def StopDaemon(name):
  """Stop a daemon.
2108

2109
  """
2110
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2111
  if result.failed:
2112
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
2113
                  name, result.fail_reason, result.output)
2114
    return False
2115

    
2116
  return True
2117

    
2118

    
2119
def WritePidFile(name):
2120
  """Write the current process pidfile.
2121

2122
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2123

2124
  @type name: str
2125
  @param name: the daemon name to use
2126
  @raise errors.GenericError: if the pid file already exists and
2127
      points to a live process
2128

2129
  """
2130
  pid = os.getpid()
2131
  pidfilename = DaemonPidFileName(name)
2132
  if IsProcessAlive(ReadPidFile(pidfilename)):
2133
    raise errors.GenericError("%s contains a live process" % pidfilename)
2134

    
2135
  WriteFile(pidfilename, data="%d\n" % pid)
2136

    
2137

    
2138
def RemovePidFile(name):
2139
  """Remove the current process pidfile.
2140

2141
  Any errors are ignored.
2142

2143
  @type name: str
2144
  @param name: the daemon name used to derive the pidfile name
2145

2146
  """
2147
  pidfilename = DaemonPidFileName(name)
2148
  # TODO: we could check here that the file contains our pid
2149
  try:
2150
    RemoveFile(pidfilename)
2151
  except: # pylint: disable-msg=W0702
2152
    pass
2153

    
2154

    
2155
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2156
                waitpid=False):
2157
  """Kill a process given by its pid.
2158

2159
  @type pid: int
2160
  @param pid: The PID to terminate.
2161
  @type signal_: int
2162
  @param signal_: The signal to send, by default SIGTERM
2163
  @type timeout: int
2164
  @param timeout: The timeout after which, if the process is still alive,
2165
                  a SIGKILL will be sent. If not positive, no such checking
2166
                  will be done
2167
  @type waitpid: boolean
2168
  @param waitpid: If true, we should waitpid on this process after
2169
      sending signals, since it's our own child and otherwise it
2170
      would remain as zombie
2171

2172
  """
2173
  def _helper(pid, signal_, wait):
2174
    """Simple helper to encapsulate the kill/waitpid sequence"""
2175
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2176
      try:
2177
        os.waitpid(pid, os.WNOHANG)
2178
      except OSError:
2179
        pass
2180

    
2181
  if pid <= 0:
2182
    # kill with pid=0 == suicide
2183
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2184

    
2185
  if not IsProcessAlive(pid):
2186
    return
2187

    
2188
  _helper(pid, signal_, waitpid)
2189

    
2190
  if timeout <= 0:
2191
    return
2192

    
2193
  def _CheckProcess():
2194
    if not IsProcessAlive(pid):
2195
      return
2196

    
2197
    try:
2198
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2199
    except OSError:
2200
      raise RetryAgain()
2201

    
2202
    if result_pid > 0:
2203
      return
2204

    
2205
    raise RetryAgain()
2206

    
2207
  try:
2208
    # Wait up to $timeout seconds
2209
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2210
  except RetryTimeout:
2211
    pass
2212

    
2213
  if IsProcessAlive(pid):
2214
    # Kill process if it's still alive
2215
    _helper(pid, signal.SIGKILL, waitpid)
2216

    
2217

    
2218
def FindFile(name, search_path, test=os.path.exists):
2219
  """Look for a filesystem object in a given path.
2220

2221
  This is a generic helper to search for filesystem objects (files,
  dirs) under a given search path.
2223

2224
  @type name: str
2225
  @param name: the name to look for
2226
  @type search_path: sequence
  @param search_path: the directories in which to search
2228
  @type test: callable
2229
  @param test: a function taking one argument that should return True
2230
      if a given object is valid; the default value is
2231
      os.path.exists, causing only existing files to be returned
2232
  @rtype: str or None
2233
  @return: full path to the object if found, None otherwise
2234

2235
  """
2236
  # validate the filename mask
2237
  if constants.EXT_PLUGIN_MASK.match(name) is None:
2238
    logging.critical("Invalid value passed for external script name: '%s'",
2239
                     name)
2240
    return None
2241

    
2242
  for dir_name in search_path:
2243
    # FIXME: investigate switch to PathJoin
2244
    item_name = os.path.sep.join([dir_name, name])
2245
    # check the user test and that we're indeed resolving to the given
2246
    # basename
2247
    if test(item_name) and os.path.basename(item_name) == name:
2248
      return item_name
2249
  return None
2250

    
2251

    
2252
def CheckVolumeGroupSize(vglist, vgname, minsize):
2253
  """Checks if the volume group list is valid.
2254

2255
  The function will check if a given volume group is in the list of
2256
  volume groups and has a minimum size.
2257

2258
  @type vglist: dict
2259
  @param vglist: dictionary of volume group names and their size
2260
  @type vgname: str
2261
  @param vgname: the volume group we should check
2262
  @type minsize: int
2263
  @param minsize: the minimum size we accept
2264
  @rtype: None or str
2265
  @return: None for success, otherwise the error message
2266

2267
  """
2268
  vgsize = vglist.get(vgname, None)
2269
  if vgsize is None:
2270
    return "volume group '%s' missing" % vgname
2271
  elif vgsize < minsize:
2272
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2273
            (vgname, minsize, vgsize))
2274
  return None
2275

    
2276

    
2277
def SplitTime(value):
2278
  """Splits time as floating point number into a tuple.
2279

2280
  @param value: Time in seconds
2281
  @type value: int or float
2282
  @return: Tuple containing (seconds, microseconds)
2283

2284
  """
2285
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2286

    
2287
  assert 0 <= seconds, \
2288
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2289
  assert 0 <= microseconds <= 999999, \
2290
    "Microseconds must be 0-999999, but are %s" % microseconds
2291

    
2292
  return (int(seconds), int(microseconds))
2293

    
2294

    
2295
def MergeTime(timetuple):
2296
  """Merges a tuple into time as a floating point number.
2297

2298
  @param timetuple: Time as tuple, (seconds, microseconds)
2299
  @type timetuple: tuple
2300
  @return: Time as a floating point number expressed in seconds
2301

2302
  """
2303
  (seconds, microseconds) = timetuple
2304

    
2305
  assert 0 <= seconds, \
2306
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2307
  assert 0 <= microseconds <= 999999, \
2308
    "Microseconds must be 0-999999, but are %s" % microseconds
2309

    
2310
  return float(seconds) + (float(microseconds) * 0.000001)
2311

    
2312

    
2313
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fall back to stderr.

  When an error occurs while writing to the logfile, logging.FileHandler tries
  to log to stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. Instead, this class reports such errors to /dev/console.
2319

2320
  """
2321
  def __init__(self, filename, mode="a", encoding=None):
2322
    """Open the specified file and use it as the stream for logging.
2323

2324
    Also open /dev/console to report errors while logging.
2325

2326
    """
2327
    logging.FileHandler.__init__(self, filename, mode, encoding)
2328
    self.console = open(constants.DEV_CONSOLE, "a")
2329

    
2330
  def handleError(self, record): # pylint: disable-msg=C0103
2331
    """Handle errors which occur during an emit() call.
2332

2333
    Try to handle errors with FileHandler method, if it fails write to
2334
    /dev/console.
2335

2336
    """
2337
    try:
2338
      logging.FileHandler.handleError(self, record)
2339
    except Exception: # pylint: disable-msg=W0703
2340
      try:
2341
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
2342
      except Exception: # pylint: disable-msg=W0703
2343
        # Log handler tried everything it could, now just give up
2344
        pass
2345

    
2346

    
2347
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2348
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2349
                 console_logging=False):
2350
  """Configures the logging module.
2351

2352
  @type logfile: str
2353
  @param logfile: the filename to which we should log
2354
  @type debug: integer
2355
  @param debug: if greater than zero, enable debug messages, otherwise
2356
      only those at C{INFO} and above level
2357
  @type stderr_logging: boolean
2358
  @param stderr_logging: whether we should also log to the standard error
2359
  @type program: str
2360
  @param program: the name under which we should log messages
2361
  @type multithreaded: boolean
2362
  @param multithreaded: if True, will add the thread name to the log file
2363
  @type syslog: string
2364
  @param syslog: one of 'no', 'yes', 'only':
2365
      - if no, syslog is not used
2366
      - if yes, syslog is used (in addition to file-logging)
2367
      - if only, only syslog is used
2368
  @type console_logging: boolean
2369
  @param console_logging: if True, will use a FileHandler which falls back to
2370
      the system console if logging fails
2371
  @raise EnvironmentError: if we can't open the log file and
2372
      syslog/stderr logging is disabled
2373

2374
  """
2375
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
2376
  sft = program + "[%(process)d]:"
2377
  if multithreaded:
2378
    fmt += "/%(threadName)s"
2379
    sft += " (%(threadName)s)"
2380
  if debug:
2381
    fmt += " %(module)s:%(lineno)s"
2382
    # no debug info for syslog loggers
2383
  fmt += " %(levelname)s %(message)s"
2384
  # yes, we do want the textual level, as remote syslog will probably
2385
  # lose the error level, and it's easier to grep for it
2386
  sft += " %(levelname)s %(message)s"
2387
  formatter = logging.Formatter(fmt)
2388
  sys_fmt = logging.Formatter(sft)
2389

    
2390
  root_logger = logging.getLogger("")
2391
  root_logger.setLevel(logging.NOTSET)
2392

    
2393
  # Remove all previously setup handlers
2394
  for handler in root_logger.handlers:
2395
    handler.close()
2396
    root_logger.removeHandler(handler)
2397

    
2398
  if stderr_logging:
2399
    stderr_handler = logging.StreamHandler()
2400
    stderr_handler.setFormatter(formatter)
2401
    if debug:
2402
      stderr_handler.setLevel(logging.NOTSET)
2403
    else:
2404
      stderr_handler.setLevel(logging.CRITICAL)
2405
    root_logger.addHandler(stderr_handler)
2406

    
2407
  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2408
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
2409
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2410
                                                    facility)
2411
    syslog_handler.setFormatter(sys_fmt)
2412
    # Never enable debug over syslog
2413
    syslog_handler.setLevel(logging.INFO)
2414
    root_logger.addHandler(syslog_handler)
2415

    
2416
  if syslog != constants.SYSLOG_ONLY:
2417
    # this can fail if the logging directories are not set up or we have
    # a permission problem; in this case it's best to log but ignore the
    # error if stderr_logging is True, and if False we re-raise the
    # exception, since otherwise we could run without any logs at all
2421
    try:
2422
      if console_logging:
2423
        logfile_handler = LogFileHandler(logfile)
2424
      else:
2425
        logfile_handler = logging.FileHandler(logfile)
2426
      logfile_handler.setFormatter(formatter)
2427
      if debug:
2428
        logfile_handler.setLevel(logging.DEBUG)
2429
      else:
2430
        logfile_handler.setLevel(logging.INFO)
2431
      root_logger.addHandler(logfile_handler)
2432
    except EnvironmentError:
2433
      if stderr_logging or syslog == constants.SYSLOG_YES:
2434
        logging.exception("Failed to enable logging to file '%s'", logfile)
2435
      else:
2436
        # we need to re-raise the exception
2437
        raise
2438

    
2439

    
2440
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This prevents things like /dir/../../other/path from being considered valid.
2444

2445
  """
2446
  return os.path.normpath(path) == path and os.path.isabs(path)
2447

    
2448

    
2449
def PathJoin(*args):
2450
  """Safe-join a list of path components.
2451

2452
  Requirements:
2453
      - the first argument must be an absolute path
2454
      - no component in the path must have backtracking (e.g. /../),
2455
        since we check for normalization at the end
2456

2457
  @param args: the path components to be joined
2458
  @raise ValueError: for invalid paths
2459

2460
  """
2461
  # ensure we're having at least one path passed in
2462
  assert args
2463
  # ensure the first component is an absolute and normalized path name
2464
  root = args[0]
2465
  if not IsNormAbsPath(root):
2466
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2467
  result = os.path.join(*args)
2468
  # ensure that the whole path is normalized
2469
  if not IsNormAbsPath(result):
2470
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2471
  # check that we're still under the original prefix
2472
  prefix = os.path.commonprefix([root, result])
2473
  if prefix != root:
2474
    raise ValueError("Error: path joining resulted in different prefix"
2475
                     " (%s != %s)" % (prefix, root))
2476
  return result
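
# Example (illustrative sketch): how PathJoin behaves; the paths below are
# hypothetical.
def _ExamplePathJoinUsage():
  """Shows PathJoin accepting a clean join and rejecting backtracking.

  """
  good = PathJoin("/var/lib/ganeti", "uidpool", "42")  # "/var/lib/ganeti/uidpool/42"
  try:
    PathJoin("/var/lib/ganeti", "../etc/passwd")
  except ValueError:
    # rejected: the joined path is not normalized (and would escape the prefix)
    pass
  return good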
2477

    
2478

    
2479
def TailFile(fname, lines=20):
2480
  """Return the last lines from a file.
2481

2482
  @note: this function will only read and parse the last 4KB of
2483
      the file; if the lines are very long, it could be that less
2484
      than the requested number of lines are returned
2485

2486
  @param fname: the file name
2487
  @type lines: int
2488
  @param lines: the (maximum) number of lines to return
2489

2490
  """
2491
  fd = open(fname, "r")
2492
  try:
2493
    fd.seek(0, 2)
2494
    pos = fd.tell()
2495
    pos = max(0, pos-4096)
2496
    fd.seek(pos, 0)
2497
    raw_data = fd.read()
2498
  finally:
2499
    fd.close()
2500

    
2501
  rows = raw_data.splitlines()
2502
  return rows[-lines:]
2503

    
2504

    
2505
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp as UTC time, including the timezone name.
2507

2508
  """
2509
  return time.strftime("%F %T %Z", time.gmtime(secs))
2510

    
2511

    
2512
def _ParseAsn1Generalizedtime(value):
2513
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2514

2515
  @type value: string
2516
  @param value: ASN1 GENERALIZEDTIME timestamp
2517

2518
  """
2519
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2520
  if m:
2521
    # We have an offset
2522
    asn1time = m.group(1)
2523
    hours = int(m.group(2))
    minutes = int(m.group(3))
    if hours < 0:
      # the sign applies to the whole offset, not only to the hours
      minutes = -minutes
    utcoffset = (60 * hours) + minutes
2526
  else:
2527
    if not value.endswith("Z"):
2528
      raise ValueError("Missing timezone")
2529
    asn1time = value[:-1]
2530
    utcoffset = 0
2531

    
2532
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2533

    
2534
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2535

    
2536
  return calendar.timegm(tt.utctimetuple())
2537

    
2538

    
2539
def GetX509CertValidity(cert):
2540
  """Returns the validity period of the certificate.
2541

2542
  @type cert: OpenSSL.crypto.X509
2543
  @param cert: X509 certificate object
2544

2545
  """
2546
  # The get_notBefore and get_notAfter functions are only supported in
2547
  # pyOpenSSL 0.7 and above.
2548
  try:
2549
    get_notbefore_fn = cert.get_notBefore
2550
  except AttributeError:
2551
    not_before = None
2552
  else:
2553
    not_before_asn1 = get_notbefore_fn()
2554

    
2555
    if not_before_asn1 is None:
2556
      not_before = None
2557
    else:
2558
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2559

    
2560
  try:
2561
    get_notafter_fn = cert.get_notAfter
2562
  except AttributeError:
2563
    not_after = None
2564
  else:
2565
    not_after_asn1 = get_notafter_fn()
2566

    
2567
    if not_after_asn1 is None:
2568
      not_after = None
2569
    else:
2570
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2571

    
2572
  return (not_before, not_after)
2573

    
2574

    
2575
def _VerifyCertificateInner(expired, not_before, not_after, now,
2576
                            warn_days, error_days):
2577
  """Verifies certificate validity.
2578

2579
  @type expired: bool
2580
  @param expired: Whether pyOpenSSL considers the certificate as expired
2581
  @type not_before: number or None
2582
  @param not_before: Unix timestamp before which certificate is not valid
2583
  @type not_after: number or None
2584
  @param not_after: Unix timestamp after which certificate is invalid
2585
  @type now: number
2586
  @param now: Current time as Unix timestamp
2587
  @type warn_days: number or None
2588
  @param warn_days: How many days before expiration a warning should be reported
2589
  @type error_days: number or None
2590
  @param error_days: How many days before expiration an error should be reported
2591

2592
  """
2593
  if expired:
2594
    msg = "Certificate is expired"
2595

    
2596
    if not_before is not None and not_after is not None:
2597
      msg += (" (valid from %s to %s)" %
2598
              (FormatTimestampWithTZ(not_before),
2599
               FormatTimestampWithTZ(not_after)))
2600
    elif not_before is not None:
2601
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2602
    elif not_after is not None:
2603
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2604

    
2605
    return (CERT_ERROR, msg)
2606

    
2607
  elif not_before is not None and not_before > now:
2608
    return (CERT_WARNING,
2609
            "Certificate not yet valid (valid from %s)" %
2610
            FormatTimestampWithTZ(not_before))
2611

    
2612
  elif not_after is not None:
2613
    remaining_days = int((not_after - now) / (24 * 3600))
2614

    
2615
    msg = "Certificate expires in about %d days" % remaining_days
2616

    
2617
    if error_days is not None and remaining_days <= error_days:
2618
      return (CERT_ERROR, msg)
2619

    
2620
    if warn_days is not None and remaining_days <= warn_days:
2621
      return (CERT_WARNING, msg)
2622

    
2623
  return (None, None)
2624

    
2625

    
2626
def VerifyX509Certificate(cert, warn_days, error_days):
2627
  """Verifies a certificate for LUVerifyCluster.
2628

2629
  @type cert: OpenSSL.crypto.X509
2630
  @param cert: X509 certificate object
2631
  @type warn_days: number or None
2632
  @param warn_days: How many days before expiration a warning should be reported
2633
  @type error_days: number or None
2634
  @param error_days: How many days before expiration an error should be reported
2635

2636
  """
2637
  # Depending on the pyOpenSSL version, this can just return (None, None)
2638
  (not_before, not_after) = GetX509CertValidity(cert)
2639

    
2640
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2641
                                 time.time(), warn_days, error_days)
2642

    
2643

    
2644
def SignX509Certificate(cert, key, salt):
2645
  """Sign a X509 certificate.
2646

2647
  An RFC822-like signature header is added in front of the certificate.
2648

2649
  @type cert: OpenSSL.crypto.X509
2650
  @param cert: X509 certificate object
2651
  @type key: string
2652
  @param key: Key for HMAC
2653
  @type salt: string
2654
  @param salt: Salt for HMAC
2655
  @rtype: string
2656
  @return: Serialized and signed certificate in PEM format
2657

2658
  """
2659
  if not VALID_X509_SIGNATURE_SALT.match(salt):
2660
    raise errors.GenericError("Invalid salt: %r" % salt)
2661

    
2662
  # Dumping as PEM here ensures the certificate is in a sane format
2663
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2664

    
2665
  return ("%s: %s/%s\n\n%s" %
2666
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
2667
           Sha1Hmac(key, cert_pem, salt=salt),
2668
           cert_pem))
2669

    
2670

    
2671
def _ExtractX509CertificateSignature(cert_pem):
2672
  """Helper function to extract signature from X509 certificate.
2673

2674
  """
2675
  # Extract signature from original PEM data
2676
  for line in cert_pem.splitlines():
2677
    if line.startswith("---"):
2678
      break
2679

    
2680
    m = X509_SIGNATURE.match(line.strip())
2681
    if m:
2682
      return (m.group("salt"), m.group("sign"))
2683

    
2684
  raise errors.GenericError("X509 certificate signature is missing")
2685

    
2686

    
2687
def LoadSignedX509Certificate(cert_pem, key):
2688
  """Verifies a signed X509 certificate.
2689

2690
  @type cert_pem: string
2691
  @param cert_pem: Certificate in PEM format and with signature header
2692
  @type key: string
2693
  @param key: Key for HMAC
2694
  @rtype: tuple; (OpenSSL.crypto.X509, string)
2695
  @return: X509 certificate object and salt
2696

2697
  """
2698
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2699

    
2700
  # Load certificate
2701
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2702

    
2703
  # Dump again to ensure it's in a sane format
2704
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2705

    
2706
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2707
    raise errors.GenericError("X509 certificate signature is invalid")
2708

    
2709
  return (cert, salt)
2710

    
2711

    
2712
def Sha1Hmac(key, text, salt=None):
2713
  """Calculates the HMAC-SHA1 digest of a text.
2714

2715
  HMAC is defined in RFC2104.
2716

2717
  @type key: string
2718
  @param key: Secret key
2719
  @type text: string
  @param text: Text to compute the digest of
  @type salt: string or None
  @param salt: Salt to be prepended to the text, if any
  @rtype: string
  @return: The hexadecimal HMAC-SHA1 digest
2720

2721
  """
2722
  if salt:
2723
    salted_text = salt + text
2724
  else:
2725
    salted_text = text
2726

    
2727
  return hmac.new(key, salted_text, compat.sha1).hexdigest()
2728

    
2729

    
2730
def VerifySha1Hmac(key, text, digest, salt=None):
2731
  """Verifies the HMAC-SHA1 digest of a text.
2732

2733
  HMAC is defined in RFC2104.
2734

2735
  @type key: string
2736
  @param key: Secret key
2737
  @type text: string
2738
  @type digest: string
2739
  @param digest: Expected digest
2740
  @rtype: bool
2741
  @return: Whether HMAC-SHA1 digest matches
2742

2743
  """
2744
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
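
# Example (illustrative sketch): signing and verifying a message with
# Sha1Hmac/VerifySha1Hmac; the key, salt and message are hypothetical.
def _ExampleSha1HmacUsage():
  """Shows that verification needs the same salt used for signing.

  """
  key = "secret-key"
  digest = Sha1Hmac(key, "some message", salt="abc123")
  # Verifies only when the same salt is prepended to the text again
  return (VerifySha1Hmac(key, "some message", digest, salt="abc123") and
          not VerifySha1Hmac(key, "some message", digest))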
2745

    
2746

    
2747
def SafeEncode(text):
2748
  """Return a 'safe' version of a source string.
2749

2750
  This function mangles the input string and returns a version that
2751
  should be safe to display/encode as ASCII. To this end, we first
2752
  convert it to ASCII using the 'backslashreplace' encoding which
2753
  should get rid of any non-ASCII chars, and then we process it
2754
  through a loop copied from the string repr sources in the python; we
2755
  don't use string_escape anymore since that escape single quotes and
2756
  backslashes too, and that is too much; and that escaping is not
2757
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2758

2759
  @type text: str or unicode
2760
  @param text: input data
2761
  @rtype: str
2762
  @return: a safe version of text
2763

2764
  """
2765
  if isinstance(text, unicode):
2766
    # only if unicode; if str already, we handle it below
2767
    text = text.encode('ascii', 'backslashreplace')
2768
  resu = ""
2769
  for char in text:
2770
    c = ord(char)
2771
    if char  == '\t':
2772
      resu += r'\t'
2773
    elif char == '\n':
2774
      resu += r'\n'
2775
    elif char == '\r':
2776
      resu += r'\r'
2777
    elif c < 32 or c >= 127: # non-printable
2778
      resu += "\\x%02x" % (c & 0xff)
2779
    else:
2780
      resu += char
2781
  return resu
2782

    
2783

    
2784
def UnescapeAndSplit(text, sep=","):
2785
  """Split and unescape a string based on a given separator.
2786

2787
  This function splits a string based on a separator where the
  separator itself can be escaped in order to become part of an
  element. The escaping rules are (assuming comma is the
  separator):
2791
    - a plain , separates the elements
2792
    - a sequence \\\\, (double backslash plus comma) is handled as a
2793
      backslash plus a separator comma
2794
    - a sequence \, (backslash plus comma) is handled as a
2795
      non-separator comma
2796

2797
  @type text: string
2798
  @param text: the string to split
2799
  @type sep: string
2800
  @param sep: the separator
  @rtype: list
2802
  @return: a list of strings
2803

2804
  """
2805
  # we split the list by sep (with no escaping at this stage)
2806
  slist = text.split(sep)
2807
  # next, we revisit the elements and if any of them ended with an odd
2808
  # number of backslashes, then we join it with the next
2809
  rlist = []
2810
  while slist:
2811
    e1 = slist.pop(0)
2812
    if e1.endswith("\\"):
2813
      num_b = len(e1) - len(e1.rstrip("\\"))
2814
      if num_b % 2 == 1:
2815
        e2 = slist.pop(0)
2816
        # here the backslashes remain (all), and will be reduced in
2817
        # the next step
2818
        rlist.append(e1 + sep + e2)
2819
        continue
2820
    rlist.append(e1)
2821
  # finally, replace backslash-something with something
2822
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
2823
  return rlist
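
# Example (illustrative sketch): splitting a comma-separated value where one
# comma is escaped; the input is hypothetical.
def _ExampleUnescapeAndSplitUsage():
  """Shows how an escaped separator stays inside an element.

  """
  # "a,b\,c,d" splits into ["a", "b,c", "d"]: the backslash protects the
  # second comma and is itself removed from the result
  return UnescapeAndSplit("a,b\\,c,d") == ["a", "b,c", "d"]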
2824

    
2825

    
2826
def CommaJoin(names):
2827
  """Nicely join a set of identifiers.
2828

2829
  @param names: set, list or tuple
2830
  @return: a string with the formatted results
2831

2832
  """
2833
  return ", ".join([str(val) for val in names])
2834

    
2835

    
2836
def BytesToMebibyte(value):
2837
  """Converts bytes to mebibytes.
2838

2839
  @type value: int
2840
  @param value: Value in bytes
2841
  @rtype: int
2842
  @return: Value in mebibytes
2843

2844
  """
2845
  return int(round(value / (1024.0 * 1024.0), 0))
2846

    
2847

    
2848
def CalculateDirectorySize(path):
2849
  """Calculates the size of a directory recursively.
2850

2851
  @type path: string
2852
  @param path: Path to directory
2853
  @rtype: int
2854
  @return: Size in mebibytes
2855

2856
  """
2857
  size = 0
2858

    
2859
  for (curpath, _, files) in os.walk(path):
2860
    for filename in files:
2861
      st = os.lstat(PathJoin(curpath, filename))
2862
      size += st.st_size
2863

    
2864
  return BytesToMebibyte(size)
2865

    
2866

    
2867
def GetMounts(filename=constants.PROC_MOUNTS):
2868
  """Returns the list of mounted filesystems.
2869

2870
  This function is Linux-specific.
2871

2872
  @param filename: path of mounts file (/proc/mounts by default)
2873
  @rtype: list of tuples
2874
  @return: list of mount entries (device, mountpoint, fstype, options)
2875

2876
  """
2877
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
2878
  data = []
2879
  mountlines = ReadFile(filename).splitlines()
2880
  for line in mountlines:
2881
    device, mountpoint, fstype, options, _ = line.split(None, 4)
2882
    data.append((device, mountpoint, fstype, options))
2883

    
2884
  return data
2885

    
2886

    
2887
def GetFilesystemStats(path):
2888
  """Returns the total and free space on a filesystem.
2889

2890
  @type path: string
2891
  @param path: Path on filesystem to be examined
2892
  @rtype: tuple
2893
  @return: tuple of (Total space, Free space) in mebibytes
2894

2895
  """
2896
  st = os.statvfs(path)
2897

    
2898
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
2899
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
2900
  return (tsize, fsize)
2901

    
2902

    
2903
def RunInSeparateProcess(fn, *args):
2904
  """Runs a function in a separate process.
2905

2906
  Note: Only boolean return values are supported.
2907

2908
  @type fn: callable
2909
  @param fn: Function to be called
2910
  @rtype: bool
2911
  @return: Function's result
2912

2913
  """
2914
  pid = os.fork()
2915
  if pid == 0:
2916
    # Child process
2917
    try:
2918
      # In case the function uses temporary files
2919
      ResetTempfileModule()
2920

    
2921
      # Call function
2922
      result = int(bool(fn(*args)))
2923
      assert result in (0, 1)
2924
    except: # pylint: disable-msg=W0702
2925
      logging.exception("Error while calling function in separate process")
2926
      # 0 and 1 are reserved for the return value
2927
      result = 33
2928

    
2929
    os._exit(result) # pylint: disable-msg=W0212
2930

    
2931
  # Parent process
2932

    
2933
  # Avoid zombies and check exit code
2934
  (_, status) = os.waitpid(pid, 0)
2935

    
2936
  if os.WIFSIGNALED(status):
2937
    exitcode = None
2938
    signum = os.WTERMSIG(status)
2939
  else:
2940
    exitcode = os.WEXITSTATUS(status)
2941
    signum = None
2942

    
2943
  if not (exitcode in (0, 1) and signum is None):
2944
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
2945
                              (exitcode, signum))
2946

    
2947
  return bool(exitcode)
2948

    
2949

    
2950
def IgnoreProcessNotFound(fn, *args, **kwargs):
2951
  """Ignores ESRCH when calling a process-related function.
2952

2953
  ESRCH is raised when a process is not found.
2954

2955
  @rtype: bool
2956
  @return: Whether process was found
2957

2958
  """
2959
  try:
2960
    fn(*args, **kwargs)
2961
  except EnvironmentError, err:
2962
    # Ignore ESRCH
2963
    if err.errno == errno.ESRCH:
2964
      return False
2965
    raise
2966

    
2967
  return True
2968

    
2969

    
2970
def IgnoreSignals(fn, *args, **kwargs):
2971
  """Tries to call a function ignoring failures due to EINTR.
2972

2973
  """
2974
  try:
2975
    return fn(*args, **kwargs)
2976
  except EnvironmentError, err:
2977
    if err.errno == errno.EINTR:
2978
      return None
2979
    else:
2980
      raise
2981
  except (select.error, socket.error), err:
2982
    # In python 2.6 and above select.error is an IOError, so it's handled
2983
    # above, in 2.5 and below it's not, and it's handled here.
2984
    if err.args and err.args[0] == errno.EINTR:
2985
      return None
2986
    else:
2987
      raise
2988

    
2989

    
2990
def LockFile(fd):
2991
  """Locks a file using POSIX locks.
2992

2993
  @type fd: int
2994
  @param fd: the file descriptor we need to lock
2995

2996
  """
2997
  try:
2998
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
2999
  except IOError, err:
3000
    if err.errno == errno.EAGAIN:
3001
      raise errors.LockError("File already locked")
3002
    raise
3003

    
3004

    
3005
def FormatTime(val):
3006
  """Formats a time value.
3007

3008
  @type val: float or None
3009
  @param val: the timestamp as returned by time.time()
3010
  @return: a string value or N/A if we don't have a valid timestamp
3011

3012
  """
3013
  if val is None or not isinstance(val, (int, float)):
3014
    return "N/A"
3015
  # these two format codes work on Linux, but they are not guaranteed on
  # all platforms
3017
  return time.strftime("%F %T", time.localtime(val))
3018

    
3019

    
3020
def FormatSeconds(secs):
3021
  """Formats seconds for easier reading.
3022

3023
  @type secs: number
3024
  @param secs: Number of seconds
3025
  @rtype: string
3026
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")
3027

3028
  """
3029
  parts = []
3030

    
3031
  secs = round(secs, 0)
3032

    
3033
  if secs > 0:
3034
    # Negative values would be a bit tricky
3035
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
3036
      (complete, secs) = divmod(secs, one)
3037
      if complete or parts:
3038
        parts.append("%d%s" % (complete, unit))
3039

    
3040
  parts.append("%ds" % secs)
3041

    
3042
  return " ".join(parts)
3043

    
3044

    
3045
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3046
  """Reads the watcher pause file.
3047

3048
  @type filename: string
3049
  @param filename: Path to watcher pause file
3050
  @type now: None, float or int
3051
  @param now: Current time as Unix timestamp
3052
  @type remove_after: int
3053
  @param remove_after: Remove watcher pause file after specified amount of
3054
    seconds past the pause end time
3055

3056
  """
3057
  if now is None:
3058
    now = time.time()
3059

    
3060
  try:
3061
    value = ReadFile(filename)
3062
  except IOError, err:
3063
    if err.errno != errno.ENOENT:
3064
      raise
3065
    value = None
3066

    
3067
  if value is not None:
3068
    try:
3069
      value = int(value)
3070
    except ValueError:
3071
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3072
                       " removing it"), filename)
3073
      RemoveFile(filename)
3074
      value = None
3075

    
3076
    if value is not None:
3077
      # Remove file if it's outdated
3078
      if now > (value + remove_after):
3079
        RemoveFile(filename)
3080
        value = None
3081

    
3082
      elif now > value:
3083
        value = None
3084

    
3085
  return value
3086

    
3087

    
3088
class RetryTimeout(Exception):
3089
  """Retry loop timed out.
3090

3091
  Any arguments passed by the retried function to RetryAgain will be
  preserved in RetryTimeout if it is raised. If such an argument was an
  exception, the RaiseInner helper method will reraise it.
3094

3095
  """
3096
  def RaiseInner(self):
3097
    if self.args and isinstance(self.args[0], Exception):
3098
      raise self.args[0]
3099
    else:
3100
      raise RetryTimeout(*self.args)
3101

    
3102

    
3103
class RetryAgain(Exception):
3104
  """Retry again.
3105

3106
  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
3107
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
3108
  of the resulting RetryTimeout exception can be used to reraise it.
3109

3110
  """
3111

    
3112

    
3113
class _RetryDelayCalculator(object):
3114
  """Calculator for increasing delays.
3115

3116
  """
3117
  __slots__ = [
3118
    "_factor",
3119
    "_limit",
3120
    "_next",
3121
    "_start",
3122
    ]
3123

    
3124
  def __init__(self, start, factor, limit):
3125
    """Initializes this class.
3126

3127
    @type start: float
3128
    @param start: Initial delay
3129
    @type factor: float
3130
    @param factor: Factor for delay increase
3131
    @type limit: float or None
3132
    @param limit: Upper limit for delay or None for no limit
3133

3134
    """
3135
    assert start > 0.0
3136
    assert factor >= 1.0
3137
    assert limit is None or limit >= 0.0
3138

    
3139
    self._start = start
3140
    self._factor = factor
3141
    self._limit = limit
3142

    
3143
    self._next = start
3144

    
3145
  def __call__(self):
3146
    """Returns current delay and calculates the next one.
3147

3148
    """
3149
    current = self._next
3150

    
3151
    # Update for next run
3152
    if self._limit is None or self._next < self._limit:
3153
      self._next = min(self._limit, self._next * self._factor)
3154

    
3155
    return current
3156

    
3157

    
3158
#: Special delay to specify whole remaining timeout
3159
RETRY_REMAINING_TIME = object()
3160

    
3161

    
3162
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3163
          _time_fn=time.time):
3164
  """Call a function repeatedly until it succeeds.
3165

3166
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3167
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3168
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
3169

3170
  C{delay} can be one of the following:
3171
    - callable returning the delay length as a float
3172
    - Tuple of (start, factor, limit)
3173
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3174
      useful when overriding L{wait_fn} to wait for an external event)
3175
    - A static delay as a number (int or float)
3176

3177
  @type fn: callable
3178
  @param fn: Function to be called
3179
  @param delay: Either a callable (returning the delay), a tuple of (start,
3180
                factor, limit) (see L{_RetryDelayCalculator}),
3181
                L{RETRY_REMAINING_TIME} or a number (int or float)
3182
  @type timeout: float
3183
  @param timeout: Total timeout
3184
  @type wait_fn: callable
3185
  @param wait_fn: Waiting function
3186
  @return: Return value of function
3187

3188
  """
3189
  assert callable(fn)
3190
  assert callable(wait_fn)
3191
  assert callable(_time_fn)
3192

    
3193
  if args is None:
3194
    args = []
3195

    
3196
  end_time = _time_fn() + timeout
3197

    
3198
  if callable(delay):
3199
    # External function to calculate delay
3200
    calc_delay = delay
3201

    
3202
  elif isinstance(delay, (tuple, list)):
3203
    # Increasing delay with optional upper boundary
3204
    (start, factor, limit) = delay
3205
    calc_delay = _RetryDelayCalculator(start, factor, limit)
3206

    
3207
  elif delay is RETRY_REMAINING_TIME:
3208
    # Always use the remaining time
3209
    calc_delay = None
3210

    
3211
  else:
3212
    # Static delay
3213
    calc_delay = lambda: delay
3214

    
3215
  assert calc_delay is None or callable(calc_delay)
3216

    
3217
  while True:
3218
    retry_args = []
3219
    try:
3220
      # pylint: disable-msg=W0142
3221
      return fn(*args)
3222
    except RetryAgain, err:
3223
      retry_args = err.args
3224
    except RetryTimeout:
3225
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
3226
                                   " handle RetryTimeout")
3227

    
3228
    remaining_time = end_time - _time_fn()
3229

    
3230
    if remaining_time < 0.0:
3231
      # pylint: disable-msg=W0142
3232
      raise RetryTimeout(*retry_args)
3233

    
3234
    assert remaining_time >= 0.0
3235

    
3236
    if calc_delay is None:
3237
      wait_fn(remaining_time)
3238
    else:
3239
      current_delay = calc_delay()
3240
      if current_delay > 0.0:
3241
        wait_fn(current_delay)
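
# Example (illustrative sketch): polling an unreliable check with Retry and
# an increasing delay; check_fn and _CheckReady are hypothetical names.
def _ExampleRetryUsage(check_fn):
  """Polls check_fn for up to 30 seconds.

  The delay starts at 0.1s, grows by a factor of 1.5 and is capped at 2.0s.
  Raises L{RetryTimeout} if check_fn never returns a true value in time.

  """
  def _CheckReady():
    if not check_fn():
      raise RetryAgain()
    return True
  return Retry(_CheckReady, (0.1, 1.5, 2.0), 30.0)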
3242

    
3243

    
3244
def GetClosedTempfile(*args, **kwargs):
3245
  """Creates a temporary file and returns its path.
3246

3247
  """
3248
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
3249
  _CloseFDNoErr(fd)
3250
  return path
3251

    
3252

    
3253
def GenerateSelfSignedX509Cert(common_name, validity):
3254
  """Generates a self-signed X509 certificate.
3255

3256
  @type common_name: string
3257
  @param common_name: commonName value
3258
  @type validity: int
3259
  @param validity: Validity for certificate in seconds
3260

3261
  """
3262
  # Create private and public key
3263
  key = OpenSSL.crypto.PKey()
3264
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
3265

    
3266
  # Create self-signed certificate
3267
  cert = OpenSSL.crypto.X509()
3268
  if common_name:
3269
    cert.get_subject().CN = common_name
3270
  cert.set_serial_number(1)
3271
  cert.gmtime_adj_notBefore(0)
3272
  cert.gmtime_adj_notAfter(validity)
3273
  cert.set_issuer(cert.get_subject())
3274
  cert.set_pubkey(key)
3275
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
3276

    
3277
  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
3278
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
3279

    
3280
  return (key_pem, cert_pem)
3281

    
3282

    
3283
def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
3284
  """Legacy function to generate self-signed X509 certificate.
3285

3286
  """
3287
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
3288
                                                   validity * 24 * 60 * 60)
3289

    
3290
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3291

    
3292

    
3293
class FileLock(object):
3294
  """Utility class for file locks.
3295

3296
  """
3297
  def __init__(self, fd, filename):
3298
    """Constructor for FileLock.
3299

3300
    @type fd: file
3301
    @param fd: File object
3302
    @type filename: str
3303
    @param filename: Path of the file opened at I{fd}
3304

3305
    """
3306
    self.fd = fd
3307
    self.filename = filename
3308

    
3309
  @classmethod
3310
  def Open(cls, filename):
3311
    """Creates and opens a file to be used as a file-based lock.
3312

3313
    @type filename: string
3314
    @param filename: path to the file to be locked
3315

3316
    """
3317
    # Using "os.open" is necessary to allow both opening an existing file
    # read/write and creating it if it doesn't exist. Vanilla "open" would
    # either truncate an existing file or fail on a missing one, depending
    # on the mode used.
3320
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
3321
               filename)
3322

    
3323
  def __del__(self):
3324
    self.Close()
3325

    
3326
  def Close(self):
3327
    """Close the file and release the lock.
3328

3329
    """
3330
    if hasattr(self, "fd") and self.fd:
3331
      self.fd.close()
3332
      self.fd = None
3333

    
3334
  def _flock(self, flag, blocking, timeout, errmsg):
3335
    """Wrapper for fcntl.flock.
3336

3337
    @type flag: int
3338
    @param flag: operation flag
3339
    @type blocking: bool
3340
    @param blocking: whether the operation should be done in blocking mode.
3341
    @type timeout: None or float
3342
    @param timeout: for how long the operation should be retried (implies
3343
                    non-blocking mode).
3344
    @type errmsg: string
3345
    @param errmsg: error message in case operation fails.
3346

3347
    """
3348
    assert self.fd, "Lock was closed"
3349
    assert timeout is None or timeout >= 0, \
3350
      "If specified, timeout must be positive"
3351
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
3352

    
3353
    # When a timeout is used, LOCK_NB must always be set
3354
    if not (timeout is None and blocking):
3355
      flag |= fcntl.LOCK_NB
3356

    
3357
    if timeout is None:
3358
      self._Lock(self.fd, flag, timeout)
3359
    else:
3360
      try:
3361
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
3362
              args=(self.fd, flag, timeout))
3363
      except RetryTimeout:
3364
        raise errors.LockError(errmsg)
3365

    
3366
  @staticmethod
3367
  def _Lock(fd, flag, timeout):
3368
    try:
3369
      fcntl.flock(fd, flag)
3370
    except IOError, err:
3371
      if timeout is not None and err.errno == errno.EAGAIN:
3372
        raise RetryAgain()
3373

    
3374
      logging.exception("fcntl.flock failed")
3375
      raise
3376

    
3377
  def Exclusive(self, blocking=False, timeout=None):
3378
    """Locks the file in exclusive mode.
3379

3380
    @type blocking: boolean
3381
    @param blocking: whether to block and wait until we
3382
        can lock the file or return immediately
3383
    @type timeout: int or None
3384
    @param timeout: if not None, the duration to wait for the lock
3385
        (in blocking mode)
3386

3387
    """
3388
    self._flock(fcntl.LOCK_EX, blocking, timeout,
3389
                "Failed to lock %s in exclusive mode" % self.filename)
3390

    
3391
  def Shared(self, blocking=False, timeout=None):
3392
    """Locks the file in shared mode.
3393

3394
    @type blocking: boolean
3395
    @param blocking: whether to block and wait until we
3396
        can lock the file or return immediately
3397
    @type timeout: int or None
3398
    @param timeout: if not None, the duration to wait for the lock
3399
        (in blocking mode)
3400

3401
    """
3402
    self._flock(fcntl.LOCK_SH, blocking, timeout,
3403
                "Failed to lock %s in shared mode" % self.filename)
3404

    
3405
  def Unlock(self, blocking=True, timeout=None):
3406
    """Unlocks the file.
3407

3408
    According to C{flock(2)}, unlocking can also be a nonblocking
3409
    operation::
3410

3411
      To make a non-blocking request, include LOCK_NB with any of the above
3412
      operations.
3413

3414
    @type blocking: boolean
3415
    @param blocking: whether to block and wait until we
3416
        can lock the file or return immediately
3417
    @type timeout: int or None
3418
    @param timeout: if not None, the duration to wait for the lock
3419
        (in blocking mode)
3420

3421
    """
3422
    self._flock(fcntl.LOCK_UN, blocking, timeout,
3423
                "Failed to unlock %s" % self.filename)
3424
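
# Example (illustrative sketch): serializing access to a resource with a
# file-based lock; the lock file path is hypothetical.
def _ExampleFileLockUsage():
  """Acquires an exclusive lock, waiting up to 10 seconds for it.

  Raises L{errors.LockError} if the lock cannot be acquired in time.

  """
  lock = FileLock.Open("/var/lock/ganeti-example.lock")
  try:
    lock.Exclusive(blocking=True, timeout=10)
    # ... critical section ...
    lock.Unlock()
  finally:
    lock.Close()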

    
3425

    
3426
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)


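# Illustrative usage sketch (never called): LineSplitter invokes the callback
# once per complete line, while close() also delivers a trailing partial
# line.
def _ExampleLineSplitterUsage():
  received = []
  splitter = LineSplitter(received.append)
  splitter.write("first line\nsecond ")
  splitter.write("line\npartial")
  # Nothing has been delivered yet; lines are passed to the callback only on
  # flush() or close()
  splitter.close()
  assert received == ["first line", "second line", "partial"]

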
def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap


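# Illustrative usage sketch (never called): a function decorated with
# SignalHandled has handlers installed for the given signals while it runs
# and receives the active SignalHandler objects through the
# 'signal_handlers' keyword argument.
@SignalHandled([signal.SIGTERM])
def _ExampleSignalHandledUsage(signal_handlers=None):
  handler = signal_handlers[signal.SIGTERM]
  while not handler.called:
    time.sleep(0.1) # a real function would do useful work here

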
class SignalWakeupFd(object):
  """Wrapper around C{signal.set_wakeup_fd}.

  The write end of a pipe is registered as the wakeup file descriptor (where
  supported) and the read end is exposed through the C{fileno} and C{read}
  attributes, so callers can wait for signal notifications in a C{select} or
  C{poll} loop.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these calls succeed, the file descriptors will be closed
    # automatically. Buffer size 0 is important, otherwise .read() with a
    # specified length might buffer data and the file descriptors won't be
    # marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()


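# Illustrative usage sketch (never called): the read end of a SignalWakeupFd
# can be waited on with poll() or select(); Notify() (or, where
# signal.set_wakeup_fd is available, an incoming signal) makes it readable.
def _ExampleSignalWakeupFdUsage():
  wakeup = SignalWakeupFd()
  try:
    poller = select.poll()
    poller.register(wakeup.fileno(), select.POLLIN)
    wakeup.Notify()
    if poller.poll(1000): # wait for up to one second
      wakeup.read(1) # drain the notification byte
  finally:
    wakeup.Reset()

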
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when destroyed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @type wakeup: L{SignalWakeupFd} or None
    @param wakeup: if not None, notified via its C{Notify} method whenever
        one of the signals is received

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)


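# Illustrative usage sketch (never called): installing a SignalHandler
# directly, polling the 'called' flag and restoring the previous handler
# afterwards.
def _ExampleSignalHandlerUsage():
  handler = SignalHandler([signal.SIGUSR1])
  try:
    while not handler.called:
      time.sleep(0.1) # a real caller would do useful work here
    handler.Clear() # allows detecting further SIGUSR1 deliveries
  finally:
    handler.Reset()

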
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
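

# Illustrative usage sketch (never called; the field names are made up):
# each item in a FieldSet is treated as a full-match regular expression, so
# both literal names and patterns with groups can be used.
def _ExampleFieldSetUsage():
  fields = FieldSet("name", "id", r"disk\.size/(\d+)")
  assert fields.Matches("name")
  match = fields.Matches("disk.size/2")
  assert match is not None and match.group(1) == "2"
  assert fields.NonMatching(["name", "unknown"]) == ["unknown"]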