# lib/utils.py @ c7406bbe
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52
import struct
53
import IN
54

    
55
from cStringIO import StringIO
56

    
57
try:
58
  from hashlib import sha1
59
except ImportError:
60
  import sha as sha1
61

    
62
try:
63
  import ctypes
64
except ImportError:
65
  ctypes = None
66

    
67
from ganeti import errors
68
from ganeti import constants
69

    
70

    
71
_locksheld = []
72
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
73

    
74
debug_locks = False
75

    
76
#: when set to True, L{RunCmd} is disabled
77
no_fork = False
78

    
79
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
80

    
81
HEX_CHAR_RE = r"[a-zA-Z0-9]"
82
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
83
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
84
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
85
                             HEX_CHAR_RE, HEX_CHAR_RE),
86
                            re.S | re.I)
87

    
88
# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
89
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
90
#
91
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
92
# pid_t to "int".
93
#
94
# IEEE Std 1003.1-2008:
95
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
96
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
97
_STRUCT_UCRED = "iII"
98
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)
99

    
100
# Certificate verification results
101
(CERT_WARNING,
102
 CERT_ERROR) = range(1, 3)
103

    
104
# Flags for mlockall() (from bits/mman.h)
105
_MCL_CURRENT = 1
106
_MCL_FUTURE = 2
107

    
108

    
109
class RunResult(object):
110
  """Holds the result of running external programs.
111

112
  @type exit_code: int
113
  @ivar exit_code: the exit code of the program, or None (if the program
114
      didn't exit())
115
  @type signal: int or None
116
  @ivar signal: the signal that caused the program to finish, or None
117
      (if the program wasn't terminated by a signal)
118
  @type stdout: str
119
  @ivar stdout: the standard output of the program
120
  @type stderr: str
121
  @ivar stderr: the standard error of the program
122
  @type failed: boolean
123
  @ivar failed: True in case the program was
124
      terminated by a signal or exited with a non-zero exit code
125
  @ivar fail_reason: a string detailing the termination reason
126

127
  """
128
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
129
               "failed", "fail_reason", "cmd"]
130

    
131

    
132
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
133
    self.cmd = cmd
134
    self.exit_code = exit_code
135
    self.signal = signal_
136
    self.stdout = stdout
137
    self.stderr = stderr
138
    self.failed = (signal_ is not None or exit_code != 0)
139

    
140
    if self.signal is not None:
141
      self.fail_reason = "terminated by signal %s" % self.signal
142
    elif self.exit_code is not None:
143
      self.fail_reason = "exited with exit code %s" % self.exit_code
144
    else:
145
      self.fail_reason = "unable to determine termination reason"
146

    
147
    if self.failed:
148
      logging.debug("Command '%s' failed (%s); output: %s",
149
                    self.cmd, self.fail_reason, self.output)
150

    
151
  def _GetOutput(self):
152
    """Returns the combined stdout and stderr for easier usage.
153

154
    """
155
    return self.stdout + self.stderr
156

    
157
  output = property(_GetOutput, None, None, "Return full output")
158

    
159

    
160
def _BuildCmdEnvironment(env, reset):
161
  """Builds the environment for an external program.
162

163
  """
164
  if reset:
165
    cmd_env = {}
166
  else:
167
    cmd_env = os.environ.copy()
168
    cmd_env["LC_ALL"] = "C"
169

    
170
  if env is not None:
171
    cmd_env.update(env)
172

    
173
  return cmd_env
174

    
175

    
176
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
177
  """Execute a (shell) command.
178

179
  The command should not read from its standard input, as it will be
180
  closed.
181

182
  @type cmd: string or list
183
  @param cmd: Command to run
184
  @type env: dict
185
  @param env: Additional environment variables
186
  @type output: str
187
  @param output: if desired, the output of the command can be
188
      saved in a file instead of the RunResult instance; this
189
      parameter denotes the file name (if not None)
190
  @type cwd: string
191
  @param cwd: if specified, will be used as the working
192
      directory for the command; the default will be /
193
  @type reset_env: boolean
194
  @param reset_env: whether to reset or keep the default os environment
195
  @rtype: L{RunResult}
196
  @return: RunResult instance
197
  @raise errors.ProgrammerError: if we call this when forks are disabled
198

199
  """
200
  if no_fork:
201
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
202

    
203
  if isinstance(cmd, basestring):
204
    strcmd = cmd
205
    shell = True
206
  else:
207
    cmd = [str(val) for val in cmd]
208
    strcmd = ShellQuoteArgs(cmd)
209
    shell = False
210

    
211
  if output:
212
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
213
  else:
214
    logging.debug("RunCmd %s", strcmd)
215

    
216
  cmd_env = _BuildCmdEnvironment(env, reset_env)
217

    
218
  try:
219
    if output is None:
220
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
221
    else:
222
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
223
      out = err = ""
224
  except OSError, err:
225
    if err.errno == errno.ENOENT:
226
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
227
                               (strcmd, err))
228
    else:
229
      raise
230

    
231
  if status >= 0:
232
    exitcode = status
233
    signal_ = None
234
  else:
235
    exitcode = None
236
    signal_ = -status
237

    
238
  return RunResult(exitcode, signal_, out, err, strcmd)
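# Illustrative usage of RunCmd (not part of the original module). The command
# may be a list (executed directly) or a string (run through the shell):
#
#   result = RunCmd(["/bin/ls", "-l", "/tmp"])
#   if result.failed:
#     logging.error("ls failed (%s): %s", result.fail_reason, result.output)
#   result = RunCmd("ls -l /tmp | wc -l")   # string form goes via the shell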
239

    
240

    
241
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
242
                pidfile=None):
243
  """Start a daemon process after forking twice.
244

245
  @type cmd: string or list
246
  @param cmd: Command to run
247
  @type env: dict
248
  @param env: Additional environment variables
249
  @type cwd: string
250
  @param cwd: Working directory for the program
251
  @type output: string
252
  @param output: Path to file in which to save the output
253
  @type output_fd: int
254
  @param output_fd: File descriptor for output
255
  @type pidfile: string
256
  @param pidfile: Process ID file
257
  @rtype: int
258
  @return: Daemon process ID
259
  @raise errors.ProgrammerError: if we call this when forks are disabled
260

261
  """
262
  if no_fork:
263
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
264
                                 " disabled")
265

    
266
  if output and not (bool(output) ^ (output_fd is not None)):
267
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
268
                                 " specified")
269

    
270
  if isinstance(cmd, basestring):
271
    cmd = ["/bin/sh", "-c", cmd]
272

    
273
  strcmd = ShellQuoteArgs(cmd)
274

    
275
  if output:
276
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
277
  else:
278
    logging.debug("StartDaemon %s", strcmd)
279

    
280
  cmd_env = _BuildCmdEnvironment(env, False)
281

    
282
  # Create pipe for sending PID back
283
  (pidpipe_read, pidpipe_write) = os.pipe()
284
  try:
285
    try:
286
      # Create pipe for sending error messages
287
      (errpipe_read, errpipe_write) = os.pipe()
288
      try:
289
        try:
290
          # First fork
291
          pid = os.fork()
292
          if pid == 0:
293
            try:
294
              # Child process, won't return
295
              _StartDaemonChild(errpipe_read, errpipe_write,
296
                                pidpipe_read, pidpipe_write,
297
                                cmd, cmd_env, cwd,
298
                                output, output_fd, pidfile)
299
            finally:
300
              # Well, maybe child process failed
301
              os._exit(1) # pylint: disable-msg=W0212
302
        finally:
303
          _CloseFDNoErr(errpipe_write)
304

    
305
        # Wait for daemon to be started (or an error message to arrive) and read
306
        # up to 100 KB as an error message
307
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
308
      finally:
309
        _CloseFDNoErr(errpipe_read)
310
    finally:
311
      _CloseFDNoErr(pidpipe_write)
312

    
313
    # Read up to 128 bytes for PID
314
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
315
  finally:
316
    _CloseFDNoErr(pidpipe_read)
317

    
318
  # Try to avoid zombies by waiting for child process
319
  try:
320
    os.waitpid(pid, 0)
321
  except OSError:
322
    pass
323

    
324
  if errormsg:
325
    raise errors.OpExecError("Error when starting daemon process: %r" %
326
                             errormsg)
327

    
328
  try:
329
    return int(pidtext)
330
  except (ValueError, TypeError), err:
331
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
332
                             (pidtext, err))
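# Illustrative usage of StartDaemon (not part of the original module); the
# daemon path and file names below are hypothetical:
#
#   pid = StartDaemon(["/usr/sbin/example-daemon", "--foreground"],
#                     output="/var/log/example-daemon.log",
#                     pidfile="/var/run/example-daemon.pid")
#   logging.info("Daemon started with PID %d", pid)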
333

    
334

    
335
def _StartDaemonChild(errpipe_read, errpipe_write,
336
                      pidpipe_read, pidpipe_write,
337
                      args, env, cwd,
338
                      output, fd_output, pidfile):
339
  """Child process for starting daemon.
340

341
  """
342
  try:
343
    # Close parent's side
344
    _CloseFDNoErr(errpipe_read)
345
    _CloseFDNoErr(pidpipe_read)
346

    
347
    # First child process
348
    os.chdir("/")
349
    os.umask(077)
350
    os.setsid()
351

    
352
    # And fork for the second time
353
    pid = os.fork()
354
    if pid != 0:
355
      # Exit first child process
356
      os._exit(0) # pylint: disable-msg=W0212
357

    
358
    # Make sure pipe is closed on execv* (and thereby notifies original process)
359
    SetCloseOnExecFlag(errpipe_write, True)
360

    
361
    # List of file descriptors to be left open
362
    noclose_fds = [errpipe_write]
363

    
364
    # Open PID file
365
    if pidfile:
366
      try:
367
        # TODO: Atomic replace with another locked file instead of writing into
368
        # it after creating
369
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
370

    
371
        # Lock the PID file (and fail if not possible to do so). Any code
372
        # wanting to send a signal to the daemon should try to lock the PID
373
        # file before reading it. If acquiring the lock succeeds, the daemon is
374
        # no longer running and the signal should not be sent.
375
        LockFile(fd_pidfile)
376

    
377
        os.write(fd_pidfile, "%d\n" % os.getpid())
378
      except Exception, err:
379
        raise Exception("Creating and locking PID file failed: %s" % err)
380

    
381
      # Keeping the file open to hold the lock
382
      noclose_fds.append(fd_pidfile)
383

    
384
      SetCloseOnExecFlag(fd_pidfile, False)
385
    else:
386
      fd_pidfile = None
387

    
388
    # Open /dev/null
389
    fd_devnull = os.open(os.devnull, os.O_RDWR)
390

    
391
    assert not output or (bool(output) ^ (fd_output is not None))
392

    
393
    if fd_output is not None:
394
      pass
395
    elif output:
396
      # Open output file
397
      try:
398
        # TODO: Implement flag to set append=yes/no
399
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
400
      except EnvironmentError, err:
401
        raise Exception("Opening output file failed: %s" % err)
402
    else:
403
      fd_output = fd_devnull
404

    
405
    # Redirect standard I/O
406
    os.dup2(fd_devnull, 0)
407
    os.dup2(fd_output, 1)
408
    os.dup2(fd_output, 2)
409

    
410
    # Send daemon PID to parent
411
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
412

    
413
    # Close all file descriptors except stdio and error message pipe
414
    CloseFDs(noclose_fds=noclose_fds)
415

    
416
    # Change working directory
417
    os.chdir(cwd)
418

    
419
    if env is None:
420
      os.execvp(args[0], args)
421
    else:
422
      os.execvpe(args[0], args, env)
423
  except: # pylint: disable-msg=W0702
424
    try:
425
      # Report errors to original process
426
      buf = str(sys.exc_info()[1])
427

    
428
      RetryOnSignal(os.write, errpipe_write, buf)
429
    except: # pylint: disable-msg=W0702
430
      # Ignore errors in error handling
431
      pass
432

    
433
  os._exit(1) # pylint: disable-msg=W0212
434

    
435

    
436
def _RunCmdPipe(cmd, env, via_shell, cwd):
437
  """Run a command and return its output.
438

439
  @type  cmd: string or list
440
  @param cmd: Command to run
441
  @type env: dict
442
  @param env: The environment to use
443
  @type via_shell: bool
444
  @param via_shell: if we should run via the shell
445
  @type cwd: string
446
  @param cwd: the working directory for the program
447
  @rtype: tuple
448
  @return: (out, err, status)
449

450
  """
451
  poller = select.poll()
452
  child = subprocess.Popen(cmd, shell=via_shell,
453
                           stderr=subprocess.PIPE,
454
                           stdout=subprocess.PIPE,
455
                           stdin=subprocess.PIPE,
456
                           close_fds=True, env=env,
457
                           cwd=cwd)
458

    
459
  child.stdin.close()
460
  poller.register(child.stdout, select.POLLIN)
461
  poller.register(child.stderr, select.POLLIN)
462
  out = StringIO()
463
  err = StringIO()
464
  fdmap = {
465
    child.stdout.fileno(): (out, child.stdout),
466
    child.stderr.fileno(): (err, child.stderr),
467
    }
468
  for fd in fdmap:
469
    SetNonblockFlag(fd, True)
470

    
471
  while fdmap:
472
    pollresult = RetryOnSignal(poller.poll)
473

    
474
    for fd, event in pollresult:
475
      if event & select.POLLIN or event & select.POLLPRI:
476
        data = fdmap[fd][1].read()
477
        # no data from read signifies EOF (the same as POLLHUP)
478
        if not data:
479
          poller.unregister(fd)
480
          del fdmap[fd]
481
          continue
482
        fdmap[fd][0].write(data)
483
      if (event & select.POLLNVAL or event & select.POLLHUP or
484
          event & select.POLLERR):
485
        poller.unregister(fd)
486
        del fdmap[fd]
487

    
488
  out = out.getvalue()
489
  err = err.getvalue()
490

    
491
  status = child.wait()
492
  return out, err, status
493

    
494

    
495
def _RunCmdFile(cmd, env, via_shell, output, cwd):
496
  """Run a command and save its output to a file.
497

498
  @type  cmd: string or list
499
  @param cmd: Command to run
500
  @type env: dict
501
  @param env: The environment to use
502
  @type via_shell: bool
503
  @param via_shell: if we should run via the shell
504
  @type output: str
505
  @param output: the filename in which to save the output
506
  @type cwd: string
507
  @param cwd: the working directory for the program
508
  @rtype: int
509
  @return: the exit status
510

511
  """
512
  fh = open(output, "a")
513
  try:
514
    child = subprocess.Popen(cmd, shell=via_shell,
515
                             stderr=subprocess.STDOUT,
516
                             stdout=fh,
517
                             stdin=subprocess.PIPE,
518
                             close_fds=True, env=env,
519
                             cwd=cwd)
520

    
521
    child.stdin.close()
522
    status = child.wait()
523
  finally:
524
    fh.close()
525
  return status
526

    
527

    
528
def SetCloseOnExecFlag(fd, enable):
529
  """Sets or unsets the close-on-exec flag on a file descriptor.
530

531
  @type fd: int
532
  @param fd: File descriptor
533
  @type enable: bool
534
  @param enable: Whether to set or unset it.
535

536
  """
537
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)
538

    
539
  if enable:
540
    flags |= fcntl.FD_CLOEXEC
541
  else:
542
    flags &= ~fcntl.FD_CLOEXEC
543

    
544
  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
545

    
546

    
547
def SetNonblockFlag(fd, enable):
548
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
549

550
  @type fd: int
551
  @param fd: File descriptor
552
  @type enable: bool
553
  @param enable: Whether to set or unset it
554

555
  """
556
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)
557

    
558
  if enable:
559
    flags |= os.O_NONBLOCK
560
  else:
561
    flags &= ~os.O_NONBLOCK
562

    
563
  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
564

    
565

    
566
def RetryOnSignal(fn, *args, **kwargs):
567
  """Calls a function again if it failed due to EINTR.
568

569
  """
570
  while True:
571
    try:
572
      return fn(*args, **kwargs)
573
    except (EnvironmentError, socket.error), err:
574
      if err.errno != errno.EINTR:
575
        raise
576
    except select.error, err:
577
      if not (err.args and err.args[0] == errno.EINTR):
578
        raise
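# Illustrative usage of RetryOnSignal (not part of the original module): wrap
# system calls that may be interrupted by a signal, as done elsewhere in this
# module for os.read and os.write:
#
#   data = RetryOnSignal(os.read, fd, 4096)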
579

    
580

    
581
def RunParts(dir_name, env=None, reset_env=False):
582
  """Run Scripts or programs in a directory
583

584
  @type dir_name: string
585
  @param dir_name: absolute path to a directory
586
  @type env: dict
587
  @param env: The environment to use
588
  @type reset_env: boolean
589
  @param reset_env: whether to reset or keep the default os environment
590
  @rtype: list of tuples
591
  @return: list of (name, (one of the RUNPARTS_* status values), RunResult)
592

593
  """
594
  rr = []
595

    
596
  try:
597
    dir_contents = ListVisibleFiles(dir_name)
598
  except OSError, err:
599
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
600
    return rr
601

    
602
  for relname in sorted(dir_contents):
603
    fname = PathJoin(dir_name, relname)
604
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
605
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
606
      rr.append((relname, constants.RUNPARTS_SKIP, None))
607
    else:
608
      try:
609
        result = RunCmd([fname], env=env, reset_env=reset_env)
610
      except Exception, err: # pylint: disable-msg=W0703
611
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
612
      else:
613
        rr.append((relname, constants.RUNPARTS_RUN, result))
614

    
615
  return rr
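# Illustrative usage of RunParts (not part of the original module); the
# directory name is hypothetical:
#
#   for (name, status, runresult) in RunParts("/etc/example/hooks.d"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("Script %s failed: %s", name, runresult)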
616

    
617

    
618
def GetSocketCredentials(sock):
619
  """Returns the credentials of the foreign process connected to a socket.
620

621
  @param sock: Unix socket
622
  @rtype: tuple; (number, number, number)
623
  @return: The PID, UID and GID of the connected foreign process.
624

625
  """
626
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
627
                             _STRUCT_UCRED_SIZE)
628
  return struct.unpack(_STRUCT_UCRED, peercred)
629

    
630

    
631
def RemoveFile(filename):
632
  """Remove a file ignoring some errors.
633

634
  Remove a file, ignoring non-existing ones or directories. Other
635
  errors are propagated to the caller.
636

637
  @type filename: str
638
  @param filename: the file to be removed
639

640
  """
641
  try:
642
    os.unlink(filename)
643
  except OSError, err:
644
    if err.errno not in (errno.ENOENT, errno.EISDIR):
645
      raise
646

    
647

    
648
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
649
  """Renames a file.
650

651
  @type old: string
652
  @param old: Original path
653
  @type new: string
654
  @param new: New path
655
  @type mkdir: bool
656
  @param mkdir: Whether to create target directory if it doesn't exist
657
  @type mkdir_mode: int
658
  @param mkdir_mode: Mode for newly created directories
659

660
  """
661
  try:
662
    return os.rename(old, new)
663
  except OSError, err:
664
    # In at least one use case of this function, the job queue, directory
665
    # creation is very rare. Checking for the directory before renaming is not
666
    # as efficient.
667
    if mkdir and err.errno == errno.ENOENT:
668
      # Create directory and try again
669
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
670

    
671
      return os.rename(old, new)
672

    
673
    raise
674

    
675

    
676
def Makedirs(path, mode=0750):
677
  """Super-mkdir; create a leaf directory and all intermediate ones.
678

679
  This is a wrapper around C{os.makedirs} adding error handling not implemented
680
  before Python 2.5.
681

682
  """
683
  try:
684
    os.makedirs(path, mode)
685
  except OSError, err:
686
    # Ignore EEXIST. This is only handled in os.makedirs as included in
687
    # Python 2.5 and above.
688
    if err.errno != errno.EEXIST or not os.path.exists(path):
689
      raise
690

    
691

    
692
def ResetTempfileModule():
693
  """Resets the random name generator of the tempfile module.
694

695
  This function should be called after C{os.fork} in the child process to
696
  ensure it creates a newly seeded random generator. Otherwise it would
697
  generate the same random parts as the parent process. If several processes
698
  race for the creation of a temporary file, this could lead to one not getting
699
  a temporary name.
700

701
  """
702
  # pylint: disable-msg=W0212
703
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
704
    tempfile._once_lock.acquire()
705
    try:
706
      # Reset random name generator
707
      tempfile._name_sequence = None
708
    finally:
709
      tempfile._once_lock.release()
710
  else:
711
    logging.critical("The tempfile module misses at least one of the"
712
                     " '_once_lock' and '_name_sequence' attributes")
713

    
714

    
715
def _FingerprintFile(filename):
716
  """Compute the fingerprint of a file.
717

718
  If the file does not exist, None will be returned
719
  instead.
720

721
  @type filename: str
722
  @param filename: the filename to checksum
723
  @rtype: str
724
  @return: the hex digest of the sha checksum of the contents
725
      of the file
726

727
  """
728
  if not (os.path.exists(filename) and os.path.isfile(filename)):
729
    return None
730

    
731
  f = open(filename)
732

    
733
  if callable(sha1):
734
    fp = sha1()
735
  else:
736
    fp = sha1.new()
737
  while True:
738
    data = f.read(4096)
739
    if not data:
740
      break
741

    
742
    fp.update(data)
743

    
744
  return fp.hexdigest()
745

    
746

    
747
def FingerprintFiles(files):
748
  """Compute fingerprints for a list of files.
749

750
  @type files: list
751
  @param files: the list of filenames to fingerprint
752
  @rtype: dict
753
  @return: a dictionary filename: fingerprint, holding only
754
      existing files
755

756
  """
757
  ret = {}
758

    
759
  for filename in files:
760
    cksum = _FingerprintFile(filename)
761
    if cksum:
762
      ret[filename] = cksum
763

    
764
  return ret
765

    
766

    
767
def ForceDictType(target, key_types, allowed_values=None):
768
  """Force the values of a dict to have certain types.
769

770
  @type target: dict
771
  @param target: the dict to update
772
  @type key_types: dict
773
  @param key_types: dict mapping target dict keys to types
774
                    in constants.ENFORCEABLE_TYPES
775
  @type allowed_values: list
776
  @keyword allowed_values: list of specially allowed values
777

778
  """
779
  if allowed_values is None:
780
    allowed_values = []
781

    
782
  if not isinstance(target, dict):
783
    msg = "Expected dictionary, got '%s'" % target
784
    raise errors.TypeEnforcementError(msg)
785

    
786
  for key in target:
787
    if key not in key_types:
788
      msg = "Unknown key '%s'" % key
789
      raise errors.TypeEnforcementError(msg)
790

    
791
    if target[key] in allowed_values:
792
      continue
793

    
794
    ktype = key_types[key]
795
    if ktype not in constants.ENFORCEABLE_TYPES:
796
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
797
      raise errors.ProgrammerError(msg)
798

    
799
    if ktype == constants.VTYPE_STRING:
800
      if not isinstance(target[key], basestring):
801
        if isinstance(target[key], bool) and not target[key]:
802
          target[key] = ''
803
        else:
804
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
805
          raise errors.TypeEnforcementError(msg)
806
    elif ktype == constants.VTYPE_BOOL:
807
      if isinstance(target[key], basestring) and target[key]:
808
        if target[key].lower() == constants.VALUE_FALSE:
809
          target[key] = False
810
        elif target[key].lower() == constants.VALUE_TRUE:
811
          target[key] = True
812
        else:
813
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
814
          raise errors.TypeEnforcementError(msg)
815
      elif target[key]:
816
        target[key] = True
817
      else:
818
        target[key] = False
819
    elif ktype == constants.VTYPE_SIZE:
820
      try:
821
        target[key] = ParseUnit(target[key])
822
      except errors.UnitParseError, err:
823
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
824
              (key, target[key], err)
825
        raise errors.TypeEnforcementError(msg)
826
    elif ktype == constants.VTYPE_INT:
827
      try:
828
        target[key] = int(target[key])
829
      except (ValueError, TypeError):
830
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
831
        raise errors.TypeEnforcementError(msg)
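# Illustrative usage of ForceDictType (not part of the original module): the
# dict is converted in place according to the requested types, e.g. a size
# given as "512M" is parsed via ParseUnit:
#
#   data = {"memory": "512M", "port": "11000"}
#   ForceDictType(data, {"memory": constants.VTYPE_SIZE,
#                        "port": constants.VTYPE_INT})
#   # data is now {"memory": 512, "port": 11000}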
832

    
833

    
834
def IsProcessAlive(pid):
835
  """Check if a given pid exists on the system.
836

837
  @note: zombie status is not handled, so zombie processes
838
      will be returned as alive
839
  @type pid: int
840
  @param pid: the process ID to check
841
  @rtype: boolean
842
  @return: True if the process exists
843

844
  """
845
  def _TryStat(name):
846
    try:
847
      os.stat(name)
848
      return True
849
    except EnvironmentError, err:
850
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
851
        return False
852
      elif err.errno == errno.EINVAL:
853
        raise RetryAgain(err)
854
      raise
855

    
856
  assert isinstance(pid, int), "pid must be an integer"
857
  if pid <= 0:
858
    return False
859

    
860
  proc_entry = "/proc/%d/status" % pid
861
  # /proc in a multiprocessor environment can have strange behaviors.
862
  # Retry the os.stat a few times until we get a good result.
863
  try:
864
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5, args=[proc_entry])
865
  except RetryTimeout, err:
866
    err.RaiseInner()
867

    
868

    
869
def ReadPidFile(pidfile):
870
  """Read a pid from a file.
871

872
  @type  pidfile: string
873
  @param pidfile: path to the file containing the pid
874
  @rtype: int
875
  @return: The process id, if the file exists and contains a valid PID,
876
           otherwise 0
877

878
  """
879
  try:
880
    raw_data = ReadOneLineFile(pidfile)
881
  except EnvironmentError, err:
882
    if err.errno != errno.ENOENT:
883
      logging.exception("Can't read pid file")
884
    return 0
885

    
886
  try:
887
    pid = int(raw_data)
888
  except (TypeError, ValueError), err:
889
    logging.info("Can't parse pid file contents", exc_info=True)
890
    return 0
891

    
892
  return pid
893

    
894

    
895
def ReadLockedPidFile(path):
896
  """Reads a locked PID file.
897

898
  This can be used together with L{StartDaemon}.
899

900
  @type path: string
901
  @param path: Path to PID file
902
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
903

904
  """
905
  try:
906
    fd = os.open(path, os.O_RDONLY)
907
  except EnvironmentError, err:
908
    if err.errno == errno.ENOENT:
909
      # PID file doesn't exist
910
      return None
911
    raise
912

    
913
  try:
914
    try:
915
      # Try to acquire lock
916
      LockFile(fd)
917
    except errors.LockError:
918
      # Couldn't lock, daemon is running
919
      return int(os.read(fd, 100))
920
  finally:
921
    os.close(fd)
922

    
923
  return None
924

    
925

    
926
def MatchNameComponent(key, name_list, case_sensitive=True):
927
  """Try to match a name against a list.
928

929
  This function will try to match a name like test1 against a list
930
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
931
  this list, I{'test1'} as well as I{'test1.example'} will match, but
932
  not I{'test1.ex'}. A multiple match will be considered as no match
933
  at all (e.g. I{'test1'} against C{['test1.example.com',
934
  'test1.example.org']}), except when the key fully matches an entry
935
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
936

937
  @type key: str
938
  @param key: the name to be searched
939
  @type name_list: list
940
  @param name_list: the list of strings against which to search the key
941
  @type case_sensitive: boolean
942
  @param case_sensitive: whether to provide a case-sensitive match
943

944
  @rtype: None or str
945
  @return: None if there is no match I{or} if there are multiple matches,
946
      otherwise the element from the list which matches
947

948
  """
949
  if key in name_list:
950
    return key
951

    
952
  re_flags = 0
953
  if not case_sensitive:
954
    re_flags |= re.IGNORECASE
955
    key = key.upper()
956
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
957
  names_filtered = []
958
  string_matches = []
959
  for name in name_list:
960
    if mo.match(name) is not None:
961
      names_filtered.append(name)
962
      if not case_sensitive and key == name.upper():
963
        string_matches.append(name)
964

    
965
  if len(string_matches) == 1:
966
    return string_matches[0]
967
  if len(names_filtered) == 1:
968
    return names_filtered[0]
969
  return None
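# Illustrative usage of MatchNameComponent (not part of the original module),
# mirroring the examples from the docstring above:
#
#   MatchNameComponent("test1", ["test1.example.com", "test2.example.com"])
#   # -> "test1.example.com"
#   MatchNameComponent("test1", ["test1.example.com", "test1.example.org"])
#   # -> None (ambiguous match)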
970

    
971

    
972
class HostInfo:
973
  """Class implementing resolver and hostname functionality
974

975
  """
976
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")
977

    
978
  def __init__(self, name=None):
979
    """Initialize the host name object.
980

981
    If the name argument is not passed, it will use this system's
982
    name.
983

984
    """
985
    if name is None:
986
      name = self.SysName()
987

    
988
    self.query = name
989
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
990
    self.ip = self.ipaddrs[0]
991

    
992
  def ShortName(self):
993
    """Returns the hostname without domain.
994

995
    """
996
    return self.name.split('.')[0]
997

    
998
  @staticmethod
999
  def SysName():
1000
    """Return the current system's name.
1001

1002
    This is simply a wrapper over C{socket.gethostname()}.
1003

1004
    """
1005
    return socket.gethostname()
1006

    
1007
  @staticmethod
1008
  def LookupHostname(hostname):
1009
    """Look up hostname
1010

1011
    @type hostname: str
1012
    @param hostname: hostname to look up
1013

1014
    @rtype: tuple
1015
    @return: a tuple (name, aliases, ipaddrs) as returned by
1016
        C{socket.gethostbyname_ex}
1017
    @raise errors.ResolverError: in case of errors in resolving
1018

1019
    """
1020
    try:
1021
      result = socket.gethostbyname_ex(hostname)
1022
    except socket.gaierror, err:
1023
      # hostname not found in DNS
1024
      raise errors.ResolverError(hostname, err.args[0], err.args[1])
1025

    
1026
    return result
1027

    
1028
  @classmethod
1029
  def NormalizeName(cls, hostname):
1030
    """Validate and normalize the given hostname.
1031

1032
    @attention: the validation is a bit more relaxed than the standards
1033
        require; most importantly, we allow underscores in names
1034
    @raise errors.OpPrereqError: when the name is not valid
1035

1036
    """
1037
    hostname = hostname.lower()
1038
    if (not cls._VALID_NAME_RE.match(hostname) or
1039
        # double-dots, meaning empty label
1040
        ".." in hostname or
1041
        # empty initial label
1042
        hostname.startswith(".")):
1043
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
1044
                                 errors.ECODE_INVAL)
1045
    if hostname.endswith("."):
1046
      hostname = hostname.rstrip(".")
1047
    return hostname
1048

    
1049

    
1050
def GetHostInfo(name=None):
1051
  """Lookup host name and raise an OpPrereqError for failures"""
1052

    
1053
  try:
1054
    return HostInfo(name)
1055
  except errors.ResolverError, err:
1056
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
1057
                               (err[0], err[2]), errors.ECODE_RESOLVER)
1058

    
1059

    
1060
def ListVolumeGroups():
1061
  """List volume groups and their size
1062

1063
  @rtype: dict
1064
  @return:
1065
       Dictionary with keys volume name and values
1066
       the size of the volume
1067

1068
  """
1069
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1070
  result = RunCmd(command)
1071
  retval = {}
1072
  if result.failed:
1073
    return retval
1074

    
1075
  for line in result.stdout.splitlines():
1076
    try:
1077
      name, size = line.split()
1078
      size = int(float(size))
1079
    except (IndexError, ValueError), err:
1080
      logging.error("Invalid output from vgs (%s): %s", err, line)
1081
      continue
1082

    
1083
    retval[name] = size
1084

    
1085
  return retval
1086

    
1087

    
1088
def BridgeExists(bridge):
1089
  """Check whether the given bridge exists in the system
1090

1091
  @type bridge: str
1092
  @param bridge: the bridge name to check
1093
  @rtype: boolean
1094
  @return: True if it does
1095

1096
  """
1097
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1098

    
1099

    
1100
def NiceSort(name_list):
1101
  """Sort a list of strings based on digit and non-digit groupings.
1102

1103
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1104
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1105
  'a11']}.
1106

1107
  The sort algorithm breaks each name in groups of either only-digits
1108
  or no-digits. Only the first eight such groups are considered, and
1109
  after that we just use what's left of the string.
1110

1111
  @type name_list: list
1112
  @param name_list: the names to be sorted
1113
  @rtype: list
1114
  @return: a copy of the name list sorted with our algorithm
1115

1116
  """
1117
  _SORTER_BASE = "(\D+|\d+)"
1118
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1119
                                                  _SORTER_BASE, _SORTER_BASE,
1120
                                                  _SORTER_BASE, _SORTER_BASE,
1121
                                                  _SORTER_BASE, _SORTER_BASE)
1122
  _SORTER_RE = re.compile(_SORTER_FULL)
1123
  _SORTER_NODIGIT = re.compile("^\D*$")
1124
  def _TryInt(val):
1125
    """Attempts to convert a variable to integer."""
1126
    if val is None or _SORTER_NODIGIT.match(val):
1127
      return val
1128
    rval = int(val)
1129
    return rval
1130

    
1131
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1132
             for name in name_list]
1133
  to_sort.sort()
1134
  return [tup[1] for tup in to_sort]
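# Illustrative usage of NiceSort (not part of the original module), using the
# example from the docstring above:
#
#   NiceSort(["a1", "a10", "a11", "a2"])   # -> ["a1", "a2", "a10", "a11"]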
1135

    
1136

    
1137
def TryConvert(fn, val):
1138
  """Try to convert a value ignoring errors.
1139

1140
  This function tries to apply function I{fn} to I{val}. If no
1141
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1142
  the result, else it will return the original value. Any other
1143
  exceptions are propagated to the caller.
1144

1145
  @type fn: callable
1146
  @param fn: function to apply to the value
1147
  @param val: the value to be converted
1148
  @return: The converted value if the conversion was successful,
1149
      otherwise the original value.
1150

1151
  """
1152
  try:
1153
    nv = fn(val)
1154
  except (ValueError, TypeError):
1155
    nv = val
1156
  return nv
1157

    
1158

    
1159
def IsValidIP(ip):
1160
  """Verifies the syntax of an IPv4 address.
1161

1162
  This function checks if the IPv4 address passed is valid or not based
1163
  on syntax (not IP range, class calculations, etc.).
1164

1165
  @type ip: str
1166
  @param ip: the address to be checked
1167
  @rtype: a regular expression match object
1168
  @return: a regular expression match object, or None if the
1169
      address is not valid
1170

1171
  """
1172
  unit = "(0|[1-9]\d{0,2})"
1173
  #TODO: convert and return only boolean
1174
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
1175

    
1176

    
1177
def IsValidShellParam(word):
1178
  """Verifies is the given word is safe from the shell's p.o.v.
1179

1180
  This means that we can pass this to a command via the shell and be
1181
  sure that it doesn't alter the command line and is passed as such to
1182
  the actual command.
1183

1184
  Note that we are overly restrictive here, in order to be on the safe
1185
  side.
1186

1187
  @type word: str
1188
  @param word: the word to check
1189
  @rtype: boolean
1190
  @return: True if the word is 'safe'
1191

1192
  """
1193
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1194

    
1195

    
1196
def BuildShellCmd(template, *args):
1197
  """Build a safe shell command line from the given arguments.
1198

1199
  This function will check all arguments in the args list so that they
1200
  are valid shell parameters (i.e. they don't contain shell
1201
  metacharacters). If everything is ok, it will return the result of
1202
  template % args.
1203

1204
  @type template: str
1205
  @param template: the string holding the template for the
1206
      string formatting
1207
  @rtype: str
1208
  @return: the expanded command line
1209

1210
  """
1211
  for word in args:
1212
    if not IsValidShellParam(word):
1213
      raise errors.ProgrammerError("Shell argument '%s' contains"
1214
                                   " invalid characters" % word)
1215
  return template % args
1216

    
1217

    
1218
def FormatUnit(value, units):
1219
  """Formats an incoming number of MiB with the appropriate unit.
1220

1221
  @type value: int
1222
  @param value: integer representing the value in MiB (1048576)
1223
  @type units: char
1224
  @param units: the type of formatting we should do:
1225
      - 'h' for automatic scaling
1226
      - 'm' for MiBs
1227
      - 'g' for GiBs
1228
      - 't' for TiBs
1229
  @rtype: str
1230
  @return: the formatted value (with suffix)
1231

1232
  """
1233
  if units not in ('m', 'g', 't', 'h'):
1234
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1235

    
1236
  suffix = ''
1237

    
1238
  if units == 'm' or (units == 'h' and value < 1024):
1239
    if units == 'h':
1240
      suffix = 'M'
1241
    return "%d%s" % (round(value, 0), suffix)
1242

    
1243
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1244
    if units == 'h':
1245
      suffix = 'G'
1246
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1247

    
1248
  else:
1249
    if units == 'h':
1250
      suffix = 'T'
1251
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1252

    
1253

    
1254
def ParseUnit(input_string):
1255
  """Tries to extract number and scale from the given string.
1256

1257
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1258
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1259
  is always an int in MiB.
1260

1261
  """
1262
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1263
  if not m:
1264
    raise errors.UnitParseError("Invalid format")
1265

    
1266
  value = float(m.groups()[0])
1267

    
1268
  unit = m.groups()[1]
1269
  if unit:
1270
    lcunit = unit.lower()
1271
  else:
1272
    lcunit = 'm'
1273

    
1274
  if lcunit in ('m', 'mb', 'mib'):
1275
    # Value already in MiB
1276
    pass
1277

    
1278
  elif lcunit in ('g', 'gb', 'gib'):
1279
    value *= 1024
1280

    
1281
  elif lcunit in ('t', 'tb', 'tib'):
1282
    value *= 1024 * 1024
1283

    
1284
  else:
1285
    raise errors.UnitParseError("Unknown unit: %s" % unit)
1286

    
1287
  # Make sure we round up
1288
  if int(value) < value:
1289
    value += 1
1290

    
1291
  # Round up to the next multiple of 4
1292
  value = int(value)
1293
  if value % 4:
1294
    value += 4 - value % 4
1295

    
1296
  return value
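# Illustrative usage of ParseUnit and FormatUnit (not part of the original
# module); results are in MiB and are rounded up to a multiple of 4:
#
#   ParseUnit("2g")        # -> 2048
#   ParseUnit("513")       # -> 516 (rounded up to the next multiple of 4)
#   FormatUnit(2048, "h")  # -> "2.0G"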
1297

    
1298

    
1299
def AddAuthorizedKey(file_name, key):
1300
  """Adds an SSH public key to an authorized_keys file.
1301

1302
  @type file_name: str
1303
  @param file_name: path to authorized_keys file
1304
  @type key: str
1305
  @param key: string containing key
1306

1307
  """
1308
  key_fields = key.split()
1309

    
1310
  f = open(file_name, 'a+')
1311
  try:
1312
    nl = True
1313
    for line in f:
1314
      # Ignore whitespace changes
1315
      if line.split() == key_fields:
1316
        break
1317
      nl = line.endswith('\n')
1318
    else:
1319
      if not nl:
1320
        f.write("\n")
1321
      f.write(key.rstrip('\r\n'))
1322
      f.write("\n")
1323
      f.flush()
1324
  finally:
1325
    f.close()
1326

    
1327

    
1328
def RemoveAuthorizedKey(file_name, key):
1329
  """Removes an SSH public key from an authorized_keys file.
1330

1331
  @type file_name: str
1332
  @param file_name: path to authorized_keys file
1333
  @type key: str
1334
  @param key: string containing key
1335

1336
  """
1337
  key_fields = key.split()
1338

    
1339
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1340
  try:
1341
    out = os.fdopen(fd, 'w')
1342
    try:
1343
      f = open(file_name, 'r')
1344
      try:
1345
        for line in f:
1346
          # Ignore whitespace changes while comparing lines
1347
          if line.split() != key_fields:
1348
            out.write(line)
1349

    
1350
        out.flush()
1351
        os.rename(tmpname, file_name)
1352
      finally:
1353
        f.close()
1354
    finally:
1355
      out.close()
1356
  except:
1357
    RemoveFile(tmpname)
1358
    raise
1359

    
1360

    
1361
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1362
  """Sets the name of an IP address and hostname in /etc/hosts.
1363

1364
  @type file_name: str
1365
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1366
  @type ip: str
1367
  @param ip: the IP address
1368
  @type hostname: str
1369
  @param hostname: the hostname to be added
1370
  @type aliases: list
1371
  @param aliases: the list of aliases to add for the hostname
1372

1373
  """
1374
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1375
  # Ensure aliases are unique
1376
  aliases = UniqueSequence([hostname] + aliases)[1:]
1377

    
1378
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1379
  try:
1380
    out = os.fdopen(fd, 'w')
1381
    try:
1382
      f = open(file_name, 'r')
1383
      try:
1384
        for line in f:
1385
          fields = line.split()
1386
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1387
            continue
1388
          out.write(line)
1389

    
1390
        out.write("%s\t%s" % (ip, hostname))
1391
        if aliases:
1392
          out.write(" %s" % ' '.join(aliases))
1393
        out.write('\n')
1394

    
1395
        out.flush()
1396
        os.fsync(out)
1397
        os.chmod(tmpname, 0644)
1398
        os.rename(tmpname, file_name)
1399
      finally:
1400
        f.close()
1401
    finally:
1402
      out.close()
1403
  except:
1404
    RemoveFile(tmpname)
1405
    raise
1406

    
1407

    
1408
def AddHostToEtcHosts(hostname):
1409
  """Wrapper around SetEtcHostsEntry.
1410

1411
  @type hostname: str
1412
  @param hostname: a hostname that will be resolved and added to
1413
      L{constants.ETC_HOSTS}
1414

1415
  """
1416
  hi = HostInfo(name=hostname)
1417
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1418

    
1419

    
1420
def RemoveEtcHostsEntry(file_name, hostname):
1421
  """Removes a hostname from /etc/hosts.
1422

1423
  IP addresses without names are removed from the file.
1424

1425
  @type file_name: str
1426
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1427
  @type hostname: str
1428
  @param hostname: the hostname to be removed
1429

1430
  """
1431
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1432
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1433
  try:
1434
    out = os.fdopen(fd, 'w')
1435
    try:
1436
      f = open(file_name, 'r')
1437
      try:
1438
        for line in f:
1439
          fields = line.split()
1440
          if len(fields) > 1 and not fields[0].startswith('#'):
1441
            names = fields[1:]
1442
            if hostname in names:
1443
              while hostname in names:
1444
                names.remove(hostname)
1445
              if names:
1446
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1447
              continue
1448

    
1449
          out.write(line)
1450

    
1451
        out.flush()
1452
        os.fsync(out)
1453
        os.chmod(tmpname, 0644)
1454
        os.rename(tmpname, file_name)
1455
      finally:
1456
        f.close()
1457
    finally:
1458
      out.close()
1459
  except:
1460
    RemoveFile(tmpname)
1461
    raise
1462

    
1463

    
1464
def RemoveHostFromEtcHosts(hostname):
1465
  """Wrapper around RemoveEtcHostsEntry.
1466

1467
  @type hostname: str
1468
  @param hostname: hostname that will be resolved and its
1469
      full and short name will be removed from
1470
      L{constants.ETC_HOSTS}
1471

1472
  """
1473
  hi = HostInfo(name=hostname)
1474
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
1475
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
1476

    
1477

    
1478
def TimestampForFilename():
1479
  """Returns the current time formatted for filenames.
1480

1481
  The format doesn't contain colons, as some shells and applications treat
  them as separators.
1483

1484
  """
1485
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1486

    
1487

    
1488
def CreateBackup(file_name):
1489
  """Creates a backup of a file.
1490

1491
  @type file_name: str
1492
  @param file_name: file to be backed up
1493
  @rtype: str
1494
  @return: the path to the newly created backup
1495
  @raise errors.ProgrammerError: for invalid file names
1496

1497
  """
1498
  if not os.path.isfile(file_name):
1499
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1500
                                file_name)
1501

    
1502
  prefix = ("%s.backup-%s." %
1503
            (os.path.basename(file_name), TimestampForFilename()))
1504
  dir_name = os.path.dirname(file_name)
1505

    
1506
  fsrc = open(file_name, 'rb')
1507
  try:
1508
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1509
    fdst = os.fdopen(fd, 'wb')
1510
    try:
1511
      logging.debug("Backing up %s at %s", file_name, backup_name)
1512
      shutil.copyfileobj(fsrc, fdst)
1513
    finally:
1514
      fdst.close()
1515
  finally:
1516
    fsrc.close()
1517

    
1518
  return backup_name
1519

    
1520

    
1521
def ShellQuote(value):
1522
  """Quotes shell argument according to POSIX.
1523

1524
  @type value: str
1525
  @param value: the argument to be quoted
1526
  @rtype: str
1527
  @return: the quoted value
1528

1529
  """
1530
  if _re_shell_unquoted.match(value):
1531
    return value
1532
  else:
1533
    return "'%s'" % value.replace("'", "'\\''")
1534

    
1535

    
1536
def ShellQuoteArgs(args):
1537
  """Quotes a list of shell arguments.
1538

1539
  @type args: list
1540
  @param args: list of arguments to be quoted
1541
  @rtype: str
1542
  @return: the quoted arguments concatenated with spaces
1543

1544
  """
1545
  return ' '.join([ShellQuote(i) for i in args])
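# Illustrative usage of ShellQuote/ShellQuoteArgs (not part of the original
# module):
#
#   ShellQuote("file name")               # -> "'file name'"
#   ShellQuoteArgs(["ls", "-l", "/a b"])  # -> "ls -l '/a b'"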
1546

    
1547

    
1548
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
1549
  """Simple ping implementation using TCP connect(2).
1550

1551
  Check if the given IP is reachable by attempting a TCP connect
1552
  to it.
1553

1554
  @type target: str
1555
  @param target: the IP or hostname to ping
1556
  @type port: int
1557
  @param port: the port to connect to
1558
  @type timeout: int
1559
  @param timeout: the timeout on the connection attempt
1560
  @type live_port_needed: boolean
1561
  @param live_port_needed: whether a closed port will cause the
1562
      function to return failure, as if there was a timeout
1563
  @type source: str or None
1564
  @param source: if specified, will cause the connect to be made
1565
      from this specific source address; failures to bind other
1566
      than C{EADDRNOTAVAIL} will be ignored
1567

1568
  """
1569
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1570

    
1571
  success = False
1572

    
1573
  if source is not None:
1574
    try:
1575
      sock.bind((source, 0))
1576
    except socket.error, (errcode, _):
1577
      if errcode == errno.EADDRNOTAVAIL:
1578
        success = False
1579

    
1580
  sock.settimeout(timeout)
1581

    
1582
  try:
1583
    sock.connect((target, port))
1584
    sock.close()
1585
    success = True
1586
  except socket.timeout:
1587
    success = False
1588
  except socket.error, (errcode, _):
1589
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)
1590

    
1591
  return success
1592

    
1593

    
1594
def OwnIpAddress(address):
1595
  """Check if the current host has the the given IP address.
1596

1597
  Currently this is done by TCP-pinging the address from the loopback
1598
  address.
1599

1600
  @type address: string
1601
  @param address: the address to check
1602
  @rtype: bool
1603
  @return: True if we own the address
1604

1605
  """
1606
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
1607
                 source=constants.LOCALHOST_IP_ADDRESS)
1608

    
1609

    
1610
def ListVisibleFiles(path):
1611
  """Returns a list of visible files in a directory.
1612

1613
  @type path: str
1614
  @param path: the directory to enumerate
1615
  @rtype: list
1616
  @return: the list of all files not starting with a dot
1617
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1618

1619
  """
1620
  if not IsNormAbsPath(path):
1621
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1622
                                 " absolute/normalized: '%s'" % path)
1623
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1624
  files.sort()
1625
  return files
1626

    
1627

    
1628
def GetHomeDir(user, default=None):
1629
  """Try to get the homedir of the given user.
1630

1631
  The user can be passed either as a string (denoting the name) or as
1632
  an integer (denoting the user id). If the user is not found, the
1633
  'default' argument is returned, which defaults to None.
1634

1635
  """
1636
  try:
1637
    if isinstance(user, basestring):
1638
      result = pwd.getpwnam(user)
1639
    elif isinstance(user, (int, long)):
1640
      result = pwd.getpwuid(user)
1641
    else:
1642
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1643
                                   type(user))
1644
  except KeyError:
1645
    return default
1646
  return result.pw_dir
1647

    
1648

    
1649
def NewUUID():
1650
  """Returns a random UUID.
1651

1652
  @note: This is a Linux-specific method as it uses the /proc
1653
      filesystem.
1654
  @rtype: str
1655

1656
  """
1657
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1658

    
1659

def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning a hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: a hex representation of the pseudo-random sequence

  """
  return os.urandom(numbytes).encode('hex')


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    return f.read(size)
  finally:
    f.close()


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result

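# Illustrative usage sketch for WriteFile (not part of the original module);
# the file name below is an example only:
#   WriteFile("/var/lib/ganeti/example.txt", data="hello\n", mode=0644)
# writes to a temporary file in the same directory, fsync()s it and only
# then renames it over the target, so readers never see partial content.
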
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  """
  file_lines = ReadFile(file_name).splitlines()
  full_lines = filter(bool, file_lines)
  if not file_lines or not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]


def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used
      return idx + base
  return None


def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None


class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout


def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result


def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  return [i for i in seq if i not in seen and not seen.add(i)]


def NormalizeAndValidateMac(mac):
  """Normalizes and checks if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct; only
  the colon-separated format is accepted. The address is normalized
  to lowercase.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
  if not mac_check.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  return mac.lower()

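# Illustrative behaviour sketch (not part of the original module):
#   NormalizeAndValidateMac("AA:BB:CC:DD:EE:0F")  -> "aa:bb:cc:dd:ee:0f"
#   NormalizeAndValidateMac("aa-bb-cc-dd-ee-0f")  -> raises OpPrereqError,
#   since only the colon-separated format is accepted.
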
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) for a negative duration,
      (True, None) otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None


def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    if err.errno != errno.EBADF:
      if retries > 0:
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on


def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)


def Mlockall():
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires the ctypes module.

  """
  if ctypes is None:
    logging.warning("Cannot set memory lock, ctypes module not found")
    return

  libc = ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older versions of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")

def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit
  UMASK = 077
  WORKDIR = "/"

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    os.setsid()
    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      os.chdir(WORKDIR)
      os.umask(UMASK)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    os._exit(0) # Exit parent of the first child.

  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0


def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pid = os.getpid()
  pidfilename = DaemonPidFileName(name)
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % pid)


def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    pass


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)

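# Illustrative usage sketch (not part of the original module); 'child_pid'
# stands for a hypothetical PID of a child process we spawned earlier:
#   KillProcess(child_pid, waitpid=True)
# sends SIGTERM, polls for up to 30 seconds and escalates to SIGKILL if the
# process is still alive, reaping the child so it does not stay a zombie.
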
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)

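# Illustrative round-trip sketch (not part of the original module):
#   SplitTime(1234.5) == (1234, 500000)
#   MergeTime((1234, 500000)) == 1234.5
# SplitTime truncates anything below microsecond resolution, so
# MergeTime(SplitTime(x)) may differ from x by less than 1e-6 seconds.
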
def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port


class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures by reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise

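# Illustrative usage sketch (not part of the original module); the log file
# path and program name are examples only:
#   SetupLogging("/var/log/ganeti/example.log", debug=1, program="example",
#                stderr_logging=True)
# With debug set, both the file handler and the stderr handler accept all
# levels; syslog (if enabled via the syslog argument) stays at INFO and above.
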
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This avoids things like /dir/../../other/path from being considered valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result

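# Illustrative behaviour sketch (not part of the original module):
#   PathJoin("/var/lib/ganeti", "config.data") -> "/var/lib/ganeti/config.data"
#   PathJoin("/var/lib/ganeti", "../etc/passwd") -> raises ValueError, since
#   the result is not a normalized path under the original prefix.
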
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp with the local timezone.

  """
  return time.strftime("%F %T %Z", time.localtime(secs))


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  if expired:
    msg = "Certificate is expired"

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTimestampWithTZ(not_before),
               FormatTimestampWithTZ(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)

    return (CERT_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTimestampWithTZ(not_before))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  return (None, None)

def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 time.time(), warn_days, error_days)


def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
           cert_pem))


def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      break

    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")


def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load certificate
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)

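# Illustrative round-trip sketch (not part of the original module); 'cert'
# stands for an OpenSSL.crypto.X509 object and 'cluster_key' for a shared
# HMAC secret:
#   salt = GenerateSecret(8)
#   signed_pem = SignX509Certificate(cert, cluster_key, salt)
#   (loaded_cert, loaded_salt) = LoadSignedX509Certificate(signed_pem,
#                                                          cluster_key)
# LoadSignedX509Certificate raises GenericError if the signature header is
# missing or the HMAC does not match.
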
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to calculate the digest for

  """
  if salt:
    salted_text = salt + text
  else:
    salted_text = text

  return hmac.new(key, salted_text, sha1).hexdigest()


def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to verify the digest for
  @type digest: string
  @param digest: Expected digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()

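# Illustrative usage sketch (not part of the original module); 'secret' is a
# hypothetical shared key:
#   salt = GenerateSecret(8)
#   digest = Sha1Hmac(secret, "payload", salt=salt)
#   VerifySha1Hmac(secret, "payload", digest, salt=salt)  -> True
# The salt is prepended to the text before hashing, so the same salt must be
# supplied on verification.
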
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu


def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be part of an
  element. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist

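# Illustrative behaviour sketch (not part of the original module):
#   UnescapeAndSplit("a,b,c")      -> ["a", "b", "c"]
#   UnescapeAndSplit("a\\,b,c")    -> ["a,b", "c"]      (escaped separator)
#   UnescapeAndSplit("a\\\\,b,c")  -> ["a\\", "b", "c"] (escaped backslash)
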
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple of (int, int)
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)

def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper


def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value

class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which were passed by the retried function to RetryAgain will
  be preserved in RetryTimeout, if it is raised. If such an argument was an
  exception, the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)


class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs,
  as arguments to RetryTimeout. If an exception is passed, the RaiseInner()
  method of the RetryTimeout exception can be used to reraise it.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run
    if self._limit is None or self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()


def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)

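# Illustrative usage sketch (not part of the original module); '_NodeIsUp' is
# a hypothetical check that raises RetryAgain while the node is still down:
#   try:
#     Retry(_NodeIsUp, (0.1, 1.5, 2.0), 30.0)
#   except RetryTimeout:
#     ...  # give up after 30 seconds
# The (0.1, 1.5, 2.0) tuple starts with a 0.1s delay, grows it by a factor
# of 1.5 per attempt and caps it at 2 seconds.
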
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(fd)
  return path


def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)


def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
  """Legacy function to generate self-signed X509 certificate.

  """
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
                                                   validity * 24 * 60 * 60)

  WriteFile(filename, mode=0400, data=key_pem + cert_pem)

class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)

3426
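

# Illustrative usage sketch (not part of the original module).  It assumes
# the Close/Exclusive/Shared/Unlock methods above belong to the FileLock
# class defined earlier in this file and that its constructor accepts the
# path of the lock file; the function name and path below are hypothetical.
def _ExampleFileLockUsage(filename="/var/lock/example.lock"):
  """Acquires an exclusive lock with a timeout and releases it again."""
  lock = FileLock(filename)
  try:
    # Wait up to ten seconds; errors.LockError is raised on timeout
    lock.Exclusive(timeout=10.0)
    try:
      pass # ... critical section protected by the lock ...
    finally:
      lock.Unlock()
  finally:
    lock.Close()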


class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
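

# Illustrative usage sketch (not part of the original module): reassembling
# lines from data that arrives in arbitrary chunks, e.g. from a pipe or
# socket.  The function name is hypothetical.
def _ExampleLineSplitterUsage():
  """Collects complete lines from chunked input."""
  received = []
  splitter = LineSplitter(received.append)
  splitter.write("first li")          # no complete line yet
  splitter.write("ne\nsecond line\n") # two complete lines buffered
  splitter.write("partial")           # trailing data without newline
  splitter.close()                    # flushes lines, then the partial tail
  # At this point: received == ["first line", "second line", "partial"]
  return received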


def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
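

# Illustrative usage sketch (not part of the original module): a function
# decorated with SignalHandled receives the installed handlers through the
# 'signal_handlers' keyword argument.  The function and its loop body are
# hypothetical.
@SignalHandled([signal.SIGTERM])
def _ExampleTermAwareLoop(signal_handlers=None):
  """Runs until a SIGTERM is delivered."""
  handler = signal_handlers[signal.SIGTERM]
  while not handler.called:
    time.sleep(0.1) # ... periodic work goes here ...
  logging.info("SIGTERM received, exiting loop")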


class SignalWakeupFd(object):
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
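

# Illustrative usage sketch (not part of the original module): pairing
# SignalWakeupFd with SignalHandler (defined below) so a select() call wakes
# up as soon as the signal arrives instead of waiting for the full timeout.
# A production caller would also handle EINTR from select(); the function
# name is hypothetical.
def _ExampleSignalWakeup(timeout=30.0):
  """Waits for SIGCHLD for at most 'timeout' seconds."""
  wakeup = SignalWakeupFd()
  handler = SignalHandler([signal.SIGCHLD], wakeup=wakeup)
  try:
    (readable, _, _) = select.select([wakeup.fileno()], [], [], timeout)
    if readable:
      wakeup.read(1) # drain the notification byte written by Notify()
    return handler.called
  finally:
    handler.Reset()
    wakeup.Reset()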


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when destructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
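

# Illustrative usage sketch (not part of the original module): polling the
# 'called' flag instead of installing a handler function.  The loop body is
# hypothetical.
def _ExampleSigintPolling():
  """Loops until interrupted by SIGINT (e.g. Ctrl-C)."""
  handler = SignalHandler([signal.SIGINT])
  try:
    while not handler.called:
      time.sleep(0.5) # ... do work in small, interruptible steps ...
  finally:
    # Restores whatever SIGINT handler was active before
    handler.Reset()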


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
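

# Illustrative usage sketch (not part of the original module): mixing static
# names with a regex field and validating user-supplied field names.  The
# field names are hypothetical.
def _ExampleFieldSetUsage():
  """Returns the requested fields not accepted by the set."""
  fields = FieldSet("name", "size", r"disk\.size/(\d+)")
  requested = ["name", "disk.size/2", "bogus"]
  # Matches() returns a regular expression match object or None, so the
  # regex groups are available, e.g.:
  #   fields.Matches("disk.size/2").group(1) == "2"
  return fields.NonMatching(requested) # -> ["bogus"]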