
root / lib / utils.py @ 614244bd


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
_locksheld = []
67
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
68

    
69
debug_locks = False
70

    
71
#: when set to True, L{RunCmd} is disabled
72
no_fork = False
73

    
74
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
75

    
76
HEX_CHAR_RE = r"[a-zA-Z0-9]"
77
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
78
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
79
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
80
                             HEX_CHAR_RE, HEX_CHAR_RE),
81
                            re.S | re.I)
82

    
83
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
84

    
85
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
86
                     '[a-f0-9]{4}-[a-f0-9]{12}$')
87

    
88
# Certificate verification results
89
(CERT_WARNING,
90
 CERT_ERROR) = range(1, 3)
91

    
92
# Flags for mlockall() (from bits/mman.h)
93
_MCL_CURRENT = 1
94
_MCL_FUTURE = 2
95

    
96
#: MAC checker regexp
97
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
98

    
99

    
100
class RunResult(object):
101
  """Holds the result of running external programs.
102

103
  @type exit_code: int
104
  @ivar exit_code: the exit code of the program, or None (if the program
105
      didn't exit())
106
  @type signal: int or None
107
  @ivar signal: the signal that caused the program to finish, or None
108
      (if the program wasn't terminated by a signal)
109
  @type stdout: str
110
  @ivar stdout: the standard output of the program
111
  @type stderr: str
112
  @ivar stderr: the standard error of the program
113
  @type failed: boolean
114
  @ivar failed: True in case the program was
115
      terminated by a signal or exited with a non-zero exit code
116
  @ivar fail_reason: a string detailing the termination reason
117

118
  """
119
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
120
               "failed", "fail_reason", "cmd"]
121

    
122

    
123
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
124
    self.cmd = cmd
125
    self.exit_code = exit_code
126
    self.signal = signal_
127
    self.stdout = stdout
128
    self.stderr = stderr
129
    self.failed = (signal_ is not None or exit_code != 0)
130

    
131
    if self.signal is not None:
132
      self.fail_reason = "terminated by signal %s" % self.signal
133
    elif self.exit_code is not None:
134
      self.fail_reason = "exited with exit code %s" % self.exit_code
135
    else:
136
      self.fail_reason = "unable to determine termination reason"
137

    
138
    if self.failed:
139
      logging.debug("Command '%s' failed (%s); output: %s",
140
                    self.cmd, self.fail_reason, self.output)
141

    
142
  def _GetOutput(self):
143
    """Returns the combined stdout and stderr for easier usage.
144

145
    """
146
    return self.stdout + self.stderr
147

    
148
  output = property(_GetOutput, None, None, "Return full output")
149

    
150

    
151
def _BuildCmdEnvironment(env, reset):
152
  """Builds the environment for an external program.
153

154
  """
155
  if reset:
156
    cmd_env = {}
157
  else:
158
    cmd_env = os.environ.copy()
159
    cmd_env["LC_ALL"] = "C"
160

    
161
  if env is not None:
162
    cmd_env.update(env)
163

    
164
  return cmd_env
165

    
166

    
167
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
168
           interactive=False):
169
  """Execute a (shell) command.
170

171
  The command should not read from its standard input, as it will be
172
  closed.
173

174
  @type cmd: string or list
175
  @param cmd: Command to run
176
  @type env: dict
177
  @param env: Additional environment variables
178
  @type output: str
179
  @param output: if desired, the output of the command can be
180
      saved in a file instead of the RunResult instance; this
181
      parameter denotes the file name (if not None)
182
  @type cwd: string
183
  @param cwd: if specified, will be used as the working
184
      directory for the command; the default will be /
185
  @type reset_env: boolean
186
  @param reset_env: whether to reset or keep the default os environment
187
  @type interactive: boolean
188
  @param interactive: whether we pipe stdin, stdout and stderr
189
                      (default behaviour) or run the command interactively
190
  @rtype: L{RunResult}
191
  @return: RunResult instance
192
  @raise errors.ProgrammerError: if we call this when forks are disabled
193

194
  """
195
  if no_fork:
196
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
197

    
198
  if output and interactive:
199
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
200
                                 " not be provided at the same time")
201

    
202
  if isinstance(cmd, basestring):
203
    strcmd = cmd
204
    shell = True
205
  else:
206
    cmd = [str(val) for val in cmd]
207
    strcmd = ShellQuoteArgs(cmd)
208
    shell = False
209

    
210
  if output:
211
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
212
  else:
213
    logging.debug("RunCmd %s", strcmd)
214

    
215
  cmd_env = _BuildCmdEnvironment(env, reset_env)
216

    
217
  try:
218
    if output is None:
219
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd, interactive)
220
    else:
221
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
222
      out = err = ""
223
  except OSError, err:
224
    if err.errno == errno.ENOENT:
225
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
226
                               (strcmd, err))
227
    else:
228
      raise
229

    
230
  if status >= 0:
231
    exitcode = status
232
    signal_ = None
233
  else:
234
    exitcode = None
235
    signal_ = -status
236

    
237
  return RunResult(exitcode, signal_, out, err, strcmd)
238
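# Example usage of RunCmd/RunResult (illustrative sketch; the command and
# paths are placeholders):
#
#   result = RunCmd(["/bin/ls", "-l", "/tmp"])
#   if result.failed:
#     logging.error("ls failed (%s): %s", result.fail_reason, result.output)
#   else:
#     data = result.stdout
#
# Passing a string instead of a list runs the command through the shell:
#
#   result = RunCmd("ls -l /tmp | wc -l")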

    
239

    
240
def SetupDaemonEnv(cwd="/", umask=077):
241
  """Setup a daemon's environment.
242

243
  This should be called between the first and second fork, due to
244
  setsid usage.
245

246
  @param cwd: the directory to which to chdir
247
  @param umask: the umask to setup
248

249
  """
250
  os.chdir(cwd)
251
  os.umask(umask)
252
  os.setsid()
253

    
254

    
255
def SetupDaemonFDs(output_file, output_fd):
256
  """Setups up a daemon's file descriptors.
257

258
  @param output_file: if not None, the file to which to redirect
259
      stdout/stderr
260
  @param output_fd: if not None, the file descriptor for stdout/stderr
261

262
  """
263
  # check that at most one is defined
264
  assert [output_file, output_fd].count(None) >= 1
265

    
266
  # Open /dev/null (read-only, only for stdin)
267
  devnull_fd = os.open(os.devnull, os.O_RDONLY)
268

    
269
  if output_fd is not None:
270
    pass
271
  elif output_file is not None:
272
    # Open output file
273
    try:
274
      output_fd = os.open(output_file,
275
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
276
    except EnvironmentError, err:
277
      raise Exception("Opening output file failed: %s" % err)
278
  else:
279
    output_fd = os.open(os.devnull, os.O_WRONLY)
280

    
281
  # Redirect standard I/O
282
  os.dup2(devnull_fd, 0)
283
  os.dup2(output_fd, 1)
284
  os.dup2(output_fd, 2)
285

    
286

    
287
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
288
                pidfile=None):
289
  """Start a daemon process after forking twice.
290

291
  @type cmd: string or list
292
  @param cmd: Command to run
293
  @type env: dict
294
  @param env: Additional environment variables
295
  @type cwd: string
296
  @param cwd: Working directory for the program
297
  @type output: string
298
  @param output: Path to file in which to save the output
299
  @type output_fd: int
300
  @param output_fd: File descriptor for output
301
  @type pidfile: string
302
  @param pidfile: Process ID file
303
  @rtype: int
304
  @return: Daemon process ID
305
  @raise errors.ProgrammerError: if we call this when forks are disabled
306

307
  """
308
  if no_fork:
309
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
310
                                 " disabled")
311

    
312
  if output and output_fd is not None:
313
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
314
                                 " specified")
315

    
316
  if isinstance(cmd, basestring):
317
    cmd = ["/bin/sh", "-c", cmd]
318

    
319
  strcmd = ShellQuoteArgs(cmd)
320

    
321
  if output:
322
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
323
  else:
324
    logging.debug("StartDaemon %s", strcmd)
325

    
326
  cmd_env = _BuildCmdEnvironment(env, False)
327

    
328
  # Create pipe for sending PID back
329
  (pidpipe_read, pidpipe_write) = os.pipe()
330
  try:
331
    try:
332
      # Create pipe for sending error messages
333
      (errpipe_read, errpipe_write) = os.pipe()
334
      try:
335
        try:
336
          # First fork
337
          pid = os.fork()
338
          if pid == 0:
339
            try:
340
              # Child process, won't return
341
              _StartDaemonChild(errpipe_read, errpipe_write,
342
                                pidpipe_read, pidpipe_write,
343
                                cmd, cmd_env, cwd,
344
                                output, output_fd, pidfile)
345
            finally:
346
              # Well, maybe child process failed
347
              os._exit(1) # pylint: disable-msg=W0212
348
        finally:
349
          _CloseFDNoErr(errpipe_write)
350

    
351
        # Wait for daemon to be started (or an error message to
352
        # arrive) and read up to 100 KB as an error message
353
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
354
      finally:
355
        _CloseFDNoErr(errpipe_read)
356
    finally:
357
      _CloseFDNoErr(pidpipe_write)
358

    
359
    # Read up to 128 bytes for PID
360
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
361
  finally:
362
    _CloseFDNoErr(pidpipe_read)
363

    
364
  # Try to avoid zombies by waiting for child process
365
  try:
366
    os.waitpid(pid, 0)
367
  except OSError:
368
    pass
369

    
370
  if errormsg:
371
    raise errors.OpExecError("Error when starting daemon process: %r" %
372
                             errormsg)
373

    
374
  try:
375
    return int(pidtext)
376
  except (ValueError, TypeError), err:
377
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
378
                             (pidtext, err))
379
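# Illustrative sketch of starting a daemon and checking on it later via its
# PID file (the command and paths below are placeholders):
#
#   pid = StartDaemon(["/usr/sbin/mydaemon", "--no-fork"],
#                     output="/var/log/mydaemon.log",
#                     pidfile="/var/run/mydaemon.pid")
#   # While the daemon keeps the PID file locked, ReadLockedPidFile returns
#   # its PID; once the daemon exits and the lock is released, it returns
#   # None.
#   running_pid = ReadLockedPidFile("/var/run/mydaemon.pid")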

    
380

    
381
def _StartDaemonChild(errpipe_read, errpipe_write,
382
                      pidpipe_read, pidpipe_write,
383
                      args, env, cwd,
384
                      output, fd_output, pidfile):
385
  """Child process for starting daemon.
386

387
  """
388
  try:
389
    # Close parent's side
390
    _CloseFDNoErr(errpipe_read)
391
    _CloseFDNoErr(pidpipe_read)
392

    
393
    # First child process
394
    SetupDaemonEnv()
395

    
396
    # And fork for the second time
397
    pid = os.fork()
398
    if pid != 0:
399
      # Exit first child process
400
      os._exit(0) # pylint: disable-msg=W0212
401

    
402
    # Make sure pipe is closed on execv* (and thereby notifies
403
    # original process)
404
    SetCloseOnExecFlag(errpipe_write, True)
405

    
406
    # List of file descriptors to be left open
407
    noclose_fds = [errpipe_write]
408

    
409
    # Open PID file
410
    if pidfile:
411
      fd_pidfile = WritePidFile(pidfile)
412

    
413
      # Keeping the file open to hold the lock
414
      noclose_fds.append(fd_pidfile)
415

    
416
      SetCloseOnExecFlag(fd_pidfile, False)
417
    else:
418
      fd_pidfile = None
419

    
420
    SetupDaemonFDs(output, fd_output)
421

    
422
    # Send daemon PID to parent
423
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
424

    
425
    # Close all file descriptors except stdio and error message pipe
426
    CloseFDs(noclose_fds=noclose_fds)
427

    
428
    # Change working directory
429
    os.chdir(cwd)
430

    
431
    if env is None:
432
      os.execvp(args[0], args)
433
    else:
434
      os.execvpe(args[0], args, env)
435
  except: # pylint: disable-msg=W0702
436
    try:
437
      # Report errors to original process
438
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
439
    except: # pylint: disable-msg=W0702
440
      # Ignore errors in error handling
441
      pass
442

    
443
  os._exit(1) # pylint: disable-msg=W0212
444

    
445

    
446
def WriteErrorToFD(fd, err):
447
  """Possibly write an error message to a fd.
448

449
  @type fd: None or int (file descriptor)
450
  @param fd: if not None, the error will be written to this fd
451
  @param err: string, the error message
452

453
  """
454
  if fd is None:
455
    return
456

    
457
  if not err:
458
    err = "<unknown error>"
459

    
460
  RetryOnSignal(os.write, fd, err)
461

    
462

    
463
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive):
464
  """Run a command and return its output.
465

466
  @type  cmd: string or list
467
  @param cmd: Command to run
468
  @type env: dict
469
  @param env: The environment to use
470
  @type via_shell: bool
471
  @param via_shell: if we should run via the shell
472
  @type cwd: string
473
  @param cwd: the working directory for the program
474
  @type interactive: boolean
475
  @param interactive: Run command interactive (without piping)
476
  @rtype: tuple
477
  @return: (out, err, status)
478

479
  """
480
  poller = select.poll()
481

    
482
  stderr = subprocess.PIPE
483
  stdout = subprocess.PIPE
484
  stdin = subprocess.PIPE
485

    
486
  if interactive:
487
    stderr = stdout = stdin = None
488

    
489
  child = subprocess.Popen(cmd, shell=via_shell,
490
                           stderr=stderr,
491
                           stdout=stdout,
492
                           stdin=stdin,
493
                           close_fds=True, env=env,
494
                           cwd=cwd)
495

    
496
  out = StringIO()
497
  err = StringIO()
498
  if not interactive:
499
    child.stdin.close()
500
    poller.register(child.stdout, select.POLLIN)
501
    poller.register(child.stderr, select.POLLIN)
502
    fdmap = {
503
      child.stdout.fileno(): (out, child.stdout),
504
      child.stderr.fileno(): (err, child.stderr),
505
      }
506
    for fd in fdmap:
507
      SetNonblockFlag(fd, True)
508

    
509
    while fdmap:
510
      pollresult = RetryOnSignal(poller.poll)
511

    
512
      for fd, event in pollresult:
513
        if event & select.POLLIN or event & select.POLLPRI:
514
          data = fdmap[fd][1].read()
515
          # no data from read signifies EOF (the same as POLLHUP)
516
          if not data:
517
            poller.unregister(fd)
518
            del fdmap[fd]
519
            continue
520
          fdmap[fd][0].write(data)
521
        if (event & select.POLLNVAL or event & select.POLLHUP or
522
            event & select.POLLERR):
523
          poller.unregister(fd)
524
          del fdmap[fd]
525

    
526
  out = out.getvalue()
527
  err = err.getvalue()
528

    
529
  status = child.wait()
530
  return out, err, status
531

    
532

    
533
def _RunCmdFile(cmd, env, via_shell, output, cwd):
534
  """Run a command and save its output to a file.
535

536
  @type  cmd: string or list
537
  @param cmd: Command to run
538
  @type env: dict
539
  @param env: The environment to use
540
  @type via_shell: bool
541
  @param via_shell: if we should run via the shell
542
  @type output: str
543
  @param output: the filename in which to save the output
544
  @type cwd: string
545
  @param cwd: the working directory for the program
546
  @rtype: int
547
  @return: the exit status
548

549
  """
550
  fh = open(output, "a")
551
  try:
552
    child = subprocess.Popen(cmd, shell=via_shell,
553
                             stderr=subprocess.STDOUT,
554
                             stdout=fh,
555
                             stdin=subprocess.PIPE,
556
                             close_fds=True, env=env,
557
                             cwd=cwd)
558

    
559
    child.stdin.close()
560
    status = child.wait()
561
  finally:
562
    fh.close()
563
  return status
564

    
565

    
566
def SetCloseOnExecFlag(fd, enable):
567
  """Sets or unsets the close-on-exec flag on a file descriptor.
568

569
  @type fd: int
570
  @param fd: File descriptor
571
  @type enable: bool
572
  @param enable: Whether to set or unset it.
573

574
  """
575
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)
576

    
577
  if enable:
578
    flags |= fcntl.FD_CLOEXEC
579
  else:
580
    flags &= ~fcntl.FD_CLOEXEC
581

    
582
  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
583

    
584

    
585
def SetNonblockFlag(fd, enable):
586
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
587

588
  @type fd: int
589
  @param fd: File descriptor
590
  @type enable: bool
591
  @param enable: Whether to set or unset it
592

593
  """
594
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)
595

    
596
  if enable:
597
    flags |= os.O_NONBLOCK
598
  else:
599
    flags &= ~os.O_NONBLOCK
600

    
601
  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
602
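# Example of toggling descriptor flags on a freshly created pipe (illustrative
# only):
#
#   (read_fd, write_fd) = os.pipe()
#   SetNonblockFlag(read_fd, True)      # os.read now raises EAGAIN instead of
#                                       # blocking when no data is available
#   SetCloseOnExecFlag(write_fd, True)  # not inherited across execve()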

    
603

    
604
def RetryOnSignal(fn, *args, **kwargs):
605
  """Calls a function again if it failed due to EINTR.
606

607
  """
608
  while True:
609
    try:
610
      return fn(*args, **kwargs)
611
    except EnvironmentError, err:
612
      if err.errno != errno.EINTR:
613
        raise
614
    except (socket.error, select.error), err:
615
      # In python 2.6 and above select.error is an IOError, so it's handled
616
      # above, in 2.5 and below it's not, and it's handled here.
617
      if not (err.args and err.args[0] == errno.EINTR):
618
        raise
619
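# Typical use of RetryOnSignal: wrap a single blocking system call so that an
# interrupting signal (EINTR) does not abort it ("fd" and "buf" below are
# placeholders):
#
#   data = RetryOnSignal(os.read, fd, 4096)
#   RetryOnSignal(os.write, fd, buf)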

    
620

    
621
def RunParts(dir_name, env=None, reset_env=False):
622
  """Run Scripts or programs in a directory
623

624
  @type dir_name: string
625
  @param dir_name: absolute path to a directory
626
  @type env: dict
627
  @param env: The environment to use
628
  @type reset_env: boolean
629
  @param reset_env: whether to reset or keep the default os environment
630
  @rtype: list of tuples
631
  @return: list of (name, one of the RUNPARTS_* statuses, RunResult)
632

633
  """
634
  rr = []
635

    
636
  try:
637
    dir_contents = ListVisibleFiles(dir_name)
638
  except OSError, err:
639
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
640
    return rr
641

    
642
  for relname in sorted(dir_contents):
643
    fname = PathJoin(dir_name, relname)
644
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
645
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
646
      rr.append((relname, constants.RUNPARTS_SKIP, None))
647
    else:
648
      try:
649
        result = RunCmd([fname], env=env, reset_env=reset_env)
650
      except Exception, err: # pylint: disable-msg=W0703
651
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
652
      else:
653
        rr.append((relname, constants.RUNPARTS_RUN, result))
654

    
655
  return rr
656
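# Example of consuming the RunParts result list (the directory is a
# placeholder):
#
#   for (relname, status, runresult) in RunParts("/etc/myhooks.d"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("%s could not be run: %s", relname, runresult)
#     elif status == constants.RUNPARTS_RUN and runresult.failed:
#       logging.warning("%s failed: %s", relname, runresult.fail_reason)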

    
657

    
658
def RemoveFile(filename):
659
  """Remove a file ignoring some errors.
660

661
  Remove a file, ignoring non-existing ones or directories. Other
662
  errors are propagated to the caller.
663

664
  @type filename: str
665
  @param filename: the file to be removed
666

667
  """
668
  try:
669
    os.unlink(filename)
670
  except OSError, err:
671
    if err.errno not in (errno.ENOENT, errno.EISDIR):
672
      raise
673

    
674

    
675
def RemoveDir(dirname):
676
  """Remove an empty directory.
677

678
  Remove a directory, ignoring non-existing ones.
679
  Other errors are propagated to the caller. This includes the case
680
  where the directory is not empty, so it can't be removed.
681

682
  @type dirname: str
683
  @param dirname: the empty directory to be removed
684

685
  """
686
  try:
687
    os.rmdir(dirname)
688
  except OSError, err:
689
    if err.errno != errno.ENOENT:
690
      raise
691

    
692

    
693
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
694
  """Renames a file.
695

696
  @type old: string
697
  @param old: Original path
698
  @type new: string
699
  @param new: New path
700
  @type mkdir: bool
701
  @param mkdir: Whether to create target directory if it doesn't exist
702
  @type mkdir_mode: int
703
  @param mkdir_mode: Mode for newly created directories
704

705
  """
706
  try:
707
    return os.rename(old, new)
708
  except OSError, err:
709
    # In at least one use case of this function, the job queue, directory
710
    # creation is very rare. Checking for the directory before renaming is not
711
    # as efficient.
712
    if mkdir and err.errno == errno.ENOENT:
713
      # Create directory and try again
714
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
715

    
716
      return os.rename(old, new)
717

    
718
    raise
719

    
720

    
721
def Makedirs(path, mode=0750):
722
  """Super-mkdir; create a leaf directory and all intermediate ones.
723

724
  This is a wrapper around C{os.makedirs} adding error handling not implemented
725
  before Python 2.5.
726

727
  """
728
  try:
729
    os.makedirs(path, mode)
730
  except OSError, err:
731
    # Ignore EEXIST. This is only handled in os.makedirs as included in
732
    # Python 2.5 and above.
733
    if err.errno != errno.EEXIST or not os.path.exists(path):
734
      raise
735

    
736

    
737
def ResetTempfileModule():
738
  """Resets the random name generator of the tempfile module.
739

740
  This function should be called after C{os.fork} in the child process to
741
  ensure it creates a newly seeded random generator. Otherwise it would
742
  generate the same random parts as the parent process. If several processes
743
  race for the creation of a temporary file, this could lead to one not getting
744
  a temporary name.
745

746
  """
747
  # pylint: disable-msg=W0212
748
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
749
    tempfile._once_lock.acquire()
750
    try:
751
      # Reset random name generator
752
      tempfile._name_sequence = None
753
    finally:
754
      tempfile._once_lock.release()
755
  else:
756
    logging.critical("The tempfile module misses at least one of the"
757
                     " '_once_lock' and '_name_sequence' attributes")
758

    
759

    
760
def _FingerprintFile(filename):
761
  """Compute the fingerprint of a file.
762

763
  If the file does not exist, None will be returned
764
  instead.
765

766
  @type filename: str
767
  @param filename: the filename to checksum
768
  @rtype: str
769
  @return: the hex digest of the sha checksum of the contents
770
      of the file
771

772
  """
773
  if not (os.path.exists(filename) and os.path.isfile(filename)):
774
    return None
775

    
776
  f = open(filename)
777

    
778
  fp = compat.sha1_hash()
779
  while True:
780
    data = f.read(4096)
781
    if not data:
782
      break
783

    
784
    fp.update(data)
785

    
786
  return fp.hexdigest()
787

    
788

    
789
def FingerprintFiles(files):
790
  """Compute fingerprints for a list of files.
791

792
  @type files: list
793
  @param files: the list of filename to fingerprint
794
  @rtype: dict
795
  @return: a dictionary filename: fingerprint, holding only
796
      existing files
797

798
  """
799
  ret = {}
800

    
801
  for filename in files:
802
    cksum = _FingerprintFile(filename)
803
    if cksum:
804
      ret[filename] = cksum
805

    
806
  return ret
807

    
808

    
809
def ForceDictType(target, key_types, allowed_values=None):
810
  """Force the values of a dict to have certain types.
811

812
  @type target: dict
813
  @param target: the dict to update
814
  @type key_types: dict
815
  @param key_types: dict mapping target dict keys to types
816
                    in constants.ENFORCEABLE_TYPES
817
  @type allowed_values: list
818
  @keyword allowed_values: list of specially allowed values
819

820
  """
821
  if allowed_values is None:
822
    allowed_values = []
823

    
824
  if not isinstance(target, dict):
825
    msg = "Expected dictionary, got '%s'" % target
826
    raise errors.TypeEnforcementError(msg)
827

    
828
  for key in target:
829
    if key not in key_types:
830
      msg = "Unknown key '%s'" % key
831
      raise errors.TypeEnforcementError(msg)
832

    
833
    if target[key] in allowed_values:
834
      continue
835

    
836
    ktype = key_types[key]
837
    if ktype not in constants.ENFORCEABLE_TYPES:
838
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
839
      raise errors.ProgrammerError(msg)
840

    
841
    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
842
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
843
        pass
844
      elif not isinstance(target[key], basestring):
845
        if isinstance(target[key], bool) and not target[key]:
846
          target[key] = ''
847
        else:
848
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
849
          raise errors.TypeEnforcementError(msg)
850
    elif ktype == constants.VTYPE_BOOL:
851
      if isinstance(target[key], basestring) and target[key]:
852
        if target[key].lower() == constants.VALUE_FALSE:
853
          target[key] = False
854
        elif target[key].lower() == constants.VALUE_TRUE:
855
          target[key] = True
856
        else:
857
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
858
          raise errors.TypeEnforcementError(msg)
859
      elif target[key]:
860
        target[key] = True
861
      else:
862
        target[key] = False
863
    elif ktype == constants.VTYPE_SIZE:
864
      try:
865
        target[key] = ParseUnit(target[key])
866
      except errors.UnitParseError, err:
867
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
868
              (key, target[key], err)
869
        raise errors.TypeEnforcementError(msg)
870
    elif ktype == constants.VTYPE_INT:
871
      try:
872
        target[key] = int(target[key])
873
      except (ValueError, TypeError):
874
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
875
        raise errors.TypeEnforcementError(msg)
876
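# Example of enforcing value types on a parameter dictionary (the keys are
# made-up parameter names):
#
#   params = {"memory": "512M", "auto_balance": 1}
#   ForceDictType(params, {"memory": constants.VTYPE_SIZE,
#                          "auto_balance": constants.VTYPE_BOOL})
#   # params is now {"memory": 512, "auto_balance": True}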

    
877

    
878
def _GetProcStatusPath(pid):
879
  """Returns the path for a PID's proc status file.
880

881
  @type pid: int
882
  @param pid: Process ID
883
  @rtype: string
884

885
  """
886
  return "/proc/%d/status" % pid
887

    
888

    
889
def IsProcessAlive(pid):
890
  """Check if a given pid exists on the system.
891

892
  @note: zombie status is not handled, so zombie processes
893
      will be returned as alive
894
  @type pid: int
895
  @param pid: the process ID to check
896
  @rtype: boolean
897
  @return: True if the process exists
898

899
  """
900
  def _TryStat(name):
901
    try:
902
      os.stat(name)
903
      return True
904
    except EnvironmentError, err:
905
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
906
        return False
907
      elif err.errno == errno.EINVAL:
908
        raise RetryAgain(err)
909
      raise
910

    
911
  assert isinstance(pid, int), "pid must be an integer"
912
  if pid <= 0:
913
    return False
914

    
915
  # /proc in a multiprocessor environment can have strange behaviors.
916
  # Retry the os.stat a few times until we get a good result.
917
  try:
918
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
919
                 args=[_GetProcStatusPath(pid)])
920
  except RetryTimeout, err:
921
    err.RaiseInner()
922

    
923

    
924
def _ParseSigsetT(sigset):
925
  """Parse a rendered sigset_t value.
926

927
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
928
  function.
929

930
  @type sigset: string
931
  @param sigset: Rendered signal set from /proc/$pid/status
932
  @rtype: set
933
  @return: Set of all enabled signal numbers
934

935
  """
936
  result = set()
937

    
938
  signum = 0
939
  for ch in reversed(sigset):
940
    chv = int(ch, 16)
941

    
942
    # The following could be done in a loop, but it's easier to read and
943
    # understand in the unrolled form
944
    if chv & 1:
945
      result.add(signum + 1)
946
    if chv & 2:
947
      result.add(signum + 2)
948
    if chv & 4:
949
      result.add(signum + 3)
950
    if chv & 8:
951
      result.add(signum + 4)
952

    
953
    signum += 4
954

    
955
  return result
956
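# Example with an illustrative value: the hexadecimal sigset "0000000000010002"
# has bits 1 and 16 set, which translates to signal numbers 2 (SIGINT) and
# 17 (SIGCHLD):
#
#   _ParseSigsetT("0000000000010002")  ->  set([2, 17])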

    
957

    
958
def _GetProcStatusField(pstatus, field):
959
  """Retrieves a field from the contents of a proc status file.
960

961
  @type pstatus: string
962
  @param pstatus: Contents of /proc/$pid/status
963
  @type field: string
964
  @param field: Name of field whose value should be returned
965
  @rtype: string
966

967
  """
968
  for line in pstatus.splitlines():
969
    parts = line.split(":", 1)
970

    
971
    if len(parts) < 2 or parts[0] != field:
972
      continue
973

    
974
    return parts[1].strip()
975

    
976
  return None
977

    
978

    
979
def IsProcessHandlingSignal(pid, signum, status_path=None):
980
  """Checks whether a process is handling a signal.
981

982
  @type pid: int
983
  @param pid: Process ID
984
  @type signum: int
985
  @param signum: Signal number
986
  @rtype: bool
987

988
  """
989
  if status_path is None:
990
    status_path = _GetProcStatusPath(pid)
991

    
992
  try:
993
    proc_status = ReadFile(status_path)
994
  except EnvironmentError, err:
995
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
996
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
997
      return False
998
    raise
999

    
1000
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
1001
  if sigcgt is None:
1002
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
1003

    
1004
  # Now check whether signal is handled
1005
  return signum in _ParseSigsetT(sigcgt)
1006

    
1007

    
1008
def ReadPidFile(pidfile):
1009
  """Read a pid from a file.
1010

1011
  @type  pidfile: string
1012
  @param pidfile: path to the file containing the pid
1013
  @rtype: int
1014
  @return: The process id, if the file exists and contains a valid PID,
1015
           otherwise 0
1016

1017
  """
1018
  try:
1019
    raw_data = ReadOneLineFile(pidfile)
1020
  except EnvironmentError, err:
1021
    if err.errno != errno.ENOENT:
1022
      logging.exception("Can't read pid file")
1023
    return 0
1024

    
1025
  try:
1026
    pid = int(raw_data)
1027
  except (TypeError, ValueError), err:
1028
    logging.info("Can't parse pid file contents", exc_info=True)
1029
    return 0
1030

    
1031
  return pid
1032

    
1033

    
1034
def ReadLockedPidFile(path):
1035
  """Reads a locked PID file.
1036

1037
  This can be used together with L{StartDaemon}.
1038

1039
  @type path: string
1040
  @param path: Path to PID file
1041
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
1042

1043
  """
1044
  try:
1045
    fd = os.open(path, os.O_RDONLY)
1046
  except EnvironmentError, err:
1047
    if err.errno == errno.ENOENT:
1048
      # PID file doesn't exist
1049
      return None
1050
    raise
1051

    
1052
  try:
1053
    try:
1054
      # Try to acquire lock
1055
      LockFile(fd)
1056
    except errors.LockError:
1057
      # Couldn't lock, daemon is running
1058
      return int(os.read(fd, 100))
1059
  finally:
1060
    os.close(fd)
1061

    
1062
  return None
1063

    
1064

    
1065
def MatchNameComponent(key, name_list, case_sensitive=True):
1066
  """Try to match a name against a list.
1067

1068
  This function will try to match a name like test1 against a list
1069
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
1070
  this list, I{'test1'} as well as I{'test1.example'} will match, but
1071
  not I{'test1.ex'}. A multiple match will be considered as no match
1072
  at all (e.g. I{'test1'} against C{['test1.example.com',
1073
  'test1.example.org']}), except when the key fully matches an entry
1074
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
1075

1076
  @type key: str
1077
  @param key: the name to be searched
1078
  @type name_list: list
1079
  @param name_list: the list of strings against which to search the key
1080
  @type case_sensitive: boolean
1081
  @param case_sensitive: whether to provide a case-sensitive match
1082

1083
  @rtype: None or str
1084
  @return: None if there is no match I{or} if there are multiple matches,
1085
      otherwise the element from the list which matches
1086

1087
  """
1088
  if key in name_list:
1089
    return key
1090

    
1091
  re_flags = 0
1092
  if not case_sensitive:
1093
    re_flags |= re.IGNORECASE
1094
    key = key.upper()
1095
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
1096
  names_filtered = []
1097
  string_matches = []
1098
  for name in name_list:
1099
    if mo.match(name) is not None:
1100
      names_filtered.append(name)
1101
      if not case_sensitive and key == name.upper():
1102
        string_matches.append(name)
1103

    
1104
  if len(string_matches) == 1:
1105
    return string_matches[0]
1106
  if len(names_filtered) == 1:
1107
    return names_filtered[0]
1108
  return None
1109
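# A few illustrative matches (the host names are placeholders):
#
#   MatchNameComponent("node1", ["node1.example.com", "node2.example.com"])
#     -> "node1.example.com"
#   MatchNameComponent("node1", ["node1.example.com", "node1.example.org"])
#     -> None (ambiguous)
#   MatchNameComponent("node1", ["node1", "node1.example.com"])
#     -> "node1" (full match wins)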

    
1110

    
1111
def ValidateServiceName(name):
1112
  """Validate the given service name.
1113

1114
  @type name: number or string
1115
  @param name: Service name or port specification
1116

1117
  """
1118
  try:
1119
    numport = int(name)
1120
  except (ValueError, TypeError):
1121
    # Non-numeric service name
1122
    valid = _VALID_SERVICE_NAME_RE.match(name)
1123
  else:
1124
    # Numeric port (protocols other than TCP or UDP might need adjustments
1125
    # here)
1126
    valid = (numport >= 0 and numport < (1 << 16))
1127

    
1128
  if not valid:
1129
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
1130
                               errors.ECODE_INVAL)
1131

    
1132
  return name
1133

    
1134

    
1135
def ListVolumeGroups():
1136
  """List volume groups and their size
1137

1138
  @rtype: dict
1139
  @return:
1140
       Dictionary with keys volume name and values
1141
       the size of the volume
1142

1143
  """
1144
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1145
  result = RunCmd(command)
1146
  retval = {}
1147
  if result.failed:
1148
    return retval
1149

    
1150
  for line in result.stdout.splitlines():
1151
    try:
1152
      name, size = line.split()
1153
      size = int(float(size))
1154
    except (IndexError, ValueError), err:
1155
      logging.error("Invalid output from vgs (%s): %s", err, line)
1156
      continue
1157

    
1158
    retval[name] = size
1159

    
1160
  return retval
1161

    
1162

    
1163
def BridgeExists(bridge):
1164
  """Check whether the given bridge exists in the system
1165

1166
  @type bridge: str
1167
  @param bridge: the bridge name to check
1168
  @rtype: boolean
1169
  @return: True if it does
1170

1171
  """
1172
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1173

    
1174

    
1175
def NiceSort(name_list):
1176
  """Sort a list of strings based on digit and non-digit groupings.
1177

1178
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1179
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1180
  'a11']}.
1181

1182
  The sort algorithm breaks each name in groups of either only-digits
1183
  or no-digits. Only the first eight such groups are considered, and
1184
  after that we just use what's left of the string.
1185

1186
  @type name_list: list
1187
  @param name_list: the names to be sorted
1188
  @rtype: list
1189
  @return: a copy of the name list sorted with our algorithm
1190

1191
  """
1192
  _SORTER_BASE = "(\D+|\d+)"
1193
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1194
                                                  _SORTER_BASE, _SORTER_BASE,
1195
                                                  _SORTER_BASE, _SORTER_BASE,
1196
                                                  _SORTER_BASE, _SORTER_BASE)
1197
  _SORTER_RE = re.compile(_SORTER_FULL)
1198
  _SORTER_NODIGIT = re.compile("^\D*$")
1199
  def _TryInt(val):
1200
    """Attempts to convert a variable to integer."""
1201
    if val is None or _SORTER_NODIGIT.match(val):
1202
      return val
1203
    rval = int(val)
1204
    return rval
1205

    
1206
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1207
             for name in name_list]
1208
  to_sort.sort()
1209
  return [tup[1] for tup in to_sort]
1210

    
1211

    
1212
def TryConvert(fn, val):
1213
  """Try to convert a value ignoring errors.
1214

1215
  This function tries to apply function I{fn} to I{val}. If no
1216
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1217
  the result, else it will return the original value. Any other
1218
  exceptions are propagated to the caller.
1219

1220
  @type fn: callable
1221
  @param fn: function to apply to the value
1222
  @param val: the value to be converted
1223
  @return: The converted value if the conversion was successful,
1224
      otherwise the original value.
1225

1226
  """
1227
  try:
1228
    nv = fn(val)
1229
  except (ValueError, TypeError):
1230
    nv = val
1231
  return nv
1232

    
1233

    
1234
def IsValidShellParam(word):
1235
  """Verifies is the given word is safe from the shell's p.o.v.
1236

1237
  This means that we can pass this to a command via the shell and be
1238
  sure that it doesn't alter the command line and is passed as such to
1239
  the actual command.
1240

1241
  Note that we are overly restrictive here, in order to be on the safe
1242
  side.
1243

1244
  @type word: str
1245
  @param word: the word to check
1246
  @rtype: boolean
1247
  @return: True if the word is 'safe'
1248

1249
  """
1250
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1251

    
1252

    
1253
def BuildShellCmd(template, *args):
1254
  """Build a safe shell command line from the given arguments.
1255

1256
  This function will check all arguments in the args list so that they
1257
  are valid shell parameters (i.e. they don't contain shell
1258
  metacharacters). If everything is ok, it will return the result of
1259
  template % args.
1260

1261
  @type template: str
1262
  @param template: the string holding the template for the
1263
      string formatting
1264
  @rtype: str
1265
  @return: the expanded command line
1266

1267
  """
1268
  for word in args:
1269
    if not IsValidShellParam(word):
1270
      raise errors.ProgrammerError("Shell argument '%s' contains"
1271
                                   " invalid characters" % word)
1272
  return template % args
1273

    
1274

    
1275
def FormatUnit(value, units):
1276
  """Formats an incoming number of MiB with the appropriate unit.
1277

1278
  @type value: int
1279
  @param value: integer representing the value in MiB (1048576)
1280
  @type units: char
1281
  @param units: the type of formatting we should do:
1282
      - 'h' for automatic scaling
1283
      - 'm' for MiBs
1284
      - 'g' for GiBs
1285
      - 't' for TiBs
1286
  @rtype: str
1287
  @return: the formatted value (with suffix)
1288

1289
  """
1290
  if units not in ('m', 'g', 't', 'h'):
1291
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1292

    
1293
  suffix = ''
1294

    
1295
  if units == 'm' or (units == 'h' and value < 1024):
1296
    if units == 'h':
1297
      suffix = 'M'
1298
    return "%d%s" % (round(value, 0), suffix)
1299

    
1300
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1301
    if units == 'h':
1302
      suffix = 'G'
1303
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1304

    
1305
  else:
1306
    if units == 'h':
1307
      suffix = 'T'
1308
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1309
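# Worked examples for the automatic scaling mode (input values are MiB):
#
#   FormatUnit(512, 'h')      -> "512M"
#   FormatUnit(2048, 'h')     -> "2.0G"
#   FormatUnit(3145728, 'h')  -> "3.0T"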

    
1310

    
1311
def ParseUnit(input_string):
1312
  """Tries to extract number and scale from the given string.
1313

1314
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1315
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1316
  is always an int in MiB.
1317

1318
  """
1319
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1320
  if not m:
1321
    raise errors.UnitParseError("Invalid format")
1322

    
1323
  value = float(m.groups()[0])
1324

    
1325
  unit = m.groups()[1]
1326
  if unit:
1327
    lcunit = unit.lower()
1328
  else:
1329
    lcunit = 'm'
1330

    
1331
  if lcunit in ('m', 'mb', 'mib'):
1332
    # Value already in MiB
1333
    pass
1334

    
1335
  elif lcunit in ('g', 'gb', 'gib'):
1336
    value *= 1024
1337

    
1338
  elif lcunit in ('t', 'tb', 'tib'):
1339
    value *= 1024 * 1024
1340

    
1341
  else:
1342
    raise errors.UnitParseError("Unknown unit: %s" % unit)
1343

    
1344
  # Make sure we round up
1345
  if int(value) < value:
1346
    value += 1
1347

    
1348
  # Round up to the next multiple of 4
1349
  value = int(value)
1350
  if value % 4:
1351
    value += 4 - value % 4
1352

    
1353
  return value
1354
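# Worked examples (the result is always MiB, rounded up to a multiple of 4):
#
#   ParseUnit("512M")  -> 512
#   ParseUnit("1g")    -> 1024
#   ParseUnit("250")   -> 252   (rounded up to the next multiple of 4)
#   ParseUnit("1.3G")  -> 1332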

    
1355

    
1356
def ParseCpuMask(cpu_mask):
1357
  """Parse a CPU mask definition and return the list of CPU IDs.
1358

1359
  CPU mask format: comma-separated list of CPU IDs
1360
  or dash-separated ID ranges
1361
  Example: "0-2,5" -> "0,1,2,5"
1362

1363
  @type cpu_mask: str
1364
  @param cpu_mask: CPU mask definition
1365
  @rtype: list of int
1366
  @return: list of CPU IDs
1367

1368
  """
1369
  if not cpu_mask:
1370
    return []
1371
  cpu_list = []
1372
  for range_def in cpu_mask.split(","):
1373
    boundaries = range_def.split("-")
1374
    n_elements = len(boundaries)
1375
    if n_elements > 2:
1376
      raise errors.ParseError("Invalid CPU ID range definition"
1377
                              " (only one hyphen allowed): %s" % range_def)
1378
    try:
1379
      lower = int(boundaries[0])
1380
    except (ValueError, TypeError), err:
1381
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
1382
                              " CPU ID range: %s" % str(err))
1383
    try:
1384
      higher = int(boundaries[-1])
1385
    except (ValueError, TypeError), err:
1386
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
1387
                              " CPU ID range: %s" % str(err))
1388
    if lower > higher:
1389
      raise errors.ParseError("Invalid CPU ID range definition"
1390
                              " (%d > %d): %s" % (lower, higher, range_def))
1391
    cpu_list.extend(range(lower, higher + 1))
1392
  return cpu_list
1393

    
1394

    
1395
def AddAuthorizedKey(file_obj, key):
1396
  """Adds an SSH public key to an authorized_keys file.
1397

1398
  @type file_obj: str or file handle
1399
  @param file_obj: path to authorized_keys file
1400
  @type key: str
1401
  @param key: string containing key
1402

1403
  """
1404
  key_fields = key.split()
1405

    
1406
  if isinstance(file_obj, basestring):
1407
    f = open(file_obj, 'a+')
1408
  else:
1409
    f = file_obj
1410

    
1411
  try:
1412
    nl = True
1413
    for line in f:
1414
      # Ignore whitespace changes
1415
      if line.split() == key_fields:
1416
        break
1417
      nl = line.endswith('\n')
1418
    else:
1419
      if not nl:
1420
        f.write("\n")
1421
      f.write(key.rstrip('\r\n'))
1422
      f.write("\n")
1423
      f.flush()
1424
  finally:
1425
    f.close()
1426

    
1427

    
1428
def RemoveAuthorizedKey(file_name, key):
1429
  """Removes an SSH public key from an authorized_keys file.
1430

1431
  @type file_name: str
1432
  @param file_name: path to authorized_keys file
1433
  @type key: str
1434
  @param key: string containing key
1435

1436
  """
1437
  key_fields = key.split()
1438

    
1439
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1440
  try:
1441
    out = os.fdopen(fd, 'w')
1442
    try:
1443
      f = open(file_name, 'r')
1444
      try:
1445
        for line in f:
1446
          # Ignore whitespace changes while comparing lines
1447
          if line.split() != key_fields:
1448
            out.write(line)
1449

    
1450
        out.flush()
1451
        os.rename(tmpname, file_name)
1452
      finally:
1453
        f.close()
1454
    finally:
1455
      out.close()
1456
  except:
1457
    RemoveFile(tmpname)
1458
    raise
1459

    
1460

    
1461
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1462
  """Sets the name of an IP address and hostname in /etc/hosts.
1463

1464
  @type file_name: str
1465
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1466
  @type ip: str
1467
  @param ip: the IP address
1468
  @type hostname: str
1469
  @param hostname: the hostname to be added
1470
  @type aliases: list
1471
  @param aliases: the list of aliases to add for the hostname
1472

1473
  """
1474
  # Ensure aliases are unique
1475
  aliases = UniqueSequence([hostname] + aliases)[1:]
1476

    
1477
  def _WriteEtcHosts(fd):
1478
    # Duplicating file descriptor because os.fdopen's result will automatically
1479
    # close the descriptor, but we would still like to have its functionality.
1480
    out = os.fdopen(os.dup(fd), "w")
1481
    try:
1482
      for line in ReadFile(file_name).splitlines(True):
1483
        fields = line.split()
1484
        if fields and not fields[0].startswith("#") and ip == fields[0]:
1485
          continue
1486
        out.write(line)
1487

    
1488
      out.write("%s\t%s" % (ip, hostname))
1489
      if aliases:
1490
        out.write(" %s" % " ".join(aliases))
1491
      out.write("\n")
1492
      out.flush()
1493
    finally:
1494
      out.close()
1495

    
1496
  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1497
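# Illustrative result (the address and names below are placeholders): after
#
#   SetEtcHostsEntry("/etc/hosts", "192.0.2.10", "node1.example.com", ["node1"])
#
# any previous line for 192.0.2.10 has been replaced and the file ends with:
#
#   192.0.2.10<TAB>node1.example.com node1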

    
1498

    
1499
def AddHostToEtcHosts(hostname, ip):
1500
  """Wrapper around SetEtcHostsEntry.
1501

1502
  @type hostname: str
1503
  @param hostname: a hostname that will be resolved and added to
1504
      L{constants.ETC_HOSTS}
1505
  @type ip: str
1506
  @param ip: The ip address of the host
1507

1508
  """
1509
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [hostname.split(".")[0]])
1510

    
1511

    
1512
def RemoveEtcHostsEntry(file_name, hostname):
1513
  """Removes a hostname from /etc/hosts.
1514

1515
  IP addresses without names are removed from the file.
1516

1517
  @type file_name: str
1518
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1519
  @type hostname: str
1520
  @param hostname: the hostname to be removed
1521

1522
  """
1523
  def _WriteEtcHosts(fd):
1524
    # Duplicating file descriptor because os.fdopen's result will automatically
1525
    # close the descriptor, but we would still like to have its functionality.
1526
    out = os.fdopen(os.dup(fd), "w")
1527
    try:
1528
      for line in ReadFile(file_name).splitlines(True):
1529
        fields = line.split()
1530
        if len(fields) > 1 and not fields[0].startswith("#"):
1531
          names = fields[1:]
1532
          if hostname in names:
1533
            while hostname in names:
1534
              names.remove(hostname)
1535
            if names:
1536
              out.write("%s %s\n" % (fields[0], " ".join(names)))
1537
            continue
1538

    
1539
        out.write(line)
1540

    
1541
      out.flush()
1542
    finally:
1543
      out.close()
1544

    
1545
  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1546

    
1547

    
1548
def RemoveHostFromEtcHosts(hostname):
1549
  """Wrapper around RemoveEtcHostsEntry.
1550

1551
  @type hostname: str
1552
  @param hostname: hostname that will be resolved and its
1553
      full and short name will be removed from
1554
      L{constants.ETC_HOSTS}
1555

1556
  """
1557
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
1558
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])
1559

    
1560

    
1561
def TimestampForFilename():
1562
  """Returns the current time formatted for filenames.
1563

1564
  The format doesn't contain colons as some shells and applications treat them as
1565
  separators.
1566

1567
  """
1568
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1569

    
1570

    
1571
def CreateBackup(file_name):
1572
  """Creates a backup of a file.
1573

1574
  @type file_name: str
1575
  @param file_name: file to be backed up
1576
  @rtype: str
1577
  @return: the path to the newly created backup
1578
  @raise errors.ProgrammerError: for invalid file names
1579

1580
  """
1581
  if not os.path.isfile(file_name):
1582
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1583
                                file_name)
1584

    
1585
  prefix = ("%s.backup-%s." %
1586
            (os.path.basename(file_name), TimestampForFilename()))
1587
  dir_name = os.path.dirname(file_name)
1588

    
1589
  fsrc = open(file_name, 'rb')
1590
  try:
1591
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1592
    fdst = os.fdopen(fd, 'wb')
1593
    try:
1594
      logging.debug("Backing up %s at %s", file_name, backup_name)
1595
      shutil.copyfileobj(fsrc, fdst)
1596
    finally:
1597
      fdst.close()
1598
  finally:
1599
    fsrc.close()
1600

    
1601
  return backup_name
1602

    
1603

    
1604
def ShellQuote(value):
1605
  """Quotes shell argument according to POSIX.
1606

1607
  @type value: str
1608
  @param value: the argument to be quoted
1609
  @rtype: str
1610
  @return: the quoted value
1611

1612
  """
1613
  if _re_shell_unquoted.match(value):
1614
    return value
1615
  else:
1616
    return "'%s'" % value.replace("'", "'\\''")
1617

    
1618

    
1619
def ShellQuoteArgs(args):
1620
  """Quotes a list of shell arguments.
1621

1622
  @type args: list
1623
  @param args: list of arguments to be quoted
1624
  @rtype: str
1625
  @return: the quoted arguments concatenated with spaces
1626

1627
  """
1628
  return ' '.join([ShellQuote(i) for i in args])
1629
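# Quoting examples (illustrative; results shown as the raw shell strings):
#
#   ShellQuote("plain-word")         -> plain-word (returned unchanged)
#   ShellQuote("it's")               -> 'it'\''s'
#   ShellQuoteArgs(["echo", "a b"])  -> echo 'a b'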

    
1630

    
1631
class ShellWriter:
1632
  """Helper class to write scripts with indentation.
1633

1634
  """
1635
  INDENT_STR = "  "
1636

    
1637
  def __init__(self, fh):
1638
    """Initializes this class.
1639

1640
    """
1641
    self._fh = fh
1642
    self._indent = 0
1643

    
1644
  def IncIndent(self):
1645
    """Increase indentation level by 1.
1646

1647
    """
1648
    self._indent += 1
1649

    
1650
  def DecIndent(self):
1651
    """Decrease indentation level by 1.
1652

1653
    """
1654
    assert self._indent > 0
1655
    self._indent -= 1
1656

    
1657
  def Write(self, txt, *args):
1658
    """Write line to output file.
1659

1660
    """
1661
    assert self._indent >= 0
1662

    
1663
    self._fh.write(self._indent * self.INDENT_STR)
1664

    
1665
    if args:
1666
      self._fh.write(txt % args)
1667
    else:
1668
      self._fh.write(txt)
1669

    
1670
    self._fh.write("\n")
1671
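# Small illustration of how ShellWriter produces an indented script fragment:
#
#   buf = StringIO()
#   sw = ShellWriter(buf)
#   sw.Write("if [ -e %s ]; then", ShellQuote("/tmp/foo"))
#   sw.IncIndent()
#   sw.Write("echo found")
#   sw.DecIndent()
#   sw.Write("fi")
#   # buf.getvalue() is:
#   #   if [ -e /tmp/foo ]; then
#   #     echo found
#   #   fi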

    
1672

    
1673
def ListVisibleFiles(path):
1674
  """Returns a list of visible files in a directory.
1675

1676
  @type path: str
1677
  @param path: the directory to enumerate
1678
  @rtype: list
1679
  @return: the list of all files not starting with a dot
1680
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1681

1682
  """
1683
  if not IsNormAbsPath(path):
1684
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1685
                                 " absolute/normalized: '%s'" % path)
1686
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1687
  return files
1688

    
1689

    
1690
def GetHomeDir(user, default=None):
1691
  """Try to get the homedir of the given user.
1692

1693
  The user can be passed either as a string (denoting the name) or as
1694
  an integer (denoting the user id). If the user is not found, the
1695
  'default' argument is returned, which defaults to None.
1696

1697
  """
1698
  try:
1699
    if isinstance(user, basestring):
1700
      result = pwd.getpwnam(user)
1701
    elif isinstance(user, (int, long)):
1702
      result = pwd.getpwuid(user)
1703
    else:
1704
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1705
                                   type(user))
1706
  except KeyError:
1707
    return default
1708
  return result.pw_dir
1709

    
1710

    
1711
def NewUUID():
1712
  """Returns a random UUID.
1713

1714
  @note: This is a Linux-specific method as it uses the /proc
1715
      filesystem.
1716
  @rtype: str
1717

1718
  """
1719
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1720

    
1721

    
1722
def GenerateSecret(numbytes=20):
1723
  """Generates a random secret.
1724

1725
  This will generate a pseudo-random secret returning a hex string
1726
  (so that it can be used where an ASCII string is needed).
1727

1728
  @param numbytes: the number of bytes which will be represented by the returned
1729
      string (defaulting to 20, the length of a SHA1 hash)
1730
  @rtype: str
1731
  @return: a hex representation of the pseudo-random sequence
1732

1733
  """
1734
  return os.urandom(numbytes).encode('hex')
1735

    
1736

    
1737
def EnsureDirs(dirs):
1738
  """Make required directories, if they don't exist.
1739

1740
  @param dirs: list of tuples (dir_name, dir_mode)
1741
  @type dirs: list of (string, integer)
1742

1743
  """
1744
  for dir_name, dir_mode in dirs:
1745
    try:
1746
      os.mkdir(dir_name, dir_mode)
1747
    except EnvironmentError, err:
1748
      if err.errno != errno.EEXIST:
1749
        raise errors.GenericError("Cannot create needed directory"
1750
                                  " '%s': %s" % (dir_name, err))
1751
    try:
1752
      os.chmod(dir_name, dir_mode)
1753
    except EnvironmentError, err:
1754
      raise errors.GenericError("Cannot change directory permissions on"
1755
                                " '%s': %s" % (dir_name, err))
1756
    if not os.path.isdir(dir_name):
1757
      raise errors.GenericError("%s is not a directory" % dir_name)
1758

    
1759

    
1760
def ReadFile(file_name, size=-1):
1761
  """Reads a file.
1762

1763
  @type size: int
1764
  @param size: Read at most size bytes (if negative, entire file)
1765
  @rtype: str
1766
  @return: the (possibly partial) content of the file
1767

1768
  """
1769
  f = open(file_name, "r")
1770
  try:
1771
    return f.read(size)
1772
  finally:
1773
    f.close()
1774

    
1775

    
1776
def WriteFile(file_name, fn=None, data=None,
1777
              mode=None, uid=-1, gid=-1,
1778
              atime=None, mtime=None, close=True,
1779
              dry_run=False, backup=False,
1780
              prewrite=None, postwrite=None):
1781
  """(Over)write a file atomically.
1782

1783
  The file_name and either fn (a function taking one argument, the
1784
  file descriptor, and which should write the data to it) or data (the
1785
  contents of the file) must be passed. The other arguments are
1786
  optional and allow setting the file mode, owner and group, and the
1787
  mtime/atime of the file.
1788

1789
  If the function doesn't raise an exception, it has succeeded and the
1790
  target file has the new contents. If the function has raised an
1791
  exception, an existing target file should be unmodified and the
1792
  temporary file should be removed.
1793

1794
  @type file_name: str
1795
  @param file_name: the target filename
1796
  @type fn: callable
1797
  @param fn: content writing function, called with
1798
      file descriptor as parameter
1799
  @type data: str
1800
  @param data: contents of the file
1801
  @type mode: int
1802
  @param mode: file mode
1803
  @type uid: int
1804
  @param uid: the owner of the file
1805
  @type gid: int
1806
  @param gid: the group of the file
1807
  @type atime: int
1808
  @param atime: a custom access time to be set on the file
1809
  @type mtime: int
1810
  @param mtime: a custom modification time to be set on the file
1811
  @type close: boolean
1812
  @param close: whether to close file after writing it
1813
  @type prewrite: callable
1814
  @param prewrite: function to be called before writing content
1815
  @type postwrite: callable
1816
  @param postwrite: function to be called after writing content
1817

1818
  @rtype: None or int
1819
  @return: None if the 'close' parameter evaluates to True,
1820
      otherwise the file descriptor
1821

1822
  @raise errors.ProgrammerError: if any of the arguments are not valid
1823

1824
  """
1825
  if not os.path.isabs(file_name):
1826
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1827
                                 " absolute: '%s'" % file_name)
1828

    
1829
  if [fn, data].count(None) != 1:
1830
    raise errors.ProgrammerError("fn or data required")
1831

    
1832
  if [atime, mtime].count(None) == 1:
1833
    raise errors.ProgrammerError("Both atime and mtime must be either"
1834
                                 " set or None")
1835

    
1836
  if backup and not dry_run and os.path.isfile(file_name):
1837
    CreateBackup(file_name)
1838

    
1839
  dir_name, base_name = os.path.split(file_name)
1840
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1841
  do_remove = True
1842
  # here we need to make sure we remove the temp file, if any error
1843
  # leaves it in place
1844
  try:
1845
    if uid != -1 or gid != -1:
1846
      os.chown(new_name, uid, gid)
1847
    if mode:
1848
      os.chmod(new_name, mode)
1849
    if callable(prewrite):
1850
      prewrite(fd)
1851
    if data is not None:
1852
      os.write(fd, data)
1853
    else:
1854
      fn(fd)
1855
    if callable(postwrite):
1856
      postwrite(fd)
1857
    os.fsync(fd)
1858
    if atime is not None and mtime is not None:
1859
      os.utime(new_name, (atime, mtime))
1860
    if not dry_run:
1861
      os.rename(new_name, file_name)
1862
      do_remove = False
1863
  finally:
1864
    if close:
1865
      os.close(fd)
1866
      result = None
1867
    else:
1868
      result = fd
1869
    if do_remove:
1870
      RemoveFile(new_name)
1871

    
1872
  return result
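
# Illustrative use of WriteFile; the path below is an example only. The data
# is written to a temporary file in the same directory, fsync()ed and then
# renamed over the target, so readers never observe partial contents:
#   WriteFile("/tmp/example.conf", data="key = value\n", mode=0644)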


def ReadOneLineFile(file_name, strict=False):
1876
  """Return the first non-empty line from a file.
1877

1878
  @type strict: boolean
1879
  @param strict: if True, abort if the file has more than one
1880
      non-empty line
1881

1882
  """
1883
  file_lines = ReadFile(file_name).splitlines()
1884
  full_lines = filter(bool, file_lines)
1885
  if not file_lines or not full_lines:
1886
    raise errors.GenericError("No data in one-liner file %s" % file_name)
1887
  elif strict and len(full_lines) > 1:
1888
    raise errors.GenericError("Too many lines in one-liner file %s" %
1889
                              file_name)
1890
  return full_lines[0]
1891

    
1892

    
1893
def FirstFree(seq, base=0):
1894
  """Returns the first non-existing integer from seq.
1895

1896
  The seq argument should be a sorted list of positive integers. The
1897
  first time the index of an element is smaller than the element
1898
  value, the index will be returned.
1899

1900
  The base argument is used to start at a different offset,
1901
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1902

1903
  Example: C{[0, 1, 3]} will return I{2}.
1904

1905
  @type seq: sequence
1906
  @param seq: the sequence to be analyzed.
1907
  @type base: int
1908
  @param base: use this value as the base index of the sequence
1909
  @rtype: int
1910
  @return: the first non-used index in the sequence
1911

1912
  """
1913
  for idx, elem in enumerate(seq):
1914
    assert elem >= base, "Passed element is higher than base offset"
1915
    if elem > idx + base:
1916
      # idx is not used
1917
      return idx + base
1918
  return None
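
# Illustrative behaviour of FirstFree (matching the docstring examples):
#   >>> FirstFree([0, 1, 3])
#   2
#   >>> FirstFree([3, 4, 6], base=3)
#   5
#   >>> FirstFree([0, 1, 2]) is None
#   True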


def SingleWaitForFdCondition(fdobj, event, timeout):
1922
  """Waits for a condition to occur on the socket.
1923

1924
  Immediately returns at the first interruption.
1925

1926
  @type fdobj: integer or object supporting a fileno() method
1927
  @param fdobj: entity to wait for events on
1928
  @type event: integer
1929
  @param event: ORed condition (see select module)
1930
  @type timeout: float or None
1931
  @param timeout: Timeout in seconds
1932
  @rtype: int or None
1933
  @return: None for timeout, otherwise the conditions that occurred
1934

1935
  """
1936
  check = (event | select.POLLPRI |
1937
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
1938

    
1939
  if timeout is not None:
1940
    # Poller object expects milliseconds
1941
    timeout *= 1000
1942

    
1943
  poller = select.poll()
1944
  poller.register(fdobj, event)
1945
  try:
1946
    # TODO: If the main thread receives a signal and we have no timeout, we
1947
    # could wait forever. This should check a global "quit" flag or something
1948
    # every so often.
1949
    io_events = poller.poll(timeout)
1950
  except select.error, err:
1951
    if err[0] != errno.EINTR:
1952
      raise
1953
    io_events = []
1954
  if io_events and io_events[0][1] & check:
1955
    return io_events[0][1]
1956
  else:
1957
    return None
1958

    
1959

    
1960
class FdConditionWaiterHelper(object):
1961
  """Retry helper for WaitForFdCondition.
1962

1963
  This class contains the retried and wait functions that make sure
1964
  WaitForFdCondition can continue waiting until the timeout is actually
1965
  expired.
1966

1967
  """
1968

    
1969
  def __init__(self, timeout):
1970
    self.timeout = timeout
1971

    
1972
  def Poll(self, fdobj, event):
1973
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1974
    if result is None:
1975
      raise RetryAgain()
1976
    else:
1977
      return result
1978

    
1979
  def UpdateTimeout(self, timeout):
1980
    self.timeout = timeout
1981

    
1982

    
1983
def WaitForFdCondition(fdobj, event, timeout):
1984
  """Waits for a condition to occur on the socket.
1985

1986
  Retries until the timeout is expired, even if interrupted.
1987

1988
  @type fdobj: integer or object supporting a fileno() method
1989
  @param fdobj: entity to wait for events on
1990
  @type event: integer
1991
  @param event: ORed condition (see select module)
1992
  @type timeout: float or None
1993
  @param timeout: Timeout in seconds
1994
  @rtype: int or None
1995
  @return: None for timeout, otherwise the conditions that occurred
1996

1997
  """
1998
  if timeout is not None:
1999
    retrywaiter = FdConditionWaiterHelper(timeout)
2000
    try:
2001
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
2002
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
2003
    except RetryTimeout:
2004
      result = None
2005
  else:
2006
    result = None
2007
    while result is None:
2008
      result = SingleWaitForFdCondition(fdobj, event, timeout)
2009
  return result
2010

    
2011

    
2012
def UniqueSequence(seq):
2013
  """Returns a list with unique elements.
2014

2015
  Element order is preserved.
2016

2017
  @type seq: sequence
2018
  @param seq: the sequence with the source elements
2019
  @rtype: list
2020
  @return: list of unique elements from seq
2021

2022
  """
2023
  seen = set()
2024
  return [i for i in seq if i not in seen and not seen.add(i)]
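
# Illustrative behaviour of UniqueSequence; unlike a plain set(), the order
# of the first occurrences is preserved:
#   >>> UniqueSequence([1, 2, 1, 3, 2])
#   [1, 2, 3]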


def NormalizeAndValidateMac(mac):
  """Normalizes and checks whether a MAC address is valid.

  Checks whether the supplied MAC address is formally correct; only the
  colon-separated format is accepted, and the result is normalized to
  all-lowercase.
2032

2033
  @type mac: str
2034
  @param mac: the MAC to be validated
2035
  @rtype: str
2036
  @return: returns the normalized and validated MAC.
2037

2038
  @raise errors.OpPrereqError: If the MAC isn't valid
2039

2040
  """
2041
  if not _MAC_CHECK.match(mac):
2042
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
2043
                               mac, errors.ECODE_INVAL)
2044

    
2045
  return mac.lower()
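
# Illustrative behaviour of NormalizeAndValidateMac; malformed or
# non-colon-separated values raise errors.OpPrereqError:
#   >>> NormalizeAndValidateMac("AA:00:99:CC:B4:75")
#   'aa:00:99:cc:b4:75'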


def TestDelay(duration):
2049
  """Sleep for a fixed amount of time.
2050

2051
  @type duration: float
2052
  @param duration: the sleep duration
2053
  @rtype: tuple
  @return: (False, error message) for negative values,
      (True, None) otherwise
2055

2056
  """
2057
  if duration < 0:
2058
    return False, "Invalid sleep duration"
2059
  time.sleep(duration)
2060
  return True, None
2061

    
2062

    
2063
def _CloseFDNoErr(fd, retries=5):
2064
  """Close a file descriptor ignoring errors.
2065

2066
  @type fd: int
2067
  @param fd: the file descriptor
2068
  @type retries: int
2069
  @param retries: how many retries to make, in case we get any
2070
      other error than EBADF
2071

2072
  """
2073
  try:
2074
    os.close(fd)
2075
  except OSError, err:
2076
    if err.errno != errno.EBADF:
2077
      if retries > 0:
2078
        _CloseFDNoErr(fd, retries - 1)
2079
    # else either it's closed already or we're out of retries, so we
2080
    # ignore this and go on
2081

    
2082

    
2083
def CloseFDs(noclose_fds=None):
2084
  """Close file descriptors.
2085

2086
  This closes all file descriptors above 2 (i.e. except
2087
  stdin/out/err).
2088

2089
  @type noclose_fds: list or None
2090
  @param noclose_fds: if given, it denotes a list of file descriptor
2091
      that should not be closed
2092

2093
  """
2094
  # Default maximum for the number of available file descriptors.
2095
  if 'SC_OPEN_MAX' in os.sysconf_names:
2096
    try:
2097
      MAXFD = os.sysconf('SC_OPEN_MAX')
2098
      if MAXFD < 0:
2099
        MAXFD = 1024
2100
    except OSError:
2101
      MAXFD = 1024
2102
  else:
2103
    MAXFD = 1024
2104
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2105
  if (maxfd == resource.RLIM_INFINITY):
2106
    maxfd = MAXFD
2107

    
2108
  # Iterate through and close all file descriptors (except the standard ones)
2109
  for fd in range(3, maxfd):
2110
    if noclose_fds and fd in noclose_fds:
2111
      continue
2112
    _CloseFDNoErr(fd)
2113

    
2114

    
2115
def Mlockall(_ctypes=ctypes):
2116
  """Lock current process' virtual address space into RAM.
2117

2118
  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2119
  see mlock(2) for more details. This function requires ctypes module.
2120

2121
  @raises errors.NoCtypesError: if ctypes module is not found
2122

2123
  """
2124
  if _ctypes is None:
2125
    raise errors.NoCtypesError()
2126

    
2127
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
2128
  if libc is None:
2129
    logging.error("Cannot set memory lock, ctypes cannot load libc")
2130
    return
2131

    
2132
  # Some older version of the ctypes module don't have built-in functionality
2133
  # to access the errno global variable, where function error codes are stored.
2134
  # By declaring this variable as a pointer to an integer we can then access
2135
  # its value correctly, should the mlockall call fail, in order to see what
2136
  # the actual error code was.
2137
  # pylint: disable-msg=W0212
2138
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)
2139

    
2140
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2141
    # pylint: disable-msg=W0212
2142
    logging.error("Cannot set memory lock: %s",
2143
                  os.strerror(libc.__errno_location().contents.value))
2144
    return
2145

    
2146
  logging.debug("Memory lock set")
2147

    
2148

    
2149
def Daemonize(logfile):
2150
  """Daemonize the current process.
2151

2152
  This detaches the current process from the controlling terminal and
2153
  runs it in the background as a daemon.
2154

2155
  @type logfile: str
2156
  @param logfile: the logfile to which we should redirect stdout/stderr
2157
  @rtype: int
  @return: the write end of the error-reporting pipe
2159

2160
  """
2161
  # pylint: disable-msg=W0212
2162
  # yes, we really want os._exit
2163

    
2164
  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
2165
  # least abstract the pipe functionality between them
2166

    
2167
  # Create pipe for sending error messages
2168
  (rpipe, wpipe) = os.pipe()
2169

    
2170
  # this might fail
2171
  pid = os.fork()
2172
  if (pid == 0):  # The first child.
2173
    SetupDaemonEnv()
2174

    
2175
    # this might fail
2176
    pid = os.fork() # Fork a second child.
2177
    if (pid == 0):  # The second child.
2178
      _CloseFDNoErr(rpipe)
2179
    else:
2180
      # exit() or _exit()?  See below.
2181
      os._exit(0) # Exit parent (the first child) of the second child.
2182
  else:
2183
    _CloseFDNoErr(wpipe)
2184
    # Wait for daemon to be started (or an error message to
2185
    # arrive) and read up to 100 KB as an error message
2186
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
2187
    if errormsg:
2188
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
2189
      rcode = 1
2190
    else:
2191
      rcode = 0
2192
    os._exit(rcode) # Exit parent of the first child.
2193

    
2194
  SetupDaemonFDs(logfile, None)
2195
  return wpipe
2196

    
2197

    
2198
def DaemonPidFileName(name):
2199
  """Compute a ganeti pid file absolute path
2200

2201
  @type name: str
2202
  @param name: the daemon name
2203
  @rtype: str
2204
  @return: the full path to the pidfile corresponding to the given
2205
      daemon name
2206

2207
  """
2208
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2209

    
2210

    
2211
def EnsureDaemon(name):
2212
  """Check for and start daemon if not alive.
2213

2214
  """
2215
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2216
  if result.failed:
2217
    logging.error("Can't start daemon '%s', failure %s, output: %s",
2218
                  name, result.fail_reason, result.output)
2219
    return False
2220

    
2221
  return True
2222

    
2223

    
2224
def StopDaemon(name):
2225
  """Stop daemon
2226

2227
  """
2228
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2229
  if result.failed:
2230
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
2231
                  name, result.fail_reason, result.output)
2232
    return False
2233

    
2234
  return True
2235

    
2236

    
2237
def WritePidFile(pidfile):
2238
  """Write the current process pidfile.
2239

2240
  @type pidfile: string
2241
  @param pidfile: the path to the file to be written
2242
  @raise errors.LockError: if the pid file already exists and
2243
      points to a live process
2244
  @rtype: int
2245
  @return: the file descriptor of the lock file; do not close this unless
2246
      you want to unlock the pid file
2247

2248
  """
2249
  # We don't rename nor truncate the file to not drop locks under
2250
  # existing processes
2251
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
2252

    
2253
  # Lock the PID file (and fail if not possible to do so). Any code
2254
  # wanting to send a signal to the daemon should try to lock the PID
2255
  # file before reading it. If acquiring the lock succeeds, the daemon is
2256
  # no longer running and the signal should not be sent.
2257
  LockFile(fd_pidfile)
2258

    
2259
  os.write(fd_pidfile, "%d\n" % os.getpid())
2260

    
2261
  return fd_pidfile
2262

    
2263

    
2264
def RemovePidFile(name):
2265
  """Remove the current process pidfile.
2266

2267
  Any errors are ignored.
2268

2269
  @type name: str
2270
  @param name: the daemon name used to derive the pidfile name
2271

2272
  """
2273
  pidfilename = DaemonPidFileName(name)
2274
  # TODO: we could check here that the file contains our pid
2275
  try:
2276
    RemoveFile(pidfilename)
2277
  except: # pylint: disable-msg=W0702
2278
    pass
2279

    
2280

    
2281
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2282
                waitpid=False):
2283
  """Kill a process given by its pid.
2284

2285
  @type pid: int
2286
  @param pid: The PID to terminate.
2287
  @type signal_: int
2288
  @param signal_: The signal to send, by default SIGTERM
2289
  @type timeout: int
2290
  @param timeout: The timeout after which, if the process is still alive,
2291
                  a SIGKILL will be sent. If not positive, no such checking
2292
                  will be done
2293
  @type waitpid: boolean
2294
  @param waitpid: If true, we should waitpid on this process after
2295
      sending signals, since it's our own child and otherwise it
2296
      would remain as zombie
2297

2298
  """
2299
  def _helper(pid, signal_, wait):
2300
    """Simple helper to encapsulate the kill/waitpid sequence"""
2301
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2302
      try:
2303
        os.waitpid(pid, os.WNOHANG)
2304
      except OSError:
2305
        pass
2306

    
2307
  if pid <= 0:
2308
    # kill with pid=0 == suicide
2309
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2310

    
2311
  if not IsProcessAlive(pid):
2312
    return
2313

    
2314
  _helper(pid, signal_, waitpid)
2315

    
2316
  if timeout <= 0:
2317
    return
2318

    
2319
  def _CheckProcess():
2320
    if not IsProcessAlive(pid):
2321
      return
2322

    
2323
    try:
2324
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2325
    except OSError:
2326
      raise RetryAgain()
2327

    
2328
    if result_pid > 0:
2329
      return
2330

    
2331
    raise RetryAgain()
2332

    
2333
  try:
2334
    # Wait up to $timeout seconds
2335
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2336
  except RetryTimeout:
2337
    pass
2338

    
2339
  if IsProcessAlive(pid):
2340
    # Kill process if it's still alive
2341
    _helper(pid, signal.SIGKILL, waitpid)
2342

    
2343

    
2344
def FindFile(name, search_path, test=os.path.exists):
2345
  """Look for a filesystem object in a given path.
2346

2347
  This is a generic helper to search for a filesystem object (a file or
  a directory) in a given search path.
2349

2350
  @type name: str
2351
  @param name: the name to look for
2352
  @type search_path: sequence of strings
  @param search_path: the directories in which to search for the name
2354
  @type test: callable
2355
  @param test: a function taking one argument that should return True
2356
      if a given object is valid; the default value is
2357
      os.path.exists, causing only existing files to be returned
2358
  @rtype: str or None
2359
  @return: full path to the object if found, None otherwise
2360

2361
  """
2362
  # validate the filename mask
2363
  if constants.EXT_PLUGIN_MASK.match(name) is None:
2364
    logging.critical("Invalid value passed for external script name: '%s'",
2365
                     name)
2366
    return None
2367

    
2368
  for dir_name in search_path:
2369
    # FIXME: investigate switch to PathJoin
2370
    item_name = os.path.sep.join([dir_name, name])
2371
    # check the user test and that we're indeed resolving to the given
2372
    # basename
2373
    if test(item_name) and os.path.basename(item_name) == name:
2374
      return item_name
2375
  return None
2376

    
2377

    
2378
def CheckVolumeGroupSize(vglist, vgname, minsize):
2379
  """Checks if the volume group list is valid.
2380

2381
  The function will check if a given volume group is in the list of
2382
  volume groups and has a minimum size.
2383

2384
  @type vglist: dict
2385
  @param vglist: dictionary of volume group names and their size
2386
  @type vgname: str
2387
  @param vgname: the volume group we should check
2388
  @type minsize: int
2389
  @param minsize: the minimum size we accept
2390
  @rtype: None or str
2391
  @return: None for success, otherwise the error message
2392

2393
  """
2394
  vgsize = vglist.get(vgname, None)
2395
  if vgsize is None:
2396
    return "volume group '%s' missing" % vgname
2397
  elif vgsize < minsize:
2398
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2399
            (vgname, minsize, vgsize))
2400
  return None
2401

    
2402

    
2403
def SplitTime(value):
2404
  """Splits time as floating point number into a tuple.
2405

2406
  @param value: Time in seconds
2407
  @type value: int or float
2408
  @return: Tuple containing (seconds, microseconds)
2409

2410
  """
2411
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2412

    
2413
  assert 0 <= seconds, \
2414
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2415
  assert 0 <= microseconds <= 999999, \
2416
    "Microseconds must be 0-999999, but are %s" % microseconds
2417

    
2418
  return (int(seconds), int(microseconds))
2419

    
2420

    
2421
def MergeTime(timetuple):
2422
  """Merges a tuple into time as a floating point number.
2423

2424
  @param timetuple: Time as tuple, (seconds, microseconds)
2425
  @type timetuple: tuple
2426
  @return: Time as a floating point number expressed in seconds
2427

2428
  """
2429
  (seconds, microseconds) = timetuple
2430

    
2431
  assert 0 <= seconds, \
2432
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2433
  assert 0 <= microseconds <= 999999, \
2434
    "Microseconds must be 0-999999, but are %s" % microseconds
2435

    
2436
  return float(seconds) + (float(microseconds) * 0.000001)
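
# Illustrative round trip between SplitTime and MergeTime:
#   >>> SplitTime(1.5)
#   (1, 500000)
#   >>> MergeTime((1, 500000))
#   1.5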


class LogFileHandler(logging.FileHandler):
2440
  """Log handler that doesn't fallback to stderr.
2441

2442
  When an error occurs while writing on the logfile, logging.FileHandler tries
2443
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
2444
  the logfile. This class avoids such failures by reporting errors to
  /dev/console instead.
2445

2446
  """
2447
  def __init__(self, filename, mode="a", encoding=None):
2448
    """Open the specified file and use it as the stream for logging.
2449

2450
    Also open /dev/console to report errors while logging.
2451

2452
    """
2453
    logging.FileHandler.__init__(self, filename, mode, encoding)
2454
    self.console = open(constants.DEV_CONSOLE, "a")
2455

    
2456
  def handleError(self, record): # pylint: disable-msg=C0103
2457
    """Handle errors which occur during an emit() call.
2458

2459
    Try to handle errors with FileHandler method, if it fails write to
2460
    /dev/console.
2461

2462
    """
2463
    try:
2464
      logging.FileHandler.handleError(self, record)
2465
    except Exception: # pylint: disable-msg=W0703
2466
      try:
2467
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
2468
      except Exception: # pylint: disable-msg=W0703
2469
        # Log handler tried everything it could, now just give up
2470
        pass
2471

    
2472

    
2473
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2474
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2475
                 console_logging=False):
2476
  """Configures the logging module.
2477

2478
  @type logfile: str
2479
  @param logfile: the filename to which we should log
2480
  @type debug: integer
2481
  @param debug: if greater than zero, enable debug messages, otherwise
2482
      only those at C{INFO} and above level
2483
  @type stderr_logging: boolean
2484
  @param stderr_logging: whether we should also log to the standard error
2485
  @type program: str
2486
  @param program: the name under which we should log messages
2487
  @type multithreaded: boolean
2488
  @param multithreaded: if True, will add the thread name to the log file
2489
  @type syslog: string
2490
  @param syslog: one of 'no', 'yes', 'only':
2491
      - if no, syslog is not used
2492
      - if yes, syslog is used (in addition to file-logging)
2493
      - if only, only syslog is used
2494
  @type console_logging: boolean
2495
  @param console_logging: if True, will use a FileHandler which falls back to
2496
      the system console if logging fails
2497
  @raise EnvironmentError: if we can't open the log file and
2498
      syslog/stderr logging is disabled
2499

2500
  """
2501
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
2502
  sft = program + "[%(process)d]:"
2503
  if multithreaded:
2504
    fmt += "/%(threadName)s"
2505
    sft += " (%(threadName)s)"
2506
  if debug:
2507
    fmt += " %(module)s:%(lineno)s"
2508
    # no debug info for syslog loggers
2509
  fmt += " %(levelname)s %(message)s"
2510
  # yes, we do want the textual level, as remote syslog will probably
2511
  # lose the error level, and it's easier to grep for it
2512
  sft += " %(levelname)s %(message)s"
2513
  formatter = logging.Formatter(fmt)
2514
  sys_fmt = logging.Formatter(sft)
2515

    
2516
  root_logger = logging.getLogger("")
2517
  root_logger.setLevel(logging.NOTSET)
2518

    
2519
  # Remove all previously setup handlers
2520
  for handler in root_logger.handlers:
2521
    handler.close()
2522
    root_logger.removeHandler(handler)
2523

    
2524
  if stderr_logging:
2525
    stderr_handler = logging.StreamHandler()
2526
    stderr_handler.setFormatter(formatter)
2527
    if debug:
2528
      stderr_handler.setLevel(logging.NOTSET)
2529
    else:
2530
      stderr_handler.setLevel(logging.CRITICAL)
2531
    root_logger.addHandler(stderr_handler)
2532

    
2533
  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2534
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
2535
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2536
                                                    facility)
2537
    syslog_handler.setFormatter(sys_fmt)
2538
    # Never enable debug over syslog
2539
    syslog_handler.setLevel(logging.INFO)
2540
    root_logger.addHandler(syslog_handler)
2541

    
2542
  if syslog != constants.SYSLOG_ONLY:
2543
    # this can fail, if the logging directories are not setup or we have
2544
    # a permission problem; in this case, it's best to log but ignore
2545
    # the error if stderr_logging is True, and if false we re-raise the
2546
    # exception since otherwise we could run but without any logs at all
2547
    try:
2548
      if console_logging:
2549
        logfile_handler = LogFileHandler(logfile)
2550
      else:
2551
        logfile_handler = logging.FileHandler(logfile)
2552
      logfile_handler.setFormatter(formatter)
2553
      if debug:
2554
        logfile_handler.setLevel(logging.DEBUG)
2555
      else:
2556
        logfile_handler.setLevel(logging.INFO)
2557
      root_logger.addHandler(logfile_handler)
2558
    except EnvironmentError:
2559
      if stderr_logging or syslog == constants.SYSLOG_YES:
2560
        logging.exception("Failed to enable logging to file '%s'", logfile)
2561
      else:
2562
        # we need to re-raise the exception
2563
        raise
2564

    
2565

    
2566
def IsNormAbsPath(path):
2567
  """Check whether a path is absolute and also normalized
2568

2569
  This avoids things like /dir/../../other/path to be valid.
2570

2571
  """
2572
  return os.path.normpath(path) == path and os.path.isabs(path)
2573

    
2574

    
2575
def PathJoin(*args):
2576
  """Safe-join a list of path components.
2577

2578
  Requirements:
2579
      - the first argument must be an absolute path
2580
      - no component in the path must have backtracking (e.g. /../),
2581
        since we check for normalization at the end
2582

2583
  @param args: the path components to be joined
2584
  @raise ValueError: for invalid paths
2585

2586
  """
2587
  # ensure we're having at least one path passed in
2588
  assert args
2589
  # ensure the first component is an absolute and normalized path name
2590
  root = args[0]
2591
  if not IsNormAbsPath(root):
2592
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2593
  result = os.path.join(*args)
2594
  # ensure that the whole path is normalized
2595
  if not IsNormAbsPath(result):
2596
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2597
  # check that we're still under the original prefix
2598
  prefix = os.path.commonprefix([root, result])
2599
  if prefix != root:
2600
    raise ValueError("Error: path joining resulted in different prefix"
2601
                     " (%s != %s)" % (prefix, root))
2602
  return result
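
# Illustrative behaviour of PathJoin: ordinary joins work like os.path.join,
# while components escaping the initial prefix are rejected:
#   >>> PathJoin("/etc", "ganeti", "file.conf")
#   '/etc/ganeti/file.conf'
#   >>> PathJoin("/etc", "../tmp/file")   # raises ValueError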


def TailFile(fname, lines=20):
2606
  """Return the last lines from a file.
2607

2608
  @note: this function will only read and parse the last 4KB of
2609
      the file; if the lines are very long, it could be that less
2610
      than the requested number of lines are returned
2611

2612
  @param fname: the file name
2613
  @type lines: int
2614
  @param lines: the (maximum) number of lines to return
2615

2616
  """
2617
  fd = open(fname, "r")
2618
  try:
2619
    fd.seek(0, 2)
2620
    pos = fd.tell()
2621
    pos = max(0, pos-4096)
2622
    fd.seek(pos, 0)
2623
    raw_data = fd.read()
2624
  finally:
2625
    fd.close()
2626

    
2627
  rows = raw_data.splitlines()
2628
  return rows[-lines:]
2629

    
2630

    
2631
def FormatTimestampWithTZ(secs):
2632
  """Formats a Unix timestamp with the local timezone.
2633

2634
  """
2635
  return time.strftime("%F %T %Z", time.localtime(secs))
2636

    
2637

    
2638
def _ParseAsn1Generalizedtime(value):
2639
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2640

2641
  @type value: string
2642
  @param value: ASN1 GENERALIZEDTIME timestamp
2643

2644
  """
2645
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2646
  if m:
2647
    # We have an offset
2648
    asn1time = m.group(1)
2649
    hours = int(m.group(2))
2650
    minutes = int(m.group(3))
2651
    utcoffset = (60 * hours) + minutes
2652
  else:
2653
    if not value.endswith("Z"):
2654
      raise ValueError("Missing timezone")
2655
    asn1time = value[:-1]
2656
    utcoffset = 0
2657

    
2658
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2659

    
2660
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2661

    
2662
  return calendar.timegm(tt.utctimetuple())
2663

    
2664

    
2665
def GetX509CertValidity(cert):
2666
  """Returns the validity period of the certificate.
2667

2668
  @type cert: OpenSSL.crypto.X509
2669
  @param cert: X509 certificate object
2670

2671
  """
2672
  # The get_notBefore and get_notAfter functions are only supported in
2673
  # pyOpenSSL 0.7 and above.
2674
  try:
2675
    get_notbefore_fn = cert.get_notBefore
2676
  except AttributeError:
2677
    not_before = None
2678
  else:
2679
    not_before_asn1 = get_notbefore_fn()
2680

    
2681
    if not_before_asn1 is None:
2682
      not_before = None
2683
    else:
2684
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2685

    
2686
  try:
2687
    get_notafter_fn = cert.get_notAfter
2688
  except AttributeError:
2689
    not_after = None
2690
  else:
2691
    not_after_asn1 = get_notafter_fn()
2692

    
2693
    if not_after_asn1 is None:
2694
      not_after = None
2695
    else:
2696
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2697

    
2698
  return (not_before, not_after)
2699

    
2700

    
2701
def _VerifyCertificateInner(expired, not_before, not_after, now,
2702
                            warn_days, error_days):
2703
  """Verifies certificate validity.
2704

2705
  @type expired: bool
2706
  @param expired: Whether pyOpenSSL considers the certificate as expired
2707
  @type not_before: number or None
2708
  @param not_before: Unix timestamp before which certificate is not valid
2709
  @type not_after: number or None
2710
  @param not_after: Unix timestamp after which certificate is invalid
2711
  @type now: number
2712
  @param now: Current time as Unix timestamp
2713
  @type warn_days: number or None
2714
  @param warn_days: How many days before expiration a warning should be reported
2715
  @type error_days: number or None
2716
  @param error_days: How many days before expiration an error should be reported
2717

2718
  """
2719
  if expired:
2720
    msg = "Certificate is expired"
2721

    
2722
    if not_before is not None and not_after is not None:
2723
      msg += (" (valid from %s to %s)" %
2724
              (FormatTimestampWithTZ(not_before),
2725
               FormatTimestampWithTZ(not_after)))
2726
    elif not_before is not None:
2727
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2728
    elif not_after is not None:
2729
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2730

    
2731
    return (CERT_ERROR, msg)
2732

    
2733
  elif not_before is not None and not_before > now:
2734
    return (CERT_WARNING,
2735
            "Certificate not yet valid (valid from %s)" %
2736
            FormatTimestampWithTZ(not_before))
2737

    
2738
  elif not_after is not None:
2739
    remaining_days = int((not_after - now) / (24 * 3600))
2740

    
2741
    msg = "Certificate expires in about %d days" % remaining_days
2742

    
2743
    if error_days is not None and remaining_days <= error_days:
2744
      return (CERT_ERROR, msg)
2745

    
2746
    if warn_days is not None and remaining_days <= warn_days:
2747
      return (CERT_WARNING, msg)
2748

    
2749
  return (None, None)
2750

    
2751

    
2752
def VerifyX509Certificate(cert, warn_days, error_days):
2753
  """Verifies a certificate for LUVerifyCluster.
2754

2755
  @type cert: OpenSSL.crypto.X509
2756
  @param cert: X509 certificate object
2757
  @type warn_days: number or None
2758
  @param warn_days: How many days before expiration a warning should be reported
2759
  @type error_days: number or None
2760
  @param error_days: How many days before expiration an error should be reported
2761

2762
  """
2763
  # Depending on the pyOpenSSL version, this can just return (None, None)
2764
  (not_before, not_after) = GetX509CertValidity(cert)
2765

    
2766
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2767
                                 time.time(), warn_days, error_days)
2768

    
2769

    
2770
def SignX509Certificate(cert, key, salt):
2771
  """Sign a X509 certificate.
2772

2773
  An RFC822-like signature header is added in front of the certificate.
2774

2775
  @type cert: OpenSSL.crypto.X509
2776
  @param cert: X509 certificate object
2777
  @type key: string
2778
  @param key: Key for HMAC
2779
  @type salt: string
2780
  @param salt: Salt for HMAC
2781
  @rtype: string
2782
  @return: Serialized and signed certificate in PEM format
2783

2784
  """
2785
  if not VALID_X509_SIGNATURE_SALT.match(salt):
2786
    raise errors.GenericError("Invalid salt: %r" % salt)
2787

    
2788
  # Dumping as PEM here ensures the certificate is in a sane format
2789
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2790

    
2791
  return ("%s: %s/%s\n\n%s" %
2792
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
2793
           Sha1Hmac(key, cert_pem, salt=salt),
2794
           cert_pem))
2795

    
2796

    
2797
def _ExtractX509CertificateSignature(cert_pem):
2798
  """Helper function to extract signature from X509 certificate.
2799

2800
  """
2801
  # Extract signature from original PEM data
2802
  for line in cert_pem.splitlines():
2803
    if line.startswith("---"):
2804
      break
2805

    
2806
    m = X509_SIGNATURE.match(line.strip())
2807
    if m:
2808
      return (m.group("salt"), m.group("sign"))
2809

    
2810
  raise errors.GenericError("X509 certificate signature is missing")
2811

    
2812

    
2813
def LoadSignedX509Certificate(cert_pem, key):
2814
  """Verifies a signed X509 certificate.
2815

2816
  @type cert_pem: string
2817
  @param cert_pem: Certificate in PEM format and with signature header
2818
  @type key: string
2819
  @param key: Key for HMAC
2820
  @rtype: tuple; (OpenSSL.crypto.X509, string)
2821
  @return: X509 certificate object and salt
2822

2823
  """
2824
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2825

    
2826
  # Load certificate
2827
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2828

    
2829
  # Dump again to ensure it's in a sane format
2830
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2831

    
2832
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2833
    raise errors.GenericError("X509 certificate signature is invalid")
2834

    
2835
  return (cert, salt)
2836

    
2837

    
2838
def Sha1Hmac(key, text, salt=None):
2839
  """Calculates the HMAC-SHA1 digest of a text.
2840

2841
  HMAC is defined in RFC2104.
2842

2843
  @type key: string
2844
  @param key: Secret key
2845
  @type text: string
  @param text: Text to compute the digest of
2846

2847
  """
2848
  if salt:
2849
    salted_text = salt + text
2850
  else:
2851
    salted_text = text
2852

    
2853
  return hmac.new(key, salted_text, compat.sha1).hexdigest()
2854

    
2855

    
2856
def VerifySha1Hmac(key, text, digest, salt=None):
2857
  """Verifies the HMAC-SHA1 digest of a text.
2858

2859
  HMAC is defined in RFC2104.
2860

2861
  @type key: string
2862
  @param key: Secret key
2863
  @type text: string
  @param text: Text whose digest is being verified
2864
  @type digest: string
2865
  @param digest: Expected digest
2866
  @rtype: bool
2867
  @return: Whether HMAC-SHA1 digest matches
2868

2869
  """
2870
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
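
# Illustrative use of Sha1Hmac and VerifySha1Hmac; the key, text and salt
# below are examples only:
#   digest = Sha1Hmac("secret-key", "some text", salt="abc123")
#   VerifySha1Hmac("secret-key", "some text", digest, salt="abc123")  # True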


def SafeEncode(text):
2874
  """Return a 'safe' version of a source string.
2875

2876
  This function mangles the input string and returns a version that
2877
  should be safe to display/encode as ASCII. To this end, we first
2878
  convert it to ASCII using the 'backslashreplace' encoding which
2879
  should get rid of any non-ASCII chars, and then we process it
2880
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, which is too much, and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2884

2885
  @type text: str or unicode
2886
  @param text: input data
2887
  @rtype: str
2888
  @return: a safe version of text
2889

2890
  """
2891
  if isinstance(text, unicode):
2892
    # only if unicode; if str already, we handle it below
2893
    text = text.encode('ascii', 'backslashreplace')
2894
  resu = ""
2895
  for char in text:
2896
    c = ord(char)
2897
    if char == '\t':
2898
      resu += r'\t'
2899
    elif char == '\n':
2900
      resu += r'\n'
2901
    elif char == '\r':
2902
      resu += r'\r'
2903
    elif c < 32 or c >= 127: # non-printable
2904
      resu += "\\x%02x" % (c & 0xff)
2905
    else:
2906
      resu += char
2907
  return resu
2908

    
2909

    
2910
def UnescapeAndSplit(text, sep=","):
2911
  """Split and unescape a string based on a given separator.
2912

2913
  This function splits a string based on a separator where the
2914
  separator itself can be escaped so that it becomes part of an
  element. The escaping rules are (assuming comma being the
  separator):
2917
    - a plain , separates the elements
2918
    - a sequence \\\\, (double backslash plus comma) is handled as a
2919
      backslash plus a separator comma
2920
    - a sequence \, (backslash plus comma) is handled as a
2921
      non-separator comma
2922

2923
  @type text: string
2924
  @param text: the string to split
2925
  @type sep: string
2926
  @param sep: the separator
  @rtype: list
2928
  @return: a list of strings
2929

2930
  """
2931
  # we split the list by sep (with no escaping at this stage)
2932
  slist = text.split(sep)
2933
  # next, we revisit the elements and if any of them ended with an odd
2934
  # number of backslashes, then we join it with the next
2935
  rlist = []
2936
  while slist:
2937
    e1 = slist.pop(0)
2938
    if e1.endswith("\\"):
2939
      num_b = len(e1) - len(e1.rstrip("\\"))
2940
      if num_b % 2 == 1:
2941
        e2 = slist.pop(0)
2942
        # here the backslashes remain (all), and will be reduced in
2943
        # the next step
2944
        rlist.append(e1 + sep + e2)
2945
        continue
2946
    rlist.append(e1)
2947
  # finally, replace backslash-something with something
2948
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
2949
  return rlist
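
# Illustrative behaviour of UnescapeAndSplit:
#   >>> UnescapeAndSplit("a,b,c")
#   ['a', 'b', 'c']
#   >>> UnescapeAndSplit(r"a\,b,c")
#   ['a,b', 'c']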


def CommaJoin(names):
2953
  """Nicely join a set of identifiers.
2954

2955
  @param names: set, list or tuple
2956
  @return: a string with the formatted results
2957

2958
  """
2959
  return ", ".join([str(val) for val in names])
2960

    
2961

    
2962
def BytesToMebibyte(value):
2963
  """Converts bytes to mebibytes.
2964

2965
  @type value: int
2966
  @param value: Value in bytes
2967
  @rtype: int
2968
  @return: Value in mebibytes
2969

2970
  """
2971
  return int(round(value / (1024.0 * 1024.0), 0))
2972

    
2973

    
2974
def CalculateDirectorySize(path):
2975
  """Calculates the size of a directory recursively.
2976

2977
  @type path: string
2978
  @param path: Path to directory
2979
  @rtype: int
2980
  @return: Size in mebibytes
2981

2982
  """
2983
  size = 0
2984

    
2985
  for (curpath, _, files) in os.walk(path):
2986
    for filename in files:
2987
      st = os.lstat(PathJoin(curpath, filename))
2988
      size += st.st_size
2989

    
2990
  return BytesToMebibyte(size)
2991

    
2992

    
2993
def GetMounts(filename=constants.PROC_MOUNTS):
2994
  """Returns the list of mounted filesystems.
2995

2996
  This function is Linux-specific.
2997

2998
  @param filename: path of mounts file (/proc/mounts by default)
2999
  @rtype: list of tuples
3000
  @return: list of mount entries (device, mountpoint, fstype, options)
3001

3002
  """
3003
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
3004
  data = []
3005
  mountlines = ReadFile(filename).splitlines()
3006
  for line in mountlines:
3007
    device, mountpoint, fstype, options, _ = line.split(None, 4)
3008
    data.append((device, mountpoint, fstype, options))
3009

    
3010
  return data
3011

    
3012

    
3013
def GetFilesystemStats(path):
3014
  """Returns the total and free space on a filesystem.
3015

3016
  @type path: string
3017
  @param path: Path on filesystem to be examined
3018
  @rtype: tuple
3019
  @return: tuple of (Total space, Free space) in mebibytes
3020

3021
  """
3022
  st = os.statvfs(path)
3023

    
3024
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
3025
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
3026
  return (tsize, fsize)
3027

    
3028

    
3029
def RunInSeparateProcess(fn, *args):
3030
  """Runs a function in a separate process.
3031

3032
  Note: Only boolean return values are supported.
3033

3034
  @type fn: callable
3035
  @param fn: Function to be called
3036
  @rtype: bool
3037
  @return: Function's result
3038

3039
  """
3040
  pid = os.fork()
3041
  if pid == 0:
3042
    # Child process
3043
    try:
3044
      # In case the function uses temporary files
3045
      ResetTempfileModule()
3046

    
3047
      # Call function
3048
      result = int(bool(fn(*args)))
3049
      assert result in (0, 1)
3050
    except: # pylint: disable-msg=W0702
3051
      logging.exception("Error while calling function in separate process")
3052
      # 0 and 1 are reserved for the return value
3053
      result = 33
3054

    
3055
    os._exit(result) # pylint: disable-msg=W0212
3056

    
3057
  # Parent process
3058

    
3059
  # Avoid zombies and check exit code
3060
  (_, status) = os.waitpid(pid, 0)
3061

    
3062
  if os.WIFSIGNALED(status):
3063
    exitcode = None
3064
    signum = os.WTERMSIG(status)
3065
  else:
3066
    exitcode = os.WEXITSTATUS(status)
3067
    signum = None
3068

    
3069
  if not (exitcode in (0, 1) and signum is None):
3070
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
3071
                              (exitcode, signum))
3072

    
3073
  return bool(exitcode)
3074

    
3075

    
3076
def IgnoreProcessNotFound(fn, *args, **kwargs):
3077
  """Ignores ESRCH when calling a process-related function.
3078

3079
  ESRCH is raised when a process is not found.
3080

3081
  @rtype: bool
3082
  @return: Whether process was found
3083

3084
  """
3085
  try:
3086
    fn(*args, **kwargs)
3087
  except EnvironmentError, err:
3088
    # Ignore ESRCH
3089
    if err.errno == errno.ESRCH:
3090
      return False
3091
    raise
3092

    
3093
  return True
3094

    
3095

    
3096
def IgnoreSignals(fn, *args, **kwargs):
3097
  """Tries to call a function ignoring failures due to EINTR.
3098

3099
  """
3100
  try:
3101
    return fn(*args, **kwargs)
3102
  except EnvironmentError, err:
3103
    if err.errno == errno.EINTR:
3104
      return None
3105
    else:
3106
      raise
3107
  except (select.error, socket.error), err:
3108
    # In python 2.6 and above select.error is an IOError, so it's handled
3109
    # above, in 2.5 and below it's not, and it's handled here.
3110
    if err.args and err.args[0] == errno.EINTR:
3111
      return None
3112
    else:
3113
      raise
3114

    
3115

    
3116
def LockFile(fd):
3117
  """Locks a file using POSIX locks.
3118

3119
  @type fd: int
3120
  @param fd: the file descriptor we need to lock
3121

3122
  """
3123
  try:
3124
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3125
  except IOError, err:
3126
    if err.errno == errno.EAGAIN:
3127
      raise errors.LockError("File already locked")
3128
    raise
3129

    
3130

    
3131
def FormatTime(val):
3132
  """Formats a time value.
3133

3134
  @type val: float or None
3135
  @param val: the timestamp as returned by time.time()
3136
  @return: a string value or N/A if we don't have a valid timestamp
3137

3138
  """
3139
  if val is None or not isinstance(val, (int, float)):
3140
    return "N/A"
3141
  # these two format codes work on Linux, but they are not guaranteed on all
3142
  # platforms
3143
  return time.strftime("%F %T", time.localtime(val))
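
# Illustrative output of FormatTime; the exact string depends on the local
# timezone of the machine:
#   >>> FormatTime(None)
#   'N/A'
#   >>> FormatTime(time.time())   # e.g. '2010-12-01 15:30:00'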


def FormatSeconds(secs):
3147
  """Formats seconds for easier reading.
3148

3149
  @type secs: number
3150
  @param secs: Number of seconds
3151
  @rtype: string
3152
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")
3153

3154
  """
3155
  parts = []
3156

    
3157
  secs = round(secs, 0)
3158

    
3159
  if secs > 0:
3160
    # Negative values would be a bit tricky
3161
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
3162
      (complete, secs) = divmod(secs, one)
3163
      if complete or parts:
3164
        parts.append("%d%s" % (complete, unit))
3165

    
3166
  parts.append("%ds" % secs)
3167

    
3168
  return " ".join(parts)
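
# Illustrative output of FormatSeconds:
#   >>> FormatSeconds(195790)
#   '2d 6h 23m 10s'
#   >>> FormatSeconds(59)
#   '59s'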


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3172
  """Reads the watcher pause file.
3173

3174
  @type filename: string
3175
  @param filename: Path to watcher pause file
3176
  @type now: None, float or int
3177
  @param now: Current time as Unix timestamp
3178
  @type remove_after: int
3179
  @param remove_after: Remove watcher pause file after specified amount of
3180
    seconds past the pause end time
3181

3182
  """
3183
  if now is None:
3184
    now = time.time()
3185

    
3186
  try:
3187
    value = ReadFile(filename)
3188
  except IOError, err:
3189
    if err.errno != errno.ENOENT:
3190
      raise
3191
    value = None
3192

    
3193
  if value is not None:
3194
    try:
3195
      value = int(value)
3196
    except ValueError:
3197
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3198
                       " removing it"), filename)
3199
      RemoveFile(filename)
3200
      value = None
3201

    
3202
    if value is not None:
3203
      # Remove file if it's outdated
3204
      if now > (value + remove_after):
3205
        RemoveFile(filename)
3206
        value = None
3207

    
3208
      elif now > value:
3209
        value = None
3210

    
3211
  return value
3212

    
3213

    
3214
class RetryTimeout(Exception):
3215
  """Retry loop timed out.
3216

3217
  Any arguments which were passed by the retried function to RetryAgain will
  be preserved in RetryTimeout, if it is raised. If such an argument was an
  exception, the RaiseInner helper method will reraise it.
3220

3221
  """
3222
  def RaiseInner(self):
3223
    if self.args and isinstance(self.args[0], Exception):
3224
      raise self.args[0]
3225
    else:
3226
      raise RetryTimeout(*self.args)
3227

    
3228

    
3229
class RetryAgain(Exception):
3230
  """Retry again.
3231

3232
  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
3233
  arguments to RetryTimeout. If an exception is passed, the RaiseInner()
  method of the RetryTimeout exception can be used to reraise it.
3235

3236
  """
3237

    
3238

    
3239
class _RetryDelayCalculator(object):
3240
  """Calculator for increasing delays.
3241

3242
  """
3243
  __slots__ = [
3244
    "_factor",
3245
    "_limit",
3246
    "_next",
3247
    "_start",
3248
    ]
3249

    
3250
  def __init__(self, start, factor, limit):
3251
    """Initializes this class.
3252

3253
    @type start: float
3254
    @param start: Initial delay
3255
    @type factor: float
3256
    @param factor: Factor for delay increase
3257
    @type limit: float or None
3258
    @param limit: Upper limit for delay or None for no limit
3259

3260
    """
3261
    assert start > 0.0
3262
    assert factor >= 1.0
3263
    assert limit is None or limit >= 0.0
3264

    
3265
    self._start = start
3266
    self._factor = factor
3267
    self._limit = limit
3268

    
3269
    self._next = start
3270

    
3271
  def __call__(self):
3272
    """Returns current delay and calculates the next one.
3273

3274
    """
3275
    current = self._next
3276

    
3277
    # Update for next run
    if self._limit is None or self._next < self._limit:
      self._next = self._next * self._factor
      if self._limit is not None:
        self._next = min(self._limit, self._next)
3280

    
3281
    return current
3282

    
3283

    
3284
#: Special delay to specify whole remaining timeout
3285
RETRY_REMAINING_TIME = object()
3286

    
3287

    
3288
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3289
          _time_fn=time.time):
3290
  """Call a function repeatedly until it succeeds.
3291

3292
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3293
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3294
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
3295

3296
  C{delay} can be one of the following:
3297
    - callable returning the delay length as a float
3298
    - Tuple of (start, factor, limit)
3299
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3300
      useful when overriding L{wait_fn} to wait for an external event)
3301
    - A static delay as a number (int or float)
3302

3303
  @type fn: callable
3304
  @param fn: Function to be called
3305
  @param delay: Either a callable (returning the delay), a tuple of (start,
3306
                factor, limit) (see L{_RetryDelayCalculator}),
3307
                L{RETRY_REMAINING_TIME} or a number (int or float)
3308
  @type timeout: float
3309
  @param timeout: Total timeout
3310
  @type wait_fn: callable
3311
  @param wait_fn: Waiting function
3312
  @return: Return value of function
3313

3314
  """
3315
  assert callable(fn)
3316
  assert callable(wait_fn)
3317
  assert callable(_time_fn)
3318

    
3319
  if args is None:
3320
    args = []
3321

    
3322
  end_time = _time_fn() + timeout
3323

    
3324
  if callable(delay):
3325
    # External function to calculate delay
3326
    calc_delay = delay
3327

    
3328
  elif isinstance(delay, (tuple, list)):
3329
    # Increasing delay with optional upper boundary
3330
    (start, factor, limit) = delay
3331
    calc_delay = _RetryDelayCalculator(start, factor, limit)
3332

    
3333
  elif delay is RETRY_REMAINING_TIME:
3334
    # Always use the remaining time
3335
    calc_delay = None
3336

    
3337
  else:
3338
    # Static delay
3339
    calc_delay = lambda: delay
3340

    
3341
  assert calc_delay is None or callable(calc_delay)
3342

    
3343
  while True:
3344
    retry_args = []
3345
    try:
3346
      # pylint: disable-msg=W0142
3347
      return fn(*args)
3348
    except RetryAgain, err:
3349
      retry_args = err.args
3350
    except RetryTimeout:
3351
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
3352
                                   " handle RetryTimeout")
3353

    
3354
    remaining_time = end_time - _time_fn()
3355

    
3356
    if remaining_time < 0.0:
3357
      # pylint: disable-msg=W0142
3358
      raise RetryTimeout(*retry_args)
3359

    
3360
    assert remaining_time >= 0.0
3361

    
3362
    if calc_delay is None:
3363
      wait_fn(remaining_time)
3364
    else:
3365
      current_delay = calc_delay()
3366
      if current_delay > 0.0:
3367
        wait_fn(current_delay)
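
# Illustrative use of Retry; _CheckSomething is a hypothetical callable that
# raises RetryAgain until its condition holds. The delay starts at 0.1s,
# grows by a factor of 1.5 up to a 2.0s cap, and the whole loop gives up
# with RetryTimeout after 30 seconds:
#   result = Retry(_CheckSomething, (0.1, 1.5, 2.0), 30.0)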


def GetClosedTempfile(*args, **kwargs):
3371
  """Creates a temporary file and returns its path.
3372

3373
  """
3374
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
3375
  _CloseFDNoErr(fd)
3376
  return path
3377

    
3378

    
3379
def GenerateSelfSignedX509Cert(common_name, validity):
3380
  """Generates a self-signed X509 certificate.
3381

3382
  @type common_name: string
3383
  @param common_name: commonName value
3384
  @type validity: int
3385
  @param validity: Validity for certificate in seconds
3386

3387
  """
3388
  # Create private and public key
3389
  key = OpenSSL.crypto.PKey()
3390
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
3391

    
3392
  # Create self-signed certificate
3393
  cert = OpenSSL.crypto.X509()
3394
  if common_name:
3395
    cert.get_subject().CN = common_name
3396
  cert.set_serial_number(1)
3397
  cert.gmtime_adj_notBefore(0)
3398
  cert.gmtime_adj_notAfter(validity)
3399
  cert.set_issuer(cert.get_subject())
3400
  cert.set_pubkey(key)
3401
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
3402

    
3403
  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
3404
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
3405

    
3406
  return (key_pem, cert_pem)
3407

    
3408

    
3409
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3410
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
3411
  """Legacy function to generate self-signed X509 certificate.
3412

3413
  @type filename: str
3414
  @param filename: path to write certificate to
3415
  @type common_name: string
3416
  @param common_name: commonName value
3417
  @type validity: int
3418
  @param validity: validity of certificate in number of days
3419

3420
  """
3421
  # TODO: Investigate using the cluster name instead of X509_CERT_CN for
3422
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3423
  # and node daemon certificates have the proper Subject/Issuer.
3424
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3425
                                                   validity * 24 * 60 * 60)
3426

    
3427
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3428

    
3429

    
3430
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be non-negative"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)

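# Example usage for FileLock (illustrative sketch, not executed): taking an
# exclusive lock with a ten-second timeout and releasing it again; the lock
# file path is hypothetical.
#
#   lock = FileLock.Open("/var/lock/example.lock")
#   try:
#     lock.Exclusive(timeout=10)
#     # ... critical section ...
#     lock.Unlock()
#   finally:
#     lock.Close()
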
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    """Adds a data chunk, splitting off any newly completed lines."""
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    """Calls the line function for all complete lines collected so far."""
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    """Flushes pending lines, including a trailing line without newline."""
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)

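# Example usage for LineSplitter (illustrative sketch, not executed):
# collecting complete lines from data that arrives in arbitrary chunks, e.g.
# output read from a pipe.
#
#   collected = []
#   splitter = LineSplitter(collected.append)
#   splitter.write("first line\nsecond ")
#   splitter.write("line\n")
#   splitter.close()
#   # collected == ["first line", "second line"]
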
def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap

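# Example usage for SignalHandled (illustrative sketch, not executed): the
# decorated function receives the installed handlers through the
# 'signal_handlers' keyword argument and can poll their 'called' flags;
# _DoSomeWork is a hypothetical helper.
#
#   @SignalHandled([signal.SIGTERM])
#   def _Worker(signal_handlers=None):
#     handler = signal_handlers[signal.SIGTERM]
#     while not handler.called:
#       _DoSomeWork()
#
#   _Worker()
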
class SignalWakeupFd(object):
  """Wakeup file descriptor for signal delivery, backed by a pipe.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeed, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()

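# Example usage for SignalWakeupFd (illustrative sketch, not executed):
# combined with the SignalHandler class below so that a blocking select()
# call returns as soon as SIGCHLD arrives; _ReapChildren is a hypothetical
# helper.
#
#   wakeup = SignalWakeupFd()
#   handler = SignalHandler([signal.SIGCHLD], wakeup=wakeup)
#   try:
#     select.select([wakeup], [], [], 60.0)
#     if handler.called:
#       _ReapChildren()
#   finally:
#     handler.Reset()
#     wakeup.Reset()
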
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when the instance is
  deleted or when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @type wakeup: None or L{SignalWakeupFd}
    @param wakeup: If not None, notified via L{SignalWakeupFd.Notify} when a
        signal arrives

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)

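# Example usage for SignalHandler (illustrative sketch, not executed):
# temporarily catching SIGINT while a long-running step completes, then
# restoring the previous handler; _RunLongStep is a hypothetical helper.
#
#   handler = SignalHandler([signal.SIGINT])
#   try:
#     _RunLongStep()
#     if handler.called:
#       logging.info("Interrupted by SIGINT, cleaning up")
#   finally:
#     handler.Reset()
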
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
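# Example usage for FieldSet (illustrative sketch, not executed): a field set
# mixing literal names with a regular expression, as could be used for query
# field validation; the field names are hypothetical.
#
#   fields = FieldSet("name", "uuid", r"disk\.size/([0-9]+)")
#   fields.Matches("disk.size/2")         # match object, group(1) == "2"
#   fields.NonMatching(["name", "foo"])   # returns ["foo"]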