
lib/utils.py @ revision 31155d60


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64
from ganeti import netutils
65

    
66

    
67
_locksheld = []
68
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
69

    
70
debug_locks = False
71

    
72
#: when set to True, L{RunCmd} is disabled
73
no_fork = False
74

    
75
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
76

    
77
HEX_CHAR_RE = r"[a-zA-Z0-9]"
78
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
79
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
80
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
81
                             HEX_CHAR_RE, HEX_CHAR_RE),
82
                            re.S | re.I)
83

    
84
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
85

    
86
# Certificate verification results
87
(CERT_WARNING,
88
 CERT_ERROR) = range(1, 3)
89

    
90
# Flags for mlockall() (from bits/mman.h)
91
_MCL_CURRENT = 1
92
_MCL_FUTURE = 2
93

    
94

    
95
class RunResult(object):
96
  """Holds the result of running external programs.
97

98
  @type exit_code: int
99
  @ivar exit_code: the exit code of the program, or None (if the program
100
      didn't exit())
101
  @type signal: int or None
102
  @ivar signal: the signal that caused the program to finish, or None
103
      (if the program wasn't terminated by a signal)
104
  @type stdout: str
105
  @ivar stdout: the standard output of the program
106
  @type stderr: str
107
  @ivar stderr: the standard error of the program
108
  @type failed: boolean
109
  @ivar failed: True in case the program was
110
      terminated by a signal or exited with a non-zero exit code
111
  @ivar fail_reason: a string detailing the termination reason
112

113
  """
114
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
115
               "failed", "fail_reason", "cmd"]
116

    
117

    
118
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
119
    self.cmd = cmd
120
    self.exit_code = exit_code
121
    self.signal = signal_
122
    self.stdout = stdout
123
    self.stderr = stderr
124
    self.failed = (signal_ is not None or exit_code != 0)
125

    
126
    if self.signal is not None:
127
      self.fail_reason = "terminated by signal %s" % self.signal
128
    elif self.exit_code is not None:
129
      self.fail_reason = "exited with exit code %s" % self.exit_code
130
    else:
131
      self.fail_reason = "unable to determine termination reason"
132

    
133
    if self.failed:
134
      logging.debug("Command '%s' failed (%s); output: %s",
135
                    self.cmd, self.fail_reason, self.output)
136

    
137
  def _GetOutput(self):
138
    """Returns the combined stdout and stderr for easier usage.
139

140
    """
141
    return self.stdout + self.stderr
142

    
143
  output = property(_GetOutput, None, None, "Return full output")
144

    
145

    
146
def _BuildCmdEnvironment(env, reset):
147
  """Builds the environment for an external program.
148

149
  """
150
  if reset:
151
    cmd_env = {}
152
  else:
153
    cmd_env = os.environ.copy()
154
    cmd_env["LC_ALL"] = "C"
155

    
156
  if env is not None:
157
    cmd_env.update(env)
158

    
159
  return cmd_env
160

    
161

    
162
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
163
  """Execute a (shell) command.
164

165
  The command should not read from its standard input, as it will be
166
  closed.
167

168
  @type cmd: string or list
169
  @param cmd: Command to run
170
  @type env: dict
171
  @param env: Additional environment variables
172
  @type output: str
173
  @param output: if desired, the output of the command can be
174
      saved in a file instead of the RunResult instance; this
175
      parameter denotes the file name (if not None)
176
  @type cwd: string
177
  @param cwd: if specified, will be used as the working
178
      directory for the command; the default will be /
179
  @type reset_env: boolean
180
  @param reset_env: whether to reset or keep the default os environment
181
  @rtype: L{RunResult}
182
  @return: RunResult instance
183
  @raise errors.ProgrammerError: if we call this when forks are disabled
184

185
  """
186
  if no_fork:
187
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
188

    
189
  if isinstance(cmd, basestring):
190
    strcmd = cmd
191
    shell = True
192
  else:
193
    cmd = [str(val) for val in cmd]
194
    strcmd = ShellQuoteArgs(cmd)
195
    shell = False
196

    
197
  if output:
198
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
199
  else:
200
    logging.debug("RunCmd %s", strcmd)
201

    
202
  cmd_env = _BuildCmdEnvironment(env, reset_env)
203

    
204
  try:
205
    if output is None:
206
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
207
    else:
208
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
209
      out = err = ""
210
  except OSError, err:
211
    if err.errno == errno.ENOENT:
212
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
213
                               (strcmd, err))
214
    else:
215
      raise
216

    
217
  if status >= 0:
218
    exitcode = status
219
    signal_ = None
220
  else:
221
    exitcode = None
222
    signal_ = -status
223

    
224
  return RunResult(exitcode, signal_, out, err, strcmd)
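
# Illustrative usage of RunCmd (a sketch, not part of the original module).
# It assumes a POSIX "ls" binary on the PATH; the RunResult attributes used
# here (failed, fail_reason, output, stdout) are documented above.
#
#   result = RunCmd(["ls", "/tmp"])
#   if result.failed:
#     logging.error("listing failed (%s): %s",
#                   result.fail_reason, result.output)
#   else:
#     logging.debug("listing succeeded: %s", result.stdout)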
225

    
226

    
227
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
228
                pidfile=None):
229
  """Start a daemon process after forking twice.
230

231
  @type cmd: string or list
232
  @param cmd: Command to run
233
  @type env: dict
234
  @param env: Additional environment variables
235
  @type cwd: string
236
  @param cwd: Working directory for the program
237
  @type output: string
238
  @param output: Path to file in which to save the output
239
  @type output_fd: int
240
  @param output_fd: File descriptor for output
241
  @type pidfile: string
242
  @param pidfile: Process ID file
243
  @rtype: int
244
  @return: Daemon process ID
245
  @raise errors.ProgrammerError: if we call this when forks are disabled
246

247
  """
248
  if no_fork:
249
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
250
                                 " disabled")
251

    
252
  if output and not (bool(output) ^ (output_fd is not None)):
253
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
254
                                 " specified")
255

    
256
  if isinstance(cmd, basestring):
257
    cmd = ["/bin/sh", "-c", cmd]
258

    
259
  strcmd = ShellQuoteArgs(cmd)
260

    
261
  if output:
262
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
263
  else:
264
    logging.debug("StartDaemon %s", strcmd)
265

    
266
  cmd_env = _BuildCmdEnvironment(env, False)
267

    
268
  # Create pipe for sending PID back
269
  (pidpipe_read, pidpipe_write) = os.pipe()
270
  try:
271
    try:
272
      # Create pipe for sending error messages
273
      (errpipe_read, errpipe_write) = os.pipe()
274
      try:
275
        try:
276
          # First fork
277
          pid = os.fork()
278
          if pid == 0:
279
            try:
280
              # Child process, won't return
281
              _StartDaemonChild(errpipe_read, errpipe_write,
282
                                pidpipe_read, pidpipe_write,
283
                                cmd, cmd_env, cwd,
284
                                output, output_fd, pidfile)
285
            finally:
286
              # Well, maybe child process failed
287
              os._exit(1) # pylint: disable-msg=W0212
288
        finally:
289
          _CloseFDNoErr(errpipe_write)
290

    
291
        # Wait for daemon to be started (or an error message to arrive) and read
292
        # up to 100 KB as an error message
293
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
294
      finally:
295
        _CloseFDNoErr(errpipe_read)
296
    finally:
297
      _CloseFDNoErr(pidpipe_write)
298

    
299
    # Read up to 128 bytes for PID
300
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
301
  finally:
302
    _CloseFDNoErr(pidpipe_read)
303

    
304
  # Try to avoid zombies by waiting for child process
305
  try:
306
    os.waitpid(pid, 0)
307
  except OSError:
308
    pass
309

    
310
  if errormsg:
311
    raise errors.OpExecError("Error when starting daemon process: %r" %
312
                             errormsg)
313

    
314
  try:
315
    return int(pidtext)
316
  except (ValueError, TypeError), err:
317
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
318
                             (pidtext, err))
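
# Illustrative usage of StartDaemon (a sketch, not part of the original
# module). The daemon path, flag and file names below are assumptions made
# for the example; the PID file can later be read back with
# ReadLockedPidFile(), defined further below.
#
#   pid = StartDaemon(["/usr/sbin/mydaemon", "--foreground"],
#                     output="/var/log/mydaemon.log",
#                     pidfile="/var/run/mydaemon.pid")
#   logging.info("daemon started with PID %d", pid)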
319

    
320

    
321
def _StartDaemonChild(errpipe_read, errpipe_write,
322
                      pidpipe_read, pidpipe_write,
323
                      args, env, cwd,
324
                      output, fd_output, pidfile):
325
  """Child process for starting daemon.
326

327
  """
328
  try:
329
    # Close parent's side
330
    _CloseFDNoErr(errpipe_read)
331
    _CloseFDNoErr(pidpipe_read)
332

    
333
    # First child process
334
    os.chdir("/")
335
    os.umask(077)
336
    os.setsid()
337

    
338
    # And fork for the second time
339
    pid = os.fork()
340
    if pid != 0:
341
      # Exit first child process
342
      os._exit(0) # pylint: disable-msg=W0212
343

    
344
    # Make sure pipe is closed on execv* (and thereby notifies original process)
345
    SetCloseOnExecFlag(errpipe_write, True)
346

    
347
    # List of file descriptors to be left open
348
    noclose_fds = [errpipe_write]
349

    
350
    # Open PID file
351
    if pidfile:
352
      try:
353
        # TODO: Atomic replace with another locked file instead of writing into
354
        # it after creating
355
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
356

    
357
        # Lock the PID file (and fail if not possible to do so). Any code
358
        # wanting to send a signal to the daemon should try to lock the PID
359
        # file before reading it. If acquiring the lock succeeds, the daemon is
360
        # no longer running and the signal should not be sent.
361
        LockFile(fd_pidfile)
362

    
363
        os.write(fd_pidfile, "%d\n" % os.getpid())
364
      except Exception, err:
365
        raise Exception("Creating and locking PID file failed: %s" % err)
366

    
367
      # Keeping the file open to hold the lock
368
      noclose_fds.append(fd_pidfile)
369

    
370
      SetCloseOnExecFlag(fd_pidfile, False)
371
    else:
372
      fd_pidfile = None
373

    
374
    # Open /dev/null
375
    fd_devnull = os.open(os.devnull, os.O_RDWR)
376

    
377
    assert not output or (bool(output) ^ (fd_output is not None))
378

    
379
    if fd_output is not None:
380
      pass
381
    elif output:
382
      # Open output file
383
      try:
384
        # TODO: Implement flag to set append=yes/no
385
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
386
      except EnvironmentError, err:
387
        raise Exception("Opening output file failed: %s" % err)
388
    else:
389
      fd_output = fd_devnull
390

    
391
    # Redirect standard I/O
392
    os.dup2(fd_devnull, 0)
393
    os.dup2(fd_output, 1)
394
    os.dup2(fd_output, 2)
395

    
396
    # Send daemon PID to parent
397
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
398

    
399
    # Close all file descriptors except stdio and error message pipe
400
    CloseFDs(noclose_fds=noclose_fds)
401

    
402
    # Change working directory
403
    os.chdir(cwd)
404

    
405
    if env is None:
406
      os.execvp(args[0], args)
407
    else:
408
      os.execvpe(args[0], args, env)
409
  except: # pylint: disable-msg=W0702
410
    try:
411
      # Report errors to original process
412
      buf = str(sys.exc_info()[1])
413

    
414
      RetryOnSignal(os.write, errpipe_write, buf)
415
    except: # pylint: disable-msg=W0702
416
      # Ignore errors in error handling
417
      pass
418

    
419
  os._exit(1) # pylint: disable-msg=W0212
420

    
421

    
422
def _RunCmdPipe(cmd, env, via_shell, cwd):
423
  """Run a command and return its output.
424

425
  @type  cmd: string or list
426
  @param cmd: Command to run
427
  @type env: dict
428
  @param env: The environment to use
429
  @type via_shell: bool
430
  @param via_shell: if we should run via the shell
431
  @type cwd: string
432
  @param cwd: the working directory for the program
433
  @rtype: tuple
434
  @return: (out, err, status)
435

436
  """
437
  poller = select.poll()
438
  child = subprocess.Popen(cmd, shell=via_shell,
439
                           stderr=subprocess.PIPE,
440
                           stdout=subprocess.PIPE,
441
                           stdin=subprocess.PIPE,
442
                           close_fds=True, env=env,
443
                           cwd=cwd)
444

    
445
  child.stdin.close()
446
  poller.register(child.stdout, select.POLLIN)
447
  poller.register(child.stderr, select.POLLIN)
448
  out = StringIO()
449
  err = StringIO()
450
  fdmap = {
451
    child.stdout.fileno(): (out, child.stdout),
452
    child.stderr.fileno(): (err, child.stderr),
453
    }
454
  for fd in fdmap:
455
    SetNonblockFlag(fd, True)
456

    
457
  while fdmap:
458
    pollresult = RetryOnSignal(poller.poll)
459

    
460
    for fd, event in pollresult:
461
      if event & select.POLLIN or event & select.POLLPRI:
462
        data = fdmap[fd][1].read()
463
        # no data from read signifies EOF (the same as POLLHUP)
464
        if not data:
465
          poller.unregister(fd)
466
          del fdmap[fd]
467
          continue
468
        fdmap[fd][0].write(data)
469
      if (event & select.POLLNVAL or event & select.POLLHUP or
470
          event & select.POLLERR):
471
        poller.unregister(fd)
472
        del fdmap[fd]
473

    
474
  out = out.getvalue()
475
  err = err.getvalue()
476

    
477
  status = child.wait()
478
  return out, err, status
479

    
480

    
481
def _RunCmdFile(cmd, env, via_shell, output, cwd):
482
  """Run a command and save its output to a file.
483

484
  @type  cmd: string or list
485
  @param cmd: Command to run
486
  @type env: dict
487
  @param env: The environment to use
488
  @type via_shell: bool
489
  @param via_shell: if we should run via the shell
490
  @type output: str
491
  @param output: the filename in which to save the output
492
  @type cwd: string
493
  @param cwd: the working directory for the program
494
  @rtype: int
495
  @return: the exit status
496

497
  """
498
  fh = open(output, "a")
499
  try:
500
    child = subprocess.Popen(cmd, shell=via_shell,
501
                             stderr=subprocess.STDOUT,
502
                             stdout=fh,
503
                             stdin=subprocess.PIPE,
504
                             close_fds=True, env=env,
505
                             cwd=cwd)
506

    
507
    child.stdin.close()
508
    status = child.wait()
509
  finally:
510
    fh.close()
511
  return status
512

    
513

    
514
def SetCloseOnExecFlag(fd, enable):
515
  """Sets or unsets the close-on-exec flag on a file descriptor.
516

517
  @type fd: int
518
  @param fd: File descriptor
519
  @type enable: bool
520
  @param enable: Whether to set or unset it.
521

522
  """
523
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)
524

    
525
  if enable:
526
    flags |= fcntl.FD_CLOEXEC
527
  else:
528
    flags &= ~fcntl.FD_CLOEXEC
529

    
530
  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
531

    
532

    
533
def SetNonblockFlag(fd, enable):
534
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
535

536
  @type fd: int
537
  @param fd: File descriptor
538
  @type enable: bool
539
  @param enable: Whether to set or unset it
540

541
  """
542
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)
543

    
544
  if enable:
545
    flags |= os.O_NONBLOCK
546
  else:
547
    flags &= ~os.O_NONBLOCK
548

    
549
  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
550

    
551

    
552
def RetryOnSignal(fn, *args, **kwargs):
553
  """Calls a function again if it failed due to EINTR.
554

555
  """
556
  while True:
557
    try:
558
      return fn(*args, **kwargs)
559
    except EnvironmentError, err:
560
      if err.errno != errno.EINTR:
561
        raise
562
    except (socket.error, select.error), err:
563
      # In python 2.6 and above select.error is an IOError, so it's handled
564
      # above, in 2.5 and below it's not, and it's handled here.
565
      if not (err.args and err.args[0] == errno.EINTR):
566
        raise
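
# Illustrative usage of RetryOnSignal (a sketch, not part of the original
# module): transparently restart an os.read() call that was interrupted by
# a signal (EINTR). "fd" is a placeholder file descriptor.
#
#   data = RetryOnSignal(os.read, fd, 4096)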
567

    
568

    
569
def RunParts(dir_name, env=None, reset_env=False):
570
  """Run Scripts or programs in a directory
571

572
  @type dir_name: string
573
  @param dir_name: absolute path to a directory
574
  @type env: dict
575
  @param env: The environment to use
576
  @type reset_env: boolean
577
  @param reset_env: whether to reset or keep the default os environment
578
  @rtype: list of tuples
579
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
580

581
  """
582
  rr = []
583

    
584
  try:
585
    dir_contents = ListVisibleFiles(dir_name)
586
  except OSError, err:
587
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
588
    return rr
589

    
590
  for relname in sorted(dir_contents):
591
    fname = PathJoin(dir_name, relname)
592
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
593
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
594
      rr.append((relname, constants.RUNPARTS_SKIP, None))
595
    else:
596
      try:
597
        result = RunCmd([fname], env=env, reset_env=reset_env)
598
      except Exception, err: # pylint: disable-msg=W0703
599
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
600
      else:
601
        rr.append((relname, constants.RUNPARTS_RUN, result))
602

    
603
  return rr
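
# Illustrative usage of RunParts (a sketch, not part of the original module);
# the directory path is an assumption made for the example.
#
#   for (name, status, runresult) in RunParts("/etc/ganeti/hooks.d"):
#     if status == constants.RUNPARTS_RUN and runresult.failed:
#       logging.warning("script %s failed: %s", name, runresult.output)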
604

    
605

    
606
def RemoveFile(filename):
607
  """Remove a file ignoring some errors.
608

609
  Remove a file, ignoring non-existing ones or directories. Other
610
  errors are passed.
611

612
  @type filename: str
613
  @param filename: the file to be removed
614

615
  """
616
  try:
617
    os.unlink(filename)
618
  except OSError, err:
619
    if err.errno not in (errno.ENOENT, errno.EISDIR):
620
      raise
621

    
622

    
623
def RemoveDir(dirname):
624
  """Remove an empty directory.
625

626
  Remove a directory, ignoring non-existing ones.
627
  Other errors are passed. This includes the case
628
  where the directory is not empty, so it can't be removed.
629

630
  @type dirname: str
631
  @param dirname: the empty directory to be removed
632

633
  """
634
  try:
635
    os.rmdir(dirname)
636
  except OSError, err:
637
    if err.errno != errno.ENOENT:
638
      raise
639

    
640

    
641
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
642
  """Renames a file.
643

644
  @type old: string
645
  @param old: Original path
646
  @type new: string
647
  @param new: New path
648
  @type mkdir: bool
649
  @param mkdir: Whether to create target directory if it doesn't exist
650
  @type mkdir_mode: int
651
  @param mkdir_mode: Mode for newly created directories
652

653
  """
654
  try:
655
    return os.rename(old, new)
656
  except OSError, err:
657
    # In at least one use case of this function, the job queue, directory
658
    # creation is very rare. Checking for the directory before renaming is not
659
    # as efficient.
660
    if mkdir and err.errno == errno.ENOENT:
661
      # Create directory and try again
662
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
663

    
664
      return os.rename(old, new)
665

    
666
    raise
667

    
668

    
669
def Makedirs(path, mode=0750):
670
  """Super-mkdir; create a leaf directory and all intermediate ones.
671

672
  This is a wrapper around C{os.makedirs} adding error handling not implemented
673
  before Python 2.5.
674

675
  """
676
  try:
677
    os.makedirs(path, mode)
678
  except OSError, err:
679
    # Ignore EEXIST. This is only handled in os.makedirs as included in
680
    # Python 2.5 and above.
681
    if err.errno != errno.EEXIST or not os.path.exists(path):
682
      raise
683

    
684

    
685
def ResetTempfileModule():
686
  """Resets the random name generator of the tempfile module.
687

688
  This function should be called after C{os.fork} in the child process to
689
  ensure it creates a newly seeded random generator. Otherwise it would
690
  generate the same random parts as the parent process. If several processes
691
  race for the creation of a temporary file, this could lead to one not getting
692
  a temporary name.
693

694
  """
695
  # pylint: disable-msg=W0212
696
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
697
    tempfile._once_lock.acquire()
698
    try:
699
      # Reset random name generator
700
      tempfile._name_sequence = None
701
    finally:
702
      tempfile._once_lock.release()
703
  else:
704
    logging.critical("The tempfile module misses at least one of the"
705
                     " '_once_lock' and '_name_sequence' attributes")
706

    
707

    
708
def _FingerprintFile(filename):
709
  """Compute the fingerprint of a file.
710

711
  If the file does not exist, a None will be returned
712
  instead.
713

714
  @type filename: str
715
  @param filename: the filename to checksum
716
  @rtype: str
717
  @return: the hex digest of the sha checksum of the contents
718
      of the file
719

720
  """
721
  if not (os.path.exists(filename) and os.path.isfile(filename)):
722
    return None
723

    
724
  f = open(filename)
725

    
726
  fp = compat.sha1_hash()
727
  while True:
728
    data = f.read(4096)
729
    if not data:
730
      break
731

    
732
    fp.update(data)
733

    
734
  return fp.hexdigest()
735

    
736

    
737
def FingerprintFiles(files):
738
  """Compute fingerprints for a list of files.
739

740
  @type files: list
741
  @param files: the list of filenames to fingerprint
742
  @rtype: dict
743
  @return: a dictionary filename: fingerprint, holding only
744
      existing files
745

746
  """
747
  ret = {}
748

    
749
  for filename in files:
750
    cksum = _FingerprintFile(filename)
751
    if cksum:
752
      ret[filename] = cksum
753

    
754
  return ret
755

    
756

    
757
def ForceDictType(target, key_types, allowed_values=None):
758
  """Force the values of a dict to have certain types.
759

760
  @type target: dict
761
  @param target: the dict to update
762
  @type key_types: dict
763
  @param key_types: dict mapping target dict keys to types
764
                    in constants.ENFORCEABLE_TYPES
765
  @type allowed_values: list
766
  @keyword allowed_values: list of specially allowed values
767

768
  """
769
  if allowed_values is None:
770
    allowed_values = []
771

    
772
  if not isinstance(target, dict):
773
    msg = "Expected dictionary, got '%s'" % target
774
    raise errors.TypeEnforcementError(msg)
775

    
776
  for key in target:
777
    if key not in key_types:
778
      msg = "Unknown key '%s'" % key
779
      raise errors.TypeEnforcementError(msg)
780

    
781
    if target[key] in allowed_values:
782
      continue
783

    
784
    ktype = key_types[key]
785
    if ktype not in constants.ENFORCEABLE_TYPES:
786
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
787
      raise errors.ProgrammerError(msg)
788

    
789
    if ktype == constants.VTYPE_STRING:
790
      if not isinstance(target[key], basestring):
791
        if isinstance(target[key], bool) and not target[key]:
792
          target[key] = ''
793
        else:
794
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
795
          raise errors.TypeEnforcementError(msg)
796
    elif ktype == constants.VTYPE_BOOL:
797
      if isinstance(target[key], basestring) and target[key]:
798
        if target[key].lower() == constants.VALUE_FALSE:
799
          target[key] = False
800
        elif target[key].lower() == constants.VALUE_TRUE:
801
          target[key] = True
802
        else:
803
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
804
          raise errors.TypeEnforcementError(msg)
805
      elif target[key]:
806
        target[key] = True
807
      else:
808
        target[key] = False
809
    elif ktype == constants.VTYPE_SIZE:
810
      try:
811
        target[key] = ParseUnit(target[key])
812
      except errors.UnitParseError, err:
813
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
814
              (key, target[key], err)
815
        raise errors.TypeEnforcementError(msg)
816
    elif ktype == constants.VTYPE_INT:
817
      try:
818
        target[key] = int(target[key])
819
      except (ValueError, TypeError):
820
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
821
        raise errors.TypeEnforcementError(msg)
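
# Illustrative usage of ForceDictType (a sketch, not part of the original
# module): coerce string values into the declared types, in place. The keys
# used here are made up for the example.
#
#   params = {"memory": "128M", "port": "11000"}
#   ForceDictType(params, {"memory": constants.VTYPE_SIZE,
#                          "port": constants.VTYPE_INT})
#   # params is now {"memory": 128, "port": 11000}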
822

    
823

    
824
def _GetProcStatusPath(pid):
825
  """Returns the path for a PID's proc status file.
826

827
  @type pid: int
828
  @param pid: Process ID
829
  @rtype: string
830

831
  """
832
  return "/proc/%d/status" % pid
833

    
834

    
835
def IsProcessAlive(pid):
836
  """Check if a given pid exists on the system.
837

838
  @note: zombie status is not handled, so zombie processes
839
      will be returned as alive
840
  @type pid: int
841
  @param pid: the process ID to check
842
  @rtype: boolean
843
  @return: True if the process exists
844

845
  """
846
  def _TryStat(name):
847
    try:
848
      os.stat(name)
849
      return True
850
    except EnvironmentError, err:
851
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
852
        return False
853
      elif err.errno == errno.EINVAL:
854
        raise RetryAgain(err)
855
      raise
856

    
857
  assert isinstance(pid, int), "pid must be an integer"
858
  if pid <= 0:
859
    return False
860

    
861
  # /proc in a multiprocessor environment can have strange behaviors.
862
  # Retry the os.stat a few times until we get a good result.
863
  try:
864
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
865
                 args=[_GetProcStatusPath(pid)])
866
  except RetryTimeout, err:
867
    err.RaiseInner()
868

    
869

    
870
def _ParseSigsetT(sigset):
871
  """Parse a rendered sigset_t value.
872

873
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
874
  function.
875

876
  @type sigset: string
877
  @param sigset: Rendered signal set from /proc/$pid/status
878
  @rtype: set
879
  @return: Set of all enabled signal numbers
880

881
  """
882
  result = set()
883

    
884
  signum = 0
885
  for ch in reversed(sigset):
886
    chv = int(ch, 16)
887

    
888
    # The following could be done in a loop, but it's easier to read and
889
    # understand in the unrolled form
890
    if chv & 1:
891
      result.add(signum + 1)
892
    if chv & 2:
893
      result.add(signum + 2)
894
    if chv & 4:
895
      result.add(signum + 3)
896
    if chv & 8:
897
      result.add(signum + 4)
898

    
899
    signum += 4
900

    
901
  return result
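
# Worked example for _ParseSigsetT (a sketch, not part of the original
# module): a process catching SIGHUP (1) and SIGTERM (15) has a SigCgt mask
# of 0x4001, typically rendered by /proc as a 16-digit hex string.
#
#   _ParseSigsetT("0000000000004001")  # -> set([1, 15])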
902

    
903

    
904
def _GetProcStatusField(pstatus, field):
905
  """Retrieves a field from the contents of a proc status file.
906

907
  @type pstatus: string
908
  @param pstatus: Contents of /proc/$pid/status
909
  @type field: string
910
  @param field: Name of field whose value should be returned
911
  @rtype: string
912

913
  """
914
  for line in pstatus.splitlines():
915
    parts = line.split(":", 1)
916

    
917
    if len(parts) < 2 or parts[0] != field:
918
      continue
919

    
920
    return parts[1].strip()
921

    
922
  return None
923

    
924

    
925
def IsProcessHandlingSignal(pid, signum, status_path=None):
926
  """Checks whether a process is handling a signal.
927

928
  @type pid: int
929
  @param pid: Process ID
930
  @type signum: int
931
  @param signum: Signal number
932
  @rtype: bool
933

934
  """
935
  if status_path is None:
936
    status_path = _GetProcStatusPath(pid)
937

    
938
  try:
939
    proc_status = ReadFile(status_path)
940
  except EnvironmentError, err:
941
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
942
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
943
      return False
944
    raise
945

    
946
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
947
  if sigcgt is None:
948
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
949

    
950
  # Now check whether signal is handled
951
  return signum in _ParseSigsetT(sigcgt)
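
# Illustrative usage of IsProcessHandlingSignal (a sketch, not part of the
# original module): check whether the current process has a SIGTERM handler
# installed.
#
#   handles_term = IsProcessHandlingSignal(os.getpid(), signal.SIGTERM)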
952

    
953

    
954
def ReadPidFile(pidfile):
955
  """Read a pid from a file.
956

957
  @type  pidfile: string
958
  @param pidfile: path to the file containing the pid
959
  @rtype: int
960
  @return: The process id, if the file exists and contains a valid PID,
961
           otherwise 0
962

963
  """
964
  try:
965
    raw_data = ReadOneLineFile(pidfile)
966
  except EnvironmentError, err:
967
    if err.errno != errno.ENOENT:
968
      logging.exception("Can't read pid file")
969
    return 0
970

    
971
  try:
972
    pid = int(raw_data)
973
  except (TypeError, ValueError), err:
974
    logging.info("Can't parse pid file contents", exc_info=True)
975
    return 0
976

    
977
  return pid
978

    
979

    
980
def ReadLockedPidFile(path):
981
  """Reads a locked PID file.
982

983
  This can be used together with L{StartDaemon}.
984

985
  @type path: string
986
  @param path: Path to PID file
987
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
988

989
  """
990
  try:
991
    fd = os.open(path, os.O_RDONLY)
992
  except EnvironmentError, err:
993
    if err.errno == errno.ENOENT:
994
      # PID file doesn't exist
995
      return None
996
    raise
997

    
998
  try:
999
    try:
1000
      # Try to acquire lock
1001
      LockFile(fd)
1002
    except errors.LockError:
1003
      # Couldn't lock, daemon is running
1004
      return int(os.read(fd, 100))
1005
  finally:
1006
    os.close(fd)
1007

    
1008
  return None
1009

    
1010

    
1011
def MatchNameComponent(key, name_list, case_sensitive=True):
1012
  """Try to match a name against a list.
1013

1014
  This function will try to match a name like test1 against a list
1015
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
1016
  this list, I{'test1'} as well as I{'test1.example'} will match, but
1017
  not I{'test1.ex'}. A multiple match will be considered as no match
1018
  at all (e.g. I{'test1'} against C{['test1.example.com',
1019
  'test1.example.org']}), except when the key fully matches an entry
1020
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
1021

1022
  @type key: str
1023
  @param key: the name to be searched
1024
  @type name_list: list
1025
  @param name_list: the list of strings against which to search the key
1026
  @type case_sensitive: boolean
1027
  @param case_sensitive: whether to provide a case-sensitive match
1028

1029
  @rtype: None or str
1030
  @return: None if there is no match I{or} if there are multiple matches,
1031
      otherwise the element from the list which matches
1032

1033
  """
1034
  if key in name_list:
1035
    return key
1036

    
1037
  re_flags = 0
1038
  if not case_sensitive:
1039
    re_flags |= re.IGNORECASE
1040
    key = key.upper()
1041
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
1042
  names_filtered = []
1043
  string_matches = []
1044
  for name in name_list:
1045
    if mo.match(name) is not None:
1046
      names_filtered.append(name)
1047
      if not case_sensitive and key == name.upper():
1048
        string_matches.append(name)
1049

    
1050
  if len(string_matches) == 1:
1051
    return string_matches[0]
1052
  if len(names_filtered) == 1:
1053
    return names_filtered[0]
1054
  return None
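
# Examples for MatchNameComponent, following the docstring above (a sketch,
# not part of the original module):
#
#   MatchNameComponent("test1", ["test1.example.com", "test2.example.com"])
#   # -> "test1.example.com" (unique match on a component boundary)
#   MatchNameComponent("test1", ["test1.example.com", "test1.example.org"])
#   # -> None (ambiguous)
#   MatchNameComponent("test1", ["test1", "test1.example.com"])
#   # -> "test1" (exact match wins)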
1055

    
1056

    
1057
def ValidateServiceName(name):
1058
  """Validate the given service name.
1059

1060
  @type name: number or string
1061
  @param name: Service name or port specification
1062

1063
  """
1064
  try:
1065
    numport = int(name)
1066
  except (ValueError, TypeError):
1067
    # Non-numeric service name
1068
    valid = _VALID_SERVICE_NAME_RE.match(name)
1069
  else:
1070
    # Numeric port (protocols other than TCP or UDP might need adjustments
1071
    # here)
1072
    valid = (numport >= 0 and numport < (1 << 16))
1073

    
1074
  if not valid:
1075
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
1076
                               errors.ECODE_INVAL)
1077

    
1078
  return name
1079

    
1080

    
1081
def ListVolumeGroups():
1082
  """List volume groups and their size
1083

1084
  @rtype: dict
1085
  @return:
1086
       Dictionary mapping volume group names to their
       sizes (in MiB)
1088

1089
  """
1090
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1091
  result = RunCmd(command)
1092
  retval = {}
1093
  if result.failed:
1094
    return retval
1095

    
1096
  for line in result.stdout.splitlines():
1097
    try:
1098
      name, size = line.split()
1099
      size = int(float(size))
1100
    except (IndexError, ValueError), err:
1101
      logging.error("Invalid output from vgs (%s): %s", err, line)
1102
      continue
1103

    
1104
    retval[name] = size
1105

    
1106
  return retval
1107

    
1108

    
1109
def BridgeExists(bridge):
1110
  """Check whether the given bridge exists in the system
1111

1112
  @type bridge: str
1113
  @param bridge: the bridge name to check
1114
  @rtype: boolean
1115
  @return: True if it does
1116

1117
  """
1118
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1119

    
1120

    
1121
def NiceSort(name_list):
1122
  """Sort a list of strings based on digit and non-digit groupings.
1123

1124
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1125
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1126
  'a11']}.
1127

1128
  The sort algorithm breaks each name into groups of either only-digits
1129
  or no-digits. Only the first eight such groups are considered, and
1130
  after that we just use what's left of the string.
1131

1132
  @type name_list: list
1133
  @param name_list: the names to be sorted
1134
  @rtype: list
1135
  @return: a copy of the name list sorted with our algorithm
1136

1137
  """
1138
  _SORTER_BASE = "(\D+|\d+)"
1139
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1140
                                                  _SORTER_BASE, _SORTER_BASE,
1141
                                                  _SORTER_BASE, _SORTER_BASE,
1142
                                                  _SORTER_BASE, _SORTER_BASE)
1143
  _SORTER_RE = re.compile(_SORTER_FULL)
1144
  _SORTER_NODIGIT = re.compile("^\D*$")
1145
  def _TryInt(val):
1146
    """Attempts to convert a variable to integer."""
1147
    if val is None or _SORTER_NODIGIT.match(val):
1148
      return val
1149
    rval = int(val)
1150
    return rval
1151

    
1152
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1153
             for name in name_list]
1154
  to_sort.sort()
1155
  return [tup[1] for tup in to_sort]
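
# Example for NiceSort, matching the docstring above (a sketch, not part of
# the original module):
#
#   NiceSort(["a1", "a10", "a11", "a2"])  # -> ["a1", "a2", "a10", "a11"]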
1156

    
1157

    
1158
def TryConvert(fn, val):
1159
  """Try to convert a value ignoring errors.
1160

1161
  This function tries to apply function I{fn} to I{val}. If no
1162
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1163
  the result, else it will return the original value. Any other
1164
  exceptions are propagated to the caller.
1165

1166
  @type fn: callable
1167
  @param fn: function to apply to the value
1168
  @param val: the value to be converted
1169
  @return: The converted value if the conversion was successful,
1170
      otherwise the original value.
1171

1172
  """
1173
  try:
1174
    nv = fn(val)
1175
  except (ValueError, TypeError):
1176
    nv = val
1177
  return nv
1178

    
1179

    
1180
def IsValidShellParam(word):
1181
  """Verifies is the given word is safe from the shell's p.o.v.
1182

1183
  This means that we can pass this to a command via the shell and be
1184
  sure that it doesn't alter the command line and is passed as such to
1185
  the actual command.
1186

1187
  Note that we are overly restrictive here, in order to be on the safe
1188
  side.
1189

1190
  @type word: str
1191
  @param word: the word to check
1192
  @rtype: boolean
1193
  @return: True if the word is 'safe'
1194

1195
  """
1196
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1197

    
1198

    
1199
def BuildShellCmd(template, *args):
1200
  """Build a safe shell command line from the given arguments.
1201

1202
  This function will check all arguments in the args list so that they
1203
  are valid shell parameters (i.e. they don't contain shell
1204
  metacharacters). If everything is ok, it will return the result of
1205
  template % args.
1206

1207
  @type template: str
1208
  @param template: the string holding the template for the
1209
      string formatting
1210
  @rtype: str
1211
  @return: the expanded command line
1212

1213
  """
1214
  for word in args:
1215
    if not IsValidShellParam(word):
1216
      raise errors.ProgrammerError("Shell argument '%s' contains"
1217
                                   " invalid characters" % word)
1218
  return template % args
1219

    
1220

    
1221
def FormatUnit(value, units):
1222
  """Formats an incoming number of MiB with the appropriate unit.
1223

1224
  @type value: int
1225
  @param value: integer representing the value in MiB (1048576)
1226
  @type units: char
1227
  @param units: the type of formatting we should do:
1228
      - 'h' for automatic scaling
1229
      - 'm' for MiBs
1230
      - 'g' for GiBs
1231
      - 't' for TiBs
1232
  @rtype: str
1233
  @return: the formatted value (with suffix)
1234

1235
  """
1236
  if units not in ('m', 'g', 't', 'h'):
1237
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1238

    
1239
  suffix = ''
1240

    
1241
  if units == 'm' or (units == 'h' and value < 1024):
1242
    if units == 'h':
1243
      suffix = 'M'
1244
    return "%d%s" % (round(value, 0), suffix)
1245

    
1246
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1247
    if units == 'h':
1248
      suffix = 'G'
1249
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1250

    
1251
  else:
1252
    if units == 'h':
1253
      suffix = 'T'
1254
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1255

    
1256

    
1257
def ParseUnit(input_string):
1258
  """Tries to extract number and scale from the given string.
1259

1260
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1261
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1262
  is always an int in MiB.
1263

1264
  """
1265
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1266
  if not m:
1267
    raise errors.UnitParseError("Invalid format")
1268

    
1269
  value = float(m.groups()[0])
1270

    
1271
  unit = m.groups()[1]
1272
  if unit:
1273
    lcunit = unit.lower()
1274
  else:
1275
    lcunit = 'm'
1276

    
1277
  if lcunit in ('m', 'mb', 'mib'):
1278
    # Value already in MiB
1279
    pass
1280

    
1281
  elif lcunit in ('g', 'gb', 'gib'):
1282
    value *= 1024
1283

    
1284
  elif lcunit in ('t', 'tb', 'tib'):
1285
    value *= 1024 * 1024
1286

    
1287
  else:
1288
    raise errors.UnitParseError("Unknown unit: %s" % unit)
1289

    
1290
  # Make sure we round up
1291
  if int(value) < value:
1292
    value += 1
1293

    
1294
  # Round up to the next multiple of 4
1295
  value = int(value)
1296
  if value % 4:
1297
    value += 4 - value % 4
1298

    
1299
  return value
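
# Examples for ParseUnit (a sketch, not part of the original module); the
# result is always in MiB, rounded up to a multiple of 4:
#
#   ParseUnit("4G")  # -> 4096
#   ParseUnit("9")   # -> 12 (no unit defaults to MiB, rounded up to 12)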
1300

    
1301

    
1302
def ParseCpuMask(cpu_mask):
1303
  """Parse a CPU mask definition and return the list of CPU IDs.
1304

1305
  CPU mask format: comma-separated list of CPU IDs
1306
  or dash-separated ID ranges
1307
  Example: "0-2,5" -> "0,1,2,5"
1308

1309
  @type cpu_mask: str
1310
  @param cpu_mask: CPU mask definition
1311
  @rtype: list of int
1312
  @return: list of CPU IDs
1313

1314
  """
1315
  if not cpu_mask:
1316
    return []
1317
  cpu_list = []
1318
  for range_def in cpu_mask.split(","):
1319
    boundaries = range_def.split("-")
1320
    n_elements = len(boundaries)
1321
    if n_elements > 2:
1322
      raise errors.ParseError("Invalid CPU ID range definition"
1323
                              " (only one hyphen allowed): %s" % range_def)
1324
    try:
1325
      lower = int(boundaries[0])
1326
    except (ValueError, TypeError), err:
1327
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
1328
                              " CPU ID range: %s" % str(err))
1329
    try:
1330
      higher = int(boundaries[-1])
1331
    except (ValueError, TypeError), err:
1332
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
1333
                              " CPU ID range: %s" % str(err))
1334
    if lower > higher:
1335
      raise errors.ParseError("Invalid CPU ID range definition"
1336
                              " (%d > %d): %s" % (lower, higher, range_def))
1337
    cpu_list.extend(range(lower, higher + 1))
1338
  return cpu_list
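
# Example for ParseCpuMask, matching the docstring above (a sketch, not part
# of the original module):
#
#   ParseCpuMask("0-2,5")  # -> [0, 1, 2, 5]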
1339

    
1340

    
1341
def AddAuthorizedKey(file_name, key):
1342
  """Adds an SSH public key to an authorized_keys file.
1343

1344
  @type file_name: str
1345
  @param file_name: path to authorized_keys file
1346
  @type key: str
1347
  @param key: string containing key
1348

1349
  """
1350
  key_fields = key.split()
1351

    
1352
  f = open(file_name, 'a+')
1353
  try:
1354
    nl = True
1355
    for line in f:
1356
      # Ignore whitespace changes
1357
      if line.split() == key_fields:
1358
        break
1359
      nl = line.endswith('\n')
1360
    else:
1361
      if not nl:
1362
        f.write("\n")
1363
      f.write(key.rstrip('\r\n'))
1364
      f.write("\n")
1365
      f.flush()
1366
  finally:
1367
    f.close()
1368

    
1369

    
1370
def RemoveAuthorizedKey(file_name, key):
1371
  """Removes an SSH public key from an authorized_keys file.
1372

1373
  @type file_name: str
1374
  @param file_name: path to authorized_keys file
1375
  @type key: str
1376
  @param key: string containing key
1377

1378
  """
1379
  key_fields = key.split()
1380

    
1381
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1382
  try:
1383
    out = os.fdopen(fd, 'w')
1384
    try:
1385
      f = open(file_name, 'r')
1386
      try:
1387
        for line in f:
1388
          # Ignore whitespace changes while comparing lines
1389
          if line.split() != key_fields:
1390
            out.write(line)
1391

    
1392
        out.flush()
1393
        os.rename(tmpname, file_name)
1394
      finally:
1395
        f.close()
1396
    finally:
1397
      out.close()
1398
  except:
1399
    RemoveFile(tmpname)
1400
    raise
1401

    
1402

    
1403
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1404
  """Sets the name of an IP address and hostname in /etc/hosts.
1405

1406
  @type file_name: str
1407
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1408
  @type ip: str
1409
  @param ip: the IP address
1410
  @type hostname: str
1411
  @param hostname: the hostname to be added
1412
  @type aliases: list
1413
  @param aliases: the list of aliases to add for the hostname
1414

1415
  """
1416
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1417
  # Ensure aliases are unique
1418
  aliases = UniqueSequence([hostname] + aliases)[1:]
1419

    
1420
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1421
  try:
1422
    out = os.fdopen(fd, 'w')
1423
    try:
1424
      f = open(file_name, 'r')
1425
      try:
1426
        for line in f:
1427
          fields = line.split()
1428
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1429
            continue
1430
          out.write(line)
1431

    
1432
        out.write("%s\t%s" % (ip, hostname))
1433
        if aliases:
1434
          out.write(" %s" % ' '.join(aliases))
1435
        out.write('\n')
1436

    
1437
        out.flush()
1438
        os.fsync(out)
1439
        os.chmod(tmpname, 0644)
1440
        os.rename(tmpname, file_name)
1441
      finally:
1442
        f.close()
1443
    finally:
1444
      out.close()
1445
  except:
1446
    RemoveFile(tmpname)
1447
    raise
1448

    
1449

    
1450
def AddHostToEtcHosts(hostname):
1451
  """Wrapper around SetEtcHostsEntry.
1452

1453
  @type hostname: str
1454
  @param hostname: a hostname that will be resolved and added to
1455
      L{constants.ETC_HOSTS}
1456

1457
  """
1458
  hi = netutils.HostInfo(name=hostname)
1459
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1460

    
1461

    
1462
def RemoveEtcHostsEntry(file_name, hostname):
1463
  """Removes a hostname from /etc/hosts.
1464

1465
  IP addresses without names are removed from the file.
1466

1467
  @type file_name: str
1468
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1469
  @type hostname: str
1470
  @param hostname: the hostname to be removed
1471

1472
  """
1473
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1474
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1475
  try:
1476
    out = os.fdopen(fd, 'w')
1477
    try:
1478
      f = open(file_name, 'r')
1479
      try:
1480
        for line in f:
1481
          fields = line.split()
1482
          if len(fields) > 1 and not fields[0].startswith('#'):
1483
            names = fields[1:]
1484
            if hostname in names:
1485
              while hostname in names:
1486
                names.remove(hostname)
1487
              if names:
1488
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1489
              continue
1490

    
1491
          out.write(line)
1492

    
1493
        out.flush()
1494
        os.fsync(out)
1495
        os.chmod(tmpname, 0644)
1496
        os.rename(tmpname, file_name)
1497
      finally:
1498
        f.close()
1499
    finally:
1500
      out.close()
1501
  except:
1502
    RemoveFile(tmpname)
1503
    raise
1504

    
1505

    
1506
def RemoveHostFromEtcHosts(hostname):
1507
  """Wrapper around RemoveEtcHostsEntry.
1508

1509
  @type hostname: str
1510
  @param hostname: hostname that will be resolved and its
1511
      full and short names will be removed from
1512
      L{constants.ETC_HOSTS}
1513

1514
  """
1515
  hi = netutils.HostInfo(name=hostname)
1516
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
1517
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
1518

    
1519

    
1520
def TimestampForFilename():
1521
  """Returns the current time formatted for filenames.
1522

1523
  The format doesn't contain colons, as some shells and applications
  treat them as separators.
1525

1526
  """
1527
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1528

    
1529

    
1530
def CreateBackup(file_name):
1531
  """Creates a backup of a file.
1532

1533
  @type file_name: str
1534
  @param file_name: file to be backed up
1535
  @rtype: str
1536
  @return: the path to the newly created backup
1537
  @raise errors.ProgrammerError: for invalid file names
1538

1539
  """
1540
  if not os.path.isfile(file_name):
1541
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1542
                                file_name)
1543

    
1544
  prefix = ("%s.backup-%s." %
1545
            (os.path.basename(file_name), TimestampForFilename()))
1546
  dir_name = os.path.dirname(file_name)
1547

    
1548
  fsrc = open(file_name, 'rb')
1549
  try:
1550
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1551
    fdst = os.fdopen(fd, 'wb')
1552
    try:
1553
      logging.debug("Backing up %s at %s", file_name, backup_name)
1554
      shutil.copyfileobj(fsrc, fdst)
1555
    finally:
1556
      fdst.close()
1557
  finally:
1558
    fsrc.close()
1559

    
1560
  return backup_name
1561

    
1562

    
1563
def ShellQuote(value):
1564
  """Quotes shell argument according to POSIX.
1565

1566
  @type value: str
1567
  @param value: the argument to be quoted
1568
  @rtype: str
1569
  @return: the quoted value
1570

1571
  """
1572
  if _re_shell_unquoted.match(value):
1573
    return value
1574
  else:
1575
    return "'%s'" % value.replace("'", "'\\''")
1576

    
1577

    
1578
def ShellQuoteArgs(args):
1579
  """Quotes a list of shell arguments.
1580

1581
  @type args: list
1582
  @param args: list of arguments to be quoted
1583
  @rtype: str
1584
  @return: the quoted arguments concatenated with spaces
1585

1586
  """
1587
  return ' '.join([ShellQuote(i) for i in args])
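
# Examples for ShellQuote and ShellQuoteArgs (a sketch, not part of the
# original module):
#
#   ShellQuote("simple-value")        # -> simple-value (no quoting needed)
#   ShellQuoteArgs(["echo", "it's"])  # -> echo 'it'\''s'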
1588

    
1589

    
1590
class ShellWriter:
1591
  """Helper class to write scripts with indentation.
1592

1593
  """
1594
  INDENT_STR = "  "
1595

    
1596
  def __init__(self, fh):
1597
    """Initializes this class.
1598

1599
    """
1600
    self._fh = fh
1601
    self._indent = 0
1602

    
1603
  def IncIndent(self):
1604
    """Increase indentation level by 1.
1605

1606
    """
1607
    self._indent += 1
1608

    
1609
  def DecIndent(self):
1610
    """Decrease indentation level by 1.
1611

1612
    """
1613
    assert self._indent > 0
1614
    self._indent -= 1
1615

    
1616
  def Write(self, txt, *args):
1617
    """Write line to output file.
1618

1619
    """
1620
    assert self._indent >= 0
1621

    
1622
    self._fh.write(self._indent * self.INDENT_STR)
1623

    
1624
    if args:
1625
      self._fh.write(txt % args)
1626
    else:
1627
      self._fh.write(txt)
1628

    
1629
    self._fh.write("\n")
1630

    
1631

    
1632
def ListVisibleFiles(path):
1633
  """Returns a list of visible files in a directory.
1634

1635
  @type path: str
1636
  @param path: the directory to enumerate
1637
  @rtype: list
1638
  @return: the list of all files not starting with a dot
1639
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1640

1641
  """
1642
  if not IsNormAbsPath(path):
1643
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1644
                                 " absolute/normalized: '%s'" % path)
1645
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1646
  return files
1647

    
1648

    
1649
def GetHomeDir(user, default=None):
1650
  """Try to get the homedir of the given user.
1651

1652
  The user can be passed either as a string (denoting the name) or as
1653
  an integer (denoting the user id). If the user is not found, the
1654
  'default' argument is returned, which defaults to None.
1655

1656
  """
1657
  try:
1658
    if isinstance(user, basestring):
1659
      result = pwd.getpwnam(user)
1660
    elif isinstance(user, (int, long)):
1661
      result = pwd.getpwuid(user)
1662
    else:
1663
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1664
                                   type(user))
1665
  except KeyError:
1666
    return default
1667
  return result.pw_dir
1668

    
1669

    
1670
def NewUUID():
1671
  """Returns a random UUID.
1672

1673
  @note: This is a Linux-specific method as it uses the /proc
1674
      filesystem.
1675
  @rtype: str
1676

1677
  """
1678
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1679

    
1680

    
1681
def GenerateSecret(numbytes=20):
1682
  """Generates a random secret.
1683

1684
  This will generate a pseudo-random secret returning a hex string
1685
  (so that it can be used where an ASCII string is needed).
1686

1687
  @param numbytes: the number of bytes which will be represented by the returned
1688
      string (defaulting to 20, the length of a SHA1 hash)
1689
  @rtype: str
1690
  @return: a hex representation of the pseudo-random sequence
1691

1692
  """
1693
  return os.urandom(numbytes).encode('hex')
1694

    
1695

    
1696
def EnsureDirs(dirs):
1697
  """Make required directories, if they don't exist.
1698

1699
  @param dirs: list of tuples (dir_name, dir_mode)
1700
  @type dirs: list of (string, integer)
1701

1702
  """
1703
  for dir_name, dir_mode in dirs:
1704
    try:
1705
      os.mkdir(dir_name, dir_mode)
1706
    except EnvironmentError, err:
1707
      if err.errno != errno.EEXIST:
1708
        raise errors.GenericError("Cannot create needed directory"
1709
                                  " '%s': %s" % (dir_name, err))
1710
    try:
1711
      os.chmod(dir_name, dir_mode)
1712
    except EnvironmentError, err:
1713
      raise errors.GenericError("Cannot change directory permissions on"
1714
                                " '%s': %s" % (dir_name, err))
1715
    if not os.path.isdir(dir_name):
1716
      raise errors.GenericError("%s is not a directory" % dir_name)
1717

    
1718

    
1719
def ReadFile(file_name, size=-1):
1720
  """Reads a file.
1721

1722
  @type size: int
1723
  @param size: Read at most size bytes (if negative, entire file)
1724
  @rtype: str
1725
  @return: the (possibly partial) content of the file
1726

1727
  """
1728
  f = open(file_name, "r")
1729
  try:
1730
    return f.read(size)
1731
  finally:
1732
    f.close()
1733

    
1734

    
1735
def WriteFile(file_name, fn=None, data=None,
1736
              mode=None, uid=-1, gid=-1,
1737
              atime=None, mtime=None, close=True,
1738
              dry_run=False, backup=False,
1739
              prewrite=None, postwrite=None):
1740
  """(Over)write a file atomically.
1741

1742
  The file_name and either fn (a function taking one argument, the
1743
  file descriptor, and which should write the data to it) or data (the
1744
  contents of the file) must be passed. The other arguments are
1745
  optional and allow setting the file mode, owner and group, and the
1746
  mtime/atime of the file.
1747

1748
  If the function doesn't raise an exception, it has succeeded and the
1749
  target file has the new contents. If the function has raised an
1750
  exception, an existing target file should be unmodified and the
1751
  temporary file should be removed.
1752

1753
  @type file_name: str
1754
  @param file_name: the target filename
1755
  @type fn: callable
1756
  @param fn: content writing function, called with
1757
      file descriptor as parameter
1758
  @type data: str
1759
  @param data: contents of the file
1760
  @type mode: int
1761
  @param mode: file mode
1762
  @type uid: int
1763
  @param uid: the owner of the file
1764
  @type gid: int
1765
  @param gid: the group of the file
1766
  @type atime: int
1767
  @param atime: a custom access time to be set on the file
1768
  @type mtime: int
1769
  @param mtime: a custom modification time to be set on the file
1770
  @type close: boolean
1771
  @param close: whether to close file after writing it
1772
  @type prewrite: callable
1773
  @param prewrite: function to be called before writing content
1774
  @type postwrite: callable
1775
  @param postwrite: function to be called after writing content
1776

1777
  @rtype: None or int
1778
  @return: None if the 'close' parameter evaluates to True,
1779
      otherwise the file descriptor
1780

1781
  @raise errors.ProgrammerError: if any of the arguments are not valid
1782

1783
  """
1784
  if not os.path.isabs(file_name):
1785
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1786
                                 " absolute: '%s'" % file_name)
1787

    
1788
  if [fn, data].count(None) != 1:
1789
    raise errors.ProgrammerError("fn or data required")
1790

    
1791
  if [atime, mtime].count(None) == 1:
1792
    raise errors.ProgrammerError("Both atime and mtime must be either"
1793
                                 " set or None")
1794

    
1795
  if backup and not dry_run and os.path.isfile(file_name):
1796
    CreateBackup(file_name)
1797

    
1798
  dir_name, base_name = os.path.split(file_name)
1799
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1800
  do_remove = True
1801
  # here we need to make sure we remove the temp file, if any error
1802
  # leaves it in place
1803
  try:
1804
    if uid != -1 or gid != -1:
1805
      os.chown(new_name, uid, gid)
1806
    if mode:
1807
      os.chmod(new_name, mode)
1808
    if callable(prewrite):
1809
      prewrite(fd)
1810
    if data is not None:
1811
      os.write(fd, data)
1812
    else:
1813
      fn(fd)
1814
    if callable(postwrite):
1815
      postwrite(fd)
1816
    os.fsync(fd)
1817
    if atime is not None and mtime is not None:
1818
      os.utime(new_name, (atime, mtime))
1819
    if not dry_run:
1820
      os.rename(new_name, file_name)
1821
      do_remove = False
1822
  finally:
1823
    if close:
1824
      os.close(fd)
1825
      result = None
1826
    else:
1827
      result = fd
1828
    if do_remove:
1829
      RemoveFile(new_name)
1830

    
1831
  return result
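# A minimal usage sketch for WriteFile (path and contents are hypothetical):
# write a small file atomically, keeping a backup of any previous version.
#   WriteFile("/tmp/example.conf", data="key = value\n", mode=0644, backup=True)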
1832

    
1833

    
1834
def ReadOneLineFile(file_name, strict=False):
1835
  """Return the first non-empty line from a file.
1836

1837
  @type strict: boolean
1838
  @param strict: if True, abort if the file has more than one
1839
      non-empty line
1840

1841
  """
1842
  file_lines = ReadFile(file_name).splitlines()
1843
  full_lines = filter(bool, file_lines)
1844
  if not file_lines or not full_lines:
1845
    raise errors.GenericError("No data in one-liner file %s" % file_name)
1846
  elif strict and len(full_lines) > 1:
1847
    raise errors.GenericError("Too many lines in one-liner file %s" %
1848
                              file_name)
1849
  return full_lines[0]
1850

    
1851

    
1852
def FirstFree(seq, base=0):
1853
  """Returns the first non-existing integer from seq.
1854

1855
  The seq argument should be a sorted list of positive integers. The
1856
  first time the index of an element is smaller than the element
1857
  value, the index will be returned.
1858

1859
  The base argument is used to start at a different offset,
1860
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1861

1862
  Example: C{[0, 1, 3]} will return I{2}.
1863

1864
  @type seq: sequence
1865
  @param seq: the sequence to be analyzed.
1866
  @type base: int
1867
  @param base: use this value as the base index of the sequence
1868
  @rtype: int
1869
  @return: the first non-used index in the sequence
1870

1871
  """
1872
  for idx, elem in enumerate(seq):
1873
    assert elem >= base, "Passed element is higher than base offset"
1874
    if elem > idx + base:
1875
      # idx is not used
1876
      return idx + base
1877
  return None
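# A minimal usage sketch for FirstFree (illustrative values only):
#   >>> FirstFree([0, 1, 3])
#   2
#   >>> FirstFree([3, 4, 6], base=3)
#   5
#   >>> FirstFree([0, 1, 2]) is None   # no gap in the sequence
#   True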
1878

    
1879

    
1880
def SingleWaitForFdCondition(fdobj, event, timeout):
1881
  """Waits for a condition to occur on the socket.
1882

1883
  Immediately returns at the first interruption.
1884

1885
  @type fdobj: integer or object supporting a fileno() method
1886
  @param fdobj: entity to wait for events on
1887
  @type event: integer
1888
  @param event: ORed condition (see select module)
1889
  @type timeout: float or None
1890
  @param timeout: Timeout in seconds
1891
  @rtype: int or None
1892
  @return: None for timeout, otherwise the occurred conditions
1893

1894
  """
1895
  check = (event | select.POLLPRI |
1896
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
1897

    
1898
  if timeout is not None:
1899
    # Poller object expects milliseconds
1900
    timeout *= 1000
1901

    
1902
  poller = select.poll()
1903
  poller.register(fdobj, event)
1904
  try:
1905
    # TODO: If the main thread receives a signal and we have no timeout, we
1906
    # could wait forever. This should check a global "quit" flag or something
1907
    # every so often.
1908
    io_events = poller.poll(timeout)
1909
  except select.error, err:
1910
    if err[0] != errno.EINTR:
1911
      raise
1912
    io_events = []
1913
  if io_events and io_events[0][1] & check:
1914
    return io_events[0][1]
1915
  else:
1916
    return None
1917

    
1918

    
1919
class FdConditionWaiterHelper(object):
1920
  """Retry helper for WaitForFdCondition.
1921

1922
  This class contains the retried function and the wait function that make sure
1923
  WaitForFdCondition can continue waiting until the timeout is actually
1924
  expired.
1925

1926
  """
1927

    
1928
  def __init__(self, timeout):
1929
    self.timeout = timeout
1930

    
1931
  def Poll(self, fdobj, event):
1932
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1933
    if result is None:
1934
      raise RetryAgain()
1935
    else:
1936
      return result
1937

    
1938
  def UpdateTimeout(self, timeout):
1939
    self.timeout = timeout
1940

    
1941

    
1942
def WaitForFdCondition(fdobj, event, timeout):
1943
  """Waits for a condition to occur on the socket.
1944

1945
  Retries until the timeout is expired, even if interrupted.
1946

1947
  @type fdobj: integer or object supporting a fileno() method
1948
  @param fdobj: entity to wait for events on
1949
  @type event: integer
1950
  @param event: ORed condition (see select module)
1951
  @type timeout: float or None
1952
  @param timeout: Timeout in seconds
1953
  @rtype: int or None
1954
  @return: None for timeout, otherwise the occurred conditions
1955

1956
  """
1957
  if timeout is not None:
1958
    retrywaiter = FdConditionWaiterHelper(timeout)
1959
    try:
1960
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1961
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1962
    except RetryTimeout:
1963
      result = None
1964
  else:
1965
    result = None
1966
    while result is None:
1967
      result = SingleWaitForFdCondition(fdobj, event, timeout)
1968
  return result
1969

    
1970

    
1971
def UniqueSequence(seq):
1972
  """Returns a list with unique elements.
1973

1974
  Element order is preserved.
1975

1976
  @type seq: sequence
1977
  @param seq: the sequence with the source elements
1978
  @rtype: list
1979
  @return: list of unique elements from seq
1980

1981
  """
1982
  seen = set()
1983
  return [i for i in seq if i not in seen and not seen.add(i)]
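# A minimal usage sketch for UniqueSequence (illustrative values only):
# duplicates are dropped while the first-seen order is preserved.
#   >>> UniqueSequence([1, 2, 1, 3, 2])
#   [1, 2, 3]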
1984

    
1985

    
1986
def NormalizeAndValidateMac(mac):
1987
  """Normalizes and check if a MAC address is valid.
1988

1989
  Checks whether the supplied MAC address is formally correct, only
1990
  accepts the colon-separated format. The address is normalized to lowercase.
1991

1992
  @type mac: str
1993
  @param mac: the MAC to be validated
1994
  @rtype: str
1995
  @return: returns the normalized and validated MAC.
1996

1997
  @raise errors.OpPrereqError: If the MAC isn't valid
1998

1999
  """
2000
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
2001
  if not mac_check.match(mac):
2002
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
2003
                               mac, errors.ECODE_INVAL)
2004

    
2005
  return mac.lower()
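# A minimal usage sketch for NormalizeAndValidateMac (illustrative values only):
#   >>> NormalizeAndValidateMac("AA:00:bb:01:CC:02")
#   'aa:00:bb:01:cc:02'
# A malformed address (e.g. "aa-00-bb-01-cc-02") raises errors.OpPrereqError.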
2006

    
2007

    
2008
def TestDelay(duration):
2009
  """Sleep for a fixed amount of time.
2010

2011
  @type duration: float
2012
  @param duration: the sleep duration
2013
  @rtype: tuple of (boolean, string or None)
2014
  @return: (False, error message) for a negative duration, else (True, None)
2015

2016
  """
2017
  if duration < 0:
2018
    return False, "Invalid sleep duration"
2019
  time.sleep(duration)
2020
  return True, None
2021

    
2022

    
2023
def _CloseFDNoErr(fd, retries=5):
2024
  """Close a file descriptor ignoring errors.
2025

2026
  @type fd: int
2027
  @param fd: the file descriptor
2028
  @type retries: int
2029
  @param retries: how many retries to make, in case we get any
2030
      other error than EBADF
2031

2032
  """
2033
  try:
2034
    os.close(fd)
2035
  except OSError, err:
2036
    if err.errno != errno.EBADF:
2037
      if retries > 0:
2038
        _CloseFDNoErr(fd, retries - 1)
2039
    # else either it's closed already or we're out of retries, so we
2040
    # ignore this and go on
2041

    
2042

    
2043
def CloseFDs(noclose_fds=None):
2044
  """Close file descriptors.
2045

2046
  This closes all file descriptors above 2 (i.e. except
2047
  stdin/out/err).
2048

2049
  @type noclose_fds: list or None
2050
  @param noclose_fds: if given, it denotes a list of file descriptor
2051
      that should not be closed
2052

2053
  """
2054
  # Default maximum for the number of available file descriptors.
2055
  if 'SC_OPEN_MAX' in os.sysconf_names:
2056
    try:
2057
      MAXFD = os.sysconf('SC_OPEN_MAX')
2058
      if MAXFD < 0:
2059
        MAXFD = 1024
2060
    except OSError:
2061
      MAXFD = 1024
2062
  else:
2063
    MAXFD = 1024
2064
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2065
  if (maxfd == resource.RLIM_INFINITY):
2066
    maxfd = MAXFD
2067

    
2068
  # Iterate through and close all file descriptors (except the standard ones)
2069
  for fd in range(3, maxfd):
2070
    if noclose_fds and fd in noclose_fds:
2071
      continue
2072
    _CloseFDNoErr(fd)
2073

    
2074

    
2075
def Mlockall(_ctypes=ctypes):
2076
  """Lock current process' virtual address space into RAM.
2077

2078
  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2079
  see mlock(2) for more details. This function requires ctypes module.
2080

2081
  @raises errors.NoCtypesError: if ctypes module is not found
2082

2083
  """
2084
  if _ctypes is None:
2085
    raise errors.NoCtypesError()
2086

    
2087
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
2088
  if libc is None:
2089
    logging.error("Cannot set memory lock, ctypes cannot load libc")
2090
    return
2091

    
2092
  # Some older versions of the ctypes module don't have built-in functionality
2093
  # to access the errno global variable, where function error codes are stored.
2094
  # By declaring this variable as a pointer to an integer we can then access
2095
  # its value correctly, should the mlockall call fail, in order to see what
2096
  # the actual error code was.
2097
  # pylint: disable-msg=W0212
2098
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)
2099

    
2100
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2101
    # pylint: disable-msg=W0212
2102
    logging.error("Cannot set memory lock: %s",
2103
                  os.strerror(libc.__errno_location().contents.value))
2104
    return
2105

    
2106
  logging.debug("Memory lock set")
2107

    
2108

    
2109
def Daemonize(logfile, run_uid, run_gid):
2110
  """Daemonize the current process.
2111

2112
  This detaches the current process from the controlling terminal and
2113
  runs it in the background as a daemon.
2114

2115
  @type logfile: str
2116
  @param logfile: the logfile to which we should redirect stdout/stderr
2117
  @type run_uid: int
2118
  @param run_uid: Run the child under this uid
2119
  @type run_gid: int
2120
  @param run_gid: Run the child under this gid
2121
  @rtype: int
2122
  @return: the value zero
2123

2124
  """
2125
  # pylint: disable-msg=W0212
2126
  # yes, we really want os._exit
2127
  UMASK = 077
2128
  WORKDIR = "/"
2129

    
2130
  # this might fail
2131
  pid = os.fork()
2132
  if (pid == 0):  # The first child.
2133
    os.setsid()
2134
    # FIXME: When removing again and moving to start-stop-daemon privilege drop
2135
    #        make sure to check for config permission and bail out when invoked
2136
    #        with wrong user.
2137
    os.setgid(run_gid)
2138
    os.setuid(run_uid)
2139
    # this might fail
2140
    pid = os.fork() # Fork a second child.
2141
    if (pid == 0):  # The second child.
2142
      os.chdir(WORKDIR)
2143
      os.umask(UMASK)
2144
    else:
2145
      # exit() or _exit()?  See below.
2146
      os._exit(0) # Exit parent (the first child) of the second child.
2147
  else:
2148
    os._exit(0) # Exit parent of the first child.
2149

    
2150
  for fd in range(3):
2151
    _CloseFDNoErr(fd)
2152
  i = os.open("/dev/null", os.O_RDONLY) # stdin
2153
  assert i == 0, "Can't close/reopen stdin"
2154
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2155
  assert i == 1, "Can't close/reopen stdout"
2156
  # Duplicate standard output to standard error.
2157
  os.dup2(1, 2)
2158
  return 0
2159

    
2160

    
2161
def DaemonPidFileName(name):
2162
  """Compute a ganeti pid file absolute path
2163

2164
  @type name: str
2165
  @param name: the daemon name
2166
  @rtype: str
2167
  @return: the full path to the pidfile corresponding to the given
2168
      daemon name
2169

2170
  """
2171
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2172

    
2173

    
2174
def EnsureDaemon(name):
2175
  """Check for and start daemon if not alive.
2176

2177
  """
2178
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2179
  if result.failed:
2180
    logging.error("Can't start daemon '%s', failure %s, output: %s",
2181
                  name, result.fail_reason, result.output)
2182
    return False
2183

    
2184
  return True
2185

    
2186

    
2187
def StopDaemon(name):
2188
  """Stop daemon
2189

2190
  """
2191
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2192
  if result.failed:
2193
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
2194
                  name, result.fail_reason, result.output)
2195
    return False
2196

    
2197
  return True
2198

    
2199

    
2200
def WritePidFile(name):
2201
  """Write the current process pidfile.
2202

2203
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2204

2205
  @type name: str
2206
  @param name: the daemon name to use
2207
  @raise errors.GenericError: if the pid file already exists and
2208
      points to a live process
2209

2210
  """
2211
  pid = os.getpid()
2212
  pidfilename = DaemonPidFileName(name)
2213
  if IsProcessAlive(ReadPidFile(pidfilename)):
2214
    raise errors.GenericError("%s contains a live process" % pidfilename)
2215

    
2216
  WriteFile(pidfilename, data="%d\n" % pid)
2217

    
2218

    
2219
def RemovePidFile(name):
2220
  """Remove the current process pidfile.
2221

2222
  Any errors are ignored.
2223

2224
  @type name: str
2225
  @param name: the daemon name used to derive the pidfile name
2226

2227
  """
2228
  pidfilename = DaemonPidFileName(name)
2229
  # TODO: we could check here that the file contains our pid
2230
  try:
2231
    RemoveFile(pidfilename)
2232
  except: # pylint: disable-msg=W0702
2233
    pass
2234

    
2235

    
2236
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2237
                waitpid=False):
2238
  """Kill a process given by its pid.
2239

2240
  @type pid: int
2241
  @param pid: The PID to terminate.
2242
  @type signal_: int
2243
  @param signal_: The signal to send, by default SIGTERM
2244
  @type timeout: int
2245
  @param timeout: The timeout after which, if the process is still alive,
2246
                  a SIGKILL will be sent. If not positive, no such checking
2247
                  will be done
2248
  @type waitpid: boolean
2249
  @param waitpid: If true, we should waitpid on this process after
2250
      sending signals, since it's our own child and otherwise it
2251
      would remain as zombie
2252

2253
  """
2254
  def _helper(pid, signal_, wait):
2255
    """Simple helper to encapsulate the kill/waitpid sequence"""
2256
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2257
      try:
2258
        os.waitpid(pid, os.WNOHANG)
2259
      except OSError:
2260
        pass
2261

    
2262
  if pid <= 0:
2263
    # kill with pid=0 == suicide
2264
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2265

    
2266
  if not IsProcessAlive(pid):
2267
    return
2268

    
2269
  _helper(pid, signal_, waitpid)
2270

    
2271
  if timeout <= 0:
2272
    return
2273

    
2274
  def _CheckProcess():
2275
    if not IsProcessAlive(pid):
2276
      return
2277

    
2278
    try:
2279
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2280
    except OSError:
2281
      raise RetryAgain()
2282

    
2283
    if result_pid > 0:
2284
      return
2285

    
2286
    raise RetryAgain()
2287

    
2288
  try:
2289
    # Wait up to $timeout seconds
2290
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2291
  except RetryTimeout:
2292
    pass
2293

    
2294
  if IsProcessAlive(pid):
2295
    # Kill process if it's still alive
2296
    _helper(pid, signal.SIGKILL, waitpid)
2297

    
2298

    
2299
def FindFile(name, search_path, test=os.path.exists):
2300
  """Look for a filesystem object in a given path.
2301

2302
  This is an abstract method to search for filesystem object (files,
2303
  dirs) under a given search path.
2304

2305
  @type name: str
2306
  @param name: the name to look for
2307
  @type search_path: str
2308
  @param search_path: location to start at
2309
  @type test: callable
2310
  @param test: a function taking one argument that should return True
2311
      if the a given object is valid; the default value is
2312
      os.path.exists, causing only existing files to be returned
2313
  @rtype: str or None
2314
  @return: full path to the object if found, None otherwise
2315

2316
  """
2317
  # validate the filename mask
2318
  if constants.EXT_PLUGIN_MASK.match(name) is None:
2319
    logging.critical("Invalid value passed for external script name: '%s'",
2320
                     name)
2321
    return None
2322

    
2323
  for dir_name in search_path:
2324
    # FIXME: investigate switch to PathJoin
2325
    item_name = os.path.sep.join([dir_name, name])
2326
    # check the user test and that we're indeed resolving to the given
2327
    # basename
2328
    if test(item_name) and os.path.basename(item_name) == name:
2329
      return item_name
2330
  return None
2331

    
2332

    
2333
def CheckVolumeGroupSize(vglist, vgname, minsize):
2334
  """Checks if the volume group list is valid.
2335

2336
  The function will check if a given volume group is in the list of
2337
  volume groups and has a minimum size.
2338

2339
  @type vglist: dict
2340
  @param vglist: dictionary of volume group names and their size
2341
  @type vgname: str
2342
  @param vgname: the volume group we should check
2343
  @type minsize: int
2344
  @param minsize: the minimum size we accept
2345
  @rtype: None or str
2346
  @return: None for success, otherwise the error message
2347

2348
  """
2349
  vgsize = vglist.get(vgname, None)
2350
  if vgsize is None:
2351
    return "volume group '%s' missing" % vgname
2352
  elif vgsize < minsize:
2353
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2354
            (vgname, minsize, vgsize))
2355
  return None
2356

    
2357

    
2358
def SplitTime(value):
2359
  """Splits time as floating point number into a tuple.
2360

2361
  @param value: Time in seconds
2362
  @type value: int or float
2363
  @return: Tuple containing (seconds, microseconds)
2364

2365
  """
2366
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2367

    
2368
  assert 0 <= seconds, \
2369
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2370
  assert 0 <= microseconds <= 999999, \
2371
    "Microseconds must be 0-999999, but are %s" % microseconds
2372

    
2373
  return (int(seconds), int(microseconds))
2374

    
2375

    
2376
def MergeTime(timetuple):
2377
  """Merges a tuple into time as a floating point number.
2378

2379
  @param timetuple: Time as tuple, (seconds, microseconds)
2380
  @type timetuple: tuple
2381
  @return: Time as a floating point number expressed in seconds
2382

2383
  """
2384
  (seconds, microseconds) = timetuple
2385

    
2386
  assert 0 <= seconds, \
2387
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2388
  assert 0 <= microseconds <= 999999, \
2389
    "Microseconds must be 0-999999, but are %s" % microseconds
2390

    
2391
  return float(seconds) + (float(microseconds) * 0.000001)
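# A minimal usage sketch for SplitTime/MergeTime (illustrative values only):
# the two functions are inverses of each other.
#   >>> SplitTime(1.5)
#   (1, 500000)
#   >>> MergeTime((1, 500000))
#   1.5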
2392

    
2393

    
2394
class LogFileHandler(logging.FileHandler):
2395
  """Log handler that doesn't fallback to stderr.
2396

2397
  When an error occurs while writing on the logfile, logging.FileHandler tries
2398
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
2399
  the logfile. This class avoids such failures by reporting errors to
  /dev/console.
2400

2401
  """
2402
  def __init__(self, filename, mode="a", encoding=None):
2403
    """Open the specified file and use it as the stream for logging.
2404

2405
    Also open /dev/console to report errors while logging.
2406

2407
    """
2408
    logging.FileHandler.__init__(self, filename, mode, encoding)
2409
    self.console = open(constants.DEV_CONSOLE, "a")
2410

    
2411
  def handleError(self, record): # pylint: disable-msg=C0103
2412
    """Handle errors which occur during an emit() call.
2413

2414
    Try to handle errors with FileHandler method, if it fails write to
2415
    /dev/console.
2416

2417
    """
2418
    try:
2419
      logging.FileHandler.handleError(self, record)
2420
    except Exception: # pylint: disable-msg=W0703
2421
      try:
2422
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
2423
      except Exception: # pylint: disable-msg=W0703
2424
        # Log handler tried everything it could, now just give up
2425
        pass
2426

    
2427

    
2428
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2429
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2430
                 console_logging=False):
2431
  """Configures the logging module.
2432

2433
  @type logfile: str
2434
  @param logfile: the filename to which we should log
2435
  @type debug: integer
2436
  @param debug: if greater than zero, enable debug messages, otherwise
2437
      only those at C{INFO} and above level
2438
  @type stderr_logging: boolean
2439
  @param stderr_logging: whether we should also log to the standard error
2440
  @type program: str
2441
  @param program: the name under which we should log messages
2442
  @type multithreaded: boolean
2443
  @param multithreaded: if True, will add the thread name to the log file
2444
  @type syslog: string
2445
  @param syslog: one of 'no', 'yes', 'only':
2446
      - if no, syslog is not used
2447
      - if yes, syslog is used (in addition to file-logging)
2448
      - if only, only syslog is used
2449
  @type console_logging: boolean
2450
  @param console_logging: if True, will use a FileHandler which falls back to
2451
      the system console if logging fails
2452
  @raise EnvironmentError: if we can't open the log file and
2453
      syslog/stderr logging is disabled
2454

2455
  """
2456
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
2457
  sft = program + "[%(process)d]:"
2458
  if multithreaded:
2459
    fmt += "/%(threadName)s"
2460
    sft += " (%(threadName)s)"
2461
  if debug:
2462
    fmt += " %(module)s:%(lineno)s"
2463
    # no debug info for syslog loggers
2464
  fmt += " %(levelname)s %(message)s"
2465
  # yes, we do want the textual level, as remote syslog will probably
2466
  # lose the error level, and it's easier to grep for it
2467
  sft += " %(levelname)s %(message)s"
2468
  formatter = logging.Formatter(fmt)
2469
  sys_fmt = logging.Formatter(sft)
2470

    
2471
  root_logger = logging.getLogger("")
2472
  root_logger.setLevel(logging.NOTSET)
2473

    
2474
  # Remove all previously setup handlers
2475
  for handler in root_logger.handlers:
2476
    handler.close()
2477
    root_logger.removeHandler(handler)
2478

    
2479
  if stderr_logging:
2480
    stderr_handler = logging.StreamHandler()
2481
    stderr_handler.setFormatter(formatter)
2482
    if debug:
2483
      stderr_handler.setLevel(logging.NOTSET)
2484
    else:
2485
      stderr_handler.setLevel(logging.CRITICAL)
2486
    root_logger.addHandler(stderr_handler)
2487

    
2488
  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2489
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
2490
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2491
                                                    facility)
2492
    syslog_handler.setFormatter(sys_fmt)
2493
    # Never enable debug over syslog
2494
    syslog_handler.setLevel(logging.INFO)
2495
    root_logger.addHandler(syslog_handler)
2496

    
2497
  if syslog != constants.SYSLOG_ONLY:
2498
    # this can fail, if the logging directories are not setup or we have
2499
    # a permission problem; in this case, it's best to log but ignore
2500
    # the error if stderr_logging is True, and if false we re-raise the
2501
    # exception since otherwise we could run but without any logs at all
2502
    try:
2503
      if console_logging:
2504
        logfile_handler = LogFileHandler(logfile)
2505
      else:
2506
        logfile_handler = logging.FileHandler(logfile)
2507
      logfile_handler.setFormatter(formatter)
2508
      if debug:
2509
        logfile_handler.setLevel(logging.DEBUG)
2510
      else:
2511
        logfile_handler.setLevel(logging.INFO)
2512
      root_logger.addHandler(logfile_handler)
2513
    except EnvironmentError:
2514
      if stderr_logging or syslog == constants.SYSLOG_YES:
2515
        logging.exception("Failed to enable logging to file '%s'", logfile)
2516
      else:
2517
        # we need to re-raise the exception
2518
        raise
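# A minimal usage sketch for SetupLogging (the log file path and program name
# are hypothetical): log to a file and, at debug level, also to stderr.
#   SetupLogging("/var/log/ganeti/example.log", debug=1, stderr_logging=True,
#                program="example-daemon")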
2519

    
2520

    
2521
def IsNormAbsPath(path):
2522
  """Check whether a path is absolute and also normalized
2523

2524
  This prevents things like /dir/../../other/path from being considered valid.
2525

2526
  """
2527
  return os.path.normpath(path) == path and os.path.isabs(path)
2528

    
2529

    
2530
def PathJoin(*args):
2531
  """Safe-join a list of path components.
2532

2533
  Requirements:
2534
      - the first argument must be an absolute path
2535
      - no component in the path must have backtracking (e.g. /../),
2536
        since we check for normalization at the end
2537

2538
  @param args: the path components to be joined
2539
  @raise ValueError: for invalid paths
2540

2541
  """
2542
  # ensure we're having at least one path passed in
2543
  assert args
2544
  # ensure the first component is an absolute and normalized path name
2545
  root = args[0]
2546
  if not IsNormAbsPath(root):
2547
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2548
  result = os.path.join(*args)
2549
  # ensure that the whole path is normalized
2550
  if not IsNormAbsPath(result):
2551
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2552
  # check that we're still under the original prefix
2553
  prefix = os.path.commonprefix([root, result])
2554
  if prefix != root:
2555
    raise ValueError("Error: path joining resulted in different prefix"
2556
                     " (%s != %s)" % (prefix, root))
2557
  return result
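# A minimal usage sketch for PathJoin (paths are hypothetical):
#   >>> PathJoin("/tmp", "subdir", "file.txt")
#   '/tmp/subdir/file.txt'
# A component that escapes the prefix, e.g. PathJoin("/tmp", "../etc"),
# raises ValueError.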
2558

    
2559

    
2560
def TailFile(fname, lines=20):
2561
  """Return the last lines from a file.
2562

2563
  @note: this function will only read and parse the last 4KB of
2564
      the file; if the lines are very long, it could be that less
2565
      than the requested number of lines are returned
2566

2567
  @param fname: the file name
2568
  @type lines: int
2569
  @param lines: the (maximum) number of lines to return
2570

2571
  """
2572
  fd = open(fname, "r")
2573
  try:
2574
    fd.seek(0, 2)
2575
    pos = fd.tell()
2576
    pos = max(0, pos-4096)
2577
    fd.seek(pos, 0)
2578
    raw_data = fd.read()
2579
  finally:
2580
    fd.close()
2581

    
2582
  rows = raw_data.splitlines()
2583
  return rows[-lines:]
2584

    
2585

    
2586
def FormatTimestampWithTZ(secs):
2587
  """Formats a Unix timestamp with the local timezone.
2588

2589
  """
2590
  return time.strftime("%F %T %Z", time.gmtime(secs))
2591

    
2592

    
2593
def _ParseAsn1Generalizedtime(value):
2594
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2595

2596
  @type value: string
2597
  @param value: ASN1 GENERALIZEDTIME timestamp
2598

2599
  """
2600
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2601
  if m:
2602
    # We have an offset
2603
    asn1time = m.group(1)
2604
    hours = int(m.group(2))
2605
    minutes = int(m.group(3))
2606
    utcoffset = (60 * hours) + minutes
2607
  else:
2608
    if not value.endswith("Z"):
2609
      raise ValueError("Missing timezone")
2610
    asn1time = value[:-1]
2611
    utcoffset = 0
2612

    
2613
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2614

    
2615
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2616

    
2617
  return calendar.timegm(tt.utctimetuple())
2618

    
2619

    
2620
def GetX509CertValidity(cert):
2621
  """Returns the validity period of the certificate.
2622

2623
  @type cert: OpenSSL.crypto.X509
2624
  @param cert: X509 certificate object
2625

2626
  """
2627
  # The get_notBefore and get_notAfter functions are only supported in
2628
  # pyOpenSSL 0.7 and above.
2629
  try:
2630
    get_notbefore_fn = cert.get_notBefore
2631
  except AttributeError:
2632
    not_before = None
2633
  else:
2634
    not_before_asn1 = get_notbefore_fn()
2635

    
2636
    if not_before_asn1 is None:
2637
      not_before = None
2638
    else:
2639
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2640

    
2641
  try:
2642
    get_notafter_fn = cert.get_notAfter
2643
  except AttributeError:
2644
    not_after = None
2645
  else:
2646
    not_after_asn1 = get_notafter_fn()
2647

    
2648
    if not_after_asn1 is None:
2649
      not_after = None
2650
    else:
2651
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2652

    
2653
  return (not_before, not_after)
2654

    
2655

    
2656
def _VerifyCertificateInner(expired, not_before, not_after, now,
2657
                            warn_days, error_days):
2658
  """Verifies certificate validity.
2659

2660
  @type expired: bool
2661
  @param expired: Whether pyOpenSSL considers the certificate as expired
2662
  @type not_before: number or None
2663
  @param not_before: Unix timestamp before which certificate is not valid
2664
  @type not_after: number or None
2665
  @param not_after: Unix timestamp after which certificate is invalid
2666
  @type now: number
2667
  @param now: Current time as Unix timestamp
2668
  @type warn_days: number or None
2669
  @param warn_days: How many days before expiration a warning should be reported
2670
  @type error_days: number or None
2671
  @param error_days: How many days before expiration an error should be reported
2672

2673
  """
2674
  if expired:
2675
    msg = "Certificate is expired"
2676

    
2677
    if not_before is not None and not_after is not None:
2678
      msg += (" (valid from %s to %s)" %
2679
              (FormatTimestampWithTZ(not_before),
2680
               FormatTimestampWithTZ(not_after)))
2681
    elif not_before is not None:
2682
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2683
    elif not_after is not None:
2684
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2685

    
2686
    return (CERT_ERROR, msg)
2687

    
2688
  elif not_before is not None and not_before > now:
2689
    return (CERT_WARNING,
2690
            "Certificate not yet valid (valid from %s)" %
2691
            FormatTimestampWithTZ(not_before))
2692

    
2693
  elif not_after is not None:
2694
    remaining_days = int((not_after - now) / (24 * 3600))
2695

    
2696
    msg = "Certificate expires in about %d days" % remaining_days
2697

    
2698
    if error_days is not None and remaining_days <= error_days:
2699
      return (CERT_ERROR, msg)
2700

    
2701
    if warn_days is not None and remaining_days <= warn_days:
2702
      return (CERT_WARNING, msg)
2703

    
2704
  return (None, None)
2705

    
2706

    
2707
def VerifyX509Certificate(cert, warn_days, error_days):
2708
  """Verifies a certificate for LUVerifyCluster.
2709

2710
  @type cert: OpenSSL.crypto.X509
2711
  @param cert: X509 certificate object
2712
  @type warn_days: number or None
2713
  @param warn_days: How many days before expiration a warning should be reported
2714
  @type error_days: number or None
2715
  @param error_days: How many days before expiration an error should be reported
2716

2717
  """
2718
  # Depending on the pyOpenSSL version, this can just return (None, None)
2719
  (not_before, not_after) = GetX509CertValidity(cert)
2720

    
2721
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2722
                                 time.time(), warn_days, error_days)
2723

    
2724

    
2725
def SignX509Certificate(cert, key, salt):
2726
  """Sign a X509 certificate.
2727

2728
  An RFC822-like signature header is added in front of the certificate.
2729

2730
  @type cert: OpenSSL.crypto.X509
2731
  @param cert: X509 certificate object
2732
  @type key: string
2733
  @param key: Key for HMAC
2734
  @type salt: string
2735
  @param salt: Salt for HMAC
2736
  @rtype: string
2737
  @return: Serialized and signed certificate in PEM format
2738

2739
  """
2740
  if not VALID_X509_SIGNATURE_SALT.match(salt):
2741
    raise errors.GenericError("Invalid salt: %r" % salt)
2742

    
2743
  # Dumping as PEM here ensures the certificate is in a sane format
2744
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2745

    
2746
  return ("%s: %s/%s\n\n%s" %
2747
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
2748
           Sha1Hmac(key, cert_pem, salt=salt),
2749
           cert_pem))
2750

    
2751

    
2752
def _ExtractX509CertificateSignature(cert_pem):
2753
  """Helper function to extract signature from X509 certificate.
2754

2755
  """
2756
  # Extract signature from original PEM data
2757
  for line in cert_pem.splitlines():
2758
    if line.startswith("---"):
2759
      break
2760

    
2761
    m = X509_SIGNATURE.match(line.strip())
2762
    if m:
2763
      return (m.group("salt"), m.group("sign"))
2764

    
2765
  raise errors.GenericError("X509 certificate signature is missing")
2766

    
2767

    
2768
def LoadSignedX509Certificate(cert_pem, key):
2769
  """Verifies a signed X509 certificate.
2770

2771
  @type cert_pem: string
2772
  @param cert_pem: Certificate in PEM format and with signature header
2773
  @type key: string
2774
  @param key: Key for HMAC
2775
  @rtype: tuple; (OpenSSL.crypto.X509, string)
2776
  @return: X509 certificate object and salt
2777

2778
  """
2779
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2780

    
2781
  # Load certificate
2782
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2783

    
2784
  # Dump again to ensure it's in a sane format
2785
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2786

    
2787
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2788
    raise errors.GenericError("X509 certificate signature is invalid")
2789

    
2790
  return (cert, salt)
2791

    
2792

    
2793
def Sha1Hmac(key, text, salt=None):
2794
  """Calculates the HMAC-SHA1 digest of a text.
2795

2796
  HMAC is defined in RFC2104.
2797

2798
  @type key: string
2799
  @param key: Secret key
2800
  @type text: string
2801

2802
  """
2803
  if salt:
2804
    salted_text = salt + text
2805
  else:
2806
    salted_text = text
2807

    
2808
  return hmac.new(key, salted_text, compat.sha1).hexdigest()
2809

    
2810

    
2811
def VerifySha1Hmac(key, text, digest, salt=None):
2812
  """Verifies the HMAC-SHA1 digest of a text.
2813

2814
  HMAC is defined in RFC2104.
2815

2816
  @type key: string
2817
  @param key: Secret key
2818
  @type text: string
2819
  @type digest: string
2820
  @param digest: Expected digest
2821
  @rtype: bool
2822
  @return: Whether HMAC-SHA1 digest matches
2823

2824
  """
2825
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
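# A minimal usage sketch for Sha1Hmac/VerifySha1Hmac (key, text and salt are
# hypothetical values):
#   digest = Sha1Hmac("secret-key", "some text", salt="0123abcd")
#   VerifySha1Hmac("secret-key", "some text", digest, salt="0123abcd")  # True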
2826

    
2827

    
2828
def SafeEncode(text):
2829
  """Return a 'safe' version of a source string.
2830

2831
  This function mangles the input string and returns a version that
2832
  should be safe to display/encode as ASCII. To this end, we first
2833
  convert it to ASCII using the 'backslashreplace' encoding which
2834
  should get rid of any non-ASCII chars, and then we process it
2835
  through a loop copied from the string repr sources in Python; we
2836
  don't use string_escape anymore since that escapes single quotes and
2837
  backslashes too, and that is too much; and that escaping is not
2838
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2839

2840
  @type text: str or unicode
2841
  @param text: input data
2842
  @rtype: str
2843
  @return: a safe version of text
2844

2845
  """
2846
  if isinstance(text, unicode):
2847
    # only if unicode; if str already, we handle it below
2848
    text = text.encode('ascii', 'backslashreplace')
2849
  resu = ""
2850
  for char in text:
2851
    c = ord(char)
2852
    if char  == '\t':
2853
      resu += r'\t'
2854
    elif char == '\n':
2855
      resu += r'\n'
2856
    elif char == '\r':
2857
      resu += r'\r'
2858
    elif c < 32 or c >= 127: # non-printable
2859
      resu += "\\x%02x" % (c & 0xff)
2860
    else:
2861
      resu += char
2862
  return resu
2863

    
2864

    
2865
def UnescapeAndSplit(text, sep=","):
2866
  """Split and unescape a string based on a given separator.
2867

2868
  This function splits a string based on a separator where the
2869
  separator itself can be escaped in order to be part of an
2870
  element. The escaping rules are (assuming comma is the
2871
  separator):
2872
    - a plain , separates the elements
2873
    - a sequence \\\\, (double backslash plus comma) is handled as a
2874
      backslash plus a separator comma
2875
    - a sequence \, (backslash plus comma) is handled as a
2876
      non-separator comma
2877

2878
  @type text: string
2879
  @param text: the string to split
2880
  @type sep: string
2881
  @param sep: the separator
2882
  @rtype: list
2883
  @return: a list of strings
2884

2885
  """
2886
  # we split the list by sep (with no escaping at this stage)
2887
  slist = text.split(sep)
2888
  # next, we revisit the elements and if any of them ended with an odd
2889
  # number of backslashes, then we join it with the next
2890
  rlist = []
2891
  while slist:
2892
    e1 = slist.pop(0)
2893
    if e1.endswith("\\"):
2894
      num_b = len(e1) - len(e1.rstrip("\\"))
2895
      if num_b % 2 == 1:
2896
        e2 = slist.pop(0)
2897
        # here the backslashes remain (all), and will be reduced in
2898
        # the next step
2899
        rlist.append(e1 + sep + e2)
2900
        continue
2901
    rlist.append(e1)
2902
  # finally, replace backslash-something with something
2903
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
2904
  return rlist
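# A minimal usage sketch for UnescapeAndSplit (illustrative values only): an
# escaped separator stays inside its element, a doubled backslash stands for a
# literal backslash.
#   >>> UnescapeAndSplit("a\\,b,c")
#   ['a,b', 'c']
#   >>> UnescapeAndSplit("a\\\\,b")
#   ['a\\', 'b']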
2905

    
2906

    
2907
def CommaJoin(names):
2908
  """Nicely join a set of identifiers.
2909

2910
  @param names: set, list or tuple
2911
  @return: a string with the formatted results
2912

2913
  """
2914
  return ", ".join([str(val) for val in names])
2915

    
2916

    
2917
def BytesToMebibyte(value):
2918
  """Converts bytes to mebibytes.
2919

2920
  @type value: int
2921
  @param value: Value in bytes
2922
  @rtype: int
2923
  @return: Value in mebibytes
2924

2925
  """
2926
  return int(round(value / (1024.0 * 1024.0), 0))
2927

    
2928

    
2929
def CalculateDirectorySize(path):
2930
  """Calculates the size of a directory recursively.
2931

2932
  @type path: string
2933
  @param path: Path to directory
2934
  @rtype: int
2935
  @return: Size in mebibytes
2936

2937
  """
2938
  size = 0
2939

    
2940
  for (curpath, _, files) in os.walk(path):
2941
    for filename in files:
2942
      st = os.lstat(PathJoin(curpath, filename))
2943
      size += st.st_size
2944

    
2945
  return BytesToMebibyte(size)
2946

    
2947

    
2948
def GetMounts(filename=constants.PROC_MOUNTS):
2949
  """Returns the list of mounted filesystems.
2950

2951
  This function is Linux-specific.
2952

2953
  @param filename: path of mounts file (/proc/mounts by default)
2954
  @rtype: list of tuples
2955
  @return: list of mount entries (device, mountpoint, fstype, options)
2956

2957
  """
2958
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
2959
  data = []
2960
  mountlines = ReadFile(filename).splitlines()
2961
  for line in mountlines:
2962
    device, mountpoint, fstype, options, _ = line.split(None, 4)
2963
    data.append((device, mountpoint, fstype, options))
2964

    
2965
  return data
2966

    
2967

    
2968
def GetFilesystemStats(path):
2969
  """Returns the total and free space on a filesystem.
2970

2971
  @type path: string
2972
  @param path: Path on filesystem to be examined
2973
  @rtype: tuple
2974
  @return: tuple of (Total space, Free space) in mebibytes
2975

2976
  """
2977
  st = os.statvfs(path)
2978

    
2979
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
2980
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
2981
  return (tsize, fsize)
2982

    
2983

    
2984
def RunInSeparateProcess(fn, *args):
2985
  """Runs a function in a separate process.
2986

2987
  Note: Only boolean return values are supported.
2988

2989
  @type fn: callable
2990
  @param fn: Function to be called
2991
  @rtype: bool
2992
  @return: Function's result
2993

2994
  """
2995
  pid = os.fork()
2996
  if pid == 0:
2997
    # Child process
2998
    try:
2999
      # In case the function uses temporary files
3000
      ResetTempfileModule()
3001

    
3002
      # Call function
3003
      result = int(bool(fn(*args)))
3004
      assert result in (0, 1)
3005
    except: # pylint: disable-msg=W0702
3006
      logging.exception("Error while calling function in separate process")
3007
      # 0 and 1 are reserved for the return value
3008
      result = 33
3009

    
3010
    os._exit(result) # pylint: disable-msg=W0212
3011

    
3012
  # Parent process
3013

    
3014
  # Avoid zombies and check exit code
3015
  (_, status) = os.waitpid(pid, 0)
3016

    
3017
  if os.WIFSIGNALED(status):
3018
    exitcode = None
3019
    signum = os.WTERMSIG(status)
3020
  else:
3021
    exitcode = os.WEXITSTATUS(status)
3022
    signum = None
3023

    
3024
  if not (exitcode in (0, 1) and signum is None):
3025
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
3026
                              (exitcode, signum))
3027

    
3028
  return bool(exitcode)
3029

    
3030

    
3031
def IgnoreProcessNotFound(fn, *args, **kwargs):
3032
  """Ignores ESRCH when calling a process-related function.
3033

3034
  ESRCH is raised when a process is not found.
3035

3036
  @rtype: bool
3037
  @return: Whether process was found
3038

3039
  """
3040
  try:
3041
    fn(*args, **kwargs)
3042
  except EnvironmentError, err:
3043
    # Ignore ESRCH
3044
    if err.errno == errno.ESRCH:
3045
      return False
3046
    raise
3047

    
3048
  return True
3049

    
3050

    
3051
def IgnoreSignals(fn, *args, **kwargs):
3052
  """Tries to call a function ignoring failures due to EINTR.
3053

3054
  """
3055
  try:
3056
    return fn(*args, **kwargs)
3057
  except EnvironmentError, err:
3058
    if err.errno == errno.EINTR:
3059
      return None
3060
    else:
3061
      raise
3062
  except (select.error, socket.error), err:
3063
    # In python 2.6 and above select.error is an IOError, so it's handled
3064
    # above, in 2.5 and below it's not, and it's handled here.
3065
    if err.args and err.args[0] == errno.EINTR:
3066
      return None
3067
    else:
3068
      raise
3069

    
3070

    
3071
def LockFile(fd):
3072
  """Locks a file using POSIX locks.
3073

3074
  @type fd: int
3075
  @param fd: the file descriptor we need to lock
3076

3077
  """
3078
  try:
3079
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3080
  except IOError, err:
3081
    if err.errno == errno.EAGAIN:
3082
      raise errors.LockError("File already locked")
3083
    raise
3084

    
3085

    
3086
def FormatTime(val):
3087
  """Formats a time value.
3088

3089
  @type val: float or None
3090
  @param val: the timestamp as returned by time.time()
3091
  @return: a string value or N/A if we don't have a valid timestamp
3092

3093
  """
3094
  if val is None or not isinstance(val, (int, float)):
3095
    return "N/A"
3096
  # these two format codes work on Linux, but they are not guaranteed on all
3097
  # platforms
3098
  return time.strftime("%F %T", time.localtime(val))
3099

    
3100

    
3101
def FormatSeconds(secs):
3102
  """Formats seconds for easier reading.
3103

3104
  @type secs: number
3105
  @param secs: Number of seconds
3106
  @rtype: string
3107
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")
3108

3109
  """
3110
  parts = []
3111

    
3112
  secs = round(secs, 0)
3113

    
3114
  if secs > 0:
3115
    # Negative values would be a bit tricky
3116
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
3117
      (complete, secs) = divmod(secs, one)
3118
      if complete or parts:
3119
        parts.append("%d%s" % (complete, unit))
3120

    
3121
  parts.append("%ds" % secs)
3122

    
3123
  return " ".join(parts)
3124

    
3125

    
3126
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3127
  """Reads the watcher pause file.
3128

3129
  @type filename: string
3130
  @param filename: Path to watcher pause file
3131
  @type now: None, float or int
3132
  @param now: Current time as Unix timestamp
3133
  @type remove_after: int
3134
  @param remove_after: Remove watcher pause file after specified amount of
3135
    seconds past the pause end time
3136

3137
  """
3138
  if now is None:
3139
    now = time.time()
3140

    
3141
  try:
3142
    value = ReadFile(filename)
3143
  except IOError, err:
3144
    if err.errno != errno.ENOENT:
3145
      raise
3146
    value = None
3147

    
3148
  if value is not None:
3149
    try:
3150
      value = int(value)
3151
    except ValueError:
3152
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3153
                       " removing it"), filename)
3154
      RemoveFile(filename)
3155
      value = None
3156

    
3157
    if value is not None:
3158
      # Remove file if it's outdated
3159
      if now > (value + remove_after):
3160
        RemoveFile(filename)
3161
        value = None
3162

    
3163
      elif now > value:
3164
        value = None
3165

    
3166
  return value
3167

    
3168

    
3169
class RetryTimeout(Exception):
3170
  """Retry loop timed out.
3171

3172
  Any arguments which were passed by the retried function to RetryAgain will be
3173
  preserved in RetryTimeout, if it is raised. If such an argument was an exception,
3174
  the RaiseInner helper method will reraise it.
3175

3176
  """
3177
  def RaiseInner(self):
3178
    if self.args and isinstance(self.args[0], Exception):
3179
      raise self.args[0]
3180
    else:
3181
      raise RetryTimeout(*self.args)
3182

    
3183

    
3184
class RetryAgain(Exception):
3185
  """Retry again.
3186

3187
  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
3188
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
3189
  of the RetryTimeout() exception can be used to reraise it.
3190

3191
  """
3192

    
3193

    
3194
class _RetryDelayCalculator(object):
3195
  """Calculator for increasing delays.
3196

3197
  """
3198
  __slots__ = [
3199
    "_factor",
3200
    "_limit",
3201
    "_next",
3202
    "_start",
3203
    ]
3204

    
3205
  def __init__(self, start, factor, limit):
3206
    """Initializes this class.
3207

3208
    @type start: float
3209
    @param start: Initial delay
3210
    @type factor: float
3211
    @param factor: Factor for delay increase
3212
    @type limit: float or None
3213
    @param limit: Upper limit for delay or None for no limit
3214

3215
    """
3216
    assert start > 0.0
3217
    assert factor >= 1.0
3218
    assert limit is None or limit >= 0.0
3219

    
3220
    self._start = start
3221
    self._factor = factor
3222
    self._limit = limit
3223

    
3224
    self._next = start
3225

    
3226
  def __call__(self):
3227
    """Returns current delay and calculates the next one.
3228

3229
    """
3230
    current = self._next
3231

    
3232
    # Update for next run
3233
    if self._limit is None or self._next < self._limit:
3234
      self._next = min(self._limit, self._next * self._factor)
3235

    
3236
    return current
3237

    
3238

    
3239
#: Special delay to specify whole remaining timeout
3240
RETRY_REMAINING_TIME = object()
3241

    
3242

    
3243
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3244
          _time_fn=time.time):
3245
  """Call a function repeatedly until it succeeds.
3246

3247
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3248
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3249
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
3250

3251
  C{delay} can be one of the following:
3252
    - callable returning the delay length as a float
3253
    - Tuple of (start, factor, limit)
3254
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3255
      useful when overriding L{wait_fn} to wait for an external event)
3256
    - A static delay as a number (int or float)
3257

3258
  @type fn: callable
3259
  @param fn: Function to be called
3260
  @param delay: Either a callable (returning the delay), a tuple of (start,
3261
                factor, limit) (see L{_RetryDelayCalculator}),
3262
                L{RETRY_REMAINING_TIME} or a number (int or float)
3263
  @type timeout: float
3264
  @param timeout: Total timeout
3265
  @type wait_fn: callable
3266
  @param wait_fn: Waiting function
3267
  @return: Return value of function
3268

3269
  """
3270
  assert callable(fn)
3271
  assert callable(wait_fn)
3272
  assert callable(_time_fn)
3273

    
3274
  if args is None:
3275
    args = []
3276

    
3277
  end_time = _time_fn() + timeout
3278

    
3279
  if callable(delay):
3280
    # External function to calculate delay
3281
    calc_delay = delay
3282

    
3283
  elif isinstance(delay, (tuple, list)):
3284
    # Increasing delay with optional upper boundary
3285
    (start, factor, limit) = delay
3286
    calc_delay = _RetryDelayCalculator(start, factor, limit)
3287

    
3288
  elif delay is RETRY_REMAINING_TIME:
3289
    # Always use the remaining time
3290
    calc_delay = None
3291

    
3292
  else:
3293
    # Static delay
3294
    calc_delay = lambda: delay
3295

    
3296
  assert calc_delay is None or callable(calc_delay)
3297

    
3298
  while True:
3299
    retry_args = []
3300
    try:
3301
      # pylint: disable-msg=W0142
3302
      return fn(*args)
3303
    except RetryAgain, err:
3304
      retry_args = err.args
3305
    except RetryTimeout:
3306
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
3307
                                   " handle RetryTimeout")
3308

    
3309
    remaining_time = end_time - _time_fn()
3310

    
3311
    if remaining_time < 0.0:
3312
      # pylint: disable-msg=W0142
3313
      raise RetryTimeout(*retry_args)
3314

    
3315
    assert remaining_time >= 0.0
3316

    
3317
    if calc_delay is None:
3318
      wait_fn(remaining_time)
3319
    else:
3320
      current_delay = calc_delay()
3321
      if current_delay > 0.0:
3322
        wait_fn(current_delay)
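# A minimal usage sketch for Retry (the polled condition is hypothetical):
# poll every 0.1s, backing off by a factor of 1.5 up to 2.0s, for at most 30s.
#   def _CheckFlagFile():
#     if not os.path.exists("/tmp/some-flag-file"):
#       raise RetryAgain()
#     return True
#   Retry(_CheckFlagFile, (0.1, 1.5, 2.0), 30.0)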
3323

    
3324

    
3325
def GetClosedTempfile(*args, **kwargs):
3326
  """Creates a temporary file and returns its path.
3327

3328
  """
3329
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
3330
  _CloseFDNoErr(fd)
3331
  return path
3332

    
3333

    
3334
def GenerateSelfSignedX509Cert(common_name, validity):
3335
  """Generates a self-signed X509 certificate.
3336

3337
  @type common_name: string
3338
  @param common_name: commonName value
3339
  @type validity: int
3340
  @param validity: Validity for certificate in seconds
3341

3342
  """
3343
  # Create private and public key
3344
  key = OpenSSL.crypto.PKey()
3345
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
3346

    
3347
  # Create self-signed certificate
3348
  cert = OpenSSL.crypto.X509()
3349
  if common_name:
3350
    cert.get_subject().CN = common_name
3351
  cert.set_serial_number(1)
3352
  cert.gmtime_adj_notBefore(0)
3353
  cert.gmtime_adj_notAfter(validity)
3354
  cert.set_issuer(cert.get_subject())
3355
  cert.set_pubkey(key)
3356
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
3357

    
3358
  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
3359
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
3360

    
3361
  return (key_pem, cert_pem)
3362

    
3363

    
3364
def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
3365
  """Legacy function to generate self-signed X509 certificate.
3366

3367
  """
3368
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
3369
                                                   validity * 24 * 60 * 60)
3370

    
3371
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3372

    
3373

    
3374


class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
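
# Usage sketch (illustrative comment, not part of the original module): the
# typical acquire/work/release pattern; the lock file path is a placeholder.
#
#   lock = FileLock.Open("/var/lock/example.lock")
#   try:
#     # Retry for up to ten seconds to get exclusive ownership
#     lock.Exclusive(blocking=True, timeout=10.0)
#     # ... critical section ...
#     lock.Unlock()
#   finally:
#     lock.Close()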


class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
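
# Usage sketch (illustrative comment, not part of the original module):
# arbitrary chunks are buffered until a full line is available, which is then
# passed to the callback.
#
#   received = []
#   splitter = LineSplitter(received.append)
#   splitter.write("first li")
#   splitter.write("ne\nsecond line\npartial")
#   splitter.flush()   # received == ["first line", "second line"]
#   splitter.close()   # also emits the trailing "partial"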


def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
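
# Usage sketch (illustrative comment, not part of the original module): the
# decorated function name is made up for the example.
#
#   @SignalHandled([signal.SIGTERM])
#   def Serve(signal_handlers=None):
#     handler = signal_handlers[signal.SIGTERM]
#     while not handler.called:
#       time.sleep(1)  # stand-in for one unit of real work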


class SignalWakeupFd(object):
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when destroyed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
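
# Usage sketch (illustrative comment, not part of the original module): pairing
# SignalHandler with SignalWakeupFd so a select() loop wakes up promptly when
# SIGTERM arrives; the five-second poll interval is arbitrary.
#
#   wakeup = SignalWakeupFd()
#   handler = SignalHandler([signal.SIGTERM], wakeup=wakeup)
#   try:
#     while not handler.called:
#       select.select([wakeup], [], [], 5.0)
#   finally:
#     handler.Reset()
#     wakeup.Reset()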


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
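
# Usage sketch (illustrative comment, not part of the original module): field
# names mixing literal strings with a regular expression; the example fields
# are made up.
#
#   fields = FieldSet("name", "os", r"tag/(.*)")
#   fields.Matches("tag/mytag")              # match object, group(1) == "mytag"
#   fields.NonMatching(["name", "memory"])   # ["memory"]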