lib / utils.py @ 0963d545
1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
_locksheld = []
67
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
68

    
69
debug_locks = False
70

    
71
#: when set to True, L{RunCmd} is disabled
72
no_fork = False
73

    
74
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
75

    
76
HEX_CHAR_RE = r"[a-zA-Z0-9]"
77
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
78
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
79
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
80
                             HEX_CHAR_RE, HEX_CHAR_RE),
81
                            re.S | re.I)
82

    
83
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
84

    
85
# Certificate verification results
86
(CERT_WARNING,
87
 CERT_ERROR) = range(1, 3)
88

    
89
# Flags for mlockall() (from bits/mman.h)
90
_MCL_CURRENT = 1
91
_MCL_FUTURE = 2
92

    
93

    
94
class RunResult(object):
95
  """Holds the result of running external programs.
96

97
  @type exit_code: int
98
  @ivar exit_code: the exit code of the program, or None (if the program
99
      didn't exit())
100
  @type signal: int or None
101
  @ivar signal: the signal that caused the program to finish, or None
102
      (if the program wasn't terminated by a signal)
103
  @type stdout: str
104
  @ivar stdout: the standard output of the program
105
  @type stderr: str
106
  @ivar stderr: the standard error of the program
107
  @type failed: boolean
108
  @ivar failed: True in case the program was
109
      terminated by a signal or exited with a non-zero exit code
110
  @ivar fail_reason: a string detailing the termination reason
111

112
  """
113
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
114
               "failed", "fail_reason", "cmd"]
115

    
116

    
117
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
118
    self.cmd = cmd
119
    self.exit_code = exit_code
120
    self.signal = signal_
121
    self.stdout = stdout
122
    self.stderr = stderr
123
    self.failed = (signal_ is not None or exit_code != 0)
124

    
125
    if self.signal is not None:
126
      self.fail_reason = "terminated by signal %s" % self.signal
127
    elif self.exit_code is not None:
128
      self.fail_reason = "exited with exit code %s" % self.exit_code
129
    else:
130
      self.fail_reason = "unable to determine termination reason"
131

    
132
    if self.failed:
133
      logging.debug("Command '%s' failed (%s); output: %s",
134
                    self.cmd, self.fail_reason, self.output)
135

    
136
  def _GetOutput(self):
137
    """Returns the combined stdout and stderr for easier usage.
138

139
    """
140
    return self.stdout + self.stderr
141

    
142
  output = property(_GetOutput, None, None, "Return full output")
143

    
144

    
145
def _BuildCmdEnvironment(env, reset):
146
  """Builds the environment for an external program.
147

148
  """
149
  if reset:
150
    cmd_env = {}
151
  else:
152
    cmd_env = os.environ.copy()
153
    cmd_env["LC_ALL"] = "C"
154

    
155
  if env is not None:
156
    cmd_env.update(env)
157

    
158
  return cmd_env
159

    
160

    
161
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
162
           interactive=False):
163
  """Execute a (shell) command.
164

165
  The command should not read from its standard input, as it will be
166
  closed.
167

168
  @type cmd: string or list
169
  @param cmd: Command to run
170
  @type env: dict
171
  @param env: Additional environment variables
172
  @type output: str
173
  @param output: if desired, the output of the command can be
174
      saved in a file instead of the RunResult instance; this
175
      parameter denotes the file name (if not None)
176
  @type cwd: string
177
  @param cwd: if specified, will be used as the working
178
      directory for the command; the default will be /
179
  @type reset_env: boolean
180
  @param reset_env: whether to reset or keep the default os environment
181
  @type interactive: boolean
182
  @param interactive: whether we pipe stdin, stdout and stderr
183
                      (default behaviour) or run the command interactively
184
  @rtype: L{RunResult}
185
  @return: RunResult instance
186
  @raise errors.ProgrammerError: if we call this when forks are disabled
187

188
  """
189
  if no_fork:
190
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
191

    
192
  if output and interactive:
193
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
194
                                 " not be provided at the same time")
195

    
196
  if isinstance(cmd, basestring):
197
    strcmd = cmd
198
    shell = True
199
  else:
200
    cmd = [str(val) for val in cmd]
201
    strcmd = ShellQuoteArgs(cmd)
202
    shell = False
203

    
204
  if output:
205
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
206
  else:
207
    logging.debug("RunCmd %s", strcmd)
208

    
209
  cmd_env = _BuildCmdEnvironment(env, reset_env)
210

    
211
  try:
212
    if output is None:
213
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd, interactive)
214
    else:
215
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
216
      out = err = ""
217
  except OSError, err:
218
    if err.errno == errno.ENOENT:
219
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
220
                               (strcmd, err))
221
    else:
222
      raise
223

    
224
  if status >= 0:
225
    exitcode = status
226
    signal_ = None
227
  else:
228
    exitcode = None
229
    signal_ = -status
230

    
231
  return RunResult(exitcode, signal_, out, err, strcmd)
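
# Usage sketch (illustrative only, not part of the original code): how a caller
# typically consumes the RunResult returned by RunCmd. The command and log
# messages below are made up for the example.
#
#   result = RunCmd(["/bin/ls", "/etc"])
#   if result.failed:
#     logging.error("listing failed (%s): %s", result.fail_reason, result.output)
#   else:
#     for line in result.stdout.splitlines():
#       logging.debug("found entry %s", line)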
232

    
233

    
234
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
235
                pidfile=None):
236
  """Start a daemon process after forking twice.
237

238
  @type cmd: string or list
239
  @param cmd: Command to run
240
  @type env: dict
241
  @param env: Additional environment variables
242
  @type cwd: string
243
  @param cwd: Working directory for the program
244
  @type output: string
245
  @param output: Path to file in which to save the output
246
  @type output_fd: int
247
  @param output_fd: File descriptor for output
248
  @type pidfile: string
249
  @param pidfile: Process ID file
250
  @rtype: int
251
  @return: Daemon process ID
252
  @raise errors.ProgrammerError: if we call this when forks are disabled
253

254
  """
255
  if no_fork:
256
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
257
                                 " disabled")
258

    
259
  if output and not (bool(output) ^ (output_fd is not None)):
260
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
261
                                 " specified")
262

    
263
  if isinstance(cmd, basestring):
264
    cmd = ["/bin/sh", "-c", cmd]
265

    
266
  strcmd = ShellQuoteArgs(cmd)
267

    
268
  if output:
269
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
270
  else:
271
    logging.debug("StartDaemon %s", strcmd)
272

    
273
  cmd_env = _BuildCmdEnvironment(env, False)
274

    
275
  # Create pipe for sending PID back
276
  (pidpipe_read, pidpipe_write) = os.pipe()
277
  try:
278
    try:
279
      # Create pipe for sending error messages
280
      (errpipe_read, errpipe_write) = os.pipe()
281
      try:
282
        try:
283
          # First fork
284
          pid = os.fork()
285
          if pid == 0:
286
            try:
287
              # Child process, won't return
288
              _StartDaemonChild(errpipe_read, errpipe_write,
289
                                pidpipe_read, pidpipe_write,
290
                                cmd, cmd_env, cwd,
291
                                output, output_fd, pidfile)
292
            finally:
293
              # Well, maybe child process failed
294
              os._exit(1) # pylint: disable-msg=W0212
295
        finally:
296
          _CloseFDNoErr(errpipe_write)
297

    
298
        # Wait for daemon to be started (or an error message to arrive) and read
299
        # up to 100 KB as an error message
300
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
301
      finally:
302
        _CloseFDNoErr(errpipe_read)
303
    finally:
304
      _CloseFDNoErr(pidpipe_write)
305

    
306
    # Read up to 128 bytes for PID
307
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
308
  finally:
309
    _CloseFDNoErr(pidpipe_read)
310

    
311
  # Try to avoid zombies by waiting for child process
312
  try:
313
    os.waitpid(pid, 0)
314
  except OSError:
315
    pass
316

    
317
  if errormsg:
318
    raise errors.OpExecError("Error when starting daemon process: %r" %
319
                             errormsg)
320

    
321
  try:
322
    return int(pidtext)
323
  except (ValueError, TypeError), err:
324
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
325
                             (pidtext, err))
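
# Usage sketch (illustrative only, not part of the original code): starting a
# daemon with a PID file and checking it later via ReadLockedPidFile. The
# daemon path, flags and PID file location are hypothetical.
#
#   pid = StartDaemon(["/usr/sbin/exampled", "--no-fork"],
#                     output="/var/log/exampled.log",
#                     pidfile="/var/run/exampled.pid")
#   logging.info("daemon started with PID %d", pid)
#   # While the daemon holds the lock, ReadLockedPidFile returns its PID;
#   # once the daemon exits and the lock is released, None is returned.
#   running_pid = ReadLockedPidFile("/var/run/exampled.pid")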
326

    
327

    
328
def _StartDaemonChild(errpipe_read, errpipe_write,
329
                      pidpipe_read, pidpipe_write,
330
                      args, env, cwd,
331
                      output, fd_output, pidfile):
332
  """Child process for starting daemon.
333

334
  """
335
  try:
336
    # Close parent's side
337
    _CloseFDNoErr(errpipe_read)
338
    _CloseFDNoErr(pidpipe_read)
339

    
340
    # First child process
341
    os.chdir("/")
342
    os.umask(077)
343
    os.setsid()
344

    
345
    # And fork for the second time
346
    pid = os.fork()
347
    if pid != 0:
348
      # Exit first child process
349
      os._exit(0) # pylint: disable-msg=W0212
350

    
351
    # Make sure pipe is closed on execv* (and thereby notifies original process)
352
    SetCloseOnExecFlag(errpipe_write, True)
353

    
354
    # List of file descriptors to be left open
355
    noclose_fds = [errpipe_write]
356

    
357
    # Open PID file
358
    if pidfile:
359
      try:
360
        # TODO: Atomic replace with another locked file instead of writing into
361
        # it after creating
362
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
363

    
364
        # Lock the PID file (and fail if not possible to do so). Any code
365
        # wanting to send a signal to the daemon should try to lock the PID
366
        # file before reading it. If acquiring the lock succeeds, the daemon is
367
        # no longer running and the signal should not be sent.
368
        LockFile(fd_pidfile)
369

    
370
        os.write(fd_pidfile, "%d\n" % os.getpid())
371
      except Exception, err:
372
        raise Exception("Creating and locking PID file failed: %s" % err)
373

    
374
      # Keeping the file open to hold the lock
375
      noclose_fds.append(fd_pidfile)
376

    
377
      SetCloseOnExecFlag(fd_pidfile, False)
378
    else:
379
      fd_pidfile = None
380

    
381
    # Open /dev/null
382
    fd_devnull = os.open(os.devnull, os.O_RDWR)
383

    
384
    assert not output or (bool(output) ^ (fd_output is not None))
385

    
386
    if fd_output is not None:
387
      pass
388
    elif output:
389
      # Open output file
390
      try:
391
        # TODO: Implement flag to set append=yes/no
392
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
393
      except EnvironmentError, err:
394
        raise Exception("Opening output file failed: %s" % err)
395
    else:
396
      fd_output = fd_devnull
397

    
398
    # Redirect standard I/O
399
    os.dup2(fd_devnull, 0)
400
    os.dup2(fd_output, 1)
401
    os.dup2(fd_output, 2)
402

    
403
    # Send daemon PID to parent
404
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
405

    
406
    # Close all file descriptors except stdio and error message pipe
407
    CloseFDs(noclose_fds=noclose_fds)
408

    
409
    # Change working directory
410
    os.chdir(cwd)
411

    
412
    if env is None:
413
      os.execvp(args[0], args)
414
    else:
415
      os.execvpe(args[0], args, env)
416
  except: # pylint: disable-msg=W0702
417
    try:
418
      # Report errors to original process
419
      buf = str(sys.exc_info()[1])
420

    
421
      RetryOnSignal(os.write, errpipe_write, buf)
422
    except: # pylint: disable-msg=W0702
423
      # Ignore errors in error handling
424
      pass
425

    
426
  os._exit(1) # pylint: disable-msg=W0212
427

    
428

    
429
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive):
430
  """Run a command and return its output.
431

432
  @type  cmd: string or list
433
  @param cmd: Command to run
434
  @type env: dict
435
  @param env: The environment to use
436
  @type via_shell: bool
437
  @param via_shell: if we should run via the shell
438
  @type cwd: string
439
  @param cwd: the working directory for the program
440
  @type interactive: boolean
441
  @param interactive: Run command interactive (without piping)
442
  @rtype: tuple
443
  @return: (out, err, status)
444

445
  """
446
  poller = select.poll()
447

    
448
  stderr = subprocess.PIPE
449
  stdout = subprocess.PIPE
450
  stdin = subprocess.PIPE
451

    
452
  if interactive:
453
    stderr = stdout = stdin = None
454

    
455
  child = subprocess.Popen(cmd, shell=via_shell,
456
                           stderr=stderr,
457
                           stdout=stdout,
458
                           stdin=stdin,
459
                           close_fds=True, env=env,
460
                           cwd=cwd)
461

    
462
  out = StringIO()
463
  err = StringIO()
464
  if not interactive:
465
    child.stdin.close()
466
    poller.register(child.stdout, select.POLLIN)
467
    poller.register(child.stderr, select.POLLIN)
468
    fdmap = {
469
      child.stdout.fileno(): (out, child.stdout),
470
      child.stderr.fileno(): (err, child.stderr),
471
      }
472
    for fd in fdmap:
473
      SetNonblockFlag(fd, True)
474

    
475
    while fdmap:
476
      pollresult = RetryOnSignal(poller.poll)
477

    
478
      for fd, event in pollresult:
479
        if event & select.POLLIN or event & select.POLLPRI:
480
          data = fdmap[fd][1].read()
481
          # no data from read signifies EOF (the same as POLLHUP)
482
          if not data:
483
            poller.unregister(fd)
484
            del fdmap[fd]
485
            continue
486
          fdmap[fd][0].write(data)
487
        if (event & select.POLLNVAL or event & select.POLLHUP or
488
            event & select.POLLERR):
489
          poller.unregister(fd)
490
          del fdmap[fd]
491

    
492
  out = out.getvalue()
493
  err = err.getvalue()
494

    
495
  status = child.wait()
496
  return out, err, status
497

    
498

    
499
def _RunCmdFile(cmd, env, via_shell, output, cwd):
500
  """Run a command and save its output to a file.
501

502
  @type  cmd: string or list
503
  @param cmd: Command to run
504
  @type env: dict
505
  @param env: The environment to use
506
  @type via_shell: bool
507
  @param via_shell: if we should run via the shell
508
  @type output: str
509
  @param output: the filename in which to save the output
510
  @type cwd: string
511
  @param cwd: the working directory for the program
512
  @rtype: int
513
  @return: the exit status
514

515
  """
516
  fh = open(output, "a")
517
  try:
518
    child = subprocess.Popen(cmd, shell=via_shell,
519
                             stderr=subprocess.STDOUT,
520
                             stdout=fh,
521
                             stdin=subprocess.PIPE,
522
                             close_fds=True, env=env,
523
                             cwd=cwd)
524

    
525
    child.stdin.close()
526
    status = child.wait()
527
  finally:
528
    fh.close()
529
  return status
530

    
531

    
532
def SetCloseOnExecFlag(fd, enable):
533
  """Sets or unsets the close-on-exec flag on a file descriptor.
534

535
  @type fd: int
536
  @param fd: File descriptor
537
  @type enable: bool
538
  @param enable: Whether to set or unset it.
539

540
  """
541
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)
542

    
543
  if enable:
544
    flags |= fcntl.FD_CLOEXEC
545
  else:
546
    flags &= ~fcntl.FD_CLOEXEC
547

    
548
  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
549

    
550

    
551
def SetNonblockFlag(fd, enable):
552
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
553

554
  @type fd: int
555
  @param fd: File descriptor
556
  @type enable: bool
557
  @param enable: Whether to set or unset it
558

559
  """
560
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)
561

    
562
  if enable:
563
    flags |= os.O_NONBLOCK
564
  else:
565
    flags &= ~os.O_NONBLOCK
566

    
567
  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
568

    
569

    
570
def RetryOnSignal(fn, *args, **kwargs):
571
  """Calls a function again if it failed due to EINTR.
572

573
  """
574
  while True:
575
    try:
576
      return fn(*args, **kwargs)
577
    except EnvironmentError, err:
578
      if err.errno != errno.EINTR:
579
        raise
580
    except (socket.error, select.error), err:
581
      # In python 2.6 and above select.error is an IOError, so it's handled
582
      # above, in 2.5 and below it's not, and it's handled here.
583
      if not (err.args and err.args[0] == errno.EINTR):
584
        raise
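
# Usage sketch (illustrative only, not part of the original code): wrapping a
# blocking system call so that a stray signal (EINTR) does not abort it. The
# file descriptor 'fd' is assumed to be an already-open descriptor.
#
#   data = RetryOnSignal(os.read, fd, 4096)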
585

    
586

    
587
def RunParts(dir_name, env=None, reset_env=False):
588
  """Run Scripts or programs in a directory
589

590
  @type dir_name: string
591
  @param dir_name: absolute path to a directory
592
  @type env: dict
593
  @param env: The environment to use
594
  @type reset_env: boolean
595
  @param reset_env: whether to reset or keep the default os environment
596
  @rtype: list of tuples
597
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
598

599
  """
600
  rr = []
601

    
602
  try:
603
    dir_contents = ListVisibleFiles(dir_name)
604
  except OSError, err:
605
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
606
    return rr
607

    
608
  for relname in sorted(dir_contents):
609
    fname = PathJoin(dir_name, relname)
610
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
611
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
612
      rr.append((relname, constants.RUNPARTS_SKIP, None))
613
    else:
614
      try:
615
        result = RunCmd([fname], env=env, reset_env=reset_env)
616
      except Exception, err: # pylint: disable-msg=W0703
617
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
618
      else:
619
        rr.append((relname, constants.RUNPARTS_RUN, result))
620

    
621
  return rr
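
# Usage sketch (illustrative only, not part of the original code): inspecting
# the (name, status, result) tuples returned by RunParts. The directory is
# hypothetical.
#
#   for (name, status, result) in RunParts("/etc/example/hooks.d"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("script %s could not be run: %s", name, result)
#     elif status == constants.RUNPARTS_RUN and result.failed:
#       logging.warning("script %s failed: %s", name, result.fail_reason)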
622

    
623

    
624
def RemoveFile(filename):
625
  """Remove a file ignoring some errors.
626

627
  Remove a file, ignoring non-existing ones or directories. Other
628
  errors are passed.
629

630
  @type filename: str
631
  @param filename: the file to be removed
632

633
  """
634
  try:
635
    os.unlink(filename)
636
  except OSError, err:
637
    if err.errno not in (errno.ENOENT, errno.EISDIR):
638
      raise
639

    
640

    
641
def RemoveDir(dirname):
642
  """Remove an empty directory.
643

644
  Remove a directory, ignoring non-existing ones.
645
  Other errors are passed. This includes the case
646
  where the directory is not empty, so it can't be removed.
647

648
  @type dirname: str
649
  @param dirname: the empty directory to be removed
650

651
  """
652
  try:
653
    os.rmdir(dirname)
654
  except OSError, err:
655
    if err.errno != errno.ENOENT:
656
      raise
657

    
658

    
659
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
660
  """Renames a file.
661

662
  @type old: string
663
  @param old: Original path
664
  @type new: string
665
  @param new: New path
666
  @type mkdir: bool
667
  @param mkdir: Whether to create target directory if it doesn't exist
668
  @type mkdir_mode: int
669
  @param mkdir_mode: Mode for newly created directories
670

671
  """
672
  try:
673
    return os.rename(old, new)
674
  except OSError, err:
675
    # In at least one use case of this function, the job queue, directory
676
    # creation is very rare. Checking for the directory before renaming is not
677
    # as efficient.
678
    if mkdir and err.errno == errno.ENOENT:
679
      # Create directory and try again
680
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
681

    
682
      return os.rename(old, new)
683

    
684
    raise
685

    
686

    
687
def Makedirs(path, mode=0750):
688
  """Super-mkdir; create a leaf directory and all intermediate ones.
689

690
  This is a wrapper around C{os.makedirs} adding error handling not implemented
691
  before Python 2.5.
692

693
  """
694
  try:
695
    os.makedirs(path, mode)
696
  except OSError, err:
697
    # Ignore EEXIST. This is only handled in os.makedirs as included in
698
    # Python 2.5 and above.
699
    if err.errno != errno.EEXIST or not os.path.exists(path):
700
      raise
701

    
702

    
703
def ResetTempfileModule():
704
  """Resets the random name generator of the tempfile module.
705

706
  This function should be called after C{os.fork} in the child process to
707
  ensure it creates a newly seeded random generator. Otherwise it would
708
  generate the same random parts as the parent process. If several processes
709
  race for the creation of a temporary file, this could lead to one not getting
710
  a temporary name.
711

712
  """
713
  # pylint: disable-msg=W0212
714
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
715
    tempfile._once_lock.acquire()
716
    try:
717
      # Reset random name generator
718
      tempfile._name_sequence = None
719
    finally:
720
      tempfile._once_lock.release()
721
  else:
722
    logging.critical("The tempfile module misses at least one of the"
723
                     " '_once_lock' and '_name_sequence' attributes")
724

    
725

    
726
def _FingerprintFile(filename):
727
  """Compute the fingerprint of a file.
728

729
  If the file does not exist, None will be returned
730
  instead.
731

732
  @type filename: str
733
  @param filename: the filename to checksum
734
  @rtype: str
735
  @return: the hex digest of the sha checksum of the contents
736
      of the file
737

738
  """
739
  if not (os.path.exists(filename) and os.path.isfile(filename)):
740
    return None
741

    
742
  f = open(filename)
743

    
744
  fp = compat.sha1_hash()
745
  while True:
746
    data = f.read(4096)
747
    if not data:
748
      break
749

    
750
    fp.update(data)
751

    
752
  return fp.hexdigest()
753

    
754

    
755
def FingerprintFiles(files):
756
  """Compute fingerprints for a list of files.
757

758
  @type files: list
759
  @param files: the list of filenames to fingerprint
760
  @rtype: dict
761
  @return: a dictionary filename: fingerprint, holding only
762
      existing files
763

764
  """
765
  ret = {}
766

    
767
  for filename in files:
768
    cksum = _FingerprintFile(filename)
769
    if cksum:
770
      ret[filename] = cksum
771

    
772
  return ret
773

    
774

    
775
def ForceDictType(target, key_types, allowed_values=None):
776
  """Force the values of a dict to have certain types.
777

778
  @type target: dict
779
  @param target: the dict to update
780
  @type key_types: dict
781
  @param key_types: dict mapping target dict keys to types
782
                    in constants.ENFORCEABLE_TYPES
783
  @type allowed_values: list
784
  @keyword allowed_values: list of specially allowed values
785

786
  """
787
  if allowed_values is None:
788
    allowed_values = []
789

    
790
  if not isinstance(target, dict):
791
    msg = "Expected dictionary, got '%s'" % target
792
    raise errors.TypeEnforcementError(msg)
793

    
794
  for key in target:
795
    if key not in key_types:
796
      msg = "Unknown key '%s'" % key
797
      raise errors.TypeEnforcementError(msg)
798

    
799
    if target[key] in allowed_values:
800
      continue
801

    
802
    ktype = key_types[key]
803
    if ktype not in constants.ENFORCEABLE_TYPES:
804
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
805
      raise errors.ProgrammerError(msg)
806

    
807
    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
808
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
809
        pass
810
      elif not isinstance(target[key], basestring):
811
        if isinstance(target[key], bool) and not target[key]:
812
          target[key] = ''
813
        else:
814
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
815
          raise errors.TypeEnforcementError(msg)
816
    elif ktype == constants.VTYPE_BOOL:
817
      if isinstance(target[key], basestring) and target[key]:
818
        if target[key].lower() == constants.VALUE_FALSE:
819
          target[key] = False
820
        elif target[key].lower() == constants.VALUE_TRUE:
821
          target[key] = True
822
        else:
823
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
824
          raise errors.TypeEnforcementError(msg)
825
      elif target[key]:
826
        target[key] = True
827
      else:
828
        target[key] = False
829
    elif ktype == constants.VTYPE_SIZE:
830
      try:
831
        target[key] = ParseUnit(target[key])
832
      except errors.UnitParseError, err:
833
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
834
              (key, target[key], err)
835
        raise errors.TypeEnforcementError(msg)
836
    elif ktype == constants.VTYPE_INT:
837
      try:
838
        target[key] = int(target[key])
839
      except (ValueError, TypeError):
840
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
841
        raise errors.TypeEnforcementError(msg)
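
# Usage sketch (illustrative only, not part of the original code): in-place
# normalisation of a parameter dictionary. Assuming constants.VALUE_TRUE is the
# string "true", the dict below ends up as {"size": 1024, "enabled": True}.
#
#   params = {"size": "1g", "enabled": "true"}
#   ForceDictType(params, {"size": constants.VTYPE_SIZE,
#                          "enabled": constants.VTYPE_BOOL})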
842

    
843

    
844
def _GetProcStatusPath(pid):
845
  """Returns the path for a PID's proc status file.
846

847
  @type pid: int
848
  @param pid: Process ID
849
  @rtype: string
850

851
  """
852
  return "/proc/%d/status" % pid
853

    
854

    
855
def IsProcessAlive(pid):
856
  """Check if a given pid exists on the system.
857

858
  @note: zombie status is not handled, so zombie processes
859
      will be returned as alive
860
  @type pid: int
861
  @param pid: the process ID to check
862
  @rtype: boolean
863
  @return: True if the process exists
864

865
  """
866
  def _TryStat(name):
867
    try:
868
      os.stat(name)
869
      return True
870
    except EnvironmentError, err:
871
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
872
        return False
873
      elif err.errno == errno.EINVAL:
874
        raise RetryAgain(err)
875
      raise
876

    
877
  assert isinstance(pid, int), "pid must be an integer"
878
  if pid <= 0:
879
    return False
880

    
881
  # /proc in a multiprocessor environment can have strange behaviors.
882
  # Retry the os.stat a few times until we get a good result.
883
  try:
884
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
885
                 args=[_GetProcStatusPath(pid)])
886
  except RetryTimeout, err:
887
    err.RaiseInner()
888

    
889

    
890
def _ParseSigsetT(sigset):
891
  """Parse a rendered sigset_t value.
892

893
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
894
  function.
895

896
  @type sigset: string
897
  @param sigset: Rendered signal set from /proc/$pid/status
898
  @rtype: set
899
  @return: Set of all enabled signal numbers
900

901
  """
902
  result = set()
903

    
904
  signum = 0
905
  for ch in reversed(sigset):
906
    chv = int(ch, 16)
907

    
908
    # The following could be done in a loop, but it's easier to read and
909
    # understand in the unrolled form
910
    if chv & 1:
911
      result.add(signum + 1)
912
    if chv & 2:
913
      result.add(signum + 2)
914
    if chv & 4:
915
      result.add(signum + 3)
916
    if chv & 8:
917
      result.add(signum + 4)
918

    
919
    signum += 4
920

    
921
  return result
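
# Worked example (illustrative only, not part of the original code): the
# hexadecimal SigCgt value below has bits 1 and 17 set, so on Linux it decodes
# to SIGHUP (1) and SIGCHLD (17).
#
#   _ParseSigsetT("0000000000010001")  # -> set([1, 17])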
922

    
923

    
924
def _GetProcStatusField(pstatus, field):
925
  """Retrieves a field from the contents of a proc status file.
926

927
  @type pstatus: string
928
  @param pstatus: Contents of /proc/$pid/status
929
  @type field: string
930
  @param field: Name of field whose value should be returned
931
  @rtype: string
932

933
  """
934
  for line in pstatus.splitlines():
935
    parts = line.split(":", 1)
936

    
937
    if len(parts) < 2 or parts[0] != field:
938
      continue
939

    
940
    return parts[1].strip()
941

    
942
  return None
943

    
944

    
945
def IsProcessHandlingSignal(pid, signum, status_path=None):
946
  """Checks whether a process is handling a signal.
947

948
  @type pid: int
949
  @param pid: Process ID
950
  @type signum: int
951
  @param signum: Signal number
952
  @rtype: bool
953

954
  """
955
  if status_path is None:
956
    status_path = _GetProcStatusPath(pid)
957

    
958
  try:
959
    proc_status = ReadFile(status_path)
960
  except EnvironmentError, err:
961
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
962
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
963
      return False
964
    raise
965

    
966
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
967
  if sigcgt is None:
968
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
969

    
970
  # Now check whether signal is handled
971
  return signum in _ParseSigsetT(sigcgt)
972

    
973

    
974
def ReadPidFile(pidfile):
975
  """Read a pid from a file.
976

977
  @type  pidfile: string
978
  @param pidfile: path to the file containing the pid
979
  @rtype: int
980
  @return: The process id, if the file exists and contains a valid PID,
981
           otherwise 0
982

983
  """
984
  try:
985
    raw_data = ReadOneLineFile(pidfile)
986
  except EnvironmentError, err:
987
    if err.errno != errno.ENOENT:
988
      logging.exception("Can't read pid file")
989
    return 0
990

    
991
  try:
992
    pid = int(raw_data)
993
  except (TypeError, ValueError), err:
994
    logging.info("Can't parse pid file contents", exc_info=True)
995
    return 0
996

    
997
  return pid
998

    
999

    
1000
def ReadLockedPidFile(path):
1001
  """Reads a locked PID file.
1002

1003
  This can be used together with L{StartDaemon}.
1004

1005
  @type path: string
1006
  @param path: Path to PID file
1007
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
1008

1009
  """
1010
  try:
1011
    fd = os.open(path, os.O_RDONLY)
1012
  except EnvironmentError, err:
1013
    if err.errno == errno.ENOENT:
1014
      # PID file doesn't exist
1015
      return None
1016
    raise
1017

    
1018
  try:
1019
    try:
1020
      # Try to acquire lock
1021
      LockFile(fd)
1022
    except errors.LockError:
1023
      # Couldn't lock, daemon is running
1024
      return int(os.read(fd, 100))
1025
  finally:
1026
    os.close(fd)
1027

    
1028
  return None
1029

    
1030

    
1031
def MatchNameComponent(key, name_list, case_sensitive=True):
1032
  """Try to match a name against a list.
1033

1034
  This function will try to match a name like test1 against a list
1035
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
1036
  this list, I{'test1'} as well as I{'test1.example'} will match, but
1037
  not I{'test1.ex'}. A multiple match will be considered as no match
1038
  at all (e.g. I{'test1'} against C{['test1.example.com',
1039
  'test1.example.org']}), except when the key fully matches an entry
1040
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
1041

1042
  @type key: str
1043
  @param key: the name to be searched
1044
  @type name_list: list
1045
  @param name_list: the list of strings against which to search the key
1046
  @type case_sensitive: boolean
1047
  @param case_sensitive: whether to provide a case-sensitive match
1048

1049
  @rtype: None or str
1050
  @return: None if there is no match I{or} if there are multiple matches,
1051
      otherwise the element from the list which matches
1052

1053
  """
1054
  if key in name_list:
1055
    return key
1056

    
1057
  re_flags = 0
1058
  if not case_sensitive:
1059
    re_flags |= re.IGNORECASE
1060
    key = key.upper()
1061
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
1062
  names_filtered = []
1063
  string_matches = []
1064
  for name in name_list:
1065
    if mo.match(name) is not None:
1066
      names_filtered.append(name)
1067
      if not case_sensitive and key == name.upper():
1068
        string_matches.append(name)
1069

    
1070
  if len(string_matches) == 1:
1071
    return string_matches[0]
1072
  if len(names_filtered) == 1:
1073
    return names_filtered[0]
1074
  return None
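
# Worked examples (illustrative only, not part of the original code), using
# made-up host names:
#
#   MatchNameComponent("node1", ["node1.example.com", "node2.example.com"])
#   # -> "node1.example.com" (unique prefix match)
#   MatchNameComponent("node1", ["node1.example.com", "node1.example.org"])
#   # -> None (ambiguous match)
#   MatchNameComponent("node1", ["node1", "node1.example.com"])
#   # -> "node1" (exact match wins)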
1075

    
1076

    
1077
def ValidateServiceName(name):
1078
  """Validate the given service name.
1079

1080
  @type name: number or string
1081
  @param name: Service name or port specification
1082

1083
  """
1084
  try:
1085
    numport = int(name)
1086
  except (ValueError, TypeError):
1087
    # Non-numeric service name
1088
    valid = _VALID_SERVICE_NAME_RE.match(name)
1089
  else:
1090
    # Numeric port (protocols other than TCP or UDP might need adjustments
1091
    # here)
1092
    valid = (numport >= 0 and numport < (1 << 16))
1093

    
1094
  if not valid:
1095
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
1096
                               errors.ECODE_INVAL)
1097

    
1098
  return name
1099

    
1100

    
1101
def ListVolumeGroups():
1102
  """List volume groups and their size
1103

1104
  @rtype: dict
1105
  @return:
1106
       Dictionary with volume group names as keys and
1107
       their sizes as values
1108

1109
  """
1110
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1111
  result = RunCmd(command)
1112
  retval = {}
1113
  if result.failed:
1114
    return retval
1115

    
1116
  for line in result.stdout.splitlines():
1117
    try:
1118
      name, size = line.split()
1119
      size = int(float(size))
1120
    except (IndexError, ValueError), err:
1121
      logging.error("Invalid output from vgs (%s): %s", err, line)
1122
      continue
1123

    
1124
    retval[name] = size
1125

    
1126
  return retval
1127

    
1128

    
1129
def BridgeExists(bridge):
1130
  """Check whether the given bridge exists in the system
1131

1132
  @type bridge: str
1133
  @param bridge: the bridge name to check
1134
  @rtype: boolean
1135
  @return: True if it does
1136

1137
  """
1138
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1139

    
1140

    
1141
def NiceSort(name_list):
1142
  """Sort a list of strings based on digit and non-digit groupings.
1143

1144
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1145
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1146
  'a11']}.
1147

1148
  The sort algorithm breaks each name in groups of either only-digits
1149
  or no-digits. Only the first eight such groups are considered, and
1150
  after that we just use what's left of the string.
1151

1152
  @type name_list: list
1153
  @param name_list: the names to be sorted
1154
  @rtype: list
1155
  @return: a copy of the name list sorted with our algorithm
1156

1157
  """
1158
  _SORTER_BASE = "(\D+|\d+)"
1159
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1160
                                                  _SORTER_BASE, _SORTER_BASE,
1161
                                                  _SORTER_BASE, _SORTER_BASE,
1162
                                                  _SORTER_BASE, _SORTER_BASE)
1163
  _SORTER_RE = re.compile(_SORTER_FULL)
1164
  _SORTER_NODIGIT = re.compile("^\D*$")
1165
  def _TryInt(val):
1166
    """Attempts to convert a variable to integer."""
1167
    if val is None or _SORTER_NODIGIT.match(val):
1168
      return val
1169
    rval = int(val)
1170
    return rval
1171

    
1172
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1173
             for name in name_list]
1174
  to_sort.sort()
1175
  return [tup[1] for tup in to_sort]
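
# Worked example (illustrative only, not part of the original code):
#
#   NiceSort(["a1", "a10", "a11", "a2"])  # -> ["a1", "a2", "a10", "a11"]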
1176

    
1177

    
1178
def TryConvert(fn, val):
1179
  """Try to convert a value ignoring errors.
1180

1181
  This function tries to apply function I{fn} to I{val}. If no
1182
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1183
  the result, else it will return the original value. Any other
1184
  exceptions are propagated to the caller.
1185

1186
  @type fn: callable
1187
  @param fn: function to apply to the value
1188
  @param val: the value to be converted
1189
  @return: The converted value if the conversion was successful,
1190
      otherwise the original value.
1191

1192
  """
1193
  try:
1194
    nv = fn(val)
1195
  except (ValueError, TypeError):
1196
    nv = val
1197
  return nv
1198

    
1199

    
1200
def IsValidShellParam(word):
1201
  """Verifies is the given word is safe from the shell's p.o.v.
1202

1203
  This means that we can pass this to a command via the shell and be
1204
  sure that it doesn't alter the command line and is passed as such to
1205
  the actual command.
1206

1207
  Note that we are overly restrictive here, in order to be on the safe
1208
  side.
1209

1210
  @type word: str
1211
  @param word: the word to check
1212
  @rtype: boolean
1213
  @return: True if the word is 'safe'
1214

1215
  """
1216
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1217

    
1218

    
1219
def BuildShellCmd(template, *args):
1220
  """Build a safe shell command line from the given arguments.
1221

1222
  This function will check all arguments in the args list so that they
1223
  are valid shell parameters (i.e. they don't contain shell
1224
  metacharacters). If everything is ok, it will return the result of
1225
  template % args.
1226

1227
  @type template: str
1228
  @param template: the string holding the template for the
1229
      string formatting
1230
  @rtype: str
1231
  @return: the expanded command line
1232

1233
  """
1234
  for word in args:
1235
    if not IsValidShellParam(word):
1236
      raise errors.ProgrammerError("Shell argument '%s' contains"
1237
                                   " invalid characters" % word)
1238
  return template % args
1239

    
1240

    
1241
def FormatUnit(value, units):
1242
  """Formats an incoming number of MiB with the appropriate unit.
1243

1244
  @type value: int
1245
  @param value: integer representing the value in MiB (1048576)
1246
  @type units: char
1247
  @param units: the type of formatting we should do:
1248
      - 'h' for automatic scaling
1249
      - 'm' for MiBs
1250
      - 'g' for GiBs
1251
      - 't' for TiBs
1252
  @rtype: str
1253
  @return: the formatted value (with suffix)
1254

1255
  """
1256
  if units not in ('m', 'g', 't', 'h'):
1257
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1258

    
1259
  suffix = ''
1260

    
1261
  if units == 'm' or (units == 'h' and value < 1024):
1262
    if units == 'h':
1263
      suffix = 'M'
1264
    return "%d%s" % (round(value, 0), suffix)
1265

    
1266
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1267
    if units == 'h':
1268
      suffix = 'G'
1269
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1270

    
1271
  else:
1272
    if units == 'h':
1273
      suffix = 'T'
1274
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1275

    
1276

    
1277
def ParseUnit(input_string):
1278
  """Tries to extract number and scale from the given string.
1279

1280
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1281
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1282
  is always an int in MiB.
1283

1284
  """
1285
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1286
  if not m:
1287
    raise errors.UnitParseError("Invalid format")
1288

    
1289
  value = float(m.groups()[0])
1290

    
1291
  unit = m.groups()[1]
1292
  if unit:
1293
    lcunit = unit.lower()
1294
  else:
1295
    lcunit = 'm'
1296

    
1297
  if lcunit in ('m', 'mb', 'mib'):
1298
    # Value already in MiB
1299
    pass
1300

    
1301
  elif lcunit in ('g', 'gb', 'gib'):
1302
    value *= 1024
1303

    
1304
  elif lcunit in ('t', 'tb', 'tib'):
1305
    value *= 1024 * 1024
1306

    
1307
  else:
1308
    raise errors.UnitParseError("Unknown unit: %s" % unit)
1309

    
1310
  # Make sure we round up
1311
  if int(value) < value:
1312
    value += 1
1313

    
1314
  # Round up to the next multiple of 4
1315
  value = int(value)
1316
  if value % 4:
1317
    value += 4 - value % 4
1318

    
1319
  return value
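
# Worked examples (illustrative only, not part of the original code): ParseUnit
# always returns MiB, rounded up to a multiple of four, and FormatUnit goes the
# other way.
#
#   ParseUnit("4")         # -> 4        (no unit defaults to MiB)
#   ParseUnit("1.5g")      # -> 1536
#   ParseUnit("1t")        # -> 1048576
#   FormatUnit(1536, "h")  # -> "1.5G"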
1320

    
1321

    
1322
def ParseCpuMask(cpu_mask):
1323
  """Parse a CPU mask definition and return the list of CPU IDs.
1324

1325
  CPU mask format: comma-separated list of CPU IDs
1326
  or dash-separated ID ranges
1327
  Example: "0-2,5" -> "0,1,2,5"
1328

1329
  @type cpu_mask: str
1330
  @param cpu_mask: CPU mask definition
1331
  @rtype: list of int
1332
  @return: list of CPU IDs
1333

1334
  """
1335
  if not cpu_mask:
1336
    return []
1337
  cpu_list = []
1338
  for range_def in cpu_mask.split(","):
1339
    boundaries = range_def.split("-")
1340
    n_elements = len(boundaries)
1341
    if n_elements > 2:
1342
      raise errors.ParseError("Invalid CPU ID range definition"
1343
                              " (only one hyphen allowed): %s" % range_def)
1344
    try:
1345
      lower = int(boundaries[0])
1346
    except (ValueError, TypeError), err:
1347
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
1348
                              " CPU ID range: %s" % str(err))
1349
    try:
1350
      higher = int(boundaries[-1])
1351
    except (ValueError, TypeError), err:
1352
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
1353
                              " CPU ID range: %s" % str(err))
1354
    if lower > higher:
1355
      raise errors.ParseError("Invalid CPU ID range definition"
1356
                              " (%d > %d): %s" % (lower, higher, range_def))
1357
    cpu_list.extend(range(lower, higher + 1))
1358
  return cpu_list
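
# Worked example (illustrative only, not part of the original code):
#
#   ParseCpuMask("0-2,5")  # -> [0, 1, 2, 5]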
1359

    
1360

    
1361
def AddAuthorizedKey(file_obj, key):
1362
  """Adds an SSH public key to an authorized_keys file.
1363

1364
  @type file_obj: str or file handle
1365
  @param file_obj: path to authorized_keys file
1366
  @type key: str
1367
  @param key: string containing key
1368

1369
  """
1370
  key_fields = key.split()
1371

    
1372
  if isinstance(file_obj, basestring):
1373
    f = open(file_obj, 'a+')
1374
  else:
1375
    f = file_obj
1376

    
1377
  try:
1378
    nl = True
1379
    for line in f:
1380
      # Ignore whitespace changes
1381
      if line.split() == key_fields:
1382
        break
1383
      nl = line.endswith('\n')
1384
    else:
1385
      if not nl:
1386
        f.write("\n")
1387
      f.write(key.rstrip('\r\n'))
1388
      f.write("\n")
1389
      f.flush()
1390
  finally:
1391
    f.close()
1392

    
1393

    
1394
def RemoveAuthorizedKey(file_name, key):
1395
  """Removes an SSH public key from an authorized_keys file.
1396

1397
  @type file_name: str
1398
  @param file_name: path to authorized_keys file
1399
  @type key: str
1400
  @param key: string containing key
1401

1402
  """
1403
  key_fields = key.split()
1404

    
1405
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1406
  try:
1407
    out = os.fdopen(fd, 'w')
1408
    try:
1409
      f = open(file_name, 'r')
1410
      try:
1411
        for line in f:
1412
          # Ignore whitespace changes while comparing lines
1413
          if line.split() != key_fields:
1414
            out.write(line)
1415

    
1416
        out.flush()
1417
        os.rename(tmpname, file_name)
1418
      finally:
1419
        f.close()
1420
    finally:
1421
      out.close()
1422
  except:
1423
    RemoveFile(tmpname)
1424
    raise
1425

    
1426

    
1427
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1428
  """Sets the name of an IP address and hostname in /etc/hosts.
1429

1430
  @type file_name: str
1431
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1432
  @type ip: str
1433
  @param ip: the IP address
1434
  @type hostname: str
1435
  @param hostname: the hostname to be added
1436
  @type aliases: list
1437
  @param aliases: the list of aliases to add for the hostname
1438

1439
  """
1440
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1441
  # Ensure aliases are unique
1442
  aliases = UniqueSequence([hostname] + aliases)[1:]
1443

    
1444
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1445
  try:
1446
    out = os.fdopen(fd, 'w')
1447
    try:
1448
      f = open(file_name, 'r')
1449
      try:
1450
        for line in f:
1451
          fields = line.split()
1452
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1453
            continue
1454
          out.write(line)
1455

    
1456
        out.write("%s\t%s" % (ip, hostname))
1457
        if aliases:
1458
          out.write(" %s" % ' '.join(aliases))
1459
        out.write('\n')
1460

    
1461
        out.flush()
1462
        os.fsync(out)
1463
        os.chmod(tmpname, 0644)
1464
        os.rename(tmpname, file_name)
1465
      finally:
1466
        f.close()
1467
    finally:
1468
      out.close()
1469
  except:
1470
    RemoveFile(tmpname)
1471
    raise
1472

    
1473

    
1474
def AddHostToEtcHosts(hostname):
1475
  """Wrapper around SetEtcHostsEntry.
1476

1477
  @type hostname: str
1478
  @param hostname: a hostname that will be resolved and added to
1479
      L{constants.ETC_HOSTS}
1480

1481
  """
1482
  SetEtcHostsEntry(constants.ETC_HOSTS, hostname.ip, hostname.name,
1483
                   [hostname.name.split(".")[0]])
1484

    
1485

    
1486
def RemoveEtcHostsEntry(file_name, hostname):
1487
  """Removes a hostname from /etc/hosts.
1488

1489
  IP addresses without names are removed from the file.
1490

1491
  @type file_name: str
1492
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1493
  @type hostname: str
1494
  @param hostname: the hostname to be removed
1495

1496
  """
1497
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1498
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1499
  try:
1500
    out = os.fdopen(fd, 'w')
1501
    try:
1502
      f = open(file_name, 'r')
1503
      try:
1504
        for line in f:
1505
          fields = line.split()
1506
          if len(fields) > 1 and not fields[0].startswith('#'):
1507
            names = fields[1:]
1508
            if hostname in names:
1509
              while hostname in names:
1510
                names.remove(hostname)
1511
              if names:
1512
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1513
              continue
1514

    
1515
          out.write(line)
1516

    
1517
        out.flush()
1518
        os.fsync(out)
1519
        os.chmod(tmpname, 0644)
1520
        os.rename(tmpname, file_name)
1521
      finally:
1522
        f.close()
1523
    finally:
1524
      out.close()
1525
  except:
1526
    RemoveFile(tmpname)
1527
    raise
1528

    
1529

    
1530
def RemoveHostFromEtcHosts(hostname):
1531
  """Wrapper around RemoveEtcHostsEntry.
1532

1533
  @type hostname: str
1534
  @param hostname: hostname that will be resolved and its
1535
      full and short name will be removed from
1536
      L{constants.ETC_HOSTS}
1537

1538
  """
1539
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
1540
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])
1541

    
1542

    
1543
def TimestampForFilename():
1544
  """Returns the current time formatted for filenames.
1545

1546
  The format doesn't contain colons, as some shells and applications use them as
1547
  separators.
1548

1549
  """
1550
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1551

    
1552

    
1553
def CreateBackup(file_name):
1554
  """Creates a backup of a file.
1555

1556
  @type file_name: str
1557
  @param file_name: file to be backed up
1558
  @rtype: str
1559
  @return: the path to the newly created backup
1560
  @raise errors.ProgrammerError: for invalid file names
1561

1562
  """
1563
  if not os.path.isfile(file_name):
1564
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1565
                                file_name)
1566

    
1567
  prefix = ("%s.backup-%s." %
1568
            (os.path.basename(file_name), TimestampForFilename()))
1569
  dir_name = os.path.dirname(file_name)
1570

    
1571
  fsrc = open(file_name, 'rb')
1572
  try:
1573
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1574
    fdst = os.fdopen(fd, 'wb')
1575
    try:
1576
      logging.debug("Backing up %s at %s", file_name, backup_name)
1577
      shutil.copyfileobj(fsrc, fdst)
1578
    finally:
1579
      fdst.close()
1580
  finally:
1581
    fsrc.close()
1582

    
1583
  return backup_name
1584

    
1585

    
1586
def ShellQuote(value):
1587
  """Quotes shell argument according to POSIX.
1588

1589
  @type value: str
1590
  @param value: the argument to be quoted
1591
  @rtype: str
1592
  @return: the quoted value
1593

1594
  """
1595
  if _re_shell_unquoted.match(value):
1596
    return value
1597
  else:
1598
    return "'%s'" % value.replace("'", "'\\''")
1599

    
1600

    
1601
def ShellQuoteArgs(args):
1602
  """Quotes a list of shell arguments.
1603

1604
  @type args: list
1605
  @param args: list of arguments to be quoted
1606
  @rtype: str
1607
  @return: the quoted arguments concatenated with spaces
1608

1609
  """
1610
  return ' '.join([ShellQuote(i) for i in args])
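
# Worked examples (illustrative only, not part of the original code): arguments
# built only from "safe" characters are left alone, everything else is wrapped
# in single quotes with embedded quotes escaped (shown as shell-level text).
#
#   ShellQuote("file.txt")   # -> file.txt      (safe characters, unchanged)
#   ShellQuote("a'b")        # -> 'a'\''b'      (quoted, with ' escaped)
#   ShellQuoteArgs(["ls", "-l", "my file"])     # -> ls -l 'my file'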
1611

    
1612

    
1613
class ShellWriter:
1614
  """Helper class to write scripts with indentation.
1615

1616
  """
1617
  INDENT_STR = "  "
1618

    
1619
  def __init__(self, fh):
1620
    """Initializes this class.
1621

1622
    """
1623
    self._fh = fh
1624
    self._indent = 0
1625

    
1626
  def IncIndent(self):
1627
    """Increase indentation level by 1.
1628

1629
    """
1630
    self._indent += 1
1631

    
1632
  def DecIndent(self):
1633
    """Decrease indentation level by 1.
1634

1635
    """
1636
    assert self._indent > 0
1637
    self._indent -= 1
1638

    
1639
  def Write(self, txt, *args):
1640
    """Write line to output file.
1641

1642
    """
1643
    assert self._indent >= 0
1644

    
1645
    self._fh.write(self._indent * self.INDENT_STR)
1646

    
1647
    if args:
1648
      self._fh.write(txt % args)
1649
    else:
1650
      self._fh.write(txt)
1651

    
1652
    self._fh.write("\n")
1653

    
1654

    
1655
def ListVisibleFiles(path):
1656
  """Returns a list of visible files in a directory.
1657

1658
  @type path: str
1659
  @param path: the directory to enumerate
1660
  @rtype: list
1661
  @return: the list of all files not starting with a dot
1662
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1663

1664
  """
1665
  if not IsNormAbsPath(path):
1666
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1667
                                 " absolute/normalized: '%s'" % path)
1668
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1669
  return files
1670

    
1671

    
1672
def GetHomeDir(user, default=None):
1673
  """Try to get the homedir of the given user.
1674

1675
  The user can be passed either as a string (denoting the name) or as
1676
  an integer (denoting the user id). If the user is not found, the
1677
  'default' argument is returned, which defaults to None.
1678

1679
  """
1680
  try:
1681
    if isinstance(user, basestring):
1682
      result = pwd.getpwnam(user)
1683
    elif isinstance(user, (int, long)):
1684
      result = pwd.getpwuid(user)
1685
    else:
1686
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1687
                                   type(user))
1688
  except KeyError:
1689
    return default
1690
  return result.pw_dir
1691

    
1692

    
1693
def NewUUID():
1694
  """Returns a random UUID.
1695

1696
  @note: This is a Linux-specific method as it uses the /proc
1697
      filesystem.
1698
  @rtype: str
1699

1700
  """
1701
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1702

    
1703

    
1704
def GenerateSecret(numbytes=20):
1705
  """Generates a random secret.
1706

1707
  This will generate a pseudo-random secret returning a hex string
1708
  (so that it can be used where an ASCII string is needed).
1709

1710
  @param numbytes: the number of bytes which will be represented by the returned
1711
      string (defaulting to 20, the length of a SHA1 hash)
1712
  @rtype: str
1713
  @return: a hex representation of the pseudo-random sequence
1714

1715
  """
1716
  return os.urandom(numbytes).encode('hex')
1717

    
1718

    
1719
def EnsureDirs(dirs):
1720
  """Make required directories, if they don't exist.
1721

1722
  @param dirs: list of tuples (dir_name, dir_mode)
1723
  @type dirs: list of (string, integer)
1724

1725
  """
1726
  for dir_name, dir_mode in dirs:
1727
    try:
1728
      os.mkdir(dir_name, dir_mode)
1729
    except EnvironmentError, err:
1730
      if err.errno != errno.EEXIST:
1731
        raise errors.GenericError("Cannot create needed directory"
1732
                                  " '%s': %s" % (dir_name, err))
1733
    try:
1734
      os.chmod(dir_name, dir_mode)
1735
    except EnvironmentError, err:
1736
      raise errors.GenericError("Cannot change directory permissions on"
1737
                                " '%s': %s" % (dir_name, err))
1738
    if not os.path.isdir(dir_name):
1739
      raise errors.GenericError("%s is not a directory" % dir_name)
1740

    
1741

    
1742
def ReadFile(file_name, size=-1):
1743
  """Reads a file.
1744

1745
  @type size: int
1746
  @param size: Read at most size bytes (if negative, entire file)
1747
  @rtype: str
1748
  @return: the (possibly partial) content of the file
1749

1750
  """
1751
  f = open(file_name, "r")
1752
  try:
1753
    return f.read(size)
1754
  finally:
1755
    f.close()
1756

    
1757

    
1758
def WriteFile(file_name, fn=None, data=None,
1759
              mode=None, uid=-1, gid=-1,
1760
              atime=None, mtime=None, close=True,
1761
              dry_run=False, backup=False,
1762
              prewrite=None, postwrite=None):
1763
  """(Over)write a file atomically.
1764

1765
  The file_name and either fn (a function taking one argument, the
1766
  file descriptor, and which should write the data to it) or data (the
1767
  contents of the file) must be passed. The other arguments are
1768
  optional and allow setting the file mode, owner and group, and the
1769
  mtime/atime of the file.
1770

1771
  If the function doesn't raise an exception, it has succeeded and the
1772
  target file has the new contents. If the function has raised an
1773
  exception, an existing target file should be unmodified and the
1774
  temporary file should be removed.
1775

1776
  @type file_name: str
1777
  @param file_name: the target filename
1778
  @type fn: callable
1779
  @param fn: content writing function, called with
1780
      file descriptor as parameter
1781
  @type data: str
1782
  @param data: contents of the file
1783
  @type mode: int
1784
  @param mode: file mode
1785
  @type uid: int
1786
  @param uid: the owner of the file
1787
  @type gid: int
1788
  @param gid: the group of the file
1789
  @type atime: int
1790
  @param atime: a custom access time to be set on the file
1791
  @type mtime: int
1792
  @param mtime: a custom modification time to be set on the file
1793
  @type close: boolean
1794
  @param close: whether to close file after writing it
1795
  @type prewrite: callable
1796
  @param prewrite: function to be called before writing content
1797
  @type postwrite: callable
1798
  @param postwrite: function to be called after writing content
1799

1800
  @rtype: None or int
1801
  @return: None if the 'close' parameter evaluates to True,
1802
      otherwise the file descriptor
1803

1804
  @raise errors.ProgrammerError: if any of the arguments are not valid
1805

1806
  """
1807
  if not os.path.isabs(file_name):
1808
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1809
                                 " absolute: '%s'" % file_name)
1810

    
1811
  if [fn, data].count(None) != 1:
1812
    raise errors.ProgrammerError("fn or data required")
1813

    
1814
  if [atime, mtime].count(None) == 1:
1815
    raise errors.ProgrammerError("Both atime and mtime must be either"
1816
                                 " set or None")
1817

    
1818
  if backup and not dry_run and os.path.isfile(file_name):
1819
    CreateBackup(file_name)
1820

    
1821
  dir_name, base_name = os.path.split(file_name)
1822
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1823
  do_remove = True
1824
  # here we need to make sure we remove the temp file, if any error
1825
  # leaves it in place
1826
  try:
1827
    if uid != -1 or gid != -1:
1828
      os.chown(new_name, uid, gid)
1829
    if mode:
1830
      os.chmod(new_name, mode)
1831
    if callable(prewrite):
1832
      prewrite(fd)
1833
    if data is not None:
1834
      os.write(fd, data)
1835
    else:
1836
      fn(fd)
1837
    if callable(postwrite):
1838
      postwrite(fd)
1839
    os.fsync(fd)
1840
    if atime is not None and mtime is not None:
1841
      os.utime(new_name, (atime, mtime))
1842
    if not dry_run:
1843
      os.rename(new_name, file_name)
1844
      do_remove = False
1845
  finally:
1846
    if close:
1847
      os.close(fd)
1848
      result = None
1849
    else:
1850
      result = fd
1851
    if do_remove:
1852
      RemoveFile(new_name)
1853

    
1854
  return result
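
# Example for WriteFile above (illustrative sketch only; the path and contents
# are made up). Either a data string or a writer function can be passed:
#
#   WriteFile("/var/lib/example/flag", data="enabled\n", mode=0644)
#   WriteFile("/var/lib/example/flag", fn=lambda fd: os.write(fd, "enabled\n"))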
1855

    
1856

    
1857
def ReadOneLineFile(file_name, strict=False):
1858
  """Return the first non-empty line from a file.
1859

1860
  @type strict: boolean
1861
  @param strict: if True, abort if the file has more than one
1862
      non-empty line
1863

1864
  """
1865
  file_lines = ReadFile(file_name).splitlines()
1866
  full_lines = filter(bool, file_lines)
1867
  if not file_lines or not full_lines:
1868
    raise errors.GenericError("No data in one-liner file %s" % file_name)
1869
  elif strict and len(full_lines) > 1:
1870
    raise errors.GenericError("Too many lines in one-liner file %s" %
1871
                              file_name)
1872
  return full_lines[0]
1873

    
1874

    
1875
def FirstFree(seq, base=0):
1876
  """Returns the first non-existing integer from seq.
1877

1878
  The seq argument should be a sorted list of positive integers. The
1879
  first time the index of an element is smaller than the element
1880
  value, the index will be returned.
1881

1882
  The base argument is used to start at a different offset,
1883
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1884

1885
  Example: C{[0, 1, 3]} will return I{2}.
1886

1887
  @type seq: sequence
1888
  @param seq: the sequence to be analyzed.
1889
  @type base: int
1890
  @param base: use this value as the base index of the sequence
1891
  @rtype: int
1892
  @return: the first non-used index in the sequence
1893

1894
  """
1895
  for idx, elem in enumerate(seq):
1896
    assert elem >= base, "Passed element is higher than base offset"
1897
    if elem > idx + base:
1898
      # idx is not used
1899
      return idx + base
1900
  return None
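
# Examples for FirstFree above (illustrative only):
#
#   FirstFree([0, 1, 3])          # -> 2, index 2 is unused
#   FirstFree([3, 4, 6], base=3)  # -> 5
#   FirstFree([0, 1, 2])          # -> None, no gap in the sequence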
1901

    
1902

    
1903
def SingleWaitForFdCondition(fdobj, event, timeout):
1904
  """Waits for a condition to occur on the socket.
1905

1906
  Immediately returns at the first interruption.
1907

1908
  @type fdobj: integer or object supporting a fileno() method
1909
  @param fdobj: entity to wait for events on
1910
  @type event: integer
1911
  @param event: ORed condition (see select module)
1912
  @type timeout: float or None
1913
  @param timeout: Timeout in seconds
1914
  @rtype: int or None
  @return: None for timeout, otherwise the conditions that occurred
1916

1917
  """
1918
  check = (event | select.POLLPRI |
1919
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
1920

    
1921
  if timeout is not None:
1922
    # Poller object expects milliseconds
1923
    timeout *= 1000
1924

    
1925
  poller = select.poll()
1926
  poller.register(fdobj, event)
1927
  try:
1928
    # TODO: If the main thread receives a signal and we have no timeout, we
1929
    # could wait forever. This should check a global "quit" flag or something
1930
    # every so often.
1931
    io_events = poller.poll(timeout)
1932
  except select.error, err:
1933
    if err[0] != errno.EINTR:
1934
      raise
1935
    io_events = []
1936
  if io_events and io_events[0][1] & check:
1937
    return io_events[0][1]
1938
  else:
1939
    return None
1940

    
1941

    
1942
class FdConditionWaiterHelper(object):
1943
  """Retry helper for WaitForFdCondition.
1944

1945
  This class contains the retried and wait functions that make sure
1946
  WaitForFdCondition can continue waiting until the timeout is actually
1947
  expired.
1948

1949
  """
1950

    
1951
  def __init__(self, timeout):
1952
    self.timeout = timeout
1953

    
1954
  def Poll(self, fdobj, event):
1955
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1956
    if result is None:
1957
      raise RetryAgain()
1958
    else:
1959
      return result
1960

    
1961
  def UpdateTimeout(self, timeout):
1962
    self.timeout = timeout
1963

    
1964

    
1965
def WaitForFdCondition(fdobj, event, timeout):
1966
  """Waits for a condition to occur on the socket.
1967

1968
  Retries until the timeout is expired, even if interrupted.
1969

1970
  @type fdobj: integer or object supporting a fileno() method
1971
  @param fdobj: entity to wait for events on
1972
  @type event: integer
1973
  @param event: ORed condition (see select module)
1974
  @type timeout: float or None
1975
  @param timeout: Timeout in seconds
1976
  @rtype: int or None
  @return: None for timeout, otherwise the conditions that occurred
1978

1979
  """
1980
  if timeout is not None:
1981
    retrywaiter = FdConditionWaiterHelper(timeout)
1982
    try:
1983
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1984
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1985
    except RetryTimeout:
1986
      result = None
1987
  else:
1988
    result = None
1989
    while result is None:
1990
      result = SingleWaitForFdCondition(fdobj, event, timeout)
1991
  return result
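
# Example for WaitForFdCondition above (illustrative sketch only): wait up to
# five seconds for one end of a pipe to become readable.
#
#   (read_fd, write_fd) = os.pipe()
#   if WaitForFdCondition(read_fd, select.POLLIN, 5.0) is None:
#     logging.warning("Timed out waiting for data on the pipe")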
1992

    
1993

    
1994
def UniqueSequence(seq):
1995
  """Returns a list with unique elements.
1996

1997
  Element order is preserved.
1998

1999
  @type seq: sequence
2000
  @param seq: the sequence with the source elements
2001
  @rtype: list
2002
  @return: list of unique elements from seq
2003

2004
  """
2005
  seen = set()
2006
  return [i for i in seq if i not in seen and not seen.add(i)]
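
# Example for UniqueSequence above (illustrative only); order is preserved
# while duplicates are dropped:
#
#   UniqueSequence([1, 2, 2, 3, 1])  # -> [1, 2, 3]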
2007

    
2008

    
2009
def NormalizeAndValidateMac(mac):
2010
  """Normalizes and check if a MAC address is valid.
2011

2012
  Checks whether the supplied MAC address is formally correct, only
2013
  accepts colon separated format. Normalize it to all lower.
2014

2015
  @type mac: str
2016
  @param mac: the MAC to be validated
2017
  @rtype: str
2018
  @return: returns the normalized and validated MAC.
2019

2020
  @raise errors.OpPrereqError: If the MAC isn't valid
2021

2022
  """
2023
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
2024
  if not mac_check.match(mac):
2025
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
2026
                               mac, errors.ECODE_INVAL)
2027

    
2028
  return mac.lower()
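
# Examples for NormalizeAndValidateMac above (illustrative only):
#
#   NormalizeAndValidateMac("AA:00:99:11:EE:FF")  # -> "aa:00:99:11:ee:ff"
#   NormalizeAndValidateMac("aa-00-99-11-ee-ff")  # raises errors.OpPrereqError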
2029

    
2030

    
2031
def TestDelay(duration):
2032
  """Sleep for a fixed amount of time.
2033

2034
  @type duration: float
2035
  @param duration: the sleep duration
2036
  @rtype: tuple
  @return: (False, error message) for negative values, (True, None) otherwise
2038

2039
  """
2040
  if duration < 0:
2041
    return False, "Invalid sleep duration"
2042
  time.sleep(duration)
2043
  return True, None
2044

    
2045

    
2046
def _CloseFDNoErr(fd, retries=5):
2047
  """Close a file descriptor ignoring errors.
2048

2049
  @type fd: int
2050
  @param fd: the file descriptor
2051
  @type retries: int
2052
  @param retries: how many retries to make, in case we get any
2053
      other error than EBADF
2054

2055
  """
2056
  try:
2057
    os.close(fd)
2058
  except OSError, err:
2059
    if err.errno != errno.EBADF:
2060
      if retries > 0:
2061
        _CloseFDNoErr(fd, retries - 1)
2062
    # else either it's closed already or we're out of retries, so we
2063
    # ignore this and go on
2064

    
2065

    
2066
def CloseFDs(noclose_fds=None):
2067
  """Close file descriptors.
2068

2069
  This closes all file descriptors above 2 (i.e. except
2070
  stdin/out/err).
2071

2072
  @type noclose_fds: list or None
2073
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed
2075

2076
  """
2077
  # Default maximum for the number of available file descriptors.
2078
  if 'SC_OPEN_MAX' in os.sysconf_names:
2079
    try:
2080
      MAXFD = os.sysconf('SC_OPEN_MAX')
2081
      if MAXFD < 0:
2082
        MAXFD = 1024
2083
    except OSError:
2084
      MAXFD = 1024
2085
  else:
2086
    MAXFD = 1024
2087
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2088
  if (maxfd == resource.RLIM_INFINITY):
2089
    maxfd = MAXFD
2090

    
2091
  # Iterate through and close all file descriptors (except the standard ones)
2092
  for fd in range(3, maxfd):
2093
    if noclose_fds and fd in noclose_fds:
2094
      continue
2095
    _CloseFDNoErr(fd)
2096

    
2097

    
2098
def Mlockall(_ctypes=ctypes):
2099
  """Lock current process' virtual address space into RAM.
2100

2101
  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2102
  see mlock(2) for more details. This function requires the ctypes module.
2103

2104
  @raises errors.NoCtypesError: if ctypes module is not found
2105

2106
  """
2107
  if _ctypes is None:
2108
    raise errors.NoCtypesError()
2109

    
2110
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
2111
  if libc is None:
2112
    logging.error("Cannot set memory lock, ctypes cannot load libc")
2113
    return
2114

    
2115
  # Some older versions of the ctypes module don't have built-in functionality
2116
  # to access the errno global variable, where function error codes are stored.
2117
  # By declaring this variable as a pointer to an integer we can then access
2118
  # its value correctly, should the mlockall call fail, in order to see what
2119
  # the actual error code was.
2120
  # pylint: disable-msg=W0212
2121
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)
2122

    
2123
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2124
    # pylint: disable-msg=W0212
2125
    logging.error("Cannot set memory lock: %s",
2126
                  os.strerror(libc.__errno_location().contents.value))
2127
    return
2128

    
2129
  logging.debug("Memory lock set")
2130

    
2131

    
2132
def Daemonize(logfile, run_uid, run_gid):
2133
  """Daemonize the current process.
2134

2135
  This detaches the current process from the controlling terminal and
2136
  runs it in the background as a daemon.
2137

2138
  @type logfile: str
2139
  @param logfile: the logfile to which we should redirect stdout/stderr
2140
  @type run_uid: int
2141
  @param run_uid: Run the child under this uid
2142
  @type run_gid: int
2143
  @param run_gid: Run the child under this gid
2144
  @rtype: int
2145
  @return: the value zero
2146

2147
  """
2148
  # pylint: disable-msg=W0212
2149
  # yes, we really want os._exit
2150
  UMASK = 077
2151
  WORKDIR = "/"
2152

    
2153
  # this might fail
2154
  pid = os.fork()
2155
  if (pid == 0):  # The first child.
2156
    os.setsid()
2157
    # FIXME: When removing again and moving to start-stop-daemon privilege drop
2158
    #        make sure to check for config permission and bail out when invoked
2159
    #        with wrong user.
2160
    os.setgid(run_gid)
2161
    os.setuid(run_uid)
2162
    # this might fail
2163
    pid = os.fork() # Fork a second child.
2164
    if (pid == 0):  # The second child.
2165
      os.chdir(WORKDIR)
2166
      os.umask(UMASK)
2167
    else:
2168
      # exit() or _exit()?  See below.
2169
      os._exit(0) # Exit parent (the first child) of the second child.
2170
  else:
2171
    os._exit(0) # Exit parent of the first child.
2172

    
2173
  for fd in range(3):
2174
    _CloseFDNoErr(fd)
2175
  i = os.open("/dev/null", os.O_RDONLY) # stdin
2176
  assert i == 0, "Can't close/reopen stdin"
2177
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2178
  assert i == 1, "Can't close/reopen stdout"
2179
  # Duplicate standard output to standard error.
2180
  os.dup2(1, 2)
2181
  return 0
2182

    
2183

    
2184
def DaemonPidFileName(name):
2185
  """Compute a ganeti pid file absolute path
2186

2187
  @type name: str
2188
  @param name: the daemon name
2189
  @rtype: str
2190
  @return: the full path to the pidfile corresponding to the given
2191
      daemon name
2192

2193
  """
2194
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2195

    
2196

    
2197
def EnsureDaemon(name):
2198
  """Check for and start daemon if not alive.
2199

2200
  """
2201
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2202
  if result.failed:
2203
    logging.error("Can't start daemon '%s', failure %s, output: %s",
2204
                  name, result.fail_reason, result.output)
2205
    return False
2206

    
2207
  return True
2208

    
2209

    
2210
def StopDaemon(name):
2211
  """Stop daemon
2212

2213
  """
2214
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2215
  if result.failed:
2216
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
2217
                  name, result.fail_reason, result.output)
2218
    return False
2219

    
2220
  return True
2221

    
2222

    
2223
def WritePidFile(name):
2224
  """Write the current process pidfile.
2225

2226
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2227

2228
  @type name: str
2229
  @param name: the daemon name to use
2230
  @raise errors.GenericError: if the pid file already exists and
2231
      points to a live process
2232

2233
  """
2234
  pid = os.getpid()
2235
  pidfilename = DaemonPidFileName(name)
2236
  if IsProcessAlive(ReadPidFile(pidfilename)):
2237
    raise errors.GenericError("%s contains a live process" % pidfilename)
2238

    
2239
  WriteFile(pidfilename, data="%d\n" % pid)
2240

    
2241

    
2242
def RemovePidFile(name):
2243
  """Remove the current process pidfile.
2244

2245
  Any errors are ignored.
2246

2247
  @type name: str
2248
  @param name: the daemon name used to derive the pidfile name
2249

2250
  """
2251
  pidfilename = DaemonPidFileName(name)
2252
  # TODO: we could check here that the file contains our pid
2253
  try:
2254
    RemoveFile(pidfilename)
2255
  except: # pylint: disable-msg=W0702
2256
    pass
2257

    
2258

    
2259
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2260
                waitpid=False):
2261
  """Kill a process given by its pid.
2262

2263
  @type pid: int
2264
  @param pid: The PID to terminate.
2265
  @type signal_: int
2266
  @param signal_: The signal to send, by default SIGTERM
2267
  @type timeout: int
2268
  @param timeout: The timeout after which, if the process is still alive,
2269
                  a SIGKILL will be sent. If not positive, no such checking
2270
                  will be done
2271
  @type waitpid: boolean
2272
  @param waitpid: If true, we should waitpid on this process after
2273
      sending signals, since it's our own child and otherwise it
2274
      would remain as zombie
2275

2276
  """
2277
  def _helper(pid, signal_, wait):
2278
    """Simple helper to encapsulate the kill/waitpid sequence"""
2279
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2280
      try:
2281
        os.waitpid(pid, os.WNOHANG)
2282
      except OSError:
2283
        pass
2284

    
2285
  if pid <= 0:
2286
    # kill with pid=0 == suicide
2287
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2288

    
2289
  if not IsProcessAlive(pid):
2290
    return
2291

    
2292
  _helper(pid, signal_, waitpid)
2293

    
2294
  if timeout <= 0:
2295
    return
2296

    
2297
  def _CheckProcess():
2298
    if not IsProcessAlive(pid):
2299
      return
2300

    
2301
    try:
2302
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2303
    except OSError:
2304
      raise RetryAgain()
2305

    
2306
    if result_pid > 0:
2307
      return
2308

    
2309
    raise RetryAgain()
2310

    
2311
  try:
2312
    # Wait up to $timeout seconds
2313
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2314
  except RetryTimeout:
2315
    pass
2316

    
2317
  if IsProcessAlive(pid):
2318
    # Kill process if it's still alive
2319
    _helper(pid, signal.SIGKILL, waitpid)
2320

    
2321

    
2322
def FindFile(name, search_path, test=os.path.exists):
2323
  """Look for a filesystem object in a given path.
2324

2325
  This is an abstract method to search for filesystem object (files,
2326
  dirs) under a given search path.
2327

2328
  @type name: str
2329
  @param name: the name to look for
2330
  @type search_path: str
2331
  @param search_path: location to start at
2332
  @type test: callable
2333
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
2336
  @rtype: str or None
2337
  @return: full path to the object if found, None otherwise
2338

2339
  """
2340
  # validate the filename mask
2341
  if constants.EXT_PLUGIN_MASK.match(name) is None:
2342
    logging.critical("Invalid value passed for external script name: '%s'",
2343
                     name)
2344
    return None
2345

    
2346
  for dir_name in search_path:
2347
    # FIXME: investigate switch to PathJoin
2348
    item_name = os.path.sep.join([dir_name, name])
2349
    # check the user test and that we're indeed resolving to the given
2350
    # basename
2351
    if test(item_name) and os.path.basename(item_name) == name:
2352
      return item_name
2353
  return None
2354

    
2355

    
2356
def CheckVolumeGroupSize(vglist, vgname, minsize):
2357
  """Checks if the volume group list is valid.
2358

2359
  The function will check if a given volume group is in the list of
2360
  volume groups and has a minimum size.
2361

2362
  @type vglist: dict
2363
  @param vglist: dictionary of volume group names and their size
2364
  @type vgname: str
2365
  @param vgname: the volume group we should check
2366
  @type minsize: int
2367
  @param minsize: the minimum size we accept
2368
  @rtype: None or str
2369
  @return: None for success, otherwise the error message
2370

2371
  """
2372
  vgsize = vglist.get(vgname, None)
2373
  if vgsize is None:
2374
    return "volume group '%s' missing" % vgname
2375
  elif vgsize < minsize:
2376
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2377
            (vgname, minsize, vgsize))
2378
  return None
2379

    
2380

    
2381
def SplitTime(value):
2382
  """Splits time as floating point number into a tuple.
2383

2384
  @param value: Time in seconds
2385
  @type value: int or float
2386
  @return: Tuple containing (seconds, microseconds)
2387

2388
  """
2389
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2390

    
2391
  assert 0 <= seconds, \
2392
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2393
  assert 0 <= microseconds <= 999999, \
2394
    "Microseconds must be 0-999999, but are %s" % microseconds
2395

    
2396
  return (int(seconds), int(microseconds))
2397

    
2398

    
2399
def MergeTime(timetuple):
2400
  """Merges a tuple into time as a floating point number.
2401

2402
  @param timetuple: Time as tuple, (seconds, microseconds)
2403
  @type timetuple: tuple
2404
  @return: Time as a floating point number expressed in seconds
2405

2406
  """
2407
  (seconds, microseconds) = timetuple
2408

    
2409
  assert 0 <= seconds, \
2410
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2411
  assert 0 <= microseconds <= 999999, \
2412
    "Microseconds must be 0-999999, but are %s" % microseconds
2413

    
2414
  return float(seconds) + (float(microseconds) * 0.000001)
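
# Examples for SplitTime/MergeTime above (illustrative only); the two
# functions are inverses of each other:
#
#   SplitTime(1234.5)          # -> (1234, 500000)
#   MergeTime((1234, 500000))  # -> 1234.5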
2415

    
2416

    
2417
class LogFileHandler(logging.FileHandler):
2418
  """Log handler that doesn't fallback to stderr.
2419

2420
  When an error occurs while writing to the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures by reporting errors to /dev/console.
2423

2424
  """
2425
  def __init__(self, filename, mode="a", encoding=None):
2426
    """Open the specified file and use it as the stream for logging.
2427

2428
    Also open /dev/console to report errors while logging.
2429

2430
    """
2431
    logging.FileHandler.__init__(self, filename, mode, encoding)
2432
    self.console = open(constants.DEV_CONSOLE, "a")
2433

    
2434
  def handleError(self, record): # pylint: disable-msg=C0103
2435
    """Handle errors which occur during an emit() call.
2436

2437
    Try to handle errors with FileHandler method, if it fails write to
2438
    /dev/console.
2439

2440
    """
2441
    try:
2442
      logging.FileHandler.handleError(self, record)
2443
    except Exception: # pylint: disable-msg=W0703
2444
      try:
2445
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
2446
      except Exception: # pylint: disable-msg=W0703
2447
        # Log handler tried everything it could, now just give up
2448
        pass
2449

    
2450

    
2451
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2452
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2453
                 console_logging=False):
2454
  """Configures the logging module.
2455

2456
  @type logfile: str
2457
  @param logfile: the filename to which we should log
2458
  @type debug: integer
2459
  @param debug: if greater than zero, enable debug messages, otherwise
2460
      only those at C{INFO} and above level
2461
  @type stderr_logging: boolean
2462
  @param stderr_logging: whether we should also log to the standard error
2463
  @type program: str
2464
  @param program: the name under which we should log messages
2465
  @type multithreaded: boolean
2466
  @param multithreaded: if True, will add the thread name to the log file
2467
  @type syslog: string
2468
  @param syslog: one of 'no', 'yes', 'only':
2469
      - if no, syslog is not used
2470
      - if yes, syslog is used (in addition to file-logging)
2471
      - if only, only syslog is used
2472
  @type console_logging: boolean
2473
  @param console_logging: if True, will use a FileHandler which falls back to
2474
      the system console if logging fails
2475
  @raise EnvironmentError: if we can't open the log file and
2476
      syslog/stderr logging is disabled
2477

2478
  """
2479
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
2480
  sft = program + "[%(process)d]:"
2481
  if multithreaded:
2482
    fmt += "/%(threadName)s"
2483
    sft += " (%(threadName)s)"
2484
  if debug:
2485
    fmt += " %(module)s:%(lineno)s"
2486
    # no debug info for syslog loggers
2487
  fmt += " %(levelname)s %(message)s"
2488
  # yes, we do want the textual level, as remote syslog will probably
2489
  # lose the error level, and it's easier to grep for it
2490
  sft += " %(levelname)s %(message)s"
2491
  formatter = logging.Formatter(fmt)
2492
  sys_fmt = logging.Formatter(sft)
2493

    
2494
  root_logger = logging.getLogger("")
2495
  root_logger.setLevel(logging.NOTSET)
2496

    
2497
  # Remove all previously setup handlers
2498
  for handler in root_logger.handlers:
2499
    handler.close()
2500
    root_logger.removeHandler(handler)
2501

    
2502
  if stderr_logging:
2503
    stderr_handler = logging.StreamHandler()
2504
    stderr_handler.setFormatter(formatter)
2505
    if debug:
2506
      stderr_handler.setLevel(logging.NOTSET)
2507
    else:
2508
      stderr_handler.setLevel(logging.CRITICAL)
2509
    root_logger.addHandler(stderr_handler)
2510

    
2511
  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2512
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
2513
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2514
                                                    facility)
2515
    syslog_handler.setFormatter(sys_fmt)
2516
    # Never enable debug over syslog
2517
    syslog_handler.setLevel(logging.INFO)
2518
    root_logger.addHandler(syslog_handler)
2519

    
2520
  if syslog != constants.SYSLOG_ONLY:
2521
    # this can fail, if the logging directories are not set up or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
2525
    try:
2526
      if console_logging:
2527
        logfile_handler = LogFileHandler(logfile)
2528
      else:
2529
        logfile_handler = logging.FileHandler(logfile)
2530
      logfile_handler.setFormatter(formatter)
2531
      if debug:
2532
        logfile_handler.setLevel(logging.DEBUG)
2533
      else:
2534
        logfile_handler.setLevel(logging.INFO)
2535
      root_logger.addHandler(logfile_handler)
2536
    except EnvironmentError:
2537
      if stderr_logging or syslog == constants.SYSLOG_YES:
2538
        logging.exception("Failed to enable logging to file '%s'", logfile)
2539
      else:
2540
        # we need to re-raise the exception
2541
        raise
2542

    
2543

    
2544
def IsNormAbsPath(path):
2545
  """Check whether a path is absolute and also normalized
2546

2547
  This avoids things like /dir/../../other/path to be valid.
2548

2549
  """
2550
  return os.path.normpath(path) == path and os.path.isabs(path)
2551

    
2552

    
2553
def PathJoin(*args):
2554
  """Safe-join a list of path components.
2555

2556
  Requirements:
2557
      - the first argument must be an absolute path
2558
      - no component in the path must have backtracking (e.g. /../),
2559
        since we check for normalization at the end
2560

2561
  @param args: the path components to be joined
2562
  @raise ValueError: for invalid paths
2563

2564
  """
2565
  # ensure we're having at least one path passed in
2566
  assert args
2567
  # ensure the first component is an absolute and normalized path name
2568
  root = args[0]
2569
  if not IsNormAbsPath(root):
2570
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2571
  result = os.path.join(*args)
2572
  # ensure that the whole path is normalized
2573
  if not IsNormAbsPath(result):
2574
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2575
  # check that we're still under the original prefix
2576
  prefix = os.path.commonprefix([root, result])
2577
  if prefix != root:
2578
    raise ValueError("Error: path joining resulted in different prefix"
2579
                     " (%s != %s)" % (prefix, root))
2580
  return result
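
# Examples for PathJoin above (illustrative only; the paths are made up):
#
#   PathJoin("/var/lib/example", "config", "data")  # -> "/var/lib/example/config/data"
#   PathJoin("/var/lib/example", "../other")        # raises ValueError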
2581

    
2582

    
2583
def TailFile(fname, lines=20):
2584
  """Return the last lines from a file.
2585

2586
  @note: this function will only read and parse the last 4KB of
2587
      the file; if the lines are very long, it could be that less
2588
      than the requested number of lines are returned
2589

2590
  @param fname: the file name
2591
  @type lines: int
2592
  @param lines: the (maximum) number of lines to return
2593

2594
  """
2595
  fd = open(fname, "r")
2596
  try:
2597
    fd.seek(0, 2)
2598
    pos = fd.tell()
2599
    pos = max(0, pos-4096)
2600
    fd.seek(pos, 0)
2601
    raw_data = fd.read()
2602
  finally:
2603
    fd.close()
2604

    
2605
  rows = raw_data.splitlines()
2606
  return rows[-lines:]
2607

    
2608

    
2609
def FormatTimestampWithTZ(secs):
2610
  """Formats a Unix timestamp with the local timezone.
2611

2612
  """
2613
  return time.strftime("%F %T %Z", time.gmtime(secs))
2614

    
2615

    
2616
def _ParseAsn1Generalizedtime(value):
2617
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2618

2619
  @type value: string
2620
  @param value: ASN1 GENERALIZEDTIME timestamp
2621

2622
  """
2623
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2624
  if m:
2625
    # We have an offset
2626
    asn1time = m.group(1)
2627
    hours = int(m.group(2))
2628
    minutes = int(m.group(3))
2629
    utcoffset = (60 * hours) + minutes
2630
  else:
2631
    if not value.endswith("Z"):
2632
      raise ValueError("Missing timezone")
2633
    asn1time = value[:-1]
2634
    utcoffset = 0
2635

    
2636
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2637

    
2638
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2639

    
2640
  return calendar.timegm(tt.utctimetuple())
2641

    
2642

    
2643
def GetX509CertValidity(cert):
2644
  """Returns the validity period of the certificate.
2645

2646
  @type cert: OpenSSL.crypto.X509
2647
  @param cert: X509 certificate object
2648

2649
  """
2650
  # The get_notBefore and get_notAfter functions are only supported in
2651
  # pyOpenSSL 0.7 and above.
2652
  try:
2653
    get_notbefore_fn = cert.get_notBefore
2654
  except AttributeError:
2655
    not_before = None
2656
  else:
2657
    not_before_asn1 = get_notbefore_fn()
2658

    
2659
    if not_before_asn1 is None:
2660
      not_before = None
2661
    else:
2662
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2663

    
2664
  try:
2665
    get_notafter_fn = cert.get_notAfter
2666
  except AttributeError:
2667
    not_after = None
2668
  else:
2669
    not_after_asn1 = get_notafter_fn()
2670

    
2671
    if not_after_asn1 is None:
2672
      not_after = None
2673
    else:
2674
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2675

    
2676
  return (not_before, not_after)
2677

    
2678

    
2679
def _VerifyCertificateInner(expired, not_before, not_after, now,
2680
                            warn_days, error_days):
2681
  """Verifies certificate validity.
2682

2683
  @type expired: bool
2684
  @param expired: Whether pyOpenSSL considers the certificate as expired
2685
  @type not_before: number or None
2686
  @param not_before: Unix timestamp before which certificate is not valid
2687
  @type not_after: number or None
2688
  @param not_after: Unix timestamp after which certificate is invalid
2689
  @type now: number
2690
  @param now: Current time as Unix timestamp
2691
  @type warn_days: number or None
2692
  @param warn_days: How many days before expiration a warning should be reported
2693
  @type error_days: number or None
2694
  @param error_days: How many days before expiration an error should be reported
2695

2696
  """
2697
  if expired:
2698
    msg = "Certificate is expired"
2699

    
2700
    if not_before is not None and not_after is not None:
2701
      msg += (" (valid from %s to %s)" %
2702
              (FormatTimestampWithTZ(not_before),
2703
               FormatTimestampWithTZ(not_after)))
2704
    elif not_before is not None:
2705
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2706
    elif not_after is not None:
2707
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2708

    
2709
    return (CERT_ERROR, msg)
2710

    
2711
  elif not_before is not None and not_before > now:
2712
    return (CERT_WARNING,
2713
            "Certificate not yet valid (valid from %s)" %
2714
            FormatTimestampWithTZ(not_before))
2715

    
2716
  elif not_after is not None:
2717
    remaining_days = int((not_after - now) / (24 * 3600))
2718

    
2719
    msg = "Certificate expires in about %d days" % remaining_days
2720

    
2721
    if error_days is not None and remaining_days <= error_days:
2722
      return (CERT_ERROR, msg)
2723

    
2724
    if warn_days is not None and remaining_days <= warn_days:
2725
      return (CERT_WARNING, msg)
2726

    
2727
  return (None, None)
2728

    
2729

    
2730
def VerifyX509Certificate(cert, warn_days, error_days):
2731
  """Verifies a certificate for LUVerifyCluster.
2732

2733
  @type cert: OpenSSL.crypto.X509
2734
  @param cert: X509 certificate object
2735
  @type warn_days: number or None
2736
  @param warn_days: How many days before expiration a warning should be reported
2737
  @type error_days: number or None
2738
  @param error_days: How many days before expiration an error should be reported
2739

2740
  """
2741
  # Depending on the pyOpenSSL version, this can just return (None, None)
2742
  (not_before, not_after) = GetX509CertValidity(cert)
2743

    
2744
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2745
                                 time.time(), warn_days, error_days)
2746

    
2747

    
2748
def SignX509Certificate(cert, key, salt):
2749
  """Sign a X509 certificate.
2750

2751
  An RFC822-like signature header is added in front of the certificate.
2752

2753
  @type cert: OpenSSL.crypto.X509
2754
  @param cert: X509 certificate object
2755
  @type key: string
2756
  @param key: Key for HMAC
2757
  @type salt: string
2758
  @param salt: Salt for HMAC
2759
  @rtype: string
2760
  @return: Serialized and signed certificate in PEM format
2761

2762
  """
2763
  if not VALID_X509_SIGNATURE_SALT.match(salt):
2764
    raise errors.GenericError("Invalid salt: %r" % salt)
2765

    
2766
  # Dumping as PEM here ensures the certificate is in a sane format
2767
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2768

    
2769
  return ("%s: %s/%s\n\n%s" %
2770
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
2771
           Sha1Hmac(key, cert_pem, salt=salt),
2772
           cert_pem))
2773

    
2774

    
2775
def _ExtractX509CertificateSignature(cert_pem):
2776
  """Helper function to extract signature from X509 certificate.
2777

2778
  """
2779
  # Extract signature from original PEM data
2780
  for line in cert_pem.splitlines():
2781
    if line.startswith("---"):
2782
      break
2783

    
2784
    m = X509_SIGNATURE.match(line.strip())
2785
    if m:
2786
      return (m.group("salt"), m.group("sign"))
2787

    
2788
  raise errors.GenericError("X509 certificate signature is missing")
2789

    
2790

    
2791
def LoadSignedX509Certificate(cert_pem, key):
2792
  """Verifies a signed X509 certificate.
2793

2794
  @type cert_pem: string
2795
  @param cert_pem: Certificate in PEM format and with signature header
2796
  @type key: string
2797
  @param key: Key for HMAC
2798
  @rtype: tuple; (OpenSSL.crypto.X509, string)
2799
  @return: X509 certificate object and salt
2800

2801
  """
2802
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2803

    
2804
  # Load certificate
2805
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2806

    
2807
  # Dump again to ensure it's in a sane format
2808
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2809

    
2810
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2811
    raise errors.GenericError("X509 certificate signature is invalid")
2812

    
2813
  return (cert, salt)
2814

    
2815

    
2816
def Sha1Hmac(key, text, salt=None):
2817
  """Calculates the HMAC-SHA1 digest of a text.
2818

2819
  HMAC is defined in RFC2104.
2820

2821
  @type key: string
2822
  @param key: Secret key
2823
  @type text: string
  @param text: Text on which the digest is computed

2825
  """
2826
  if salt:
2827
    salted_text = salt + text
2828
  else:
2829
    salted_text = text
2830

    
2831
  return hmac.new(key, salted_text, compat.sha1).hexdigest()
2832

    
2833

    
2834
def VerifySha1Hmac(key, text, digest, salt=None):
2835
  """Verifies the HMAC-SHA1 digest of a text.
2836

2837
  HMAC is defined in RFC2104.
2838

2839
  @type key: string
2840
  @param key: Secret key
2841
  @type text: string
2842
  @type digest: string
2843
  @param digest: Expected digest
2844
  @rtype: bool
2845
  @return: Whether HMAC-SHA1 digest matches
2846

2847
  """
2848
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
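
# Example for Sha1Hmac/VerifySha1Hmac above (illustrative sketch only; the key
# and salt values are made up):
#
#   digest = Sha1Hmac("secret-key", "some text", salt="abcdef")
#   assert VerifySha1Hmac("secret-key", "some text", digest, salt="abcdef")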
2849

    
2850

    
2851
def SafeEncode(text):
2852
  """Return a 'safe' version of a source string.
2853

2854
  This function mangles the input string and returns a version that
2855
  should be safe to display/encode as ASCII. To this end, we first
2856
  convert it to ASCII using the 'backslashreplace' encoding which
2857
  should get rid of any non-ASCII chars, and then we process it
2858
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, and that is too much; and that escaping is not
2861
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2862

2863
  @type text: str or unicode
2864
  @param text: input data
2865
  @rtype: str
2866
  @return: a safe version of text
2867

2868
  """
2869
  if isinstance(text, unicode):
2870
    # only if unicode; if str already, we handle it below
2871
    text = text.encode('ascii', 'backslashreplace')
2872
  resu = ""
2873
  for char in text:
2874
    c = ord(char)
2875
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
2881
    elif c < 32 or c >= 127: # non-printable
2882
      resu += "\\x%02x" % (c & 0xff)
2883
    else:
2884
      resu += char
2885
  return resu
2886

    
2887

    
2888
def UnescapeAndSplit(text, sep=","):
2889
  """Split and unescape a string based on a given separator.
2890

2891
  This function splits a string based on a separator where the
  separator itself can be escaped in order to be part of an
  element. The escaping rules are (assuming comma is the
  separator):
2895
    - a plain , separates the elements
2896
    - a sequence \\\\, (double backslash plus comma) is handled as a
2897
      backslash plus a separator comma
2898
    - a sequence \, (backslash plus comma) is handled as a
2899
      non-separator comma
2900

2901
  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings
2907

2908
  """
2909
  # we split the list by sep (with no escaping at this stage)
2910
  slist = text.split(sep)
2911
  # next, we revisit the elements and if any of them ended with an odd
2912
  # number of backslashes, then we join it with the next
2913
  rlist = []
2914
  while slist:
2915
    e1 = slist.pop(0)
2916
    if e1.endswith("\\"):
2917
      num_b = len(e1) - len(e1.rstrip("\\"))
2918
      if num_b % 2 == 1:
2919
        e2 = slist.pop(0)
2920
        # here the backslashes remain (all), and will be reduced in
2921
        # the next step
2922
        rlist.append(e1 + sep + e2)
2923
        continue
2924
    rlist.append(e1)
2925
  # finally, replace backslash-something with something
2926
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
2927
  return rlist
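
# Examples for UnescapeAndSplit above (illustrative only, using Python string
# literals):
#
#   UnescapeAndSplit("a,b,c")      # -> ["a", "b", "c"]
#   UnescapeAndSplit("a\\,b,c")    # -> ["a,b", "c"], escaped separator
#   UnescapeAndSplit("a\\\\,b,c")  # -> ["a\\", "b", "c"], escaped backslash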
2928

    
2929

    
2930
def CommaJoin(names):
2931
  """Nicely join a set of identifiers.
2932

2933
  @param names: set, list or tuple
2934
  @return: a string with the formatted results
2935

2936
  """
2937
  return ", ".join([str(val) for val in names])
2938

    
2939

    
2940
def BytesToMebibyte(value):
2941
  """Converts bytes to mebibytes.
2942

2943
  @type value: int
2944
  @param value: Value in bytes
2945
  @rtype: int
2946
  @return: Value in mebibytes
2947

2948
  """
2949
  return int(round(value / (1024.0 * 1024.0), 0))
2950

    
2951

    
2952
def CalculateDirectorySize(path):
2953
  """Calculates the size of a directory recursively.
2954

2955
  @type path: string
2956
  @param path: Path to directory
2957
  @rtype: int
2958
  @return: Size in mebibytes
2959

2960
  """
2961
  size = 0
2962

    
2963
  for (curpath, _, files) in os.walk(path):
2964
    for filename in files:
2965
      st = os.lstat(PathJoin(curpath, filename))
2966
      size += st.st_size
2967

    
2968
  return BytesToMebibyte(size)
2969

    
2970

    
2971
def GetMounts(filename=constants.PROC_MOUNTS):
2972
  """Returns the list of mounted filesystems.
2973

2974
  This function is Linux-specific.
2975

2976
  @param filename: path of mounts file (/proc/mounts by default)
2977
  @rtype: list of tuples
2978
  @return: list of mount entries (device, mountpoint, fstype, options)
2979

2980
  """
2981
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
2982
  data = []
2983
  mountlines = ReadFile(filename).splitlines()
2984
  for line in mountlines:
2985
    device, mountpoint, fstype, options, _ = line.split(None, 4)
2986
    data.append((device, mountpoint, fstype, options))
2987

    
2988
  return data
2989

    
2990

    
2991
def GetFilesystemStats(path):
2992
  """Returns the total and free space on a filesystem.
2993

2994
  @type path: string
2995
  @param path: Path on filesystem to be examined
2996
  @rtype: tuple
  @return: tuple of (total space, free space) in mebibytes
2998

2999
  """
3000
  st = os.statvfs(path)
3001

    
3002
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
3003
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
3004
  return (tsize, fsize)
3005

    
3006

    
3007
def RunInSeparateProcess(fn, *args):
3008
  """Runs a function in a separate process.
3009

3010
  Note: Only boolean return values are supported.
3011

3012
  @type fn: callable
3013
  @param fn: Function to be called
3014
  @rtype: bool
3015
  @return: Function's result
3016

3017
  """
3018
  pid = os.fork()
3019
  if pid == 0:
3020
    # Child process
3021
    try:
3022
      # In case the function uses temporary files
3023
      ResetTempfileModule()
3024

    
3025
      # Call function
3026
      result = int(bool(fn(*args)))
3027
      assert result in (0, 1)
3028
    except: # pylint: disable-msg=W0702
3029
      logging.exception("Error while calling function in separate process")
3030
      # 0 and 1 are reserved for the return value
3031
      result = 33
3032

    
3033
    os._exit(result) # pylint: disable-msg=W0212
3034

    
3035
  # Parent process
3036

    
3037
  # Avoid zombies and check exit code
3038
  (_, status) = os.waitpid(pid, 0)
3039

    
3040
  if os.WIFSIGNALED(status):
3041
    exitcode = None
3042
    signum = os.WTERMSIG(status)
3043
  else:
3044
    exitcode = os.WEXITSTATUS(status)
3045
    signum = None
3046

    
3047
  if not (exitcode in (0, 1) and signum is None):
3048
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
3049
                              (exitcode, signum))
3050

    
3051
  return bool(exitcode)
3052

    
3053

    
3054
def IgnoreProcessNotFound(fn, *args, **kwargs):
3055
  """Ignores ESRCH when calling a process-related function.
3056

3057
  ESRCH is raised when a process is not found.
3058

3059
  @rtype: bool
3060
  @return: Whether process was found
3061

3062
  """
3063
  try:
3064
    fn(*args, **kwargs)
3065
  except EnvironmentError, err:
3066
    # Ignore ESRCH
3067
    if err.errno == errno.ESRCH:
3068
      return False
3069
    raise
3070

    
3071
  return True
3072

    
3073

    
3074
def IgnoreSignals(fn, *args, **kwargs):
3075
  """Tries to call a function ignoring failures due to EINTR.
3076

3077
  """
3078
  try:
3079
    return fn(*args, **kwargs)
3080
  except EnvironmentError, err:
3081
    if err.errno == errno.EINTR:
3082
      return None
3083
    else:
3084
      raise
3085
  except (select.error, socket.error), err:
3086
    # In python 2.6 and above select.error is an IOError, so it's handled
3087
    # above, in 2.5 and below it's not, and it's handled here.
3088
    if err.args and err.args[0] == errno.EINTR:
3089
      return None
3090
    else:
3091
      raise
3092

    
3093

    
3094
def LockFile(fd):
3095
  """Locks a file using POSIX locks.
3096

3097
  @type fd: int
3098
  @param fd: the file descriptor we need to lock
3099

3100
  """
3101
  try:
3102
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3103
  except IOError, err:
3104
    if err.errno == errno.EAGAIN:
3105
      raise errors.LockError("File already locked")
3106
    raise
3107

    
3108

    
3109
def FormatTime(val):
3110
  """Formats a time value.
3111

3112
  @type val: float or None
3113
  @param val: the timestamp as returned by time.time()
3114
  @return: a string value or N/A if we don't have a valid timestamp
3115

3116
  """
3117
  if val is None or not isinstance(val, (int, float)):
3118
    return "N/A"
3119
  # these two codes works on Linux, but they are not guaranteed on all
3120
  # platforms
3121
  return time.strftime("%F %T", time.localtime(val))
3122

    
3123

    
3124
def FormatSeconds(secs):
3125
  """Formats seconds for easier reading.
3126

3127
  @type secs: number
3128
  @param secs: Number of seconds
3129
  @rtype: string
3130
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")
3131

3132
  """
3133
  parts = []
3134

    
3135
  secs = round(secs, 0)
3136

    
3137
  if secs > 0:
3138
    # Negative values would be a bit tricky
3139
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
3140
      (complete, secs) = divmod(secs, one)
3141
      if complete or parts:
3142
        parts.append("%d%s" % (complete, unit))
3143

    
3144
  parts.append("%ds" % secs)
3145

    
3146
  return " ".join(parts)
3147

    
3148

    
3149
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3150
  """Reads the watcher pause file.
3151

3152
  @type filename: string
3153
  @param filename: Path to watcher pause file
3154
  @type now: None, float or int
3155
  @param now: Current time as Unix timestamp
3156
  @type remove_after: int
3157
  @param remove_after: Remove watcher pause file after specified amount of
3158
    seconds past the pause end time
3159

3160
  """
3161
  if now is None:
3162
    now = time.time()
3163

    
3164
  try:
3165
    value = ReadFile(filename)
3166
  except IOError, err:
3167
    if err.errno != errno.ENOENT:
3168
      raise
3169
    value = None
3170

    
3171
  if value is not None:
3172
    try:
3173
      value = int(value)
3174
    except ValueError:
3175
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3176
                       " removing it"), filename)
3177
      RemoveFile(filename)
3178
      value = None
3179

    
3180
    if value is not None:
3181
      # Remove file if it's outdated
3182
      if now > (value + remove_after):
3183
        RemoveFile(filename)
3184
        value = None
3185

    
3186
      elif now > value:
3187
        value = None
3188

    
3189
  return value
3190

    
3191

    
3192
class RetryTimeout(Exception):
3193
  """Retry loop timed out.
3194

3195
  Any arguments which were passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such an argument was an
  exception, the RaiseInner helper method will reraise it.
3198

3199
  """
3200
  def RaiseInner(self):
3201
    if self.args and isinstance(self.args[0], Exception):
3202
      raise self.args[0]
3203
    else:
3204
      raise RetryTimeout(*self.args)
3205

    
3206

    
3207
class RetryAgain(Exception):
3208
  """Retry again.
3209

3210
  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
3211
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the resulting RetryTimeout exception can be used to reraise it.
3213

3214
  """
3215

    
3216

    
3217
class _RetryDelayCalculator(object):
3218
  """Calculator for increasing delays.
3219

3220
  """
3221
  __slots__ = [
3222
    "_factor",
3223
    "_limit",
3224
    "_next",
3225
    "_start",
3226
    ]
3227

    
3228
  def __init__(self, start, factor, limit):
3229
    """Initializes this class.
3230

3231
    @type start: float
3232
    @param start: Initial delay
3233
    @type factor: float
3234
    @param factor: Factor for delay increase
3235
    @type limit: float or None
3236
    @param limit: Upper limit for delay or None for no limit
3237

3238
    """
3239
    assert start > 0.0
3240
    assert factor >= 1.0
3241
    assert limit is None or limit >= 0.0
3242

    
3243
    self._start = start
3244
    self._factor = factor
3245
    self._limit = limit
3246

    
3247
    self._next = start
3248

    
3249
  def __call__(self):
3250
    """Returns current delay and calculates the next one.
3251

3252
    """
3253
    current = self._next
3254

    
3255
    # Update for next run
3256
    if self._limit is None or self._next < self._limit:
3257
      self._next = min(self._limit, self._next * self._factor)
3258

    
3259
    return current
3260

    
3261

    
3262
#: Special delay to specify whole remaining timeout
3263
RETRY_REMAINING_TIME = object()
3264

    
3265

    
3266
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3267
          _time_fn=time.time):
3268
  """Call a function repeatedly until it succeeds.
3269

3270
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3271
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3272
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
3273

3274
  C{delay} can be one of the following:
3275
    - callable returning the delay length as a float
3276
    - Tuple of (start, factor, limit)
3277
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3278
      useful when overriding L{wait_fn} to wait for an external event)
3279
    - A static delay as a number (int or float)
3280

3281
  @type fn: callable
3282
  @param fn: Function to be called
3283
  @param delay: Either a callable (returning the delay), a tuple of (start,
3284
                factor, limit) (see L{_RetryDelayCalculator}),
3285
                L{RETRY_REMAINING_TIME} or a number (int or float)
3286
  @type timeout: float
3287
  @param timeout: Total timeout
3288
  @type wait_fn: callable
3289
  @param wait_fn: Waiting function
3290
  @return: Return value of function
3291

3292
  """
3293
  assert callable(fn)
3294
  assert callable(wait_fn)
3295
  assert callable(_time_fn)
3296

    
3297
  if args is None:
3298
    args = []
3299

    
3300
  end_time = _time_fn() + timeout
3301

    
3302
  if callable(delay):
3303
    # External function to calculate delay
3304
    calc_delay = delay
3305

    
3306
  elif isinstance(delay, (tuple, list)):
3307
    # Increasing delay with optional upper boundary
3308
    (start, factor, limit) = delay
3309
    calc_delay = _RetryDelayCalculator(start, factor, limit)
3310

    
3311
  elif delay is RETRY_REMAINING_TIME:
3312
    # Always use the remaining time
3313
    calc_delay = None
3314

    
3315
  else:
3316
    # Static delay
3317
    calc_delay = lambda: delay
3318

    
3319
  assert calc_delay is None or callable(calc_delay)
3320

    
3321
  while True:
3322
    retry_args = []
3323
    try:
3324
      # pylint: disable-msg=W0142
3325
      return fn(*args)
3326
    except RetryAgain, err:
3327
      retry_args = err.args
3328
    except RetryTimeout:
3329
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
3330
                                   " handle RetryTimeout")
3331

    
3332
    remaining_time = end_time - _time_fn()
3333

    
3334
    if remaining_time < 0.0:
3335
      # pylint: disable-msg=W0142
3336
      raise RetryTimeout(*retry_args)
3337

    
3338
    assert remaining_time >= 0.0
3339

    
3340
    if calc_delay is None:
3341
      wait_fn(remaining_time)
3342
    else:
3343
      current_delay = calc_delay()
3344
      if current_delay > 0.0:
3345
        wait_fn(current_delay)
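
# Example for Retry above (illustrative sketch only; the pid file path is made
# up): poll for a condition with an increasing delay (0.1s start, doubling,
# capped at 1.0s) and a total timeout of 10 seconds.
#
#   def _CheckPidFileExists():
#     if not os.path.isfile("/var/run/example.pid"):
#       raise RetryAgain()
#
#   try:
#     Retry(_CheckPidFileExists, (0.1, 2.0, 1.0), 10.0)
#   except RetryTimeout:
#     logging.error("Pid file still missing after 10 seconds")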
3346

    
3347

    
3348
def GetClosedTempfile(*args, **kwargs):
3349
  """Creates a temporary file and returns its path.
3350

3351
  """
3352
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
3353
  _CloseFDNoErr(fd)
3354
  return path
3355

    
3356

    
3357
def GenerateSelfSignedX509Cert(common_name, validity):
3358
  """Generates a self-signed X509 certificate.
3359

3360
  @type common_name: string
3361
  @param common_name: commonName value
3362
  @type validity: int
3363
  @param validity: Validity for certificate in seconds
3364

3365
  """
3366
  # Create private and public key
3367
  key = OpenSSL.crypto.PKey()
3368
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
3369

    
3370
  # Create self-signed certificate
3371
  cert = OpenSSL.crypto.X509()
3372
  if common_name:
3373
    cert.get_subject().CN = common_name
3374
  cert.set_serial_number(1)
3375
  cert.gmtime_adj_notBefore(0)
3376
  cert.gmtime_adj_notAfter(validity)
3377
  cert.set_issuer(cert.get_subject())
3378
  cert.set_pubkey(key)
3379
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
3380

    
3381
  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
3382
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
3383

    
3384
  return (key_pem, cert_pem)
3385

    
3386

    
3387
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3388
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
3389
  """Legacy function to generate self-signed X509 certificate.
3390

3391
  @type filename: str
3392
  @param filename: path to write certificate to
3393
  @type common_name: string
3394
  @param common_name: commonName value
3395
  @type validity: int
3396
  @param validity: validity of certificate in number of days
3397

3398
  """
3399
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
3400
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3401
  # and node daemon certificates have the proper Subject/Issuer.
3402
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3403
                                                   validity * 24 * 60 * 60)
3404

    
3405
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3406

    
3407

    
3408
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening an existing file
    # read/write and creating it if it does not exist. A plain "open" would
    # either truncate an existing file (mode "w+") or refuse to create a
    # missing one (mode "r+"), but not both.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must not be negative"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)

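# Usage sketch (editor's example): serialise access to a shared resource;
# Exclusive() raises errors.LockError if the lock cannot be acquired within
# the given timeout:
#
#   lock = FileLock.Open("/var/lock/example.lock")
#   try:
#     lock.Exclusive(blocking=True, timeout=10)
#     # ... critical section ...
#     lock.Unlock()
#   finally:
#     lock.Close()
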
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)

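# Usage sketch (editor's example): feed chunked process output to a per-line
# callback; lines are dispatched on flush()/close(), and close() also emits
# any trailing data without a newline:
#
#   splitter = LineSplitter(logging.debug)
#   splitter.write("first line\nsecond ")
#   splitter.write("line\ntrailing")
#   splitter.close()   # logs "first line", "second line" and "trailing"
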
def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap

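# Usage sketch (editor's example): the decorated function receives the
# installed handlers through the 'signal_handlers' keyword argument
# (_DoOneIteration is a hypothetical work function):
#
#   @SignalHandled([signal.SIGTERM])
#   def Serve(signal_handlers=None):
#     handler = signal_handlers[signal.SIGTERM]
#     while not handler.called:
#       _DoOneIteration()
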
class SignalWakeupFd(object):
  """Wrapper around C{signal.set_wakeup_fd} using a self-pipe.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeed, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()

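# Usage sketch (editor's example): combined with SignalHandler (below) and
# select(), a blocking wait is interrupted as soon as a signal arrives:
#
#   wakeup = SignalWakeupFd()
#   handler = SignalHandler([signal.SIGCHLD], wakeup=wakeup)
#   try:
#     (rlist, _, _) = select.select([wakeup], [], [], 60.0)
#     if rlist:
#       wakeup.read(1)   # drain the notification byte
#   finally:
#     handler.Reset()
#     wakeup.Reset()
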
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when destroyed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: set
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @type wakeup: L{SignalWakeupFd} or None
    @param wakeup: if not None, its L{SignalWakeupFd.Notify} method is called
        whenever one of the signals is received

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)

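# Usage sketch (editor's example): watch for SIGINT while doing long-running
# work (_DoLongRunningWork is a hypothetical function), restoring the previous
# handler afterwards:
#
#   handler = SignalHandler([signal.SIGINT])
#   try:
#     _DoLongRunningWork()
#     if handler.called:
#       logging.info("Interrupted by SIGINT")
#   finally:
#     handler.Reset()
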
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
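# Usage sketch (editor's example): validate user-supplied field names against
# a mix of literal names and regular expressions:
#
#   fields = FieldSet("name", "uuid", r"tag/(\d+)")
#   fields.Matches("tag/12")            # -> match object, group(1) == "12"
#   fields.NonMatching(["name", "ip"])  # -> ["ip"]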