Statistics
| Branch: | Tag: | Revision:

root / lib / utils.py @ 26d3fd2f

History | View | Annotate | Download (102.3 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
# Module-level state: file locks currently held by this process.
_locksheld = []

# Matches strings that are safe to pass to a shell without quoting.
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

# When True, lock debugging output is enabled (toggled externally).
debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

# Kernel-provided source of random UUIDs.
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# NOTE(review): despite the name, this matches all alphanumerics (not just
# hex digits); used for X509 signature salts below — confirm intent.
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

# Service names: 1-128 characters out of [-_.a-zA-Z0-9].
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2

#: MAC checker regexp
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
95

    
96

    
97
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr

    # A run counts as failed when it was killed by a signal or returned a
    # non-zero exit code
    self.failed = (signal_ is not None or exit_code != 0)

    if signal_ is not None:
      self.fail_reason = "terminated by signal %s" % signal_
    elif exit_code is not None:
      self.fail_reason = "exited with exit code %s" % exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
146

    
147

    
148
def _BuildCmdEnvironment(env, reset):
149
  """Builds the environment for an external program.
150

151
  """
152
  if reset:
153
    cmd_env = {}
154
  else:
155
    cmd_env = os.environ.copy()
156
    cmd_env["LC_ALL"] = "C"
157

    
158
  if env is not None:
159
    cmd_env.update(env)
160

    
161
  return cmd_env
162

    
163

    
164
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether to run the command interactively, inheriting
      this process' stdin/stdout/stderr, instead of piping them
      (the default behaviour)
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  # 'output' redirects to a file while 'interactive' inherits our terminal;
  # the two are mutually exclusive
  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")

  if isinstance(cmd, basestring):
    # A plain string is run through the shell as-is
    strcmd = cmd
    shell = True
  else:
    # A list is executed directly; stringify all arguments for safety
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd, interactive)
    else:
      # Output goes to a file; stdout/stderr strings stay empty
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  # os.wait-style status: negative values encode the terminating signal
  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)
235

    
236

    
237
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # NOTE(review): this only rejects the combination when 'output' is set;
  # passing neither, or only 'output_fd', is accepted
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  if isinstance(cmd, basestring):
    # Run plain strings through a shell
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          # Parent must close its copy of the write end so the read below
          # sees EOF once the daemon exec()s (the fd is close-on-exec there)
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to arrive) and read
        # up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
329

    
330

    
331
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Performs the second fork, detaches from the session, redirects standard
  I/O, creates/locks the PID file and finally exec()s the daemon. Never
  returns: on any failure an error message is written to C{errpipe_write}
  and the process exits with status 1.

  @param errpipe_read: parent's end of the error pipe (closed here)
  @param errpipe_write: fd used to report errors back to the parent
  @param pidpipe_read: parent's end of the PID pipe (closed here)
  @param pidpipe_write: fd used to send the daemon's PID to the parent
  @param args: argument list for the daemon
  @param env: environment dict for the daemon, or None to inherit ours
  @param cwd: working directory for the daemon
  @param output: path for stdout/stderr redirection, or None
  @param fd_output: pre-opened fd for stdout/stderr, or None
  @param pidfile: path of the PID file to create and lock, or None

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    os.chdir("/")
    os.umask(077)
    os.setsid()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      try:
        # TODO: Atomic replace with another locked file instead of writing into
        # it after creating
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

        # Lock the PID file (and fail if not possible to do so). Any code
        # wanting to send a signal to the daemon should try to lock the PID
        # file before reading it. If acquiring the lock succeeds, the daemon is
        # no longer running and the signal should not be sent.
        LockFile(fd_pidfile)

        os.write(fd_pidfile, "%d\n" % os.getpid())
      except Exception, err:
        raise Exception("Creating and locking PID file failed: %s" % err)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    # Open /dev/null
    fd_devnull = os.open(os.devnull, os.O_RDWR)

    # Caller contract, re-checked here (see StartDaemon)
    assert not output or (bool(output) ^ (fd_output is not None))

    if fd_output is not None:
      # Caller supplied a descriptor; use it as-is
      pass
    elif output:
      # Open output file
      try:
        # TODO: Implement flag to set append=yes/no
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
      except EnvironmentError, err:
        raise Exception("Opening output file failed: %s" % err)
    else:
      fd_output = fd_devnull

    # Redirect standard I/O
    os.dup2(fd_devnull, 0)
    os.dup2(fd_output, 1)
    os.dup2(fd_output, 2)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      buf = str(sys.exc_info()[1])

      RetryOnSignal(os.write, errpipe_write, buf)
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  os._exit(1) # pylint: disable-msg=W0212
430

    
431

    
432
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @rtype: tuple
  @return: (out, err, status); out/err are empty strings in
      interactive mode

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  if interactive:
    # Interactive mode: the child inherits our stdin/stdout/stderr
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()
  if not interactive:
    # The child should not wait for input from us
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    # Maps each pipe's file descriptor to (accumulating buffer, pipe object)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    # Non-blocking reads so one pipe cannot stall draining of the other
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    while fdmap:
      pollresult = RetryOnSignal(poller.poll)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status
500

    
501

    
502
def _RunCmdFile(cmd, env, via_shell, output, cwd):
503
  """Run a command and save its output to a file.
504

505
  @type  cmd: string or list
506
  @param cmd: Command to run
507
  @type env: dict
508
  @param env: The environment to use
509
  @type via_shell: bool
510
  @param via_shell: if we should run via the shell
511
  @type output: str
512
  @param output: the filename in which to save the output
513
  @type cwd: string
514
  @param cwd: the working directory for the program
515
  @rtype: int
516
  @return: the exit status
517

518
  """
519
  fh = open(output, "a")
520
  try:
521
    child = subprocess.Popen(cmd, shell=via_shell,
522
                             stderr=subprocess.STDOUT,
523
                             stdout=fh,
524
                             stdin=subprocess.PIPE,
525
                             close_fds=True, env=env,
526
                             cwd=cwd)
527

    
528
    child.stdin.close()
529
    status = child.wait()
530
  finally:
531
    fh.close()
532
  return status
533

    
534

    
535
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  current = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    updated = current | fcntl.FD_CLOEXEC
  else:
    updated = current & ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, updated)
552

    
553

    
554
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    new_flags = old_flags | os.O_NONBLOCK
  else:
    new_flags = old_flags & ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, new_flags)
571

    
572

    
573
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  Only interruption by a signal (EINTR) triggers a retry; any other
  exception propagates to the caller.

  @type fn: callable
  @param fn: the function to invoke
  @return: whatever C{fn} returns

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError, err:
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error), err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
588

    
589

    
590
def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    # An unreadable/missing directory is not fatal; just report nothing ran
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    # Only executable regular files whose name matches the plugin mask are
    # run; everything else is reported as skipped
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        # Best-effort: one failing script must not abort the remaining ones
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr
625

    
626

    
627
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    # ENOENT (already gone) and EISDIR (it's a directory) are deliberately
    # swallowed; anything else is re-raised
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
642

    
643

    
644
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError, err:
    # Only a missing directory is ignored; e.g. ENOTEMPTY still propagates
    if err.errno != errno.ENOENT:
      raise
660

    
661

    
662
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    # Not a missing-directory problem (or mkdir disabled): propagate
    raise
688

    
689

    
690
def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  @type path: str
  @param path: the directory path to create
  @type mode: int
  @param mode: permission bits for newly created directories

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise
704

    
705

    
706
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  has_internals = (hasattr(tempfile, "_once_lock") and
                   hasattr(tempfile, "_name_sequence"))
  if not has_internals:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Drop the cached generator; it gets re-created (and re-seeded) on the
    # next temporary-name request
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
727

    
728

    
729
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file, or None if it doesn't exist or isn't a regular file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  fp = compat.sha1_hash()

  f = open(filename)
  try:
    # Read in fixed-size chunks to keep memory usage bounded for large files
    while True:
      data = f.read(4096)
      if not data:
        break

      fp.update(data)
  finally:
    # Bug fix: the file handle was previously never closed, leaking one file
    # descriptor per call
    f.close()

  return fp.hexdigest()
756

    
757

    
758
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  fingerprints = {}

  for path in files:
    digest = _FingerprintFile(path)
    # Missing files yield None and are simply left out of the result
    if digest:
      fingerprints[path] = digest

  return fingerprints
776

    
777

    
778
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  Values are coerced in place; a value listed in C{allowed_values} is
  accepted unchanged regardless of its type.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values
  @raise errors.TypeEnforcementError: if a value cannot be coerced
  @raise errors.ProgrammerError: if a requested type is not enforceable

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # A false-ish bool is coerced to the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        # Non-empty strings must spell out true/false explicitly
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
845

    
846

    
847
def _GetProcStatusPath(pid):
848
  """Returns the path for a PID's proc status file.
849

850
  @type pid: int
851
  @param pid: Process ID
852
  @rtype: string
853

854
  """
855
  return "/proc/%d/status" % pid
856

    
857

    
858
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    # Stat the proc status file; EINVAL is treated as transient and retried
    # through the Retry machinery below
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    err.RaiseInner()
891

    
892

    
893
def _ParseSigsetT(sigset):
894
  """Parse a rendered sigset_t value.
895

896
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
897
  function.
898

899
  @type sigset: string
900
  @param sigset: Rendered signal set from /proc/$pid/status
901
  @rtype: set
902
  @return: Set of all enabled signal numbers
903

904
  """
905
  result = set()
906

    
907
  signum = 0
908
  for ch in reversed(sigset):
909
    chv = int(ch, 16)
910

    
911
    # The following could be done in a loop, but it's easier to read and
912
    # understand in the unrolled form
913
    if chv & 1:
914
      result.add(signum + 1)
915
    if chv & 2:
916
      result.add(signum + 2)
917
    if chv & 4:
918
      result.add(signum + 3)
919
    if chv & 8:
920
      result.add(signum + 4)
921

    
922
    signum += 4
923

    
924
  return result
925

    
926

    
927
def _GetProcStatusField(pstatus, field):
928
  """Retrieves a field from the contents of a proc status file.
929

930
  @type pstatus: string
931
  @param pstatus: Contents of /proc/$pid/status
932
  @type field: string
933
  @param field: Name of field whose value should be returned
934
  @rtype: string
935

936
  """
937
  for line in pstatus.splitlines():
938
    parts = line.split(":", 1)
939

    
940
    if len(parts) < 2 or parts[0] != field:
941
      continue
942

    
943
    return parts[1].strip()
944

    
945
  return None
946

    
947

    
948
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @type status_path: string or None
  @param status_path: alternative path to the proc status file (mainly
      useful for tests); defaults to /proc/<pid>/status
  @rtype: bool
  @return: True if the process has a handler installed for the signal

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
      return False
    raise

  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)
975

    
976

    
977
def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    # A missing file simply means "no PID"; other read errors are logged
    # but still reported as 0 to the caller
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    # Unparseable contents are treated the same as a missing file
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid
1001

    
1002

    
1003
def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running and holds the lock, so the file
      # contents are the live PID
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  # Lock was acquired, meaning no daemon holds the file
  return None
1032

    
1033

    
1034
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # An exact (case-sensitive) hit always wins, even if ambiguous as a prefix
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()

  # Match the key itself or the key followed by a dot-separated suffix
  prefix_re = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)

  prefix_matches = []
  full_matches = []
  for name in name_list:
    if prefix_re.match(name) is None:
      continue
    prefix_matches.append(name)
    if not case_sensitive and key == name.upper():
      full_matches.append(name)

  # A unique case-insensitive full match beats any number of prefix matches
  if len(full_matches) == 1:
    return full_matches[0]
  if len(prefix_matches) == 1:
    return prefix_matches[0]
  return None
1078

    
1079

    
1080
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @return: the unmodified C{name}
  @raise errors.OpPrereqError: if the name is neither a valid symbolic
      service name nor a port number in the range [0, 65535]

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Not a number; validate as a symbolic service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # A numeric port must fit in 16 bits (protocols other than TCP or
    # UDP might need adjustments here)
    valid = 0 <= numport < (1 << 16)

  if valid:
    return name

  raise errors.OpPrereqError("Invalid service name '%s'" % name,
                             errors.ECODE_INVAL)
1102

    
1103

    
1104
def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume in MiB

  """
  # "--units m --nosuffix" makes vgs emit plain megabyte numbers
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    # Best-effort: a failing vgs simply yields an empty result
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      # Skip malformed lines rather than aborting the whole listing
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval
1130

    
1131

    
1132
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  Relies on the sysfs layout, where a network device that is a bridge
  has a "bridge" subdirectory.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  sysfs_dir = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_dir)
1142

    
1143

    
1144
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  Each name is broken into alternating groups of only-digits and
  no-digits; digit groups compare numerically. Only the first eight
  groups are considered, anything beyond that is compared as-is.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  group_pat = "(\D+|\d+)"
  # One mandatory group followed by up to seven optional ones
  full_pat = "^%s%s.*$" % (group_pat, ("%s?" % group_pat) * 7)
  split_re = re.compile(full_pat)
  nodigit_re = re.compile("^\D*$")

  def _AsKeyPart(part):
    """Convert an all-digit group to an integer, leave others alone."""
    if part is None or nodigit_re.match(part):
      return part
    return int(part)

  decorated = []
  for name in name_list:
    groups = split_re.match(name).groups()
    decorated.append(([_AsKeyPart(grp) for grp in groups], name))
  decorated.sort()

  return [item[1] for item in decorated]
1179

    
1180

    
1181
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    # Conversion failed; hand back the input unchanged
    return val
1201

    
1202

    
1203
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  match = re.match("^[-a-zA-Z0-9._+/:%@]+$", word)
  return match is not None
1220

    
1221

    
1222
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument is not shell-safe

  """
  for arg in args:
    if IsValidShellParam(arg):
      continue
    raise errors.ProgrammerError("Shell argument '%s' contains"
                                 " invalid characters" % arg)
  return template % args
1242

    
1243

    
1244
def FormatUnit(value, units):
1245
  """Formats an incoming number of MiB with the appropriate unit.
1246

1247
  @type value: int
1248
  @param value: integer representing the value in MiB (1048576)
1249
  @type units: char
1250
  @param units: the type of formatting we should do:
1251
      - 'h' for automatic scaling
1252
      - 'm' for MiBs
1253
      - 'g' for GiBs
1254
      - 't' for TiBs
1255
  @rtype: str
1256
  @return: the formatted value (with suffix)
1257

1258
  """
1259
  if units not in ('m', 'g', 't', 'h'):
1260
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1261

    
1262
  suffix = ''
1263

    
1264
  if units == 'm' or (units == 'h' and value < 1024):
1265
    if units == 'h':
1266
      suffix = 'M'
1267
    return "%d%s" % (round(value, 0), suffix)
1268

    
1269
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1270
    if units == 'h':
1271
      suffix = 'G'
1272
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1273

    
1274
  else:
1275
    if units == 'h':
1276
      suffix = 'T'
1277
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1278

    
1279

    
1280
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB, rounded up to the next multiple of 4.

  @raise errors.UnitParseError: when the input doesn't match the
      expected format or uses an unknown unit

  """
  match = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if match is None:
    raise errors.UnitParseError("Invalid format")

  value = float(match.group(1))

  unit = match.group(2)
  if unit:
    lcunit = unit.lower()
  else:
    # No unit given: default to MiB
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    multiplier = 1
  elif lcunit in ('g', 'gb', 'gib'):
    multiplier = 1024
  elif lcunit in ('t', 'tb', 'tib'):
    multiplier = 1024 * 1024
  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)
  value *= multiplier

  # Make sure we round up to a full MiB
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  remainder = value % 4
  if remainder:
    value += 4 - remainder

  return value
1323

    
1324

    
1325
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: for malformed range definitions

  """
  if not cpu_mask:
    return []

  cpu_ids = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    if len(boundaries) > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError):
      err = sys.exc_info()[1]
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      # For a single ID (no hyphen), boundaries[-1] == boundaries[0]
      higher = int(boundaries[-1])
    except (ValueError, TypeError):
      err = sys.exc_info()[1]
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_ids.extend(range(lower, higher + 1))

  return cpu_ids
1362

    
1363

    
1364
def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  If the key (compared ignoring whitespace differences) is already
  present, the file is left unchanged; otherwise the key is appended.

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  @note: if a file handle is passed in, it is closed before returning

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      # for/else: reached only when the key was not found above;
      # make sure the last existing line is newline-terminated first
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()
1395

    
1396

    
1397
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  The file is rewritten atomically: the remaining lines are written to
  a temporary file in the same directory, which is then renamed over
  the original.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    # Don't leave the temporary file behind on errors
    RemoveFile(tmpname)
    raise
1428

    
1429

    
1430
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  Any existing entries for the same IP address are dropped and replaced
  by a single new entry appended at the end of the file. The file is
  rewritten atomically via L{WriteFile}.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique (and drop any alias equal to the hostname)
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        # Skip existing (non-comment) entries for the same IP
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          continue
        out.write(line)

      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1466

    
1467

    
1468
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  Adds the host under both its full name and its short (first label)
  name.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [hostname.split(".")[0]])
1479

    
1480

    
1481
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file. The file is
  rewritten atomically via L{WriteFile}.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            # Strip all occurrences of the hostname; drop the whole line
            # if no other names remain for this address
            while hostname in names:
              names.remove(hostname)
            if names:
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1515

    
1516

    
1517
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname whose full and short (first label) names
      will be removed from L{constants.ETC_HOSTS}

  """
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])
1528

    
1529

    
1530
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  @rtype: str
  @return: timestamp such as C{2010-01-31_13_05_59}

  """
  fmt = "%Y-%m-%d_%H_%M_%S"
  return time.strftime(fmt)
1538

    
1539

    
1540
def CreateBackup(file_name):
  """Creates a backup of a file.

  The backup is a timestamped, uniquely-named copy created in the same
  directory as the original file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    # mkstemp guarantees a unique backup file name
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name
1571

    
1572

    
1573
def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  # Values made only of known-safe characters need no quoting
  if _re_shell_unquoted.match(value):
    return value
  escaped = value.replace("'", "'\\''")
  return "'%s'" % escaped
1586

    
1587

    
1588
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join(ShellQuote(arg) for arg in args)
1598

    
1599

    
1600
class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # String prepended once per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: file-like object the script is written to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent += 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    assert self._indent > 0
    self._indent -= 1

  def Write(self, txt, *args):
    """Write line to output file.

    Applies C{txt % args} formatting only when arguments are given, so
    literal percent signs in C{txt} are safe without arguments.

    """
    assert self._indent >= 0

    if args:
      line = txt % args
    else:
      line = txt

    self._fh.write(self._indent * self.INDENT_STR)
    self._fh.write(line)
    self._fh.write("\n")
1640

    
1641

    
1642
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolue and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  return [name for name in os.listdir(path) if not name.startswith(".")]
1657

    
1658

    
1659
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  try:
    if isinstance(user, basestring):
      pwent = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      pwent = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # Unknown user name or uid
    return default
  return pwent.pw_dir
1678

    
1679

    
1680
def NewUUID():
  """Returns a random UUID.

  Reads one UUID from the kernel's random UUID generator.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1689

    
1690

    
1691
def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed). The
  underlying bytes come from C{os.urandom}.

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash); the returned
      string is twice as long
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  return os.urandom(numbytes).encode('hex')
1704

    
1705

    
1706
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: if a directory cannot be created or
      chmod-ed, or if the path exists but is not a directory

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      # An already-existing directory is fine; everything else is fatal
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    # Explicit chmod since mkdir's mode argument is affected by the
    # umask and has no effect on pre-existing directories
    try:
      os.chmod(dir_name, dir_mode)
    except EnvironmentError, err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    # Catches the case where the path exists but is not a directory
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)
1727

    
1728

    
1729
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  fh = open(file_name, "r")
  try:
    data = fh.read(size)
  finally:
    fh.close()
  return data
1743

    
1744

    
1745
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type dry_run: boolean
  @param dry_run: if True, everything is done except the final rename,
      so the target file is never actually modified
  @type backup: boolean
  @param backup: if True and the target exists, create a backup copy
      of it before writing
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of fn/data must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temporary file in the same directory, then rename it over
  # the target; the rename is what makes the update atomic
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # Flush file contents to disk before renaming into place
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
1842

    
1843

    
1844
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line
  @raise errors.GenericError: if the file contains no non-empty line,
      or (with C{strict}) more than one

  """
  all_lines = ReadFile(file_name).splitlines()
  non_empty = [line for line in all_lines if line]
  if not non_empty:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  if strict and len(non_empty) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return non_empty[0]
1860

    
1861

    
1862
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  expected = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > expected:
      # A gap: "expected" is not used by any element
      return expected
    expected += 1
  return None
1888

    
1889

    
1890
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # Besides the requested events, always watch for priority data and
  # error/hangup conditions
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    # EINTR (interrupted by a signal) is treated as a timeout
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None
1927

    
1928

    
1929
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    """Remember the initial timeout.

    @type timeout: float or None
    @param timeout: timeout for the poll calls, in seconds

    """
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Poll once; raise L{RetryAgain} on timeout so L{Retry} calls us again.

    """
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Store the remaining timeout for the next L{Poll} call.

    """
    self.timeout = timeout
1950

    
1951

    
1952
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is not None:
    # Use the Retry machinery so interruptions only consume the
    # remaining part of the timeout
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    # No timeout: keep polling until a condition actually occurs
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result
1979

    
1980

    
1981
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  observed = set()
  result = []
  for item in seq:
    if item not in observed:
      observed.add(item)
      result.append(item)
  return result
1994

    
1995

    
1996
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  if _MAC_CHECK.match(mac):
    return mac.lower()

  raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                             mac, errors.ECODE_INVAL)
2015

    
2016

    
2017
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: boolean
  @return: False for negative value, True otherwise

  """
  if duration >= 0:
    time.sleep(duration)
    return True, None
  return False, "Invalid sleep duration"
2030

    
2031

    
2032
def _CloseFDNoErr(fd, retries=5):
2033
  """Close a file descriptor ignoring errors.
2034

2035
  @type fd: int
2036
  @param fd: the file descriptor
2037
  @type retries: int
2038
  @param retries: how many retries to make, in case we get any
2039
      other error than EBADF
2040

2041
  """
2042
  try:
2043
    os.close(fd)
2044
  except OSError, err:
2045
    if err.errno != errno.EBADF:
2046
      if retries > 0:
2047
        _CloseFDNoErr(fd, retries - 1)
2048
    # else either it's closed already or we're out of retries, so we
2049
    # ignore this and go on
2050

    
2051

    
2052
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Fallback ceiling for the number of available file descriptors
  default_max = 1024
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      sysconf_max = os.sysconf('SC_OPEN_MAX')
      if sysconf_max >= 0:
        default_max = sysconf_max
    except OSError:
      pass

  limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if limit == resource.RLIM_INFINITY:
    limit = default_max

  # Close everything above the three standard descriptors
  for fd in range(3, limit):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
2082

    
2083

    
2084
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Older ctypes releases have no built-in errno support; declaring
  # __errno_location as returning a pointer-to-int lets us read the
  # actual error code should the mlockall call fail.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
2116

    
2117

    
2118
def Daemonize(logfile):
2119
  """Daemonize the current process.
2120

2121
  This detaches the current process from the controlling terminal and
2122
  runs it in the background as a daemon.
2123

2124
  @type logfile: str
2125
  @param logfile: the logfile to which we should redirect stdout/stderr
2126
  @rtype: int
2127
  @return: the value zero
2128

2129
  """
2130
  # pylint: disable-msg=W0212
2131
  # yes, we really want os._exit
2132
  UMASK = 077
2133
  WORKDIR = "/"
2134

    
2135
  # this might fail
2136
  pid = os.fork()
2137
  if (pid == 0):  # The first child.
2138
    os.setsid()
2139
    # this might fail
2140
    pid = os.fork() # Fork a second child.
2141
    if (pid == 0):  # The second child.
2142
      os.chdir(WORKDIR)
2143
      os.umask(UMASK)
2144
    else:
2145
      # exit() or _exit()?  See below.
2146
      os._exit(0) # Exit parent (the first child) of the second child.
2147
  else:
2148
    os._exit(0) # Exit parent of the first child.
2149

    
2150
  for fd in range(3):
2151
    _CloseFDNoErr(fd)
2152
  i = os.open("/dev/null", os.O_RDONLY) # stdin
2153
  assert i == 0, "Can't close/reopen stdin"
2154
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2155
  assert i == 1, "Can't close/reopen stdout"
2156
  # Duplicate standard output to standard error.
2157
  os.dup2(1, 2)
2158
  return 0
2159

    
2160

    
2161
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile)
2172

    
2173

    
2174
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2185

    
2186

    
2187
def StopDaemon(name):
  """Stop daemon

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2198

    
2199

    
2200
def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pidfilename = DaemonPidFileName(name)
  # Refuse to clobber the pidfile of a process that is still running
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % os.getpid())
2217

    
2218

    
2219
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(DaemonPidFileName(name))
  except: # pylint: disable-msg=W0702
    pass
2234

    
2235

    
2236
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _SignalAndReap(target, signum, reap):
    """Send a signal and, for our own children, reap them (no zombies)"""
    if IgnoreProcessNotFound(os.kill, target, signum) and reap:
      try:
        os.waitpid(target, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _SignalAndReap(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _StillRunning():
    """Raise RetryAgain while the process has not gone away."""
    if not IsProcessAlive(pid):
      return

    try:
      (reaped, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if reaped <= 0:
      raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_StillRunning, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Grace period expired, force-kill the process
    _SignalAndReap(pid, signal.SIGKILL, waitpid)
2297

    
2298

    
2299
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for directory in search_path:
    # FIXME: investigate switch to PathJoin
    candidate = os.path.sep.join([directory, name])
    # accept only if the user-supplied test passes and the path still
    # resolves to the requested basename
    if test(candidate) and os.path.basename(candidate) == name:
      return candidate

  return None
2331

    
2332

    
2333
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
2356

    
2357

    
2358
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_usecs = int(value * 1000000)
  (secs, usecs) = divmod(total_usecs, 1000000)

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  return (int(secs), int(usecs))
2374

    
2375

    
2376
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  return float(secs) + (float(usecs) * 0.000001)
2392

    
2393

    
2394
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # every fallback failed; nothing left to try, so give up
        pass
2426

    
2427

    
2428
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  # Assemble the verbose (file/stderr) and compact (syslog) formats
  file_fmt = "%(asctime)s: " + program + " pid=%(process)d"
  syslog_fmt = program + "[%(process)d]:"
  if multithreaded:
    file_fmt += "/%(threadName)s"
    syslog_fmt += " (%(threadName)s)"
  if debug:
    file_fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  file_fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  syslog_fmt += " %(levelname)s %(message)s"
  file_formatter = logging.Formatter(file_fmt)
  syslog_formatter = logging.Formatter(syslog_fmt)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for old_handler in root_logger.handlers:
    old_handler.close()
    root_logger.removeHandler(old_handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(file_formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(syslog_formatter)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permisssion problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(file_formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
2519

    
2520

    
2521
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  """
  return os.path.isabs(path) and os.path.normpath(path) == path
2528

    
2529

    
2530
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  base = args[0]
  if not IsNormAbsPath(base):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  joined = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(joined):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([base, joined])
  if prefix != base:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, base))
  return joined
2558

    
2559

    
2560
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    # Seek to at most 4KB before the end and read from there
    fd.seek(0, 2)
    start = max(0, fd.tell() - 4096)
    fd.seek(start, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  return raw_data.splitlines()[-lines:]
2584

    
2585

    
2586
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp with the local timezone.

  @type secs: number
  @param secs: Unix timestamp
  @rtype: str
  @return: timestamp rendered as C{%F %T %Z} in the local timezone

  """
  # FIX: time.localtime must be used here, not time.gmtime; strftime's
  # %Z renders the *local* timezone name, so combining it with gmtime()
  # labeled a UTC clock reading with the local timezone abbreviation,
  # producing a timestamp that was off by the UTC offset.
  return time.strftime("%F %T %Z", time.localtime(secs))
2591

    
2592

    
2593
def _ParseAsn1Generalizedtime(value):
2594
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2595

2596
  @type value: string
2597
  @param value: ASN1 GENERALIZEDTIME timestamp
2598

2599
  """
2600
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2601
  if m:
2602
    # We have an offset
2603
    asn1time = m.group(1)
2604
    hours = int(m.group(2))
2605
    minutes = int(m.group(3))
2606
    utcoffset = (60 * hours) + minutes
2607
  else:
2608
    if not value.endswith("Z"):
2609
      raise ValueError("Missing timezone")
2610
    asn1time = value[:-1]
2611
    utcoffset = 0
2612

    
2613
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2614

    
2615
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2616

    
2617
  return calendar.timegm(tt.utctimetuple())
2618

    
2619

    
2620
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @rtype: tuple
  @return: (not_before, not_after) as Unix timestamps, each possibly None

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above, hence the attribute probing
  notbefore_fn = getattr(cert, "get_notBefore", None)
  if notbefore_fn is None:
    not_before = None
  else:
    asn1_before = notbefore_fn()
    if asn1_before is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(asn1_before)

  notafter_fn = getattr(cert, "get_notAfter", None)
  if notafter_fn is None:
    not_after = None
  else:
    asn1_after = notafter_fn()
    if asn1_after is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(asn1_after)

  return (not_before, not_after)
2654

    
2655

    
2656
def _VerifyCertificateInner(expired, not_before, not_after, now,
2657
                            warn_days, error_days):
2658
  """Verifies certificate validity.
2659

2660
  @type expired: bool
2661
  @param expired: Whether pyOpenSSL considers the certificate as expired
2662
  @type not_before: number or None
2663
  @param not_before: Unix timestamp before which certificate is not valid
2664
  @type not_after: number or None
2665
  @param not_after: Unix timestamp after which certificate is invalid
2666
  @type now: number
2667
  @param now: Current time as Unix timestamp
2668
  @type warn_days: number or None
2669
  @param warn_days: How many days before expiration a warning should be reported
2670
  @type error_days: number or None
2671
  @param error_days: How many days before expiration an error should be reported
2672

2673
  """
2674
  if expired:
2675
    msg = "Certificate is expired"
2676

    
2677
    if not_before is not None and not_after is not None:
2678
      msg += (" (valid from %s to %s)" %
2679
              (FormatTimestampWithTZ(not_before),
2680
               FormatTimestampWithTZ(not_after)))
2681
    elif not_before is not None:
2682
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2683
    elif not_after is not None:
2684
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2685

    
2686
    return (CERT_ERROR, msg)
2687

    
2688
  elif not_before is not None and not_before > now:
2689
    return (CERT_WARNING,
2690
            "Certificate not yet valid (valid from %s)" %
2691
            FormatTimestampWithTZ(not_before))
2692

    
2693
  elif not_after is not None:
2694
    remaining_days = int((not_after - now) / (24 * 3600))
2695

    
2696
    msg = "Certificate expires in about %d days" % remaining_days
2697

    
2698
    if error_days is not None and remaining_days <= error_days:
2699
      return (CERT_ERROR, msg)
2700

    
2701
    if warn_days is not None and remaining_days <= warn_days:
2702
      return (CERT_WARNING, msg)
2703

    
2704
  return (None, None)
2705

    
2706

    
2707
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  now = time.time()
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 now, warn_days, error_days)
2723

    
2724

    
2725
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  signature = Sha1Hmac(key, cert_pem, salt=salt)
  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt, signature, cert_pem))
2750

    
2751

    
2752
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  """
  # The signature header must appear before the PEM body ("---...")
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      break

    match = X509_SIGNATURE.match(line.strip())
    if match:
      return (match.group("salt"), match.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
2766

    
2767

    
2768
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load certificate
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  # The HMAC is computed over the re-serialized PEM, not the raw input
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
2791

    
2792

    
2793
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @type salt: string or None
  @param salt: if given, prepended to the text before hashing

  """
  if salt:
    payload = salt + text
  else:
    payload = text

  return hmac.new(key, payload, compat.sha1).hexdigest()
2809

    
2810

    
2811
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @type digest: string
  @param digest: Expected digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  expected = Sha1Hmac(key, text, salt=salt)
  return digest.lower() == expected.lower()
2826

    
2827

    
2828
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      # FIX: this used to append r"\'r" (backslash, single quote, "r")
      # instead of the intended two-character escape for carriage return
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
2863

    
2864

    
2865
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escape in order to be an element of the
  elements. The escaping rules are (assuming coma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list of strings
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      # FIX: also require that a next element exists; an input ending
      # with an odd run of backslashes used to raise IndexError here
      if num_b % 2 == 1 and slist:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
2905

    
2906

    
2907
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join(str(name) for name in names)
2915

    
2916

    
2917
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes (rounded to the nearest integer)

  """
  mebi = 1024.0 * 1024.0
  return int(round(value / mebi, 0))
2927

    
2928

    
2929
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  total = 0

  # lstat is used so symlinks are counted themselves, not their targets
  for (dirpath, _, filenames) in os.walk(path):
    for fname in filenames:
      total += os.lstat(PathJoin(dirpath, fname)).st_size

  return BytesToMebibyte(total)
2946

    
2947

    
2948
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  result = []
  for line in ReadFile(filename).splitlines():
    # the fifth field (dump/pass counters) is deliberately discarded
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    result.append((device, mountpoint, fstype, options))

  return result
2966

    
2967

    
2968
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: int
  @return: tuple of (Total space, Free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_bavail is the space available to unprivileged users
  total_mb = BytesToMebibyte(stats.f_blocks * stats.f_frsize)
  free_mb = BytesToMebibyte(stats.f_bavail * stats.f_frsize)

  return (total_mb, free_mb)
2982

    
2983

    
2984
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child was killed by a signal or
    exited with a code other than 0 or 1

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    # The child must never return into the caller's code; os._exit skips
    # cleanup handlers and exits immediately with the result as exit code
    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  # Anything other than a clean 0/1 exit (including the 33 error marker
  # above) is reported as a failure
  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3029

    
3030

    
3031
def IgnoreProcessNotFound(fn, *args, **kwargs):
3032
  """Ignores ESRCH when calling a process-related function.
3033

3034
  ESRCH is raised when a process is not found.
3035

3036
  @rtype: bool
3037
  @return: Whether process was found
3038

3039
  """
3040
  try:
3041
    fn(*args, **kwargs)
3042
  except EnvironmentError, err:
3043
    # Ignore ESRCH
3044
    if err.errno == errno.ESRCH:
3045
      return False
3046
    raise
3047

    
3048
  return True
3049

    
3050

    
3051
def IgnoreSignals(fn, *args, **kwargs):
3052
  """Tries to call a function ignoring failures due to EINTR.
3053

3054
  """
3055
  try:
3056
    return fn(*args, **kwargs)
3057
  except EnvironmentError, err:
3058
    if err.errno == errno.EINTR:
3059
      return None
3060
    else:
3061
      raise
3062
  except (select.error, socket.error), err:
3063
    # In python 2.6 and above select.error is an IOError, so it's handled
3064
    # above, in 2.5 and below it's not, and it's handled here.
3065
    if err.args and err.args[0] == errno.EINTR:
3066
      return None
3067
    else:
3068
      raise
3069

    
3070

    
3071
def LockFile(fd):
3072
  """Locks a file using POSIX locks.
3073

3074
  @type fd: int
3075
  @param fd: the file descriptor we need to lock
3076

3077
  """
3078
  try:
3079
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3080
  except IOError, err:
3081
    if err.errno == errno.EAGAIN:
3082
      raise errors.LockError("File already locked")
3083
    raise
3084

    
3085

    
3086
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  # None (and any other non-number) is rejected by the isinstance check
  if not isinstance(val, (int, float)):
    return "N/A"

  # these two codes works on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
3099

    
3100

    
3101
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  remainder = round(secs, 0)
  parts = []

  if remainder > 0:
    # Negative values would be a bit tricky
    for (suffix, span) in (("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)):
      (count, remainder) = divmod(remainder, span)
      # Emit a component once any higher-order component has been emitted,
      # so e.g. one hour formats as "1h 0m 0s"
      if count or parts:
        parts.append("%d%s" % (count, suffix))

  parts.append("%ds" % remainder)

  return " ".join(parts)
3124

    
3125

    
3126
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time
  @return: the pause end time as an integer timestamp, or None if the file
    is missing, contains an invalid value, or the pause has already ended

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    # A missing file simply means "not paused"
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      # Unparseable content: warn, delete the file and treat as not paused
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        # Pause has ended, but the file is kept around until it is
        # "remove_after" seconds past the end time
        value = None

  return value
3167

    
3168

    
3169
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raises a wrapped exception or a fresh L{RetryTimeout}.

    """
    args = self.args
    if args and isinstance(args[0], Exception):
      raise args[0]
    raise RetryTimeout(*args)
3182

    
3183

    
3184
class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the resulting RetryTimeout can be used to reraise it.

  """
3192

    
3193

    
3194
class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  Each call returns the current delay and multiplies the next one by
  C{factor}, clamping it at C{limit} if one was given.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run; the limit must only be applied when one was
    # given -- "min(None, ...)" would yield None on Python 2 and break
    # callers waiting on the returned delay
    if self._limit is None or self._next < self._limit:
      self._next *= self._factor
      if self._limit is not None and self._next > self._limit:
        self._next = self._limit

    return current
3237

    
3238

    
3239
#: Special delay to specify whole remaining timeout
3240
RETRY_REMAINING_TIME = object()
3241

    
3242

    
3243
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  # Translate the "delay" argument into a callable (or None for
  # RETRY_REMAINING_TIME)
  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      # Remember the arguments so they can be passed on to RetryTimeout
      # if the timeout is hit below
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      # RETRY_REMAINING_TIME: wait for whatever time is left
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
3323

    
3324

    
3325
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to L{tempfile.mkstemp}; the file
  descriptor is closed before returning, so only the name is kept.

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)

  _CloseFDNoErr(fd)

  return path
3332

    
3333

    
3334
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @return: tuple of (PEM-encoded private key, PEM-encoded certificate)

  """
  # Create private and public key
  privkey = OpenSSL.crypto.PKey()
  privkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  # Self-signed: the issuer is the subject and it's signed with its own key
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(privkey)
  cert.sign(privkey, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                           privkey)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
3362

    
3363

    
3364
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3365
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
3366
  """Legacy function to generate self-signed X509 certificate.
3367

3368
  @type filename: str
3369
  @param filename: path to write certificate to
3370
  @type common_name: string
3371
  @param common_name: commonName value
3372
  @type validity: int
3373
  @param validity: validity of certificate in number of days
3374

3375
  """
3376
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
3377
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3378
  # and node daemon certificates have the proper Subject/Issuer.
3379
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3380
                                                   validity * 24 * 60 * 60)
3381

    
3382
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3383

    
3384

    
3385
class FileLock(object):
  """Utility class for file locks.

  Wraps C{fcntl.flock} on an open file object, optionally retrying
  non-blocking lock attempts until a timeout expires.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    # Ensure the file (and thereby the lock) is released on garbage
    # collection; Close is safe to call multiple times
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # The hasattr guard covers the case where __init__ failed before
    # setting the attribute and __del__ still runs
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      # Retry with an increasing delay (start 0.1s, factor 1.2, capped at
      # 1.0s) until the total timeout is exceeded
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    # Single flock attempt; EAGAIN is translated into RetryAgain only when
    # operating with a timeout, so the Retry loop above can try again
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
3516

    
3517

    
3518
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    """Accepts data, queueing any lines it completes.

    """
    pieces = (self._buffer + data).split("\n")
    # Everything before the last piece is a complete line; the last piece
    # waits for more data (or for close)
    self._buffer = pieces[-1]
    self._lines.extend(pieces[:-1])

  def flush(self):
    """Invokes the line function for every complete line.

    """
    emit = self._line_fn
    while self._lines:
      emit(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    """Flushes all lines, including a trailing unterminated fragment.

    """
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
3557

    
3558

    
3559
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      existing = kwargs.get('signal_handlers')
      assert existing is None or isinstance(existing, dict), \
             "Wrong signal_handlers parameter in original function call"
      if existing is None:
        # First decorator in the stack: create the shared dict
        existing = {}
        kwargs['signal_handlers'] = existing
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          existing[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous handlers
        sighandler.Reset()
    return sig_function
  return wrap
3594

    
3595

    
3596
class SignalWakeupFd(object):
  """Self-pipe wrapper around C{signal.set_wakeup_fd}.

  While an instance is active, the interpreter writes a byte to the pipe's
  write end whenever a signal arrives, so the read end (exposed via
  C{fileno}/C{read}) can be used to wake up C{select}/C{poll} loops.

  """
  # Selected at class-definition time: use the real signal.set_wakeup_fd
  # when the running Python provides it, otherwise fall back to a no-op
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Remember the previously registered wakeup fd so it can be restored
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
3646

    
3647

    
3648
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: optional object with a C{Notify} method (e.g.
      L{SignalWakeupFd}), notified whenever a handled signal arrives

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    # Make sure the original handlers are restored even if the caller
    # forgot to call Reset explicitly
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # Note: .items() returns a list on Python 2, so removing entries from
    # the dict while iterating is safe here
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
3733

    
3734

    
3735
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes the field set from a list of patterns.

    @param items: regex patterns; each must match a complete field name

    """
    # Anchor each pattern so it has to match the whole field name
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Return the first successful match; a plain loop replaces the former
    # itertools.ifilter construct, which does not exist in Python 3
    for item in self.items:
      m = item.match(field)
      if m is not None:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]