1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import signal
46
import OpenSSL
47
import datetime
48
import calendar
49

    
50
from cStringIO import StringIO
51

    
52
from ganeti import errors
53
from ganeti import constants
54
from ganeti import compat
55

    
56
from ganeti.utils.algo import * # pylint: disable-msg=W0401
57
from ganeti.utils.retry import * # pylint: disable-msg=W0401
58
from ganeti.utils.text import * # pylint: disable-msg=W0401
59
from ganeti.utils.mlock import * # pylint: disable-msg=W0401
60
from ganeti.utils.log import * # pylint: disable-msg=W0401
61
from ganeti.utils.hash import * # pylint: disable-msg=W0401
62
from ganeti.utils.wrapper import * # pylint: disable-msg=W0401
63

    
64

    
65
#: when set to True, L{RunCmd} is disabled
66
_no_fork = False
67

    
68
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
69

    
70
HEX_CHAR_RE = r"[a-zA-Z0-9]"
71
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
72
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
73
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
74
                             HEX_CHAR_RE, HEX_CHAR_RE),
75
                            re.S | re.I)
76

    
77
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
78

    
79
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
80
                     '[a-f0-9]{4}-[a-f0-9]{12}$')
81

    
82
# Certificate verification results
83
(CERT_WARNING,
84
 CERT_ERROR) = range(1, 3)
85

    
86
(_TIMEOUT_NONE,
87
 _TIMEOUT_TERM,
88
 _TIMEOUT_KILL) = range(3)
89

    
90
#: Shell param checker regexp
91
_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
92

    
93
#: ASN1 time regexp
94
_ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
95

    
96

    
97
def DisableFork():
98
  """Disables the use of fork(2).
99

100
  """
101
  global _no_fork # pylint: disable-msg=W0603
102

    
103
  _no_fork = True
104

    
105

    
106
class RunResult(object):
107
  """Holds the result of running external programs.
108

109
  @type exit_code: int
110
  @ivar exit_code: the exit code of the program, or None (if the program
111
      didn't exit())
112
  @type signal: int or None
113
  @ivar signal: the signal that caused the program to finish, or None
114
      (if the program wasn't terminated by a signal)
115
  @type stdout: str
116
  @ivar stdout: the standard output of the program
117
  @type stderr: str
118
  @ivar stderr: the standard error of the program
119
  @type failed: boolean
120
  @ivar failed: True in case the program was
121
      terminated by a signal or exited with a non-zero exit code
122
  @ivar fail_reason: a string detailing the termination reason
123

124
  """
125
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
126
               "failed", "fail_reason", "cmd"]
127

    
128

    
129
  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
130
               timeout):
131
    self.cmd = cmd
132
    self.exit_code = exit_code
133
    self.signal = signal_
134
    self.stdout = stdout
135
    self.stderr = stderr
136
    self.failed = (signal_ is not None or exit_code != 0)
137

    
138
    fail_msgs = []
139
    if self.signal is not None:
140
      fail_msgs.append("terminated by signal %s" % self.signal)
141
    elif self.exit_code is not None:
142
      fail_msgs.append("exited with exit code %s" % self.exit_code)
143
    else:
144
      fail_msgs.append("unable to determine termination reason")
145

    
146
    if timeout_action == _TIMEOUT_TERM:
147
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
148
    elif timeout_action == _TIMEOUT_KILL:
149
      fail_msgs.append(("force termination after timeout of %.2f seconds"
150
                        " and linger for another %.2f seconds") %
151
                       (timeout, constants.CHILD_LINGER_TIMEOUT))
152

    
153
    if fail_msgs and self.failed:
154
      self.fail_reason = CommaJoin(fail_msgs)
155

    
156
    if self.failed:
157
      logging.debug("Command '%s' failed (%s); output: %s",
158
                    self.cmd, self.fail_reason, self.output)
159

    
160
  def _GetOutput(self):
161
    """Returns the combined stdout and stderr for easier usage.
162

163
    """
164
    return self.stdout + self.stderr
165

    
166
  output = property(_GetOutput, None, None, "Return full output")
167

    
168

    
169
def _BuildCmdEnvironment(env, reset):
170
  """Builds the environment for an external program.
171

172
  """
173
  if reset:
174
    cmd_env = {}
175
  else:
176
    cmd_env = os.environ.copy()
177
    cmd_env["LC_ALL"] = "C"
178

    
179
  if env is not None:
180
    cmd_env.update(env)
181

    
182
  return cmd_env
183

    
184

    
185
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
186
           interactive=False, timeout=None):
187
  """Execute a (shell) command.
188

189
  The command should not read from its standard input, as it will be
190
  closed.
191

192
  @type cmd: string or list
193
  @param cmd: Command to run
194
  @type env: dict
195
  @param env: Additional environment variables
196
  @type output: str
197
  @param output: if desired, the output of the command can be
198
      saved in a file instead of the RunResult instance; this
199
      parameter denotes the file name (if not None)
200
  @type cwd: string
201
  @param cwd: if specified, will be used as the working
202
      directory for the command; the default will be /
203
  @type reset_env: boolean
204
  @param reset_env: whether to reset or keep the default os environment
205
  @type interactive: boolean
206
  @param interactive: whether we pipe stdin, stdout and stderr
207
                      (default behaviour) or run the command interactively
208
  @type timeout: int
209
  @param timeout: If not None, timeout in seconds until child process gets
210
                  killed
211
  @rtype: L{RunResult}
212
  @return: RunResult instance
213
  @raise errors.ProgrammerError: if we call this when forks are disabled
214

215
  """
216
  if _no_fork:
217
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
218

    
219
  if output and interactive:
220
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
221
                                 " not be provided at the same time")
222

    
223
  if isinstance(cmd, basestring):
224
    strcmd = cmd
225
    shell = True
226
  else:
227
    cmd = [str(val) for val in cmd]
228
    strcmd = ShellQuoteArgs(cmd)
229
    shell = False
230

    
231
  if output:
232
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
233
  else:
234
    logging.debug("RunCmd %s", strcmd)
235

    
236
  cmd_env = _BuildCmdEnvironment(env, reset_env)
237

    
238
  try:
239
    if output is None:
240
      out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
241
                                                     interactive, timeout)
242
    else:
243
      timeout_action = _TIMEOUT_NONE
244
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
245
      out = err = ""
246
  except OSError, err:
247
    if err.errno == errno.ENOENT:
248
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
249
                               (strcmd, err))
250
    else:
251
      raise
252

    
253
  if status >= 0:
254
    exitcode = status
255
    signal_ = None
256
  else:
257
    exitcode = None
258
    signal_ = -status
259

    
260
  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
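
# Example (illustration only, not part of the original module): typical use of
# RunCmd(); the command, environment and timeout below are arbitrary.
#
#   result = RunCmd(["/bin/ls", "/tmp"], env={"FOO": "bar"}, timeout=30)
#   if result.failed:
#     logging.error("Listing failed: %s", result.fail_reason)
#   else:
#     logging.debug("Listing succeeded: %s", result.output)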
261

    
262

    
263
def SetupDaemonEnv(cwd="/", umask=077):
264
  """Setup a daemon's environment.
265

266
  This should be called between the first and second fork, due to
267
  setsid usage.
268

269
  @param cwd: the directory to which to chdir
270
  @param umask: the umask to setup
271

272
  """
273
  os.chdir(cwd)
274
  os.umask(umask)
275
  os.setsid()
276

    
277

    
278
def SetupDaemonFDs(output_file, output_fd):
279
  """Setups up a daemon's file descriptors.
280

281
  @param output_file: if not None, the file to which to redirect
282
      stdout/stderr
283
  @param output_fd: if not None, the file descriptor for stdout/stderr
284

285
  """
286
  # check that at most one is defined
287
  assert [output_file, output_fd].count(None) >= 1
288

    
289
  # Open /dev/null (read-only, only for stdin)
290
  devnull_fd = os.open(os.devnull, os.O_RDONLY)
291

    
292
  if output_fd is not None:
293
    pass
294
  elif output_file is not None:
295
    # Open output file
296
    try:
297
      output_fd = os.open(output_file,
298
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
299
    except EnvironmentError, err:
300
      raise Exception("Opening output file failed: %s" % err)
301
  else:
302
    output_fd = os.open(os.devnull, os.O_WRONLY)
303

    
304
  # Redirect standard I/O
305
  os.dup2(devnull_fd, 0)
306
  os.dup2(output_fd, 1)
307
  os.dup2(output_fd, 2)
308

    
309

    
310
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
311
                pidfile=None):
312
  """Start a daemon process after forking twice.
313

314
  @type cmd: string or list
315
  @param cmd: Command to run
316
  @type env: dict
317
  @param env: Additional environment variables
318
  @type cwd: string
319
  @param cwd: Working directory for the program
320
  @type output: string
321
  @param output: Path to file in which to save the output
322
  @type output_fd: int
323
  @param output_fd: File descriptor for output
324
  @type pidfile: string
325
  @param pidfile: Process ID file
326
  @rtype: int
327
  @return: Daemon process ID
328
  @raise errors.ProgrammerError: if we call this when forks are disabled
329

330
  """
331
  if _no_fork:
332
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
333
                                 " disabled")
334

    
335
  if output and not (bool(output) ^ (output_fd is not None)):
336
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
337
                                 " specified")
338

    
339
  if isinstance(cmd, basestring):
340
    cmd = ["/bin/sh", "-c", cmd]
341

    
342
  strcmd = ShellQuoteArgs(cmd)
343

    
344
  if output:
345
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
346
  else:
347
    logging.debug("StartDaemon %s", strcmd)
348

    
349
  cmd_env = _BuildCmdEnvironment(env, False)
350

    
351
  # Create pipe for sending PID back
352
  (pidpipe_read, pidpipe_write) = os.pipe()
353
  try:
354
    try:
355
      # Create pipe for sending error messages
356
      (errpipe_read, errpipe_write) = os.pipe()
357
      try:
358
        try:
359
          # First fork
360
          pid = os.fork()
361
          if pid == 0:
362
            try:
363
              # Child process, won't return
364
              _StartDaemonChild(errpipe_read, errpipe_write,
365
                                pidpipe_read, pidpipe_write,
366
                                cmd, cmd_env, cwd,
367
                                output, output_fd, pidfile)
368
            finally:
369
              # Well, maybe child process failed
370
              os._exit(1) # pylint: disable-msg=W0212
371
        finally:
372
          CloseFdNoError(errpipe_write)
373

    
374
        # Wait for daemon to be started (or an error message to
375
        # arrive) and read up to 100 KB as an error message
376
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
377
      finally:
378
        CloseFdNoError(errpipe_read)
379
    finally:
380
      CloseFdNoError(pidpipe_write)
381

    
382
    # Read up to 128 bytes for PID
383
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
384
  finally:
385
    CloseFdNoError(pidpipe_read)
386

    
387
  # Try to avoid zombies by waiting for child process
388
  try:
389
    os.waitpid(pid, 0)
390
  except OSError:
391
    pass
392

    
393
  if errormsg:
394
    raise errors.OpExecError("Error when starting daemon process: %r" %
395
                             errormsg)
396

    
397
  try:
398
    return int(pidtext)
399
  except (ValueError, TypeError), err:
400
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
401
                             (pidtext, err))
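
# Example (illustration only): starting a long-running process in the
# background and checking on it afterwards; the command, log file and pidfile
# paths below are arbitrary.
#
#   pid = StartDaemon(["/usr/sbin/mydaemon", "--foreground"],
#                     output="/var/log/mydaemon.log",
#                     pidfile="/var/run/mydaemon.pid")
#   if not IsProcessAlive(pid):
#     logging.error("Daemon with PID %s died right after starting", pid)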
402

    
403

    
404
def _StartDaemonChild(errpipe_read, errpipe_write,
405
                      pidpipe_read, pidpipe_write,
406
                      args, env, cwd,
407
                      output, fd_output, pidfile):
408
  """Child process for starting daemon.
409

410
  """
411
  try:
412
    # Close parent's side
413
    CloseFdNoError(errpipe_read)
414
    CloseFdNoError(pidpipe_read)
415

    
416
    # First child process
417
    SetupDaemonEnv()
418

    
419
    # And fork for the second time
420
    pid = os.fork()
421
    if pid != 0:
422
      # Exit first child process
423
      os._exit(0) # pylint: disable-msg=W0212
424

    
425
    # Make sure pipe is closed on execv* (and thereby notifies
426
    # original process)
427
    SetCloseOnExecFlag(errpipe_write, True)
428

    
429
    # List of file descriptors to be left open
430
    noclose_fds = [errpipe_write]
431

    
432
    # Open PID file
433
    if pidfile:
434
      fd_pidfile = WritePidFile(pidfile)
435

    
436
      # Keeping the file open to hold the lock
437
      noclose_fds.append(fd_pidfile)
438

    
439
      SetCloseOnExecFlag(fd_pidfile, False)
440
    else:
441
      fd_pidfile = None
442

    
443
    SetupDaemonFDs(output, fd_output)
444

    
445
    # Send daemon PID to parent
446
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
447

    
448
    # Close all file descriptors except stdio and error message pipe
449
    CloseFDs(noclose_fds=noclose_fds)
450

    
451
    # Change working directory
452
    os.chdir(cwd)
453

    
454
    if env is None:
455
      os.execvp(args[0], args)
456
    else:
457
      os.execvpe(args[0], args, env)
458
  except: # pylint: disable-msg=W0702
459
    try:
460
      # Report errors to original process
461
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
462
    except: # pylint: disable-msg=W0702
463
      # Ignore errors in error handling
464
      pass
465

    
466
  os._exit(1) # pylint: disable-msg=W0212
467

    
468

    
469
def WriteErrorToFD(fd, err):
470
  """Possibly write an error message to a fd.
471

472
  @type fd: None or int (file descriptor)
473
  @param fd: if not None, the error will be written to this fd
474
  @param err: string, the error message
475

476
  """
477
  if fd is None:
478
    return
479

    
480
  if not err:
481
    err = "<unknown error>"
482

    
483
  RetryOnSignal(os.write, fd, err)
484

    
485

    
486
def _CheckIfAlive(child):
487
  """Raises L{RetryAgain} if child is still alive.
488

489
  @raises RetryAgain: If child is still alive
490

491
  """
492
  if child.poll() is None:
493
    raise RetryAgain()
494

    
495

    
496
def _WaitForProcess(child, timeout):
497
  """Waits for the child to terminate or until we reach timeout.
498

499
  """
500
  try:
501
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
502
  except RetryTimeout:
503
    pass
504

    
505

    
506
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
507
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
508
  """Run a command and return its output.
509

510
  @type  cmd: string or list
511
  @param cmd: Command to run
512
  @type env: dict
513
  @param env: The environment to use
514
  @type via_shell: bool
515
  @param via_shell: if we should run via the shell
516
  @type cwd: string
517
  @param cwd: the working directory for the program
518
  @type interactive: boolean
519
  @param interactive: Run command interactively (without piping)
520
  @type timeout: int
521
  @param timeout: Timeout in seconds after which the program gets terminated
522
  @rtype: tuple
523
  @return: (out, err, status, timeout_action)
524

525
  """
526
  poller = select.poll()
527

    
528
  stderr = subprocess.PIPE
529
  stdout = subprocess.PIPE
530
  stdin = subprocess.PIPE
531

    
532
  if interactive:
533
    stderr = stdout = stdin = None
534

    
535
  child = subprocess.Popen(cmd, shell=via_shell,
536
                           stderr=stderr,
537
                           stdout=stdout,
538
                           stdin=stdin,
539
                           close_fds=True, env=env,
540
                           cwd=cwd)
541

    
542
  out = StringIO()
543
  err = StringIO()
544

    
545
  linger_timeout = None
546

    
547
  if timeout is None:
548
    poll_timeout = None
549
  else:
550
    poll_timeout = RunningTimeout(timeout, True).Remaining
551

    
552
  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
553
                 (cmd, child.pid))
554
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
555
                (cmd, child.pid))
556

    
557
  timeout_action = _TIMEOUT_NONE
558

    
559
  if not interactive:
560
    child.stdin.close()
561
    poller.register(child.stdout, select.POLLIN)
562
    poller.register(child.stderr, select.POLLIN)
563
    fdmap = {
564
      child.stdout.fileno(): (out, child.stdout),
565
      child.stderr.fileno(): (err, child.stderr),
566
      }
567
    for fd in fdmap:
568
      SetNonblockFlag(fd, True)
569

    
570
    while fdmap:
571
      if poll_timeout:
572
        pt = poll_timeout() * 1000
573
        if pt < 0:
574
          if linger_timeout is None:
575
            logging.warning(msg_timeout)
576
            if child.poll() is None:
577
              timeout_action = _TIMEOUT_TERM
578
              IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
579
            linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
580
          pt = linger_timeout() * 1000
581
          if pt < 0:
582
            break
583
      else:
584
        pt = None
585

    
586
      pollresult = RetryOnSignal(poller.poll, pt)
587

    
588
      for fd, event in pollresult:
589
        if event & select.POLLIN or event & select.POLLPRI:
590
          data = fdmap[fd][1].read()
591
          # no data from read signifies EOF (the same as POLLHUP)
592
          if not data:
593
            poller.unregister(fd)
594
            del fdmap[fd]
595
            continue
596
          fdmap[fd][0].write(data)
597
        if (event & select.POLLNVAL or event & select.POLLHUP or
598
            event & select.POLLERR):
599
          poller.unregister(fd)
600
          del fdmap[fd]
601

    
602
  if timeout is not None:
603
    assert callable(poll_timeout)
604

    
605
    # We have no I/O left but it might still run
606
    if child.poll() is None:
607
      _WaitForProcess(child, poll_timeout())
608

    
609
    # Terminate if still alive after timeout
610
    if child.poll() is None:
611
      if linger_timeout is None:
612
        logging.warning(msg_timeout)
613
        timeout_action = _TIMEOUT_TERM
614
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
615
        lt = _linger_timeout
616
      else:
617
        lt = linger_timeout()
618
      _WaitForProcess(child, lt)
619

    
620
    # Okay, still alive after timeout and linger timeout? Kill it!
621
    if child.poll() is None:
622
      timeout_action = _TIMEOUT_KILL
623
      logging.warning(msg_linger)
624
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
625

    
626
  out = out.getvalue()
627
  err = err.getvalue()
628

    
629
  status = child.wait()
630
  return out, err, status, timeout_action
631

    
632

    
633
def _RunCmdFile(cmd, env, via_shell, output, cwd):
634
  """Run a command and save its output to a file.
635

636
  @type  cmd: string or list
637
  @param cmd: Command to run
638
  @type env: dict
639
  @param env: The environment to use
640
  @type via_shell: bool
641
  @param via_shell: if we should run via the shell
642
  @type output: str
643
  @param output: the filename in which to save the output
644
  @type cwd: string
645
  @param cwd: the working directory for the program
646
  @rtype: int
647
  @return: the exit status
648

649
  """
650
  fh = open(output, "a")
651
  try:
652
    child = subprocess.Popen(cmd, shell=via_shell,
653
                             stderr=subprocess.STDOUT,
654
                             stdout=fh,
655
                             stdin=subprocess.PIPE,
656
                             close_fds=True, env=env,
657
                             cwd=cwd)
658

    
659
    child.stdin.close()
660
    status = child.wait()
661
  finally:
662
    fh.close()
663
  return status
664

    
665

    
666
def RunParts(dir_name, env=None, reset_env=False):
667
  """Run Scripts or programs in a directory
668

669
  @type dir_name: string
670
  @param dir_name: absolute path to a directory
671
  @type env: dict
672
  @param env: The environment to use
673
  @type reset_env: boolean
674
  @param reset_env: whether to reset or keep the default os environment
675
  @rtype: list of tuples
676
  @return: list of (name, one of the RUNPARTS_* status constants, RunResult)
677

678
  """
679
  rr = []
680

    
681
  try:
682
    dir_contents = ListVisibleFiles(dir_name)
683
  except OSError, err:
684
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
685
    return rr
686

    
687
  for relname in sorted(dir_contents):
688
    fname = PathJoin(dir_name, relname)
689
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
690
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
691
      rr.append((relname, constants.RUNPARTS_SKIP, None))
692
    else:
693
      try:
694
        result = RunCmd([fname], env=env, reset_env=reset_env)
695
      except Exception, err: # pylint: disable-msg=W0703
696
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
697
      else:
698
        rr.append((relname, constants.RUNPARTS_RUN, result))
699

    
700
  return rr
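
# Example (illustration only): running all hook scripts in a directory and
# logging failures; the directory path below is arbitrary.
#
#   for (name, status, result) in RunParts("/etc/myapp/hooks.d"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("Hook %s could not be run: %s", name, result)
#     elif status == constants.RUNPARTS_RUN and result.failed:
#       logging.error("Hook %s failed: %s", name, result.fail_reason)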
701

    
702

    
703
def RemoveFile(filename):
704
  """Remove a file ignoring some errors.
705

706
  Remove a file, ignoring non-existing ones or directories. Other
707
  errors are raised to the caller.
708

709
  @type filename: str
710
  @param filename: the file to be removed
711

712
  """
713
  try:
714
    os.unlink(filename)
715
  except OSError, err:
716
    if err.errno not in (errno.ENOENT, errno.EISDIR):
717
      raise
718

    
719

    
720
def RemoveDir(dirname):
721
  """Remove an empty directory.
722

723
  Remove a directory, ignoring non-existing ones.
724
  Other errors are passed. This includes the case,
725
  where the directory is not empty, so it can't be removed.
726

727
  @type dirname: str
728
  @param dirname: the empty directory to be removed
729

730
  """
731
  try:
732
    os.rmdir(dirname)
733
  except OSError, err:
734
    if err.errno != errno.ENOENT:
735
      raise
736

    
737

    
738
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
739
  """Renames a file.
740

741
  @type old: string
742
  @param old: Original path
743
  @type new: string
744
  @param new: New path
745
  @type mkdir: bool
746
  @param mkdir: Whether to create target directory if it doesn't exist
747
  @type mkdir_mode: int
748
  @param mkdir_mode: Mode for newly created directories
749

750
  """
751
  try:
752
    return os.rename(old, new)
753
  except OSError, err:
754
    # In at least one use case of this function, the job queue, directory
755
    # creation is very rare. Checking for the directory before renaming is not
756
    # as efficient.
757
    if mkdir and err.errno == errno.ENOENT:
758
      # Create directory and try again
759
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
760

    
761
      return os.rename(old, new)
762

    
763
    raise
764

    
765

    
766
def Makedirs(path, mode=0750):
767
  """Super-mkdir; create a leaf directory and all intermediate ones.
768

769
  This is a wrapper around C{os.makedirs} adding error handling not implemented
770
  before Python 2.5.
771

772
  """
773
  try:
774
    os.makedirs(path, mode)
775
  except OSError, err:
776
    # Ignore EEXIST. This is only handled in os.makedirs as included in
777
    # Python 2.5 and above.
778
    if err.errno != errno.EEXIST or not os.path.exists(path):
779
      raise
780

    
781

    
782
def ResetTempfileModule():
783
  """Resets the random name generator of the tempfile module.
784

785
  This function should be called after C{os.fork} in the child process to
786
  ensure it creates a newly seeded random generator. Otherwise it would
787
  generate the same random parts as the parent process. If several processes
788
  race for the creation of a temporary file, this could lead to one not getting
789
  a temporary name.
790

791
  """
792
  # pylint: disable-msg=W0212
793
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
794
    tempfile._once_lock.acquire()
795
    try:
796
      # Reset random name generator
797
      tempfile._name_sequence = None
798
    finally:
799
      tempfile._once_lock.release()
800
  else:
801
    logging.critical("The tempfile module misses at least one of the"
802
                     " '_once_lock' and '_name_sequence' attributes")
803

    
804

    
805
def ForceDictType(target, key_types, allowed_values=None):
806
  """Force the values of a dict to have certain types.
807

808
  @type target: dict
809
  @param target: the dict to update
810
  @type key_types: dict
811
  @param key_types: dict mapping target dict keys to types
812
                    in constants.ENFORCEABLE_TYPES
813
  @type allowed_values: list
814
  @keyword allowed_values: list of specially allowed values
815

816
  """
817
  if allowed_values is None:
818
    allowed_values = []
819

    
820
  if not isinstance(target, dict):
821
    msg = "Expected dictionary, got '%s'" % target
822
    raise errors.TypeEnforcementError(msg)
823

    
824
  for key in target:
825
    if key not in key_types:
826
      msg = "Unknown key '%s'" % key
827
      raise errors.TypeEnforcementError(msg)
828

    
829
    if target[key] in allowed_values:
830
      continue
831

    
832
    ktype = key_types[key]
833
    if ktype not in constants.ENFORCEABLE_TYPES:
834
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
835
      raise errors.ProgrammerError(msg)
836

    
837
    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
838
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
839
        pass
840
      elif not isinstance(target[key], basestring):
841
        if isinstance(target[key], bool) and not target[key]:
842
          target[key] = ''
843
        else:
844
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
845
          raise errors.TypeEnforcementError(msg)
846
    elif ktype == constants.VTYPE_BOOL:
847
      if isinstance(target[key], basestring) and target[key]:
848
        if target[key].lower() == constants.VALUE_FALSE:
849
          target[key] = False
850
        elif target[key].lower() == constants.VALUE_TRUE:
851
          target[key] = True
852
        else:
853
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
854
          raise errors.TypeEnforcementError(msg)
855
      elif target[key]:
856
        target[key] = True
857
      else:
858
        target[key] = False
859
    elif ktype == constants.VTYPE_SIZE:
860
      try:
861
        target[key] = ParseUnit(target[key])
862
      except errors.UnitParseError, err:
863
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
864
              (key, target[key], err)
865
        raise errors.TypeEnforcementError(msg)
866
    elif ktype == constants.VTYPE_INT:
867
      try:
868
        target[key] = int(target[key])
869
      except (ValueError, TypeError):
870
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
871
        raise errors.TypeEnforcementError(msg)
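
# Example (illustration only): coercing user-supplied parameters in place; the
# key name and value below are arbitrary.
#
#   params = {"port": "11000"}
#   ForceDictType(params, {"port": constants.VTYPE_INT})
#   # params["port"] is now the integer 11000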
872

    
873

    
874
def _GetProcStatusPath(pid):
875
  """Returns the path for a PID's proc status file.
876

877
  @type pid: int
878
  @param pid: Process ID
879
  @rtype: string
880

881
  """
882
  return "/proc/%d/status" % pid
883

    
884

    
885
def IsProcessAlive(pid):
886
  """Check if a given pid exists on the system.
887

888
  @note: zombie status is not handled, so zombie processes
889
      will be returned as alive
890
  @type pid: int
891
  @param pid: the process ID to check
892
  @rtype: boolean
893
  @return: True if the process exists
894

895
  """
896
  def _TryStat(name):
897
    try:
898
      os.stat(name)
899
      return True
900
    except EnvironmentError, err:
901
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
902
        return False
903
      elif err.errno == errno.EINVAL:
904
        raise RetryAgain(err)
905
      raise
906

    
907
  assert isinstance(pid, int), "pid must be an integer"
908
  if pid <= 0:
909
    return False
910

    
911
  # /proc in a multiprocessor environment can have strange behaviors.
912
  # Retry the os.stat a few times until we get a good result.
913
  try:
914
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
915
                 args=[_GetProcStatusPath(pid)])
916
  except RetryTimeout, err:
917
    err.RaiseInner()
918

    
919

    
920
def _ParseSigsetT(sigset):
921
  """Parse a rendered sigset_t value.
922

923
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
924
  function.
925

926
  @type sigset: string
927
  @param sigset: Rendered signal set from /proc/$pid/status
928
  @rtype: set
929
  @return: Set of all enabled signal numbers
930

931
  """
932
  result = set()
933

    
934
  signum = 0
935
  for ch in reversed(sigset):
936
    chv = int(ch, 16)
937

    
938
    # The following could be done in a loop, but it's easier to read and
939
    # understand in the unrolled form
940
    if chv & 1:
941
      result.add(signum + 1)
942
    if chv & 2:
943
      result.add(signum + 2)
944
    if chv & 4:
945
      result.add(signum + 3)
946
    if chv & 8:
947
      result.add(signum + 4)
948

    
949
    signum += 4
950

    
951
  return result
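
# Example (illustration only): a rendered SigCgt value of "0000000000000003"
# has the two lowest bits set, i.e. signals 1 (SIGHUP) and 2 (SIGINT) are
# caught:
#
#   _ParseSigsetT("0000000000000003") == set([1, 2])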
952

    
953

    
954
def _GetProcStatusField(pstatus, field):
955
  """Retrieves a field from the contents of a proc status file.
956

957
  @type pstatus: string
958
  @param pstatus: Contents of /proc/$pid/status
959
  @type field: string
960
  @param field: Name of field whose value should be returned
961
  @rtype: string
962

963
  """
964
  for line in pstatus.splitlines():
965
    parts = line.split(":", 1)
966

    
967
    if len(parts) < 2 or parts[0] != field:
968
      continue
969

    
970
    return parts[1].strip()
971

    
972
  return None
973

    
974

    
975
def IsProcessHandlingSignal(pid, signum, status_path=None):
976
  """Checks whether a process is handling a signal.
977

978
  @type pid: int
979
  @param pid: Process ID
980
  @type signum: int
981
  @param signum: Signal number
982
  @rtype: bool
983

984
  """
985
  if status_path is None:
986
    status_path = _GetProcStatusPath(pid)
987

    
988
  try:
989
    proc_status = ReadFile(status_path)
990
  except EnvironmentError, err:
991
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
992
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
993
      return False
994
    raise
995

    
996
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
997
  if sigcgt is None:
998
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
999

    
1000
  # Now check whether signal is handled
1001
  return signum in _ParseSigsetT(sigcgt)
1002

    
1003

    
1004
def ReadPidFile(pidfile):
1005
  """Read a pid from a file.
1006

1007
  @type  pidfile: string
1008
  @param pidfile: path to the file containing the pid
1009
  @rtype: int
1010
  @return: The process id, if the file exists and contains a valid PID,
1011
           otherwise 0
1012

1013
  """
1014
  try:
1015
    raw_data = ReadOneLineFile(pidfile)
1016
  except EnvironmentError, err:
1017
    if err.errno != errno.ENOENT:
1018
      logging.exception("Can't read pid file")
1019
    return 0
1020

    
1021
  try:
1022
    pid = int(raw_data)
1023
  except (TypeError, ValueError), err:
1024
    logging.info("Can't parse pid file contents", exc_info=True)
1025
    return 0
1026

    
1027
  return pid
1028

    
1029

    
1030
def ReadLockedPidFile(path):
1031
  """Reads a locked PID file.
1032

1033
  This can be used together with L{StartDaemon}.
1034

1035
  @type path: string
1036
  @param path: Path to PID file
1037
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
1038

1039
  """
1040
  try:
1041
    fd = os.open(path, os.O_RDONLY)
1042
  except EnvironmentError, err:
1043
    if err.errno == errno.ENOENT:
1044
      # PID file doesn't exist
1045
      return None
1046
    raise
1047

    
1048
  try:
1049
    try:
1050
      # Try to acquire lock
1051
      LockFile(fd)
1052
    except errors.LockError:
1053
      # Couldn't lock, daemon is running
1054
      return int(os.read(fd, 100))
1055
  finally:
1056
    os.close(fd)
1057

    
1058
  return None
1059

    
1060

    
1061
def ValidateServiceName(name):
1062
  """Validate the given service name.
1063

1064
  @type name: number or string
1065
  @param name: Service name or port specification
1066

1067
  """
1068
  try:
1069
    numport = int(name)
1070
  except (ValueError, TypeError):
1071
    # Non-numeric service name
1072
    valid = _VALID_SERVICE_NAME_RE.match(name)
1073
  else:
1074
    # Numeric port (protocols other than TCP or UDP might need adjustments
1075
    # here)
1076
    valid = (numport >= 0 and numport < (1 << 16))
1077

    
1078
  if not valid:
1079
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
1080
                               errors.ECODE_INVAL)
1081

    
1082
  return name
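
# Example (illustration only): both symbolic names and numeric ports are
# accepted; anything else raises OpPrereqError. The values below are
# arbitrary.
#
#   ValidateServiceName("ganeti-noded")  # returns "ganeti-noded"
#   ValidateServiceName(1811)            # returns 1811
#   ValidateServiceName("foo bar")       # raises errors.OpPrereqError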
1083

    
1084

    
1085
def ListVolumeGroups():
1086
  """List volume groups and their size
1087

1088
  @rtype: dict
1089
  @return:
1090
       Dictionary with volume group names as keys and
1091
       their sizes as values
1092

1093
  """
1094
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1095
  result = RunCmd(command)
1096
  retval = {}
1097
  if result.failed:
1098
    return retval
1099

    
1100
  for line in result.stdout.splitlines():
1101
    try:
1102
      name, size = line.split()
1103
      size = int(float(size))
1104
    except (IndexError, ValueError), err:
1105
      logging.error("Invalid output from vgs (%s): %s", err, line)
1106
      continue
1107

    
1108
    retval[name] = size
1109

    
1110
  return retval
1111

    
1112

    
1113
def BridgeExists(bridge):
1114
  """Check whether the given bridge exists in the system
1115

1116
  @type bridge: str
1117
  @param bridge: the bridge name to check
1118
  @rtype: boolean
1119
  @return: True if it does
1120

1121
  """
1122
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1123

    
1124

    
1125
def TryConvert(fn, val):
1126
  """Try to convert a value ignoring errors.
1127

1128
  This function tries to apply function I{fn} to I{val}. If no
1129
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1130
  the result, else it will return the original value. Any other
1131
  exceptions are propagated to the caller.
1132

1133
  @type fn: callable
1134
  @param fn: function to apply to the value
1135
  @param val: the value to be converted
1136
  @return: The converted value if the conversion was successful,
1137
      otherwise the original value.
1138

1139
  """
1140
  try:
1141
    nv = fn(val)
1142
  except (ValueError, TypeError):
1143
    nv = val
1144
  return nv
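
# Example (illustration only): failed conversions fall back to the original
# value instead of raising.
#
#   TryConvert(int, "42")     # -> 42
#   TryConvert(int, "forty")  # -> "forty"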
1145

    
1146

    
1147
def IsValidShellParam(word):
1148
  """Verifies is the given word is safe from the shell's p.o.v.
1149

1150
  This means that we can pass this to a command via the shell and be
1151
  sure that it doesn't alter the command line and is passed as such to
1152
  the actual command.
1153

1154
  Note that we are overly restrictive here, in order to be on the safe
1155
  side.
1156

1157
  @type word: str
1158
  @param word: the word to check
1159
  @rtype: boolean
1160
  @return: True if the word is 'safe'
1161

1162
  """
1163
  return bool(_SHELLPARAM_REGEX.match(word))
1164

    
1165

    
1166
def BuildShellCmd(template, *args):
1167
  """Build a safe shell command line from the given arguments.
1168

1169
  This function will check all arguments in the args list so that they
1170
  are valid shell parameters (i.e. they don't contain shell
1171
  metacharacters). If everything is ok, it will return the result of
1172
  template % args.
1173

1174
  @type template: str
1175
  @param template: the string holding the template for the
1176
      string formatting
1177
  @rtype: str
1178
  @return: the expanded command line
1179

1180
  """
1181
  for word in args:
1182
    if not IsValidShellParam(word):
1183
      raise errors.ProgrammerError("Shell argument '%s' contains"
1184
                                   " invalid characters" % word)
1185
  return template % args
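
# Example (illustration only): the template is expanded only if every argument
# passes IsValidShellParam(); the device path below is arbitrary.
#
#   BuildShellCmd("fsck %s", "/dev/sda1")      # -> "fsck /dev/sda1"
#   BuildShellCmd("fsck %s", "/dev/sda1; rm")  # raises errors.ProgrammerError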
1186

    
1187

    
1188
def ParseCpuMask(cpu_mask):
1189
  """Parse a CPU mask definition and return the list of CPU IDs.
1190

1191
  CPU mask format: comma-separated list of CPU IDs
1192
  or dash-separated ID ranges
1193
  Example: "0-2,5" -> "0,1,2,5"
1194

1195
  @type cpu_mask: str
1196
  @param cpu_mask: CPU mask definition
1197
  @rtype: list of int
1198
  @return: list of CPU IDs
1199

1200
  """
1201
  if not cpu_mask:
1202
    return []
1203
  cpu_list = []
1204
  for range_def in cpu_mask.split(","):
1205
    boundaries = range_def.split("-")
1206
    n_elements = len(boundaries)
1207
    if n_elements > 2:
1208
      raise errors.ParseError("Invalid CPU ID range definition"
1209
                              " (only one hyphen allowed): %s" % range_def)
1210
    try:
1211
      lower = int(boundaries[0])
1212
    except (ValueError, TypeError), err:
1213
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
1214
                              " CPU ID range: %s" % str(err))
1215
    try:
1216
      higher = int(boundaries[-1])
1217
    except (ValueError, TypeError), err:
1218
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
1219
                              " CPU ID range: %s" % str(err))
1220
    if lower > higher:
1221
      raise errors.ParseError("Invalid CPU ID range definition"
1222
                              " (%d > %d): %s" % (lower, higher, range_def))
1223
    cpu_list.extend(range(lower, higher + 1))
1224
  return cpu_list
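
# Example (illustration only), matching the format described in the docstring:
#
#   ParseCpuMask("0-2,5")  # -> [0, 1, 2, 5]
#   ParseCpuMask("")       # -> []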
1225

    
1226

    
1227
def AddAuthorizedKey(file_obj, key):
1228
  """Adds an SSH public key to an authorized_keys file.
1229

1230
  @type file_obj: str or file handle
1231
  @param file_obj: path to authorized_keys file or an open file object
1232
  @type key: str
1233
  @param key: string containing key
1234

1235
  """
1236
  key_fields = key.split()
1237

    
1238
  if isinstance(file_obj, basestring):
1239
    f = open(file_obj, 'a+')
1240
  else:
1241
    f = file_obj
1242

    
1243
  try:
1244
    nl = True
1245
    for line in f:
1246
      # Ignore whitespace changes
1247
      if line.split() == key_fields:
1248
        break
1249
      nl = line.endswith('\n')
1250
    else:
1251
      if not nl:
1252
        f.write("\n")
1253
      f.write(key.rstrip('\r\n'))
1254
      f.write("\n")
1255
      f.flush()
1256
  finally:
1257
    f.close()
1258

    
1259

    
1260
def RemoveAuthorizedKey(file_name, key):
1261
  """Removes an SSH public key from an authorized_keys file.
1262

1263
  @type file_name: str
1264
  @param file_name: path to authorized_keys file
1265
  @type key: str
1266
  @param key: string containing key
1267

1268
  """
1269
  key_fields = key.split()
1270

    
1271
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1272
  try:
1273
    out = os.fdopen(fd, 'w')
1274
    try:
1275
      f = open(file_name, 'r')
1276
      try:
1277
        for line in f:
1278
          # Ignore whitespace changes while comparing lines
1279
          if line.split() != key_fields:
1280
            out.write(line)
1281

    
1282
        out.flush()
1283
        os.rename(tmpname, file_name)
1284
      finally:
1285
        f.close()
1286
    finally:
1287
      out.close()
1288
  except:
1289
    RemoveFile(tmpname)
1290
    raise
1291

    
1292

    
1293
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1294
  """Sets the name of an IP address and hostname in /etc/hosts.
1295

1296
  @type file_name: str
1297
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1298
  @type ip: str
1299
  @param ip: the IP address
1300
  @type hostname: str
1301
  @param hostname: the hostname to be added
1302
  @type aliases: list
1303
  @param aliases: the list of aliases to add for the hostname
1304

1305
  """
1306
  # Ensure aliases are unique
1307
  aliases = UniqueSequence([hostname] + aliases)[1:]
1308

    
1309
  def _WriteEtcHosts(fd):
1310
    # Duplicating file descriptor because os.fdopen's result will automatically
1311
    # close the descriptor, but we would still like to have its functionality.
1312
    out = os.fdopen(os.dup(fd), "w")
1313
    try:
1314
      for line in ReadFile(file_name).splitlines(True):
1315
        fields = line.split()
1316
        if fields and not fields[0].startswith("#") and ip == fields[0]:
1317
          continue
1318
        out.write(line)
1319

    
1320
      out.write("%s\t%s" % (ip, hostname))
1321
      if aliases:
1322
        out.write(" %s" % " ".join(aliases))
1323
      out.write("\n")
1324
      out.flush()
1325
    finally:
1326
      out.close()
1327

    
1328
  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1329

    
1330

    
1331
def AddHostToEtcHosts(hostname, ip):
1332
  """Wrapper around SetEtcHostsEntry.
1333

1334
  @type hostname: str
1335
  @param hostname: a hostname that will be resolved and added to
1336
      L{constants.ETC_HOSTS}
1337
  @type ip: str
1338
  @param ip: The ip address of the host
1339

1340
  """
1341
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [hostname.split(".")[0]])
1342

    
1343

    
1344
def RemoveEtcHostsEntry(file_name, hostname):
1345
  """Removes a hostname from /etc/hosts.
1346

1347
  IP addresses without names are removed from the file.
1348

1349
  @type file_name: str
1350
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1351
  @type hostname: str
1352
  @param hostname: the hostname to be removed
1353

1354
  """
1355
  def _WriteEtcHosts(fd):
1356
    # Duplicating file descriptor because os.fdopen's result will automatically
1357
    # close the descriptor, but we would still like to have its functionality.
1358
    out = os.fdopen(os.dup(fd), "w")
1359
    try:
1360
      for line in ReadFile(file_name).splitlines(True):
1361
        fields = line.split()
1362
        if len(fields) > 1 and not fields[0].startswith("#"):
1363
          names = fields[1:]
1364
          if hostname in names:
1365
            while hostname in names:
1366
              names.remove(hostname)
1367
            if names:
1368
              out.write("%s %s\n" % (fields[0], " ".join(names)))
1369
            continue
1370

    
1371
        out.write(line)
1372

    
1373
      out.flush()
1374
    finally:
1375
      out.close()
1376

    
1377
  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1378

    
1379

    
1380
def RemoveHostFromEtcHosts(hostname):
1381
  """Wrapper around RemoveEtcHostsEntry.
1382

1383
  @type hostname: str
1384
  @param hostname: hostname that will be resolved and its
1385
      full and short name will be removed from
1386
      L{constants.ETC_HOSTS}
1387

1388
  """
1389
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
1390
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])
1391

    
1392

    
1393
def TimestampForFilename():
1394
  """Returns the current time formatted for filenames.
1395

1396
  The format doesn't contain colons as some shells and applications treat them
1397
  as separators. Uses the local timezone.
1398

1399
  """
1400
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1401

    
1402

    
1403
def CreateBackup(file_name):
1404
  """Creates a backup of a file.
1405

1406
  @type file_name: str
1407
  @param file_name: file to be backed up
1408
  @rtype: str
1409
  @return: the path to the newly created backup
1410
  @raise errors.ProgrammerError: for invalid file names
1411

1412
  """
1413
  if not os.path.isfile(file_name):
1414
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1415
                                file_name)
1416

    
1417
  prefix = ("%s.backup-%s." %
1418
            (os.path.basename(file_name), TimestampForFilename()))
1419
  dir_name = os.path.dirname(file_name)
1420

    
1421
  fsrc = open(file_name, 'rb')
1422
  try:
1423
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1424
    fdst = os.fdopen(fd, 'wb')
1425
    try:
1426
      logging.debug("Backing up %s at %s", file_name, backup_name)
1427
      shutil.copyfileobj(fsrc, fdst)
1428
    finally:
1429
      fdst.close()
1430
  finally:
1431
    fsrc.close()
1432

    
1433
  return backup_name
1434

    
1435

    
1436
def ListVisibleFiles(path):
1437
  """Returns a list of visible files in a directory.
1438

1439
  @type path: str
1440
  @param path: the directory to enumerate
1441
  @rtype: list
1442
  @return: the list of all files not starting with a dot
1443
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1444

1445
  """
1446
  if not IsNormAbsPath(path):
1447
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1448
                                 " absolute/normalized: '%s'" % path)
1449
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1450
  return files
1451

    
1452

    
1453
def GetHomeDir(user, default=None):
1454
  """Try to get the homedir of the given user.
1455

1456
  The user can be passed either as a string (denoting the name) or as
1457
  an integer (denoting the user id). If the user is not found, the
1458
  'default' argument is returned, which defaults to None.
1459

1460
  """
1461
  try:
1462
    if isinstance(user, basestring):
1463
      result = pwd.getpwnam(user)
1464
    elif isinstance(user, (int, long)):
1465
      result = pwd.getpwuid(user)
1466
    else:
1467
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1468
                                   type(user))
1469
  except KeyError:
1470
    return default
1471
  return result.pw_dir
1472

    
1473

    
1474
def NewUUID():
1475
  """Returns a random UUID.
1476

1477
  @note: This is a Linux-specific method as it uses the /proc
1478
      filesystem.
1479
  @rtype: str
1480

1481
  """
1482
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1483

    
1484

    
1485
def EnsureDirs(dirs):
1486
  """Make required directories, if they don't exist.
1487

1488
  @param dirs: list of tuples (dir_name, dir_mode)
1489
  @type dirs: list of (string, integer)
1490

1491
  """
1492
  for dir_name, dir_mode in dirs:
1493
    try:
1494
      os.mkdir(dir_name, dir_mode)
1495
    except EnvironmentError, err:
1496
      if err.errno != errno.EEXIST:
1497
        raise errors.GenericError("Cannot create needed directory"
1498
                                  " '%s': %s" % (dir_name, err))
1499
    try:
1500
      os.chmod(dir_name, dir_mode)
1501
    except EnvironmentError, err:
1502
      raise errors.GenericError("Cannot change directory permissions on"
1503
                                " '%s': %s" % (dir_name, err))
1504
    if not os.path.isdir(dir_name):
1505
      raise errors.GenericError("%s is not a directory" % dir_name)
1506

    
1507

    
1508
def ReadFile(file_name, size=-1):
1509
  """Reads a file.
1510

1511
  @type size: int
1512
  @param size: Read at most size bytes (if negative, entire file)
1513
  @rtype: str
1514
  @return: the (possibly partial) content of the file
1515

1516
  """
1517
  f = open(file_name, "r")
1518
  try:
1519
    return f.read(size)
1520
  finally:
1521
    f.close()
1522

    
1523

    
1524
def WriteFile(file_name, fn=None, data=None,
1525
              mode=None, uid=-1, gid=-1,
1526
              atime=None, mtime=None, close=True,
1527
              dry_run=False, backup=False,
1528
              prewrite=None, postwrite=None):
1529
  """(Over)write a file atomically.
1530

1531
  The file_name and either fn (a function taking one argument, the
1532
  file descriptor, and which should write the data to it) or data (the
1533
  contents of the file) must be passed. The other arguments are
1534
  optional and allow setting the file mode, owner and group, and the
1535
  mtime/atime of the file.
1536

1537
  If the function doesn't raise an exception, it has succeeded and the
1538
  target file has the new contents. If the function has raised an
1539
  exception, an existing target file should be unmodified and the
1540
  temporary file should be removed.
1541

1542
  @type file_name: str
1543
  @param file_name: the target filename
1544
  @type fn: callable
1545
  @param fn: content writing function, called with
1546
      file descriptor as parameter
1547
  @type data: str
1548
  @param data: contents of the file
1549
  @type mode: int
1550
  @param mode: file mode
1551
  @type uid: int
1552
  @param uid: the owner of the file
1553
  @type gid: int
1554
  @param gid: the group of the file
1555
  @type atime: int
1556
  @param atime: a custom access time to be set on the file
1557
  @type mtime: int
1558
  @param mtime: a custom modification time to be set on the file
1559
  @type close: boolean
1560
  @param close: whether to close file after writing it
1561
  @type prewrite: callable
1562
  @param prewrite: function to be called before writing content
1563
  @type postwrite: callable
1564
  @param postwrite: function to be called after writing content
1565

1566
  @rtype: None or int
1567
  @return: None if the 'close' parameter evaluates to True,
1568
      otherwise the file descriptor
1569

1570
  @raise errors.ProgrammerError: if any of the arguments are not valid
1571

1572
  """
1573
  if not os.path.isabs(file_name):
1574
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1575
                                 " absolute: '%s'" % file_name)
1576

    
1577
  if [fn, data].count(None) != 1:
1578
    raise errors.ProgrammerError("fn or data required")
1579

    
1580
  if [atime, mtime].count(None) == 1:
1581
    raise errors.ProgrammerError("Both atime and mtime must be either"
1582
                                 " set or None")
1583

    
1584
  if backup and not dry_run and os.path.isfile(file_name):
1585
    CreateBackup(file_name)
1586

    
1587
  dir_name, base_name = os.path.split(file_name)
1588
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1589
  do_remove = True
1590
  # here we need to make sure we remove the temp file, if any error
1591
  # leaves it in place
1592
  try:
1593
    if uid != -1 or gid != -1:
1594
      os.chown(new_name, uid, gid)
1595
    if mode:
1596
      os.chmod(new_name, mode)
1597
    if callable(prewrite):
1598
      prewrite(fd)
1599
    if data is not None:
1600
      os.write(fd, data)
1601
    else:
1602
      fn(fd)
1603
    if callable(postwrite):
1604
      postwrite(fd)
1605
    os.fsync(fd)
1606
    if atime is not None and mtime is not None:
1607
      os.utime(new_name, (atime, mtime))
1608
    if not dry_run:
1609
      os.rename(new_name, file_name)
1610
      do_remove = False
1611
  finally:
1612
    if close:
1613
      os.close(fd)
1614
      result = None
1615
    else:
1616
      result = fd
1617
    if do_remove:
1618
      RemoveFile(new_name)
1619

    
1620
  return result
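
# Example (illustration only): atomically replacing a configuration file; the
# path, contents and mode below are arbitrary.
#
#   WriteFile("/etc/myapp/settings.conf",
#             data="enabled = True\n",
#             mode=0644, backup=True)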
1621

    
1622

    
1623
def GetFileID(path=None, fd=None):
1624
  """Returns the file 'id', i.e. the dev/inode and mtime information.
1625

1626
  Either the path to the file or the fd must be given.
1627

1628
  @param path: the file path
1629
  @param fd: a file descriptor
1630
  @return: a tuple of (device number, inode number, mtime)
1631

1632
  """
1633
  if [path, fd].count(None) != 1:
1634
    raise errors.ProgrammerError("One and only one of fd/path must be given")
1635

    
1636
  if fd is None:
1637
    st = os.stat(path)
1638
  else:
1639
    st = os.fstat(fd)
1640

    
1641
  return (st.st_dev, st.st_ino, st.st_mtime)
1642

    
1643

    
1644
def VerifyFileID(fi_disk, fi_ours):
1645
  """Verifies that two file IDs are matching.
1646

1647
  Differences in the inode/device are not accepted, but an older
1648
  timestamp for fi_disk is accepted.
1649

1650
  @param fi_disk: tuple (dev, inode, mtime) representing the actual
1651
      file data
1652
  @param fi_ours: tuple (dev, inode, mtime) representing the last
1653
      written file data
1654
  @rtype: boolean
1655

1656
  """
1657
  (d1, i1, m1) = fi_disk
1658
  (d2, i2, m2) = fi_ours
1659

    
1660
  return (d1, i1) == (d2, i2) and m1 <= m2
1661

    
1662

    
1663
def SafeWriteFile(file_name, file_id, **kwargs):
1664
  """Wraper over L{WriteFile} that locks the target file.
1665

1666
  By keeping the target file locked during WriteFile, we ensure that
1667
  cooperating writers will safely serialise access to the file.
1668

1669
  @type file_name: str
1670
  @param file_name: the target filename
1671
  @type file_id: tuple
1672
  @param file_id: a result from L{GetFileID}
1673

1674
  """
1675
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
1676
  try:
1677
    LockFile(fd)
1678
    if file_id is not None:
1679
      disk_id = GetFileID(fd=fd)
1680
      if not VerifyFileID(disk_id, file_id):
1681
        raise errors.LockError("Cannot overwrite file %s, it has been modified"
1682
                               " since last written" % file_name)
1683
    return WriteFile(file_name, **kwargs)
1684
  finally:
1685
    os.close(fd)
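
# Example (illustration only): a read-modify-write cycle that refuses to
# overwrite a file somebody else changed in the meantime; the path and data
# below are arbitrary.
#
#   fid = GetFileID(path="/var/lib/myapp/state")
#   data = ReadFile("/var/lib/myapp/state")
#   SafeWriteFile("/var/lib/myapp/state", fid, data=data + "new entry\n")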
1686

    
1687

    
1688
def ReadOneLineFile(file_name, strict=False):
1689
  """Return the first non-empty line from a file.
1690

1691
  @type strict: boolean
1692
  @param strict: if True, abort if the file has more than one
1693
      non-empty line
1694

1695
  """
1696
  file_lines = ReadFile(file_name).splitlines()
1697
  full_lines = filter(bool, file_lines)
1698
  if not file_lines or not full_lines:
1699
    raise errors.GenericError("No data in one-liner file %s" % file_name)
1700
  elif strict and len(full_lines) > 1:
1701
    raise errors.GenericError("Too many lines in one-liner file %s" %
1702
                              file_name)
1703
  return full_lines[0]
1704

    
1705

    
1706
def FirstFree(seq, base=0):
1707
  """Returns the first non-existing integer from seq.
1708

1709
  The seq argument should be a sorted list of positive integers. The
1710
  first time the index of an element is smaller than the element
1711
  value, the index will be returned.
1712

1713
  The base argument is used to start at a different offset,
1714
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1715

1716
  Example: C{[0, 1, 3]} will return I{2}.
1717

1718
  @type seq: sequence
1719
  @param seq: the sequence to be analyzed.
1720
  @type base: int
1721
  @param base: use this value as the base index of the sequence
1722
  @rtype: int
1723
  @return: the first non-used index in the sequence
1724

1725
  """
1726
  for idx, elem in enumerate(seq):
1727
    assert elem >= base, "Passed element is higher than base offset"
1728
    if elem > idx + base:
1729
      # idx is not used
1730
      return idx + base
1731
  return None
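
# Examples (sketch): FirstFree returns the first unused index in a sorted
# sequence, or None if there is no hole.
#
#   FirstFree([0, 1, 3])          # -> 2
#   FirstFree([3, 4, 6], base=3)  # -> 5
#   FirstFree([0, 1, 2])          # -> None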


def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the file descriptor.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None


class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout


def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the file descriptor.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result
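
# Example (sketch): waiting up to two seconds for data on the read end of a
# pipe; both file descriptors here are created just for the illustration.
#
#   (rfd, wfd) = os.pipe()
#   if WaitForFdCondition(rfd, select.POLLIN, 2.0) is None:
#     ...  # timed out without any event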


def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    CloseFdNoError(fd)


def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the write end of the pipe used for reporting errors during
      daemon startup

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      CloseFdNoError(rpipe)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    CloseFdNoError(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode) # Exit parent of the first child.

  SetupDaemonFDs(logfile, None)
  return wpipe


def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def StopDaemon(name):
  """Stop a daemon.

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if result.failed:
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: string
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile


def RemovePidFile(pidfile):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type pidfile: string
  @param pidfile: Path to the file to be removed

  """
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfile)
  except Exception: # pylint: disable-msg=W0703
    pass
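
# Example (sketch): typical pidfile lifecycle for a daemon; "exampled" is a
# hypothetical daemon name.
#
#   pidfile = DaemonPidFileName("exampled")
#   fd = WritePidFile(pidfile)      # keeps the pidfile locked through "fd"
#   try:
#     ...  # daemon main loop
#   finally:
#     RemovePidFile(pidfile)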


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
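
# Example (sketch): terminate one of our own children, escalating to SIGKILL
# after ten seconds; "child_pid" is a hypothetical PID.
#
#   KillProcess(child_pid, timeout=10, waitpid=True)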


def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem objects (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: list
  @param search_path: directories in which to search for the name
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
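
# Example (sketch): the volume group list is a name -> size (MiB) mapping;
# the values used here are made up.
#
#   CheckVolumeGroupSize({"xenvg": 20480}, "xenvg", 10240)   # -> None
#   CheckVolumeGroupSize({"xenvg": 20480}, "other", 10240)
#   # -> "volume group 'other' missing"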


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)
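
# Examples (sketch): SplitTime and MergeTime are inverses of each other.
#
#   SplitTime(1.5)           # -> (1, 500000)
#   MergeTime((1, 500000))   # -> 1.5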


def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This prevents things like /dir/../../other/path from being considered valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure at least one path is passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
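
# Examples (sketch): PathJoin only accepts absolute, normalized results.
#
#   PathJoin("/tmp", "subdir", "file")   # -> "/tmp/subdir/file"
#   PathJoin("/tmp", "../etc/passwd")    # raises ValueError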


def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp
  @return: Seconds since the Epoch (1970-01-01 00:00:00 UTC)

  """
  m = _ASN1_TIME_REGEX.match(value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  if expired:
    msg = "Certificate is expired"

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTime(not_before), FormatTime(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTime(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTime(not_after)

    return (CERT_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTime(not_before))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  return (None, None)


def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 time.time(), warn_days, error_days)


def SignX509Certificate(cert, key, salt):
  """Sign an X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
           cert_pem))


def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      break

    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")


def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load certificate
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)


def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  if name in data:
    return (data[name], [])

  for key, value in data.items():
    # Regex objects
    if hasattr(key, "match"):
      m = key.match(name)
      if m:
        return (value, list(m.groups()))

  return None
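
# Example (sketch): lookup by literal key and by regular expression key.
#
#   data = {"disk": 1, re.compile(r"^nic/(\d+)$"): 2}
#   FindMatch(data, "disk")     # -> (1, [])
#   FindMatch(data, "nic/0")    # -> (2, ["0"])
#   FindMatch(data, "unknown")  # -> None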


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  data = []
  mountlines = ReadFile(filename).splitlines()
  for line in mountlines:
    device, mountpoint, fstype, options, _ = line.split(None, 4)
    data.append((device, mountpoint, fstype, options))

  return data


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)
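
# Example (sketch): total and free space of the root filesystem, in MiB.
#
#   (total_mib, free_mib) = GetFilesystemStats("/")
#   BytesToMebibyte(5 * 1024 * 1024)   # -> 5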


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
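
# Example (sketch): run a check in a forked child; only a boolean result
# travels back via the exit code.
#
#   def _HasProcMounts():
#     return os.path.exists("/proc/mounts")
#
#   RunInSeparateProcess(_HasProcMounts)   # -> True on most Linux systems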


def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value


def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)


def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
  """Legacy function to generate self-signed X509 certificate.

  @type filename: str
  @param filename: path to write certificate to
  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: validity of certificate in number of days

  """
  # TODO: Investigate using the cluster name instead of X509_CERT_CN for
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
  # and node daemon certificates have the proper Subject/Issuer.
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
                                                   validity * 24 * 60 * 60)

  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
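
# Example (sketch): generate a throw-away key/certificate pair valid for one
# day; the common name is hypothetical.
#
#   (key_pem, cert_pem) = GenerateSelfSignedX509Cert("node1.example.com",
#                                                    24 * 60 * 60)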


class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be non-negative"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)


def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
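
# Example (sketch): a loop that notices SIGTERM through the injected
# 'signal_handlers' dict.
#
#   @SignalHandled([signal.SIGTERM])
#   def _Loop(signal_handlers=None):
#     while not signal_handlers[signal.SIGTERM].called:
#       ...  # do one unit of work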


class SignalWakeupFd(object):
  """Helper to wake up file descriptor-based waits on signal arrival.

  A pipe is registered via signal.set_wakeup_fd (where available), so code
  waiting on the read end is woken up whenever a signal arrives.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when destroyed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set.

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set.

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
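
# Example (sketch): a field set mixing literal names and a regex field.
#
#   fields = FieldSet("name", "pinst_cnt", r"tag/(.*)")
#   fields.Matches("tag/mytag")            # -> a match object
#   fields.NonMatching(["name", "bogus"])  # -> ["bogus"]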


class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    """
    if self._timeout is None:
      return None

    # Get start time on first calculation
    if self._start_time is None:
      self._start_time = self._time_fn()

    # Calculate remaining time
    remaining_timeout = self._start_time + self._timeout - self._time_fn()

    if not self._allow_negative:
      # Ensure timeout is always >= 0
      return max(0.0, remaining_timeout)

    return remaining_timeout
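
# Example (sketch): spread one 30-second deadline over several steps; the
# helper functions called here are hypothetical.
#
#   deadline = RunningTimeout(30.0, False)
#   DoFirstStep(timeout=deadline.Remaining())
#   DoSecondStep(timeout=deadline.Remaining())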