Statistics
| Branch: | Tag: | Revision:

root / lib / utils.py @ 66e884e1

History | View | Annotate | Download (110.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
_locksheld = []
67
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
68

    
69
debug_locks = False
70

    
71
#: when set to True, L{RunCmd} is disabled
72
no_fork = False
73

    
74
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
75

    
76
HEX_CHAR_RE = r"[a-zA-Z0-9]"
77
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
78
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
79
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
80
                             HEX_CHAR_RE, HEX_CHAR_RE),
81
                            re.S | re.I)
82

    
83
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
84

    
85
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
86
                     '[a-f0-9]{4}-[a-f0-9]{12}$')
87

    
88
# Certificate verification results
89
(CERT_WARNING,
90
 CERT_ERROR) = range(1, 3)
91

    
92
# Flags for mlockall() (from bits/mman.h)
93
_MCL_CURRENT = 1
94
_MCL_FUTURE = 2
95

    
96
#: MAC checker regexp
97
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
98

    
99
(_TIMEOUT_NONE,
100
 _TIMEOUT_TERM,
101
 _TIMEOUT_KILL) = range(3)
102

    
103
#: Shell param checker regexp
104
_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
105

    
106
#: Unit checker regexp
107
_PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$")
108

    
109
#: ASN1 time regexp
110
_ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
111

    
112

    
113
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason, or None
      if the program completed successfully

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
               timeout):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)
    # Initialize explicitly: with __slots__, an unassigned attribute would
    # otherwise raise AttributeError when .fail_reason is read on a
    # successful result
    self.fail_reason = None

    fail_msgs = []
    if self.signal is not None:
      fail_msgs.append("terminated by signal %s" % self.signal)
    elif self.exit_code is not None:
      fail_msgs.append("exited with exit code %s" % self.exit_code)
    else:
      # Neither an exit code nor a signal: we cannot say why it ended
      fail_msgs.append("unable to determine termination reason")

    if timeout_action == _TIMEOUT_TERM:
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
    elif timeout_action == _TIMEOUT_KILL:
      fail_msgs.append(("force termination after timeout of %.2f seconds"
                        " and linger for another %.2f seconds") %
                       (timeout, constants.CHILD_LINGER_TIMEOUT))

    if fail_msgs and self.failed:
      self.fail_reason = CommaJoin(fail_msgs)

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
174

    
175

    
176
def _BuildCmdEnvironment(env, reset):
177
  """Builds the environment for an external program.
178

179
  """
180
  if reset:
181
    cmd_env = {}
182
  else:
183
    cmd_env = os.environ.copy()
184
    cmd_env["LC_ALL"] = "C"
185

    
186
  if env is not None:
187
    cmd_env.update(env)
188

    
189
  return cmd_env
190

    
191

    
192
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False, timeout=None):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether we pipe stdin, stdout and stderr
                      (default behaviour) or run the command interactive
  @type timeout: int
  @param timeout: If not None, timeout in seconds until child process gets
                  killed
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  # 'output' redirects to a file while 'interactive' inherits the parent's
  # stdio; the two redirection modes are mutually exclusive
  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")

  # A plain string is run through the shell; a list is executed directly
  if isinstance(cmd, basestring):
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
                                                     interactive, timeout)
    else:
      # File-redirected output does not support the timeout machinery
      timeout_action = _TIMEOUT_NONE
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  # A negative status means the child was killed by signal -status
  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
268

    
269

    
270
def SetupDaemonEnv(cwd="/", umask=077):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @type cwd: string
  @param cwd: the directory to which to chdir
  @type umask: int
  @param umask: the umask to setup

  """
  os.chdir(cwd)
  os.umask(umask)
  # Detach from the controlling terminal by becoming a session leader
  os.setsid()
283

    
284

    
285
def SetupDaemonFDs(output_file, output_fd):
  """Setups up a daemon's file descriptors.

  Redirects stdin from /dev/null and stdout/stderr either to the given
  file descriptor, to the given file (opened in append mode), or to
  /dev/null if neither is specified.

  @param output_file: if not None, the file to which to redirect
      stdout/stderr
  @param output_fd: if not None, the file descriptor for stdout/stderr

  """
  # check that at most one is defined
  assert [output_file, output_fd].count(None) >= 1

  # Open /dev/null (read-only, only for stdin)
  devnull_fd = os.open(os.devnull, os.O_RDONLY)

  if output_fd is not None:
    pass
  elif output_file is not None:
    # Open output file
    try:
      output_fd = os.open(output_file,
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
    except EnvironmentError, err:
      # NOTE(review): raises a bare Exception rather than a ganeti errors
      # class — presumably only hit during daemonization; confirm callers
      raise Exception("Opening output file failed: %s" % err)
  else:
    output_fd = os.open(os.devnull, os.O_WRONLY)

  # Redirect standard I/O
  os.dup2(devnull_fd, 0)
  os.dup2(output_fd, 1)
  os.dup2(output_fd, 2)
315

    
316

    
317
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  The daemon's PID is written back to the parent through one pipe; any
  error message produced before exec is reported through a second pipe
  whose write side is marked close-on-exec (so a successful exec closes
  it and unblocks the parent with an empty message).

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # Raise only when 'output' is given together with 'output_fd'
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to
        # arrive) and read up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
409

    
410

    
411
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Performs the second fork, sets up the daemon environment and file
  descriptors, reports its PID through C{pidpipe_write} and then replaces
  itself with the target program via C{execvp(e)}. On any failure before
  exec, the error is reported back through C{errpipe_write}. Never
  returns; always terminates via exec or C{os._exit}.

  @param errpipe_read: read side of the error pipe (closed here)
  @param errpipe_write: write side of the error pipe, marked
      close-on-exec so a successful exec signals the parent
  @param pidpipe_read: read side of the PID pipe (closed here)
  @param pidpipe_write: write side of the PID pipe
  @param args: argument list for the program to execute
  @param env: environment dict, or None to inherit
  @param cwd: working directory for the daemon
  @param output: output file name, if any
  @param fd_output: output file descriptor, if any
  @param pidfile: path of the PID file to create, if any

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    SetupDaemonEnv()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      fd_pidfile = WritePidFile(pidfile)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      # The daemon itself must inherit the PID file descriptor
      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    SetupDaemonFDs(output, fd_output)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  # Only reached when exec failed
  os._exit(1) # pylint: disable-msg=W0212
474

    
475

    
476
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is not None:
    # Never write an empty message, the reader treats data as an error
    message = err or "<unknown error>"
    RetryOnSignal(os.write, fd, message)
491

    
492

    
493
def _CheckIfAlive(child):
494
  """Raises L{RetryAgain} if child is still alive.
495

496
  @raises RetryAgain: If child is still alive
497

498
  """
499
  if child.poll() is None:
500
    raise RetryAgain()
501

    
502

    
503
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  @param child: a subprocess-like object providing C{poll()}
  @param timeout: maximum time to wait, in seconds (clamped to >= 0)

  """
  remaining = max(0, timeout)
  try:
    # Poll with increasing delays until the child exits or time runs out
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), remaining, args=[child])
  except RetryTimeout:
    # Timing out here is expected; the caller re-checks the child
    pass
511

    
512

    
513
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after the programm gets terminated
  @rtype: tuple
  @return: (out, err, status, timeout_action)

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  if interactive:
    # Inherit the parent's stdio instead of piping
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()

  # Set once the execution timeout has fired and SIGTERM was sent; then
  # counts the grace period before SIGKILL
  linger_timeout = None

  if timeout is None:
    poll_timeout = None
  else:
    # Callable returning the remaining seconds until the timeout expires
    poll_timeout = RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))

  timeout_action = _TIMEOUT_NONE

  if not interactive:
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    # Non-blocking reads, so a single poll event never stalls the loop
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    # Drain stdout/stderr until both report EOF (or the timeouts expire)
    while fdmap:
      if poll_timeout:
        pt = poll_timeout() * 1000
        if pt < 0:
          # Execution timeout expired: send SIGTERM once, then keep
          # draining output for the linger period before giving up
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
            linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
          pt = linger_timeout() * 1000
          if pt < 0:
            break
      else:
        pt = None

      pollresult = RetryOnSignal(poller.poll, pt)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status, timeout_action
638

    
639

    
640
def _RunCmdFile(cmd, env, via_shell, output, cwd):
641
  """Run a command and save its output to a file.
642

643
  @type  cmd: string or list
644
  @param cmd: Command to run
645
  @type env: dict
646
  @param env: The environment to use
647
  @type via_shell: bool
648
  @param via_shell: if we should run via the shell
649
  @type output: str
650
  @param output: the filename in which to save the output
651
  @type cwd: string
652
  @param cwd: the working directory for the program
653
  @rtype: int
654
  @return: the exit status
655

656
  """
657
  fh = open(output, "a")
658
  try:
659
    child = subprocess.Popen(cmd, shell=via_shell,
660
                             stderr=subprocess.STDOUT,
661
                             stdout=fh,
662
                             stdin=subprocess.PIPE,
663
                             close_fds=True, env=env,
664
                             cwd=cwd)
665

    
666
    child.stdin.close()
667
    status = child.wait()
668
  finally:
669
    fh.close()
670
  return status
671

    
672

    
673
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  # Read-modify-write of the descriptor flags
  current = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    updated = current | fcntl.FD_CLOEXEC
  else:
    updated = current & ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, updated)
690

    
691

    
692
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  # Read-modify-write of the file status flags
  current = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    updated = current | os.O_NONBLOCK
  else:
    updated = current & ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, updated)
709

    
710

    
711
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  @param fn: the function to call
  @param args: positional arguments passed through to C{fn}
  @param kwargs: keyword arguments passed through to C{fn}
  @return: whatever C{fn} returns

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError, err:
      # Only EINTR is retried; any other error propagates
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error), err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
726

    
727

    
728
def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    # An unreadable/missing directory is not fatal, just return nothing
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    # Only executable regular files matching the plugin name mask are run
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        # Deliberately broad: one broken script must not stop the others
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr
763

    
764

    
765
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    # ENOENT (already gone) and EISDIR (it's a directory) are ignored
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
780

    
781

    
782
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError, err:
    # Only a missing directory is ignored; e.g. ENOTEMPTY propagates
    if err.errno != errno.ENOENT:
      raise
798

    
799

    
800
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    # Any other error (or ENOENT without mkdir) propagates unchanged
    raise
826

    
827

    
828
def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  @type path: str
  @param path: the directory (tree) to create
  @type mode: int
  @param mode: the mode (permissions) for newly created directories

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise
842

    
843

    
844
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  has_internals = (hasattr(tempfile, "_once_lock") and
                   hasattr(tempfile, "_name_sequence"))

  if not has_internals:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Dropping the sequence forces tempfile to re-create (and re-seed) it
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
865

    
866

    
867
def _FingerprintFile(filename):
868
  """Compute the fingerprint of a file.
869

870
  If the file does not exist, a None will be returned
871
  instead.
872

873
  @type filename: str
874
  @param filename: the filename to checksum
875
  @rtype: str
876
  @return: the hex digest of the sha checksum of the contents
877
      of the file
878

879
  """
880
  if not (os.path.exists(filename) and os.path.isfile(filename)):
881
    return None
882

    
883
  f = open(filename)
884

    
885
  fp = compat.sha1_hash()
886
  while True:
887
    data = f.read(4096)
888
    if not data:
889
      break
890

    
891
    fp.update(data)
892

    
893
  return fp.hexdigest()
894

    
895

    
896
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  fingerprints = {}

  for fname in files:
    digest = _FingerprintFile(fname)
    # Missing/non-regular files yield None and are silently omitted
    if digest:
      fingerprints[fname] = digest

  return fingerprints
914

    
915

    
916
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  The dict is modified in place: values are coerced to the type declared
  for their key, or a L{errors.TypeEnforcementError} is raised.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values
  @raise errors.TypeEnforcementError: if a key is unknown or a value
      cannot be coerced
  @raise errors.ProgrammerError: if a declared type is not enforceable

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values are accepted as-is, whatever their type
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # False is coerced to the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        # Non-empty strings must spell one of the boolean constants
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
983

    
984

    
985
def _GetProcStatusPath(pid):
986
  """Returns the path for a PID's proc status file.
987

988
  @type pid: int
989
  @param pid: Process ID
990
  @rtype: string
991

992
  """
993
  return "/proc/%d/status" % pid
994

    
995

    
996
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    # Probe the /proc entry; a missing path component means the process
    # is gone, while EINVAL is treated as transient and retried
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    # Re-raise the original error from the last retry attempt
    err.RaiseInner()


def _ParseSigsetT(sigset):
1032
  """Parse a rendered sigset_t value.
1033

1034
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1035
  function.
1036

1037
  @type sigset: string
1038
  @param sigset: Rendered signal set from /proc/$pid/status
1039
  @rtype: set
1040
  @return: Set of all enabled signal numbers
1041

1042
  """
1043
  result = set()
1044

    
1045
  signum = 0
1046
  for ch in reversed(sigset):
1047
    chv = int(ch, 16)
1048

    
1049
    # The following could be done in a loop, but it's easier to read and
1050
    # understand in the unrolled form
1051
    if chv & 1:
1052
      result.add(signum + 1)
1053
    if chv & 2:
1054
      result.add(signum + 2)
1055
    if chv & 4:
1056
      result.add(signum + 3)
1057
    if chv & 8:
1058
      result.add(signum + 4)
1059

    
1060
    signum += 4
1061

    
1062
  return result
1063

    
1064

    
1065
def _GetProcStatusField(pstatus, field):
1066
  """Retrieves a field from the contents of a proc status file.
1067

1068
  @type pstatus: string
1069
  @param pstatus: Contents of /proc/$pid/status
1070
  @type field: string
1071
  @param field: Name of field whose value should be returned
1072
  @rtype: string
1073

1074
  """
1075
  for line in pstatus.splitlines():
1076
    parts = line.split(":", 1)
1077

    
1078
    if len(parts) < 2 or parts[0] != field:
1079
      continue
1080

    
1081
    return parts[1].strip()
1082

    
1083
  return None
1084

    
1085

    
1086
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @type status_path: string
  @param status_path: Path to the status file to read (default is the
      process' own /proc/$pid/status; mainly useful for tests)
  @rtype: bool
  @return: whether the process has installed a handler for the signal

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
      return False
    raise

  # SigCgt lists the signals for which a handler has been registered
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    # A missing pidfile is a normal condition and not worth logging
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    # Unparseable contents are logged with the traceback, but still
    # treated as "no process"
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  # Lock could be acquired, so no live daemon holds the file
  return None


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # An exact match short-circuits the prefix logic entirely
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  # Raw string: "\." must reach the regex engine, not be treated as a
  # (invalid) string escape
  mo = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      # Track full (non-prefix) matches separately; they take precedence
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None


def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @rtype: number or string
  @return: the name, unchanged, when valid
  @raise errors.OpPrereqError: when the service name is invalid

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = (numport >= 0 and numport < (1 << 16))

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name


def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  # "--units m --nosuffix" makes vgs report plain MiB numbers
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    # Best-effort: a failing vgs yields an empty result, not an error
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge device exposes a "bridge" subdirectory in sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)


def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  # Raw strings: "\D"/"\d" are regex classes, not string escapes
  _SORTER_BASE = r"(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % ((_SORTER_BASE, ) * 8)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile(r"^\D*$")

  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    return int(val)

  # Decorate-sort-undecorate: sort on the split-and-converted key, but
  # return the original names
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    # Failed conversions fall back to the unmodified input
    return val


def IsValidShellParam(word):
  """Verifies whether the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return _SHELLPARAM_REGEX.match(word) is not None


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: when an argument is not shell-safe

  """
  for word in args:
    if IsValidShellParam(word):
      continue
    # Abort on the first unsafe argument
    raise errors.ProgrammerError("Shell argument '%s' contains"
                                 " invalid characters" % word)
  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  if units == 'h':
    # Automatic scaling: pick the largest unit that keeps the number small,
    # and append the matching suffix
    if value < 1024:
      return "%d%s" % (round(value, 0), 'M')
    elif value < (1024 * 1024):
      return "%0.1f%s" % (round(float(value) / 1024, 1), 'G')
    else:
      return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), 'T')
  elif units == 'm':
    # Fixed units carry no suffix
    return "%d" % round(value, 0)
  elif units == 'g':
    return "%0.1f" % round(float(value) / 1024, 1)
  else:
    return "%0.1f" % round(float(value) / 1024 / 1024, 1)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  @type input_string: str or number
  @param input_string: the value to parse
  @rtype: int
  @return: the value in MiB, rounded up to a multiple of 4
  @raise errors.UnitParseError: when the input cannot be parsed

  """
  m = _PARSEUNIT_REGEX.match(str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  # A missing unit is interpreted as MiB
  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value


def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: on malformed range definitions

  """
  if not cpu_mask:
    return []
  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    # "except ... as" form works on Python 2.6+ and Python 3
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      # A single CPU ID is a range with identical boundaries
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list


def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  The key is only appended if no line with the same fields already
  exists; comparison ignores whitespace differences.

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      # for/else: key was not found; append it, terminating the previous
      # line with a newline first if needed
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  # Rewrite via a temporary file in the same directory so the final
  # os.rename is atomic
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    # Clean up the temporary file on any failure, then re-raise
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  Any pre-existing entries for the same IP address are replaced.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        # Drop any existing (non-comment) entry for this IP address
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          continue
        out.write(line)

      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)


def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  # The short name (first dot-separated component) is added as an alias
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [hostname.split(".")[0]])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            # Strip every occurrence of the hostname; keep the line only
            # if other names remain for the address
            while hostname in names:
              names.remove(hostname)
            if names:
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
  # Also remove the short (first component) form of the name
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat
  them as separators.

  @rtype: str
  @return: the current local time as C{YYYY-MM-DD_HH_MM_SS}

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")


def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  # Timestamped prefix keeps multiple backups of the same file apart
  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    # mkstemp guarantees a unique, non-clobbering destination in the
    # same directory as the original
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  # Values made only of known-safe characters need no quoting at all
  if not _re_shell_unquoted.match(value):
    # Single-quote the value; embedded single quotes become '\''
    escaped = value.replace("'", "'\\''")
    return "'%s'" % escaped
  return value


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return " ".join(ShellQuote(arg) for arg in args)


class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # Two spaces per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: file handle the script is written to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent = self._indent + 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    # Never outdent past the left margin
    assert self._indent > 0
    self._indent = self._indent - 1

  def Write(self, txt, *args):
    """Write line to output file.

    Extra positional arguments are interpolated into C{txt} with the
    C{%} operator; without them, C{txt} is written verbatim.

    """
    assert self._indent >= 0

    self._fh.write(self.INDENT_STR * self._indent)

    if args:
      self._fh.write(txt % args)
    else:
      self._fh.write(txt)

    self._fh.write("\n")


def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  # Names starting with a dot are considered hidden
  return [name for name in os.listdir(path) if not name.startswith(".")]


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @type user: string or int
  @param user: user name or user ID
  @param default: the value returned when the user is not found
  @return: the user's home directory, or C{default} if not found

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # No such user in the password database
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  # Each read of this proc file yields a fresh random UUID; strip the
  # trailing newline
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  # binascii.hexlify behaves identically to the former
  # str.encode('hex'), but also works on Python 3, where the "hex"
  # codec was removed
  import binascii
  return binascii.hexlify(os.urandom(numbytes))


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: when a directory cannot be created, its
      permissions cannot be set, or the path exists but is not a directory

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError as err:
      # An already-existing directory is fine; anything else is fatal.
      # ("except ... as" form works on Python 2.6+ and Python 3.)
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    try:
      # mkdir's mode argument is subject to umask, and pre-existing
      # directories keep their old mode, so enforce it explicitly
      os.chmod(dir_name, dir_mode)
    except EnvironmentError as err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type file_name: str
  @param file_name: path of the file to read
  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  handle = open(file_name, "r")
  try:
    data = handle.read(size)
  finally:
    # Close the handle even when read() raises
    handle.close()
  return data


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of fn/data must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  # atime and mtime can only be set together (os.utime needs both)
  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temporary file in the same directory, then rename over
  # the target so readers never observe a partially-written file
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    # Ownership and permissions are set before any data is written
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # Flush the new contents to disk before making them visible
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result


def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Exactly one of the file path or an open file descriptor must be
  given; the descriptor variant uses fstat(2), the path variant
  stat(2).

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)
  @raise errors.ProgrammerError: if neither or both arguments are given

  """
  if (path is None) == (fd is None):
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  if fd is not None:
    stat_result = os.fstat(fd)
  else:
    stat_result = os.stat(path)

  return (stat_result.st_dev, stat_result.st_ino, stat_result.st_mtime)
2001

    
2002

    
2003
def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  The device/inode pair must match exactly, while the on-disk
  timestamp may be older than (or equal to) the last one we wrote.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (our_dev, our_ino, our_mtime) = fi_ours

  if (disk_dev, disk_ino) != (our_dev, our_ino):
    # different filesystem object altogether
    return False

  # an older on-disk timestamp is acceptable
  return disk_mtime <= our_mtime
2020

    
2021

    
2022
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}; if None, the file is
      overwritten unconditionally
  @return: the return value of L{WriteFile}
  @raise errors.LockError: if C{file_id} is given and the on-disk file
      has been modified since it was recorded

  """
  # O_CREAT so the lock target exists even before the first write
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  try:
    # hold the lock for the whole write, so cooperating writers
    # serialise on this descriptor
    LockFile(fd)
    if file_id is not None:
      # compare against the descriptor, not the path, so we check the
      # same object we have locked
      disk_id = GetFileID(fd=fd)
      if not VerifyFileID(disk_id, file_id):
        raise errors.LockError("Cannot overwrite file %s, it has been modified"
                               " since last written" % file_name)
    return WriteFile(file_name, **kwargs)
  finally:
    os.close(fd)
2045

    
2046

    
2047
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type file_name: str
  @param file_name: the file to read from
  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line
  @rtype: str
  @return: the first non-empty line
  @raise errors.GenericError: if the file contains no non-empty lines,
      or (with C{strict}) more than one

  """
  # A list comprehension instead of filter(): it always yields a real
  # list (len()-able on all Python versions) and makes the single
  # emptiness check below sufficient (an empty file trivially has no
  # non-empty lines, so the old separate "no lines at all" test was
  # redundant)
  full_lines = [line for line in ReadFile(file_name).splitlines() if line]
  if not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]
2063

    
2064

    
2065
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence, or None if the
      sequence is gap-free

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    candidate = idx + base
    if elem > candidate:
      # the expected value is missing, so "candidate" is free
      return candidate
  return None
2091

    
2092

    
2093
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # also report error/hangup conditions, even if the caller only asked
  # for e.g. POLLIN
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    # interrupted by a signal: report "nothing happened" and let the
    # caller decide whether to retry
    io_events = []
  if io_events and io_events[0][1] & check:
    # we registered a single fd, so only the first event entry matters
    return io_events[0][1]
  else:
    return None
2130

    
2131

    
2132
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    # remaining time budget, updated by L{Retry} via UpdateTimeout
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Poll once for the condition, raising L{RetryAgain} on no event."""
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Record the remaining timeout (called between retry attempts)."""
    self.timeout = timeout
2153

    
2154

    
2155
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is not None:
    # the helper tracks how much of the timeout remains across
    # signal-interrupted poll calls, via the wait_fn callback
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    # no timeout: poll (potentially forever) until a condition occurs
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result
2182

    
2183

    
2184
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  result = []
  for item in seq:
    if item not in seen:
      seen.add(item)
      result.append(item)
  return result
2197

    
2198

    
2199
def FindDuplicates(seq):
  """Identifies duplicates in a list.

  Does not preserve element order.

  @type seq: sequence
  @param seq: Sequence with source elements
  @rtype: list
  @return: List of duplicate elements from seq

  """
  # count occurrences, then keep every element seen more than once
  counts = {}
  for item in seq:
    counts[item] = counts.get(item, 0) + 1

  return [item for (item, count) in counts.items() if count > 1]
2220

    
2221

    
2222
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  if _MAC_CHECK.match(mac) is None:
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  # normalised form is all-lowercase
  return mac.lower()
2241

    
2242

    
2243
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration, in seconds
  @rtype: tuple
  @return: a (success, message) tuple: (False, error message) for a
      negative duration, (True, None) after sleeping otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None
2256

    
2257

    
2258
def _CloseFDNoErr(fd, retries=5):
2259
  """Close a file descriptor ignoring errors.
2260

2261
  @type fd: int
2262
  @param fd: the file descriptor
2263
  @type retries: int
2264
  @param retries: how many retries to make, in case we get any
2265
      other error than EBADF
2266

2267
  """
2268
  try:
2269
    os.close(fd)
2270
  except OSError, err:
2271
    if err.errno != errno.EBADF:
2272
      if retries > 0:
2273
        _CloseFDNoErr(fd, retries - 1)
2274
    # else either it's closed already or we're out of retries, so we
2275
    # ignore this and go on
2276

    
2277

    
2278
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        # sysconf can return -1 for "no limit"; fall back to a sane value
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  # prefer the hard RLIMIT_NOFILE limit as the upper bound, falling
  # back to the sysconf-derived value when the limit is unlimited
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
2308

    
2309

    
2310
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @param _ctypes: the ctypes module (injection point for unit tests)

  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  # NOTE(review): hard-codes the glibc soname; non-glibc platforms
  # would need a different library name
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  # a non-zero return value from mlockall indicates failure
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
2342

    
2343

    
2344
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon, using the classic double-fork
  sequence with a pipe for reporting startup errors back to the
  original parent.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the write end of the error-reporting pipe (only the daemon
      process, i.e. the second child, ever returns from this function)

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      # the daemon keeps only the write end, for reporting errors
      _CloseFDNoErr(rpipe)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    _CloseFDNoErr(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode) # Exit parent of the first child.

  # Only the daemon (second child) reaches this point: redirect its
  # standard file descriptors to the logfile and hand the pipe's write
  # end back to the caller
  SetupDaemonFDs(logfile, None)
  return wpipe
2391

    
2392

    
2393
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  basename = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, basename)
2404

    
2405

    
2406
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the daemon is running (possibly after being
      started), False if starting it failed

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2417

    
2418

    
2419
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the stop command succeeded, False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2430

    
2431

    
2432
def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: string
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  # write our pid; the fd stays open (and locked) on purpose
  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile
2457

    
2458

    
2459
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(DaemonPidFileName(name))
  except: # pylint: disable-msg=W0702
    # best-effort cleanup: a leftover pidfile is not fatal
    pass
2474

    
2475

    
2476
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        # non-blocking reap so we never hang if the child isn't done yet
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  # first attempt with the requested signal
  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    """Raises L{RetryAgain} while the process is still alive."""
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      # not our child: we can only keep polling IsProcessAlive
      raise RetryAgain()

    if result_pid > 0:
      # our child has been reaped
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
2537

    
2538

    
2539
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for search_dir in search_path:
    # FIXME: investigate switch to PathJoin
    candidate = os.path.sep.join([search_dir, name])
    # accept the candidate only if the user test passes and the path
    # really resolves to the requested basename
    if test(candidate) and os.path.basename(candidate) == name:
      return candidate

  return None
2571

    
2572

    
2573
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
2596

    
2597

    
2598
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # work in integer microseconds to avoid float rounding surprises
  total_microseconds = int(value * 1000000)
  (seconds, microseconds) = divmod(total_microseconds, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
2614

    
2615

    
2616
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  # keep the original arithmetic form so the float result is
  # bit-for-bit identical to previous versions
  fractional = float(microseconds) * 0.000001
  return float(seconds) + fractional
2632

    
2633

    
2634
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    # fallback stream, used when writing to the logfile itself fails
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        # last resort: write the formatted record to the console
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass
2666

    
2667

    
2668
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers; iterate over a copy of the
  # list, since removeHandler() mutates it and iterating the live list
  # would skip every other handler
  for handler in root_logger.handlers[:]:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
2759

    
2760

    
2761
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  """
  if not os.path.isabs(path):
    return False
  return os.path.normpath(path) == path
2768

    
2769

    
2770
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # at least one component is required
  assert args
  # the first component must be an absolute, normalized path
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  joined = os.path.join(*args)
  # the joined path must itself still be normalized and absolute
  if not IsNormAbsPath(joined):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # and it must not have escaped the original root
  common = os.path.commonprefix([root, joined])
  if common != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (common, root))
  return joined
2798

    
2799

    
2800
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  handle = open(fname, "r")
  try:
    # seek to the end, then back up (at most) 4 KB
    handle.seek(0, 2)
    offset = max(0, handle.tell() - 4096)
    handle.seek(offset, 0)
    tail_data = handle.read()
  finally:
    handle.close()

  return tail_data.splitlines()[-lines:]
2824

    
2825

    
2826
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp as a human-readable date/time string.

  Note: despite the historical name, the timestamp is rendered via
  C{time.gmtime}, i.e. in UTC, not in the local timezone; the previous
  docstring claimed local-timezone formatting, which the code never did.

  @type secs: number
  @param secs: Unix timestamp (seconds since the epoch)
  @rtype: str
  @return: timestamp formatted as "YYYY-MM-DD HH:MM:SS ZONE"
      (the %F/%T strftime shortcuts are glibc extensions)

  """
  return time.strftime("%F %T %Z", time.gmtime(secs))
2831

    
2832

    
2833
def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp
  @return: seconds since the Epoch (UTC)
  @raise ValueError: if the value carries neither a "Z" suffix nor an
      explicit UTC offset

  """
  m = _ASN1_TIME_REGEX.match(value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  # NOTE(review): parsed[:7] passes struct_time's tm_wday as the
  # datetime microsecond argument; harmless here because
  # utctimetuple() below discards sub-second data, but parsed[:6]
  # would be cleaner
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())
2858

    
2859

    
2860
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @return: tuple of (not_before, not_after) Unix timestamps, where
      either element can be None if the information is unavailable

  """
  def _GetTimestamp(attr_name):
    """Fetch and parse one validity boundary, or None if unsupported."""
    # The get_notBefore and get_notAfter functions are only supported in
    # pyOpenSSL 0.7 and above.
    try:
      getter = getattr(cert, attr_name)
    except AttributeError:
      return None

    asn1_value = getter()
    if asn1_value is None:
      return None
    return _ParseAsn1Generalizedtime(asn1_value)

  return (_GetTimestamp("get_notBefore"), _GetTimestamp("get_notAfter"))
2894

    
2895

    
2896
def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @return: tuple of (severity, message), both C{None} when nothing is wrong

  """
  if expired:
    msg = "Certificate is expired"
    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTimestampWithTZ(not_before),
               FormatTimestampWithTZ(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
    return (CERT_ERROR, msg)

  if not_before is not None and now < not_before:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTimestampWithTZ(not_before))

  if not_after is not None:
    days_left = int((not_after - now) / (24 * 3600))
    msg = "Certificate expires in about %d days" % days_left

    if error_days is not None and days_left <= error_days:
      return (CERT_ERROR, msg)
    if warn_days is not None and days_left <= warn_days:
      return (CERT_WARNING, msg)

  return (None, None)
2945

    
2946

    
2947
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Note: depending on the pyOpenSSL version, the validity can come back as
  # (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  now = time.time()
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 now, warn_days, error_days)
2963

    
2964

    
2965
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format
  @raise errors.GenericError: if the salt contains invalid characters

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  pem_data = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
  signature = Sha1Hmac(key, pem_data, salt=salt)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt, signature, pem_data))
2990

    
2991

    
2992
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format with signature header
  @return: tuple of (salt, signature)
  @raise errors.GenericError: if no signature header is found

  """
  # Only look at the headers preceding the PEM payload
  for raw_line in cert_pem.splitlines():
    if raw_line.startswith("---"):
      break

    match = X509_SIGNATURE.match(raw_line.strip())
    if match is not None:
      return (match.group("salt"), match.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
3006

    
3007

    
3008
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: if the signature is missing or does not match

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Parse the certificate, then serialize it again so the HMAC is computed
  # over a canonical representation
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
3031

    
3032

    
3033
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to authenticate
  @type salt: string or None
  @param salt: Optional salt, prepended to the text before digesting
  @rtype: string
  @return: Hexadecimal digest

  """
  if salt:
    payload = salt + text
  else:
    payload = text

  return hmac.new(key, payload, compat.sha1).hexdigest()
3049

    
3050

    
3051
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to verify
  @type digest: string
  @param digest: Expected digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # Comparison is case-insensitive on the hexadecimal representation
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
3066

    
3067

    
3068
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if not isinstance(text, str):
    # unicode input (on Python 2); force it down to ASCII bytes first
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      # Fixed: this used to append the bogus raw literal r'\'r' (i.e. the
      # three characters backslash, quote, "r") instead of "\r"
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
3103

    
3104

    
3105
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be part of an element.
  With a comma separator:
    - a plain , separates the elements
    - a backslash followed by the separator stands for a literal separator
      inside an element
    - a doubled backslash before the separator stands for a literal
      backslash followed by a real separator

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # First split on the separator without considering escapes, then merge
  # back any piece ending in an odd number of backslashes (which means the
  # separator following it was escaped)
  pieces = text.split(sep)
  merged = []
  idx = 0
  while idx < len(pieces):
    part = pieces[idx]
    idx += 1
    num_backslashes = len(part) - len(part.rstrip("\\"))
    if num_backslashes % 2 == 1:
      # Escaped separator: glue this piece to the next one; all the
      # backslashes are kept and reduced by the unescaping step below
      part = part + sep + pieces[idx]
      idx += 1
    merged.append(part)
  # Finally, reduce backslash-something to something
  return [re.sub(r"\\(.)", r"\1", piece) for piece in merged]
3145

    
3146

    
3147
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  # Stringify each item first so non-string values are accepted
  return ", ".join(map(str, names))
3155

    
3156

    
3157
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)
  @return: the match, or C{None} when nothing matches

  """
  try:
    # Direct key hit takes precedence
    return (data[name], [])
  except KeyError:
    pass

  for (key, value) in data.items():
    matcher = getattr(key, "match", None)
    if matcher is None:
      # Not a regular expression object
      continue
    found = matcher(name)
    if found:
      return (value, list(found.groups()))

  return None
3182

    
3183

    
3184
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  # 1 MiB = 2^20 bytes; the result is rounded to the nearest whole mebibyte
  mib = value / (1024.0 * 1024.0)
  return int(round(mib, 0))
3194

    
3195

    
3196
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  total_bytes = 0

  # Sum the (non-dereferenced) size of every file in the tree
  for (dirpath, _, filenames) in os.walk(path):
    for entry in filenames:
      total_bytes += os.lstat(PathJoin(dirpath, entry)).st_size

  return BytesToMebibyte(total_bytes)
3213

    
3214

    
3215
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  entries = []
  for mount_line in ReadFile(filename).splitlines():
    # Only the first four whitespace-separated fields are of interest; the
    # remainder of the line is discarded
    (device, mountpoint, fstype, options, _) = mount_line.split(None, 4)
    entries.append((device, mountpoint, fstype, options))

  return entries
3233

    
3234

    
3235
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: int
  @return: tuple of (Total space, Free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_frsize is the block size the f_blocks/f_bavail counters refer to
  frsize = stats.f_frsize
  free_mb = BytesToMebibyte(stats.f_bavail * frsize)
  total_mb = BytesToMebibyte(stats.f_blocks * frsize)

  return (total_mb, free_mb)
3249

    
3250

    
3251
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child was killed by a signal or exited
    with a code other than 0 or 1

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function; the exit code encodes the boolean result
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    # os._exit skips cleanup handlers inherited from the parent
    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  # Anything but a clean 0/1 exit means the child failed
  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3296

    
3297

    
3298
def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Whether process was found

  """
  try:
    fn(*args, **kwargs)
  except EnvironmentError, err:
    # Ignore ESRCH ("no such process"); everything else is re-raised
    if err.errno == errno.ESRCH:
      return False
    raise

  return True
3316

    
3317

    
3318
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  @type fn: callable
  @param fn: Function to be called
  @return: the function's result, or C{None} when interrupted (EINTR)

  """
  try:
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
      return None
    else:
      raise
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
      return None
    else:
      raise
3336

    
3337

    
3338
def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock
  @raise errors.LockError: if the file is already locked by someone else

  """
  try:
    # Non-blocking exclusive lock
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    # EAGAIN means another process holds the lock
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise
3351

    
3352

    
3353
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if not isinstance(val, (int, float)):
    # Covers None and any other non-numeric input
    return "N/A"

  # The %F and %T conversions work on Linux but are not guaranteed to be
  # available on all platforms
  return time.strftime("%F %T", time.localtime(val))
3366

    
3367

    
3368
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  remaining = round(secs, 0)
  chunks = []

  if remaining > 0:
    # Negative values would be a bit tricky, so only positive durations are
    # broken down into days/hours/minutes
    for (suffix, length) in (("d", 86400), ("h", 3600), ("m", 60)):
      (count, remaining) = divmod(remaining, length)
      if count or chunks:
        chunks.append("%d%s" % (count, suffix))

  chunks.append("%ds" % remaining)

  return " ".join(chunks)
3391

    
3392

    
3393
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time
  @rtype: None or int
  @return: the pause end timestamp read from the file, or C{None} if the
    file does not exist, contains an invalid value or the timestamp has
    already passed

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    # A missing file simply means no pause is set
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        # Pause period is over, but keep the file around a little longer
        value = None

  return value
3434

    
3435

    
3436
class RetryTimeout(Exception):
  """Raised when a retry loop exceeded its timeout.

  Arguments given to L{RetryAgain} by the retried function are carried over
  into this exception. When the first such argument is itself an exception,
  the L{RaiseInner} helper re-raises it.

  """
  def RaiseInner(self):
    """Re-raise a wrapped exception, or this timeout itself."""
    args = self.args
    if args and isinstance(args[0], Exception):
      raise args[0]
    raise RetryTimeout(*args)
3449

    
3450

    
3451
class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs,
  as arguments to RetryTimeout. If an exception is passed, the RaiseInner()
  method of the resulting RetryTimeout can be used to reraise it.

  """
3459

    
3460

    
3461
class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    @rtype: float
    @return: the delay to use now

    """
    current = self._next

    # Update for next run; the no-limit case must be handled separately, as
    # min(None, ...) is not a valid way to express "unbounded" (it returned
    # None on Python 2 and is a TypeError on Python 3)
    if self._limit is None:
      self._next = self._next * self._factor
    elif self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current
3504

    
3505

    
3506
#: Special delay to specify whole remaining timeout
3507
RETRY_REMAINING_TIME = object()
3508

    
3509

    
3510
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function
  @raise RetryTimeout: when the timeout expires; carries the arguments of
    the last L{RetryAgain} raised by C{fn}

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # Give up; hand the last RetryAgain arguments to the caller
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      # RETRY_REMAining_TIME behavior: sleep out the rest of the timeout
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
3590

    
3591

    
3592
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to L{tempfile.mkstemp}; the file
  descriptor is closed immediately.

  @rtype: string
  @return: path of the temporary file

  """
  (handle, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(handle)
  return path
3599

    
3600

    
3601
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @return: tuple of PEM-encoded (private key, certificate)

  """
  # Fresh RSA key pair for the certificate
  pkey = OpenSSL.crypto.PKey()
  pkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Build the certificate and sign it with its own key (self-signed)
  x509 = OpenSSL.crypto.X509()
  if common_name:
    x509.get_subject().CN = common_name
  x509.set_serial_number(1)
  x509.gmtime_adj_notBefore(0)
  x509.gmtime_adj_notAfter(validity)
  x509.set_issuer(x509.get_subject())
  x509.set_pubkey(pkey)
  x509.sign(pkey, constants.X509_CERT_SIGN_DIGEST)

  pem_key = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey)
  pem_cert = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, x509)

  return (pem_key, pem_cert)
3629

    
3630

    
3631
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
  """Legacy function to generate self-signed X509 certificate.

  @type filename: str
  @param filename: path to write certificate to
  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: validity of certificate in number of days

  """
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
  # and node daemon certificates have the proper Subject/Issuer.
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
                                                   validity * 24 * 60 * 60)

  # Key and certificate are written to the same file, readable only by the
  # owner (mode 0400)
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3650

    
3651

    
3652
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    # Releases the lock (by closing the file) when the object goes away
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # hasattr() guards against partially-constructed instances
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.
    @raise errors.LockError: if the lock was not acquired within the timeout

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        # Retry with an increasing delay (0.1s start, factor 1.2, 1.0s cap)
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    # Single flock attempt; with a timeout, EAGAIN triggers another retry
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
3783

    
3784

    
3785
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._process = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._process = line_fn

    # Complete lines waiting to be flushed, plus the trailing partial line
    self._pending = collections.deque()
    self._partial = ""

  def write(self, data):
    # Everything before the last newline is complete; the remainder is kept
    # until more data arrives
    pieces = (self._partial + data).split("\n")
    self._partial = pieces[-1]
    self._pending.extend(pieces[:-1])

  def flush(self):
    # Hand every complete line to the callback, without line endings
    while self._pending:
      line = self._pending.popleft()
      self._process(line.rstrip("\r\n"))

  def close(self):
    # Flush everything, including a trailing line without newline
    self.flush()
    if self._partial:
      self._process(self._partial)
3824

    
3825

    
3826
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      handlers = kwargs.get('signal_handlers')
      assert handlers is None or isinstance(handlers, dict), \
             "Wrong signal_handlers parameter in original function call"
      if handlers is None:
        # First decorator in the stack creates the shared dict
        handlers = {}
        kwargs['signal_handlers'] = handlers
      sighandler = SignalHandler(signums)
      try:
        for signum in signums:
          handlers[signum] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous signal handlers
        sighandler.Reset()
    return sig_function
  return wrap
3861

    
3862

    
3863
class SignalWakeupFd(object):
  """Wrapper around a pipe registered via C{signal.set_wakeup_fd}.

  The write end of an internal pipe is registered as the interpreter's
  wakeup file descriptor; the read end is exposed through L{fileno} and
  L{read} so callers can C{select}/poll on signal arrival.

  """
  if hasattr(signal, "set_wakeup_fd"):
    # Only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd

    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)
  else:
    # Not supported; behave as if no previous wakeup fd was set
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed
    # automatically. Buffer size 0 is important, otherwise .read() with a
    # specified length might buffer data and the file descriptors won't be
    # marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Remember the previously registered fd so it can be restored
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions exposing the read end directly
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    Safe to call repeatedly and from L{__del__} even when construction
    did not complete (hence the C{hasattr} guard).

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
3913

    
3914

    
3915
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: object with a C{Notify} method (e.g. L{SignalWakeupFd})
        notified whenever one of the signals arrives, may be None

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # Iterate over a snapshot of the items: the dictionary is modified
    # while restoring handlers, which is only safe on Python 2 because
    # .items() returns a copy there; with dictionary views it would raise
    # RuntimeError.
    for signum, prev_handler in list(self._previous.items()):
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
4000

    
4001

    
4002
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes the set; every item is anchored as C{^item$}.

    @param items: field names or regex patterns accepted by the set

    """
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Explicit loop instead of the former itertools.ifilter construct:
    # equivalent result, but doesn't depend on the Python 2-only ifilter
    # name and stops at the first match without building intermediates
    for val in self.items:
      m = val.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
4042

    
4043

    
4044
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  The countdown starts at the first call to L{Remaining}, not at
  construction time.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    # Set lazily on the first L{Remaining} call
    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    """
    if self._timeout is None:
      # No timeout configured at all
      return None

    if self._start_time is None:
      # The clock starts ticking on the first calculation
      self._start_time = self._time_fn()

    deadline = self._start_time + self._timeout
    left = deadline - self._time_fn()

    if self._allow_negative:
      return left

    # Clamp to zero when negative values are not wanted
    return max(0.0, left)