Statistics
| Branch: | Tag: | Revision:

root / lib / utils.py @ 153533f3

History | View | Annotate | Download (110.4 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
# Currently-held lock objects (see lock helpers elsewhere in this module)
_locksheld = []
# Characters that never need shell quoting
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

# When True, lock debugging is enabled
debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

# Kernel-provided source of random UUIDs
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# NOTE: despite the name, this matches any alphanumeric character, not
# only hexadecimal digits
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

# Service names: 1-128 characters from a restricted set
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Canonical 8-4-4-4-12 lowercase-hex UUID form
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                     '[a-f0-9]{4}-[a-f0-9]{12}$')

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2

#: MAC checker regexp
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)

# Timeout escalation states used by L{RunCmd}/L{_RunCmdPipe}: no timeout
# hit, SIGTERM sent after timeout, SIGKILL sent after linger timeout
(_TIMEOUT_NONE,
 _TIMEOUT_TERM,
 _TIMEOUT_KILL) = range(3)

#: Shell param checker regexp
_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")

#: Unit checker regexp
_PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$")

#: ASN1 time regexp
_ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")

# Up to eight alternating non-digit/digit groups, used for natural sorting
_SORTER_RE = re.compile("^%s(.*)$" % (8 * "(\D+|\d+)?"))
_SORTER_DIGIT = re.compile("^\d+$")
114

    
115

    
116
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason, or None
      if the program terminated successfully

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
               timeout):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    # Must be assigned explicitly: with __slots__, an attribute that was
    # never set raises AttributeError when read, so successful runs would
    # break any caller inspecting fail_reason
    self.fail_reason = None

    fail_msgs = []
    if self.signal is not None:
      fail_msgs.append("terminated by signal %s" % self.signal)
    elif self.exit_code is not None:
      fail_msgs.append("exited with exit code %s" % self.exit_code)
    else:
      fail_msgs.append("unable to determine termination reason")

    if timeout_action == _TIMEOUT_TERM:
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
    elif timeout_action == _TIMEOUT_KILL:
      fail_msgs.append(("force termination after timeout of %.2f seconds"
                        " and linger for another %.2f seconds") %
                       (timeout, constants.CHILD_LINGER_TIMEOUT))

    if fail_msgs and self.failed:
      self.fail_reason = CommaJoin(fail_msgs)

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
177

    
178

    
179
def _BuildCmdEnvironment(env, reset):
180
  """Builds the environment for an external program.
181

182
  """
183
  if reset:
184
    cmd_env = {}
185
  else:
186
    cmd_env = os.environ.copy()
187
    cmd_env["LC_ALL"] = "C"
188

    
189
  if env is not None:
190
    cmd_env.update(env)
191

    
192
  return cmd_env
193

    
194

    
195
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
196
           interactive=False, timeout=None):
197
  """Execute a (shell) command.
198

199
  The command should not read from its standard input, as it will be
200
  closed.
201

202
  @type cmd: string or list
203
  @param cmd: Command to run
204
  @type env: dict
205
  @param env: Additional environment variables
206
  @type output: str
207
  @param output: if desired, the output of the command can be
208
      saved in a file instead of the RunResult instance; this
209
      parameter denotes the file name (if not None)
210
  @type cwd: string
211
  @param cwd: if specified, will be used as the working
212
      directory for the command; the default will be /
213
  @type reset_env: boolean
214
  @param reset_env: whether to reset or keep the default os environment
215
  @type interactive: boolean
216
  @param interactive: weather we pipe stdin, stdout and stderr
217
                      (default behaviour) or run the command interactive
218
  @type timeout: int
219
  @param timeout: If not None, timeout in seconds until child process gets
220
                  killed
221
  @rtype: L{RunResult}
222
  @return: RunResult instance
223
  @raise errors.ProgrammerError: if we call this when forks are disabled
224

225
  """
226
  if no_fork:
227
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
228

    
229
  if output and interactive:
230
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
231
                                 " not be provided at the same time")
232

    
233
  if isinstance(cmd, basestring):
234
    strcmd = cmd
235
    shell = True
236
  else:
237
    cmd = [str(val) for val in cmd]
238
    strcmd = ShellQuoteArgs(cmd)
239
    shell = False
240

    
241
  if output:
242
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
243
  else:
244
    logging.debug("RunCmd %s", strcmd)
245

    
246
  cmd_env = _BuildCmdEnvironment(env, reset_env)
247

    
248
  try:
249
    if output is None:
250
      out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
251
                                                     interactive, timeout)
252
    else:
253
      timeout_action = _TIMEOUT_NONE
254
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
255
      out = err = ""
256
  except OSError, err:
257
    if err.errno == errno.ENOENT:
258
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
259
                               (strcmd, err))
260
    else:
261
      raise
262

    
263
  if status >= 0:
264
    exitcode = status
265
    signal_ = None
266
  else:
267
    exitcode = None
268
    signal_ = -status
269

    
270
  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
271

    
272

    
273
def SetupDaemonEnv(cwd="/", umask=077):
274
  """Setup a daemon's environment.
275

276
  This should be called between the first and second fork, due to
277
  setsid usage.
278

279
  @param cwd: the directory to which to chdir
280
  @param umask: the umask to setup
281

282
  """
283
  os.chdir(cwd)
284
  os.umask(umask)
285
  os.setsid()
286

    
287

    
288
def SetupDaemonFDs(output_file, output_fd):
289
  """Setups up a daemon's file descriptors.
290

291
  @param output_file: if not None, the file to which to redirect
292
      stdout/stderr
293
  @param output_fd: if not None, the file descriptor for stdout/stderr
294

295
  """
296
  # check that at most one is defined
297
  assert [output_file, output_fd].count(None) >= 1
298

    
299
  # Open /dev/null (read-only, only for stdin)
300
  devnull_fd = os.open(os.devnull, os.O_RDONLY)
301

    
302
  if output_fd is not None:
303
    pass
304
  elif output_file is not None:
305
    # Open output file
306
    try:
307
      output_fd = os.open(output_file,
308
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
309
    except EnvironmentError, err:
310
      raise Exception("Opening output file failed: %s" % err)
311
  else:
312
    output_fd = os.open(os.devnull, os.O_WRONLY)
313

    
314
  # Redirect standard I/O
315
  os.dup2(devnull_fd, 0)
316
  os.dup2(output_fd, 1)
317
  os.dup2(output_fd, 2)
318

    
319

    
320
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # 'output' (a path) and 'output_fd' are mutually exclusive; the XOR only
  # fires when 'output' itself is set, so the condition raises exactly when
  # both were given
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  # A string command is run through the shell
  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          # Parent must close its copy of the write end so the read below
          # sees EOF once the daemon exec()s (or dies)
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to
        # arrive) and read up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
412

    
413

    
414
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Runs in the first child after the fork in L{StartDaemon}: performs the
  second fork, reports the daemon's PID through C{pidpipe_write} and any
  startup error through C{errpipe_write}, then exec()s the daemon.
  This function never returns.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    SetupDaemonEnv()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      fd_pidfile = WritePidFile(pidfile)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      # The PID file descriptor must survive the exec below
      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    SetupDaemonFDs(output, fd_output)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  # Only reached if the exec failed or an error occurred before it
  os._exit(1) # pylint: disable-msg=W0212
477

    
478

    
479
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is None:
    return

  # Never write an empty message, the reader treats data as "error occurred"
  RetryOnSignal(os.write, fd, err or "<unknown error>")
494

    
495

    
496
def _CheckIfAlive(child):
497
  """Raises L{RetryAgain} if child is still alive.
498

499
  @raises RetryAgain: If child is still alive
500

501
  """
502
  if child.poll() is None:
503
    raise RetryAgain()
504

    
505

    
506
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  Polls the child with an increasing interval; the child outliving the
  timeout is deliberately not an error -- the caller re-checks the
  process state itself.

  """
  try:
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
  except RetryTimeout:
    # Still running after the timeout; caller handles escalation
    pass
514

    
515

    
516
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after the programm gets terminated
  @rtype: tuple
  @return: (out, err, status, timeout_action)

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  if interactive:
    # Interactive mode: child inherits our stdio, no piping
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()

  # Set once the main timeout expires; counts down the grace period
  # between SIGTERM and SIGKILL
  linger_timeout = None

  if timeout is None:
    poll_timeout = None
  else:
    poll_timeout = RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))

  timeout_action = _TIMEOUT_NONE

  if not interactive:
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    # Map child fd -> (buffer to collect into, file object to read from)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    while fdmap:
      if poll_timeout:
        pt = poll_timeout() * 1000
        if pt < 0:
          # Main timeout expired: send SIGTERM once and switch to the
          # linger countdown
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
            linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
          pt = linger_timeout() * 1000
          if pt < 0:
            break
      else:
        pt = None

      pollresult = RetryOnSignal(poller.poll, pt)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status, timeout_action
641

    
642

    
643
def _RunCmdFile(cmd, env, via_shell, output, cwd):
644
  """Run a command and save its output to a file.
645

646
  @type  cmd: string or list
647
  @param cmd: Command to run
648
  @type env: dict
649
  @param env: The environment to use
650
  @type via_shell: bool
651
  @param via_shell: if we should run via the shell
652
  @type output: str
653
  @param output: the filename in which to save the output
654
  @type cwd: string
655
  @param cwd: the working directory for the program
656
  @rtype: int
657
  @return: the exit status
658

659
  """
660
  fh = open(output, "a")
661
  try:
662
    child = subprocess.Popen(cmd, shell=via_shell,
663
                             stderr=subprocess.STDOUT,
664
                             stdout=fh,
665
                             stdin=subprocess.PIPE,
666
                             close_fds=True, env=env,
667
                             cwd=cwd)
668

    
669
    child.stdin.close()
670
    status = child.wait()
671
  finally:
672
    fh.close()
673
  return status
674

    
675

    
676
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    new_flags = old_flags | fcntl.FD_CLOEXEC
  else:
    new_flags = old_flags & ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, new_flags)
693

    
694

    
695
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  new_flags = (old_flags | os.O_NONBLOCK) if enable \
              else (old_flags & ~os.O_NONBLOCK)

  fcntl.fcntl(fd, fcntl.F_SETFL, new_flags)
712

    
713

    
714
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  Any exception other than an EINTR-caused one is propagated unchanged.

  @param fn: the callable to invoke
  @return: whatever C{fn} returns

  """
  # Uses the "except ... as" form (Python 2.6+), which is also valid in
  # Python 3; behavior is unchanged
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError as err:
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error) as err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
729

    
730

    
731
def RunParts(dir_name, env=None, reset_env=False):
732
  """Run Scripts or programs in a directory
733

734
  @type dir_name: string
735
  @param dir_name: absolute path to a directory
736
  @type env: dict
737
  @param env: The environment to use
738
  @type reset_env: boolean
739
  @param reset_env: whether to reset or keep the default os environment
740
  @rtype: list of tuples
741
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
742

743
  """
744
  rr = []
745

    
746
  try:
747
    dir_contents = ListVisibleFiles(dir_name)
748
  except OSError, err:
749
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
750
    return rr
751

    
752
  for relname in sorted(dir_contents):
753
    fname = PathJoin(dir_name, relname)
754
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
755
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
756
      rr.append((relname, constants.RUNPARTS_SKIP, None))
757
    else:
758
      try:
759
        result = RunCmd([fname], env=env, reset_env=reset_env)
760
      except Exception, err: # pylint: disable-msg=W0703
761
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
762
      else:
763
        rr.append((relname, constants.RUNPARTS_RUN, result))
764

    
765
  return rr
766

    
767

    
768
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  # Uses the "except ... as" form (Python 2.6+), also valid in Python 3
  try:
    os.unlink(filename)
  except OSError as err:
    # A missing file or a directory given by mistake are not errors here
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
783

    
784

    
785
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  # Uses the "except ... as" form (Python 2.6+), also valid in Python 3
  try:
    os.rmdir(dirname)
  except OSError as err:
    # Only a missing directory is acceptable
    if err.errno != errno.ENOENT:
      raise
801

    
802

    
803
def RenameFile(old, new, mkdir=False, mkdir_mode=0o750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  # 0o750 / "except ... as" are the Python 2.6+ spellings (also valid in
  # Python 3); values and behavior are unchanged
  try:
    return os.rename(old, new)
  except OSError as err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise
829

    
830

    
831
def Makedirs(path, mode=0o750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  @type path: string
  @param path: the directory path to create
  @param mode: the mode for newly created directories

  """
  # 0o750 / "except ... as" are the Python 2.6+ spellings (also valid in
  # Python 3); values and behavior are unchanged
  try:
    os.makedirs(path, mode)
  except OSError as err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise
845

    
846

    
847
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if not (hasattr(tempfile, "_once_lock") and
          hasattr(tempfile, "_name_sequence")):
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Dropping the sequence forces tempfile to build (and re-seed) a new one
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
868

    
869

    
870
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)
  try:
    fp = compat.sha1_hash()
    # Read in fixed-size chunks to keep memory usage constant
    while True:
      data = f.read(4096)
      if not data:
        break

      fp.update(data)
  finally:
    # The original implementation leaked the file handle; always close it
    f.close()

  return fp.hexdigest()
897

    
898

    
899
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  # _FingerprintFile returns None for missing files; those are dropped
  pairs = ((filename, _FingerprintFile(filename)) for filename in files)
  return dict((filename, cksum) for (filename, cksum) in pairs if cksum)
917

    
918

    
919
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  The dict is modified in place: values are coerced to the type declared
  for their key, or a L{errors.TypeEnforcementError} is raised.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass type enforcement entirely
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # "maybe string" additionally accepts None unchanged
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # False is accepted as the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      # Non-empty strings must spell one of the two recognized constants;
      # any other value is coerced by truthiness
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      # Sizes are parsed through ParseUnit (e.g. suffixed values)
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)

    
987

    
988
def _GetProcStatusPath(pid):
989
  """Returns the path for a PID's proc status file.
990

991
  @type pid: int
992
  @param pid: Process ID
993
  @rtype: string
994

995
  """
996
  return "/proc/%d/status" % pid
997

    
998

    
999
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    # ENOENT/ENOTDIR mean the /proc entry (and hence the process) is gone;
    # a transient EINVAL is translated into a retry request for Retry()
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    # Re-raise the exception raised by the last _TryStat attempt
    err.RaiseInner()
1032

    
1033

    
1034
def _ParseSigsetT(sigset):
1035
  """Parse a rendered sigset_t value.
1036

1037
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1038
  function.
1039

1040
  @type sigset: string
1041
  @param sigset: Rendered signal set from /proc/$pid/status
1042
  @rtype: set
1043
  @return: Set of all enabled signal numbers
1044

1045
  """
1046
  result = set()
1047

    
1048
  signum = 0
1049
  for ch in reversed(sigset):
1050
    chv = int(ch, 16)
1051

    
1052
    # The following could be done in a loop, but it's easier to read and
1053
    # understand in the unrolled form
1054
    if chv & 1:
1055
      result.add(signum + 1)
1056
    if chv & 2:
1057
      result.add(signum + 2)
1058
    if chv & 4:
1059
      result.add(signum + 3)
1060
    if chv & 8:
1061
      result.add(signum + 4)
1062

    
1063
    signum += 4
1064

    
1065
  return result
1066

    
1067

    
1068
def _GetProcStatusField(pstatus, field):
1069
  """Retrieves a field from the contents of a proc status file.
1070

1071
  @type pstatus: string
1072
  @param pstatus: Contents of /proc/$pid/status
1073
  @type field: string
1074
  @param field: Name of field whose value should be returned
1075
  @rtype: string
1076

1077
  """
1078
  for line in pstatus.splitlines():
1079
    parts = line.split(":", 1)
1080

    
1081
    if len(parts) < 2 or parts[0] != field:
1082
      continue
1083

    
1084
    return parts[1].strip()
1085

    
1086
  return None
1087

    
1088

    
1089
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @type status_path: string
  @param status_path: Alternative path to the status file to read (by
      default the C{/proc} status file of C{pid} is used)
  @rtype: bool

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
      return False
    raise

  # "SigCgt" lists the signals for which a handler is installed
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)
1116

    
1117

    
1118
def ReadPidFile(pidfile):
1119
  """Read a pid from a file.
1120

1121
  @type  pidfile: string
1122
  @param pidfile: path to the file containing the pid
1123
  @rtype: int
1124
  @return: The process id, if the file exists and contains a valid PID,
1125
           otherwise 0
1126

1127
  """
1128
  try:
1129
    raw_data = ReadOneLineFile(pidfile)
1130
  except EnvironmentError, err:
1131
    if err.errno != errno.ENOENT:
1132
      logging.exception("Can't read pid file")
1133
    return 0
1134

    
1135
  try:
1136
    pid = int(raw_data)
1137
  except (TypeError, ValueError), err:
1138
    logging.info("Can't parse pid file contents", exc_info=True)
1139
    return 0
1140

    
1141
  return pid
1142

    
1143

    
1144
def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  # Lock could be acquired, hence no live daemon holds the file; the PID
  # file is stale/unused
  return None
1173

    
1174

    
1175
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  # Fixed: use a raw string so that "\." is a literal regex escape and not
  # an (invalid) string escape sequence
  mo = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      # Track full (case-insensitive) matches separately; they win over
      # prefix matches
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None
1219

    
1220

    
1221
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @raise errors.OpPrereqError: if the name is neither a valid service name
      nor a valid port number
  @return: the unmodified input on success

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric: must look like a legitimate service name
    if not _VALID_SERVICE_NAME_RE.match(name):
      raise errors.OpPrereqError("Invalid service name '%s'" % name,
                                 errors.ECODE_INVAL)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    if not 0 <= numport < (1 << 16):
      raise errors.OpPrereqError("Invalid service name '%s'" % name,
                                 errors.ECODE_INVAL)

  return name
1243

    
1244

    
1245
def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  # --units m --nosuffix makes vgs report plain MiB numbers
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    # Best-effort: a failing vgs yields an empty dictionary
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval
1271

    
1272

    
1273
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge exposes a "bridge" directory under sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
1283

    
1284

    
1285
def _NiceSortTryInt(val):
  """Attempts to convert a string to an integer.

  Non-numeric (or empty/None) values are returned unchanged.

  """
  if not val or not _SORTER_DIGIT.match(val):
    return val
  return int(val)
1293

    
1294

    
1295
def _NiceSortKey(value):
  """Extract key for sorting.

  Splits the value into digit/non-digit groups and converts the numeric
  groups to integers, so they compare numerically.

  """
  groups = _SORTER_RE.match(value).groups()
  return [_NiceSortTryInt(grp) for grp in groups]
1301

    
1302

    
1303
def NiceSort(values, key=None):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type values: list
  @param values: the names to be sorted
  @type key: callable or None
  @param key: function of one argument to extract a comparison key from each
    list element, must return string
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  if key is None:
    keyfunc = _NiceSortKey
  else:
    # Apply the caller's key first, then our grouping key
    def keyfunc(value):
      return _NiceSortKey(key(value))

  return sorted(values, key=keyfunc)
1329

    
1330

    
1331
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    return val
1351

    
1352

    
1353
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return _SHELLPARAM_REGEX.match(word) is not None
1370

    
1371

    
1372
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument is unsafe for the shell

  """
  for word in args:
    if IsValidShellParam(word):
      continue
    raise errors.ProgrammerError("Shell argument '%s' contains"
                                 " invalid characters" % word)
  return template % args
1392

    
1393

    
1394
def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)
  @raise errors.ProgrammerError: for an unknown unit character

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  # For 'h' pick the largest unit that keeps the number below the next step;
  # the unit suffix is only appended in automatic mode
  if units == 'm' or (units == 'h' and value < 1024):
    suffix = ''
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  if units == 'g' or (units == 'h' and value < (1024 * 1024)):
    suffix = ''
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  suffix = ''
  if units == 'h':
    suffix = 'T'
  return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1428

    
1429

    
1430
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  @raise errors.UnitParseError: when the input is malformed or the
      unit is not recognized

  """
  m = _PARSEUNIT_REGEX.match(str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    # No unit given, default to MiB
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
1473

    
1474

    
1475
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: on a malformed mask

  """
  if not cpu_mask:
    return []
  cpu_list = []
  for range_def in cpu_mask.split(","):
    # A plain ID yields a single element; then boundaries[-1] below is the
    # same element as boundaries[0]
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      higher = int(boundaries[-1])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list
1512

    
1513

    
1514
def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  The key is only appended if it is not already present (comparison
  ignores whitespace differences).

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      # for/else: only reached when the key was not found; 'nl' records
      # whether the last existing line ended with a newline
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()
1545

    
1546

    
1547
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  # Copy all non-matching lines into a temporary file in the same directory
  # and atomically rename it over the original
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    # Deliberately bare: remove the temporary file on any failure, then
    # re-raise the original exception
    RemoveFile(tmpname)
    raise
1578

    
1579

    
1580
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      # Copy the file, dropping any existing line for the same IP address
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          continue
        out.write(line)

      # Append the (possibly replaced) entry at the end
      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1616

    
1617

    
1618
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  # The short name (first label) becomes the only alias
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
1629

    
1630

    
1631
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            # Strip all occurrences; keep the line only if other names remain
            while hostname in names:
              names.remove(hostname)
            if names:
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1665

    
1666

    
1667
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and shot name will be removed from
      L{constants.ETC_HOSTS}

  """
  # Remove both the fully-qualified and the short (first label) name
  short_name = hostname.split(".")[0]
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, short_name)
1678

    
1679

    
1680
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat
  them as separators.

  @rtype: str

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1688

    
1689

    
1690
def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  # Backup name embeds a timestamp; mkstemp guarantees uniqueness
  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name
1721

    
1722

    
1723
def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    # Contains only safe characters, no quoting needed
    return value
  # Close the quote, emit an escaped single quote, reopen the quote
  escaped = value.replace("'", "'\\''")
  return "'%s'" % escaped
1736

    
1737

    
1738
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  quoted = [ShellQuote(arg) for arg in args]
  return ' '.join(quoted)
1748

    
1749

    
1750
class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # Two spaces per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: File handle to write to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent += 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    assert self._indent > 0
    self._indent -= 1

  def Write(self, txt, *args):
    """Write line to output file.

    Extra positional arguments are applied to C{txt} via C{%} formatting.

    """
    assert self._indent >= 0

    self._fh.write(self.INDENT_STR * self._indent)

    if args:
      formatted = txt % args
    else:
      formatted = txt
    self._fh.write(formatted)

    self._fh.write("\n")
1790

    
1791

    
1792
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolue and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  return [name for name in os.listdir(path) if not name.startswith(".")]
1807

    
1808

    
1809
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @type user: str or int
  @param user: user name or numeric user ID
  @param default: value returned when the user cannot be found
  @raise errors.ProgrammerError: if 'user' is neither a string nor an
      integer

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # pwd raises KeyError for unknown users
    return default
  return result.pw_dir
1828

    
1829

    
1830
def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  # 128 bytes is more than enough for the 36-character UUID plus newline
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1839

    
1840

    
1841
def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  # os.urandom is a cryptographically strong source; hex encoding doubles
  # the length, so the result is 2*numbytes characters long
  return os.urandom(numbytes).encode('hex')
1854

    
1855

    
1856
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: when a directory cannot be created or its
      permissions cannot be adjusted

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    # Enforce the mode even when the directory already existed (mkdir's
    # mode argument is also subject to umask)
    try:
      os.chmod(dir_name, dir_mode)
    except EnvironmentError, err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)
1877

    
1878

    
1879
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type file_name: str
  @param file_name: path of the file to read
  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  fh = open(file_name, "r")
  try:
    return fh.read(size)
  finally:
    fh.close()
1893

    
1894

    
1895
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of 'fn' and 'data' must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write into a temporary file in the target directory and rename it over
  # the destination afterwards; rename(2) within a filesystem is atomic
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # Flush data to disk before renaming into place
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
1992

    
1993

    
1994
def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Either the path to the file or the fd must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)

  """
  # exactly one of the two arguments must be supplied
  if (path is None) == (fd is None):
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  if fd is not None:
    st = os.fstat(fd)
  else:
    st = os.stat(path)

  return (st.st_dev, st.st_ino, st.st_mtime)
2013

    
2014

    
2015
def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  Differences in the inode/device are not accepted, but an older
  timestamp for fi_disk is accepted.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (ours_dev, ours_ino, ours_mtime) = fi_ours

  # device and inode must match exactly; the on-disk timestamp may only
  # be older than (or equal to) the one we recorded
  if (disk_dev, disk_ino) != (ours_dev, ours_ino):
    return False

  return disk_mtime <= ours_mtime
2032

    
2033

    
2034
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}

  """
  # open (creating if needed) only to hold the lock; WriteFile does the
  # actual writing via a temporary file and rename
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  try:
    LockFile(fd)
    if file_id is not None:
      current_id = GetFileID(fd=fd)
      if not VerifyFileID(current_id, file_id):
        raise errors.LockError("Cannot overwrite file %s, it has been modified"
                               " since last written" % file_name)
    return WriteFile(file_name, **kwargs)
  finally:
    os.close(fd)
2057

    
2058

    
2059
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  """
  # drop empty lines; an all-empty (or empty) file is an error
  non_empty = [line for line in ReadFile(file_name).splitlines() if line]

  if not non_empty:
    raise errors.GenericError("No data in one-liner file %s" % file_name)

  if strict and len(non_empty) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)

  return non_empty[0]
2075

    
2076

    
2077
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for offset, value in enumerate(seq):
    assert value >= base, "Passed element is higher than base offset"
    # the first gap shows up as a value larger than its expected slot
    if value > offset + base:
      return offset + base

  # the sequence is dense, no free slot within it
  return None
2103

    
2104

    
2105
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # besides the requested events, always accept the conditions poll()
  # can report regardless of registration (priority data, invalid fd,
  # hangup and error)
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    # interrupted by a signal: report "no event"; callers that need to
    # keep waiting retry via L{WaitForFdCondition}
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None
2142

    
2143

    
2144
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """
  def __init__(self, timeout):
    # remaining time (seconds) used for the next poll attempt
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Poll once; signal L{RetryAgain} when no condition occurred yet."""
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Record the remaining time (used as wait_fn by L{Retry})."""
    self.timeout = timeout
2165

    
2166

    
2167
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is None:
    # no deadline: keep polling until a condition actually shows up,
    # even across signal interruptions
    occurred = None
    while occurred is None:
      occurred = SingleWaitForFdCondition(fdobj, event, timeout)
    return occurred

  # with a deadline, let Retry handle re-polling with the remaining time
  retrywaiter = FdConditionWaiterHelper(timeout)
  try:
    return Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
  except RetryTimeout:
    return None
2194

    
2195

    
2196
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  result = []
  for element in seq:
    # keep only the first occurrence of each element
    if element not in seen:
      seen.add(element)
      result.append(element)
  return result
2209

    
2210

    
2211
def FindDuplicates(seq):
  """Identifies duplicates in a list.

  Does not preserve element order.

  @type seq: sequence
  @param seq: Sequence with source elements
  @rtype: list
  @return: List of duplicate elements from seq

  """
  seen = set()
  repeated = set()

  for element in seq:
    # after the first sighting, every further occurrence is a duplicate
    target = repeated if element in seen else seen
    target.add(element)

  return list(repeated)
2232

    
2233

    
2234
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  # syntax check against the module-level pattern; only the format is
  # validated here, normalization is a plain lower-casing
  if _MAC_CHECK.match(mac) is None:
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  return mac.lower()
2253

    
2254

    
2255
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) for a negative duration,
      (True, None) otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None
2268

    
2269

    
2270
def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    if err.errno != errno.EBADF:
      if retries > 0:
        # non-EBADF failure (e.g. a transient error): retry the close a
        # bounded number of times via recursion
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on
2288

    
2289

    
2290
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Fallback upper bound when the system cannot tell us a usable one
  default_maxfd = 1024
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      value = os.sysconf('SC_OPEN_MAX')
      if value >= 0:
        default_maxfd = value
    except OSError:
      pass

  # prefer the hard rlimit, unless it is unlimited
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if maxfd == resource.RLIM_INFINITY:
    maxfd = default_maxfd

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
2320

    
2321

    
2322
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @param _ctypes: the ctypes module to use; parameter exists so a
      different (or missing) module can be injected, presumably for
      unit testing
  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  # NOTE(review): "libc.so.6" hard-codes the glibc soname, so this is
  # Linux/glibc specific
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  # mlockall returns non-zero on failure; on failure we only log the
  # error (with the decoded errno) and continue without the lock
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
2354

    
2355

    
2356
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon, using the classic double-fork
  sequence; only the grandchild process survives past this function.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the write end of the startup-error pipe; only the final
      daemon process reaches this return, the two intermediate
      processes exit via os._exit()

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages from the daemon back to the
  # original process, so startup failures can be reported
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # Fork a second child, so the daemon cannot reacquire a controlling
    # terminal; this might fail
    pid = os.fork()
    if (pid == 0):  # The second child: the actual daemon.
      # the daemon only writes to the pipe; drop the read end
      _CloseFDNoErr(rpipe)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    # original process: drop the write end, then block until the daemon
    # either reports an error or closes its end (EOF, i.e. empty read)
    _CloseFDNoErr(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode) # Exit parent of the first child.

  # only the daemon gets here: redirect std fds and hand the error-pipe
  # write end back to the caller
  SetupDaemonFDs(logfile, None)
  return wpipe
2403

    
2404

    
2405
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile)
2416

    
2417

    
2418
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True when the daemon is (now) running, False on failure

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2429

    
2430

    
2431
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True when the stop command succeeded, False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2442

    
2443

    
2444
def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: string
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  # NOTE(review): since the file is never truncated, a previous, longer
  # pid string would leave trailing bytes behind the newline — verify
  # that readers tolerate this
  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile
2469

    
2470

    
2471
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  # TODO: we could check here that the file contains our pid
  pidfile_path = DaemonPidFileName(name)
  try:
    RemoveFile(pidfile_path)
  except: # pylint: disable-msg=W0702
    # removal is best-effort; failures are deliberately swallowed
    pass
2486

    
2487

    
2488
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    # os.kill errors for a vanished process are ignored; the non-blocking
    # waitpid reaps our own child so it does not linger as a zombie
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  # first (polite) signal
  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    # caller asked for fire-and-forget behaviour
    return

  def _CheckProcess():
    """Raises L{RetryAgain} while the process is still around."""
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      # not our child (or already reaped): keep polling via IsProcessAlive
      raise RetryAgain()

    if result_pid > 0:
      # our child has been reaped, it is gone
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
2549

    
2550

    
2551
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # reject names not matching the external-plugin mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for directory in search_path:
    # FIXME: investigate switch to PathJoin
    candidate = os.path.sep.join([directory, name])
    # accept only entries passing the user test whose basename still
    # resolves to the requested name
    if test(candidate) and os.path.basename(candidate) == name:
      return candidate

  return None
2583

    
2584

    
2585
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  size = vglist.get(vgname, None)

  # guard-style checks: missing group first, then size
  if size is None:
    return "volume group '%s' missing" % vgname

  if size < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, size))

  return None
2608

    
2609

    
2610
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # work in integral microseconds to avoid float rounding in the split
  total_micro = int(value * 1000000)
  (seconds, microseconds) = divmod(total_micro, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
2626

    
2627

    
2628
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  # inverse of L{SplitTime}
  return float(secs) + (float(usecs) * 0.000001)
2644

    
2645

    
2646
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
      return
    except Exception: # pylint: disable-msg=W0703
      pass

    # base-class handling failed, fall back to the console
    try:
      self.console.write("Cannot log message:\n%s\n" % self.format(record))
    except Exception: # pylint: disable-msg=W0703
      # Log handler tried everything it could, now just give up
      pass
2678

    
2679

    
2680
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  # two format strings are built in parallel: "fmt" for file/stderr
  # handlers and "sft" for syslog
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  # NOTSET on the root logger: filtering is done per-handler below
  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers, making repeated calls to this
  # function idempotent
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      # without debug, stderr only gets critical messages
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
2771

    
2772

    
2773
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  """
  # a path is acceptable only if normalization leaves it untouched and
  # it is absolute
  return os.path.isabs(path) and os.path.normpath(path) == path
2780

    
2781

    
2782
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # at least the root component must be present
  assert args

  base = args[0]
  if not IsNormAbsPath(base):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))

  joined = os.path.join(*args)
  if not IsNormAbsPath(joined):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))

  # the joined path must still live under the original root
  common = os.path.commonprefix([base, joined])
  if common != base:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (common, base))

  return joined
2810

    
2811

    
2812
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  handle = open(fname, "r")
  try:
    # seek to (at most) 4KB before the end and read from there
    handle.seek(0, 2)
    end_pos = handle.tell()
    handle.seek(max(0, end_pos - 4096), 0)
    raw_data = handle.read()
  finally:
    handle.close()

  all_rows = raw_data.splitlines()
  return all_rows[-lines:]
2836

    
2837

    
2838
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp as a human-readable string.

  NOTE(review): despite the historical "with the local timezone"
  wording, the code uses time.gmtime(), so the broken-down time is UTC;
  the %Z field may nevertheless render the local timezone name, which
  is misleading — confirm which behavior is intended (either switch to
  time.localtime() or drop %Z).

  @param secs: timestamp in seconds since the epoch

  """
  return time.strftime("%F %T %Z", time.gmtime(secs))
2843

    
2844

    
2845
def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp
  @return: seconds since the epoch (UTC)
  @raise ValueError: if the timestamp carries no timezone information

  """
  m = _ASN1_TIME_REGEX.match(value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  # Only the first six fields (year..second) are valid positional
  # arguments for datetime; the previous [:7] slice also passed
  # parsed[6] (tm_wday) as the "microsecond" argument, which was only
  # harmless because utctimetuple()/timegm() discard microseconds
  tt = datetime.datetime(*(parsed[:6])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())
2870

    
2871

    
2872
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @return: tuple (not_before, not_after), each a Unix timestamp or None
      when the field (or the accessor) is unavailable

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above, hence the defensive attribute lookup; even
  # when present, they may return None
  def _ParseBound(accessor_name):
    accessor = getattr(cert, accessor_name, None)
    if accessor is None:
      return None
    asn1_value = accessor()
    if asn1_value is None:
      return None
    return _ParseAsn1Generalizedtime(asn1_value)

  not_before = _ParseBound("get_notBefore")
  not_after = _ParseBound("get_notAfter")

  return (not_before, not_after)
2906

    
2907

    
2908
def _VerifyCertificateInner(expired, not_before, not_after, now,
2909
                            warn_days, error_days):
2910
  """Verifies certificate validity.
2911

2912
  @type expired: bool
2913
  @param expired: Whether pyOpenSSL considers the certificate as expired
2914
  @type not_before: number or None
2915
  @param not_before: Unix timestamp before which certificate is not valid
2916
  @type not_after: number or None
2917
  @param not_after: Unix timestamp after which certificate is invalid
2918
  @type now: number
2919
  @param now: Current time as Unix timestamp
2920
  @type warn_days: number or None
2921
  @param warn_days: How many days before expiration a warning should be reported
2922
  @type error_days: number or None
2923
  @param error_days: How many days before expiration an error should be reported
2924

2925
  """
2926
  if expired:
2927
    msg = "Certificate is expired"
2928

    
2929
    if not_before is not None and not_after is not None:
2930
      msg += (" (valid from %s to %s)" %
2931
              (FormatTimestampWithTZ(not_before),
2932
               FormatTimestampWithTZ(not_after)))
2933
    elif not_before is not None:
2934
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2935
    elif not_after is not None:
2936
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2937

    
2938
    return (CERT_ERROR, msg)
2939

    
2940
  elif not_before is not None and not_before > now:
2941
    return (CERT_WARNING,
2942
            "Certificate not yet valid (valid from %s)" %
2943
            FormatTimestampWithTZ(not_before))
2944

    
2945
  elif not_after is not None:
2946
    remaining_days = int((not_after - now) / (24 * 3600))
2947

    
2948
    msg = "Certificate expires in about %d days" % remaining_days
2949

    
2950
    if error_days is not None and remaining_days <= error_days:
2951
      return (CERT_ERROR, msg)
2952

    
2953
    if warn_days is not None and remaining_days <= warn_days:
2954
      return (CERT_WARNING, msg)
2955

    
2956
  return (None, None)
2957

    
2958

    
2959
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # With old pyOpenSSL versions GetX509CertValidity can only return
  # (None, None), in which case just the expiry flag is meaningful
  validity = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), validity[0], validity[1],
                                 time.time(), warn_days, error_days)
2975

    
2976

    
2977
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format
  @raise errors.GenericError: If the salt contains invalid characters

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  signature = Sha1Hmac(key, cert_pem, salt=salt)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt, signature, cert_pem))
3002

    
3003

    
3004
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format with signature header
  @rtype: tuple; (string, string)
  @return: Salt and signature
  @raise errors.GenericError: If no signature header is found

  """
  # The signature header must precede the PEM body, so stop scanning at the
  # first "---" marker line
  header_lines = itertools.takewhile(lambda l: not l.startswith("---"),
                                     cert_pem.splitlines())
  for line in header_lines:
    match = X509_SIGNATURE.match(line.strip())
    if match is not None:
      return (match.group("salt"), match.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
3018

    
3019

    
3020
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: If the signature is missing or doesn't match

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load, then dump again, so the HMAC is checked against a normalized PEM
  # representation rather than the raw input
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
3043

    
3044

    
3045
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @type salt: string or None
  @param salt: If given, prepended to the text before hashing
  @rtype: string
  @return: Hexadecimal digest

  """
  if salt:
    payload = salt + text
  else:
    payload = text

  return hmac.new(key, payload, compat.sha1).hexdigest()
3061

    
3062

    
3063
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @type digest: string
  @param digest: Expected digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # NOTE(review): plain string comparison, not constant-time; presumably fine
  # for the current callers, but worth confirming for new security-sensitive
  # uses
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
3078

    
3079

    
3080
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  # Build the result as a list and join once instead of quadratic "+="
  parts = []
  for char in text:
    c = ord(char)
    if char == '\t':
      parts.append(r'\t')
    elif char == '\n':
      parts.append(r'\n')
    elif char == '\r':
      # this branch used to emit r"\'r" (backslash, quote, 'r') instead of
      # the intended two-character escape sequence
      parts.append(r'\r')
    elif c < 32 or c >= 127: # non-printable
      parts.append("\\x%02x" % (c & 0xff))
    else:
      parts.append(char)
  return "".join(parts)
3115

    
3116

    
3117
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be an element of the
  elements. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      # Join with the following element only if one exists; a lone odd
      # backslash run at the very end of the input has nothing to escape
      # (previously this raised IndexError)
      if num_b % 2 == 1 and slist:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  return [re.sub(r"\\(.)", r"\1", v) for v in rlist]
3157

    
3158

    
3159
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  # Each element is stringified first, so numbers etc. are accepted too
  return ", ".join(map(str, names))
3167

    
3168

    
3169
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  if name in data:
    # Direct hit, no capture groups
    return (data[name], [])

  for (key, value) in data.items():
    # Any key providing a "match" method is treated as a compiled regex
    match_fn = getattr(key, "match", None)
    if match_fn is None:
      continue
    found = match_fn(name)
    if found:
      return (value, list(found.groups()))

  return None
3194

    
3195

    
3196
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes, rounded to the nearest integer

  """
  mebi = value / (1024.0 * 1024.0)
  return int(round(mebi, 0))
3206

    
3207

    
3208
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  total_bytes = 0

  for (dirpath, _, filenames) in os.walk(path):
    for filename in filenames:
      # lstat: a symlink counts with its own size, not its target's
      total_bytes += os.lstat(PathJoin(dirpath, filename)).st_size

  return BytesToMebibyte(total_bytes)
3225

    
3226

    
3227
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  mounts = []
  for line in ReadFile(filename).splitlines():
    # The trailing dump/pass fields are collapsed into the ignored fifth part
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    mounts.append((device, mountpoint, fstype, options))

  return mounts
3245

    
3246

    
3247
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: int
  @return: tuple of (Total space, Free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_bavail is the space available to unprivileged users, f_blocks the
  # total number of fragments
  frag_size = stats.f_frsize
  free_mb = BytesToMebibyte(stats.f_bavail * frag_size)
  total_mb = BytesToMebibyte(stats.f_blocks * frag_size)
  return (total_mb, free_mb)
3261

    
3262

    
3263
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: If the child died from a signal or exited
    with a code other than 0 or 1

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    # os._exit skips cleanup handlers inherited from the parent; the child
    # must never return from this function
    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  # Anything other than a clean exit with code 0 or 1 (including exit code
  # 33 set above for exceptions in the child) is reported as an error
  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3308

    
3309

    
3310
def IgnoreProcessNotFound(fn, *args, **kwargs):
3311
  """Ignores ESRCH when calling a process-related function.
3312

3313
  ESRCH is raised when a process is not found.
3314

3315
  @rtype: bool
3316
  @return: Whether process was found
3317

3318
  """
3319
  try:
3320
    fn(*args, **kwargs)
3321
  except EnvironmentError, err:
3322
    # Ignore ESRCH
3323
    if err.errno == errno.ESRCH:
3324
      return False
3325
    raise
3326

    
3327
  return True
3328

    
3329

    
3330
def IgnoreSignals(fn, *args, **kwargs):
3331
  """Tries to call a function ignoring failures due to EINTR.
3332

3333
  """
3334
  try:
3335
    return fn(*args, **kwargs)
3336
  except EnvironmentError, err:
3337
    if err.errno == errno.EINTR:
3338
      return None
3339
    else:
3340
      raise
3341
  except (select.error, socket.error), err:
3342
    # In python 2.6 and above select.error is an IOError, so it's handled
3343
    # above, in 2.5 and below it's not, and it's handled here.
3344
    if err.args and err.args[0] == errno.EINTR:
3345
      return None
3346
    else:
3347
      raise
3348

    
3349

    
3350
def LockFile(fd):
3351
  """Locks a file using POSIX locks.
3352

3353
  @type fd: int
3354
  @param fd: the file descriptor we need to lock
3355

3356
  """
3357
  try:
3358
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3359
  except IOError, err:
3360
    if err.errno == errno.EAGAIN:
3361
      raise errors.LockError("File already locked")
3362
    raise
3363

    
3364

    
3365
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  # None (and any other non-number) yields "N/A"
  if not isinstance(val, (int, float)):
    return "N/A"
  # the %F and %T format codes work on Linux, but they are not guaranteed
  # on all platforms
  return time.strftime("%F %T", time.localtime(val))
3378

    
3379

    
3380
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  secs = round(secs, 0)

  parts = []
  if secs > 0:
    # Negative values would be a bit tricky
    for (suffix, length) in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (count, secs) = divmod(secs, length)
      # Leading zero-valued units are suppressed, inner ones are kept
      if count or parts:
        parts.append("%d%s" % (count, suffix))

  parts.append("%ds" % secs)

  return " ".join(parts)
3403

    
3404

    
3405
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3406
  """Reads the watcher pause file.
3407

3408
  @type filename: string
3409
  @param filename: Path to watcher pause file
3410
  @type now: None, float or int
3411
  @param now: Current time as Unix timestamp
3412
  @type remove_after: int
3413
  @param remove_after: Remove watcher pause file after specified amount of
3414
    seconds past the pause end time
3415

3416
  """
3417
  if now is None:
3418
    now = time.time()
3419

    
3420
  try:
3421
    value = ReadFile(filename)
3422
  except IOError, err:
3423
    if err.errno != errno.ENOENT:
3424
      raise
3425
    value = None
3426

    
3427
  if value is not None:
3428
    try:
3429
      value = int(value)
3430
    except ValueError:
3431
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3432
                       " removing it"), filename)
3433
      RemoveFile(filename)
3434
      value = None
3435

    
3436
    if value is not None:
3437
      # Remove file if it's outdated
3438
      if now > (value + remove_after):
3439
        RemoveFile(filename)
3440
        value = None
3441

    
3442
      elif now > value:
3443
        value = None
3444

    
3445
  return value
3446

    
3447

    
3448
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which were passed by the retried function to L{RetryAgain}
  are preserved in this exception. If such an argument was itself an
  exception, the L{RaiseInner} helper method will re-raise it.

  """
  def RaiseInner(self):
    """Re-raises a wrapped exception, or this timeout as a fallback.

    """
    args = self.args
    if args and isinstance(args[0], Exception):
      raise args[0]
    raise RetryTimeout(*args)
3461

    
3462

    
3463
class RetryAgain(Exception):
  """Retry again.

  Raised by a function called through L{Retry} to request another attempt.
  Its arguments are preserved and, should the retry loop eventually time
  out, become the arguments of the resulting L{RetryTimeout}; an exception
  passed this way can later be re-raised via L{RetryTimeout.RaiseInner}.

  """
3471

    
3472

    
3473
class _RetryDelayCalculator(object):
3474
  """Calculator for increasing delays.
3475

3476
  """
3477
  __slots__ = [
3478
    "_factor",
3479
    "_limit",
3480
    "_next",
3481
    "_start",
3482
    ]
3483

    
3484
  def __init__(self, start, factor, limit):
3485
    """Initializes this class.
3486

3487
    @type start: float
3488
    @param start: Initial delay
3489
    @type factor: float
3490
    @param factor: Factor for delay increase
3491
    @type limit: float or None
3492
    @param limit: Upper limit for delay or None for no limit
3493

3494
    """
3495
    assert start > 0.0
3496
    assert factor >= 1.0
3497
    assert limit is None or limit >= 0.0
3498

    
3499
    self._start = start
3500
    self._factor = factor
3501
    self._limit = limit
3502

    
3503
    self._next = start
3504

    
3505
  def __call__(self):
3506
    """Returns current delay and calculates the next one.
3507

3508
    """
3509
    current = self._next
3510

    
3511
    # Update for next run
3512
    if self._limit is None or self._next < self._limit:
3513
      self._next = min(self._limit, self._next * self._factor)
3514

    
3515
    return current
3516

    
3517

    
3518
#: Special delay to specify whole remaining timeout
3519
RETRY_REMAINING_TIME = object()
3520

    
3521

    
3522
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function
  @raise RetryTimeout: When the timeout expires; carries the arguments of
    the last L{RetryAgain} raised by C{fn}

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  # Normalize the "delay" argument into calc_delay: a callable returning
  # the next delay, or None meaning "wait for the whole remaining time"
  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      # Keep the arguments for a possible RetryTimeout below
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      # RETRY_REMAINING_TIME: sleep until the deadline in one go
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
3602

    
3603

    
3604
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to L{tempfile.mkstemp}; the file
  descriptor is closed immediately, only the path is returned.

  @rtype: string
  @return: Path to the created temporary file

  """
  (tmpfd, tmpname) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(tmpfd)
  return tmpname
3611

    
3612

    
3613
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple; (string, string)
  @return: Private key and certificate, both in PEM format

  """
  # Create private and public key
  pkey = OpenSSL.crypto.PKey()
  pkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate, valid from now for the given period
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  # Self-signed: issuer equals subject
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(pkey)
  cert.sign(pkey, constants.X509_CERT_SIGN_DIGEST)

  pem_key = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey)
  pem_cert = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (pem_key, pem_cert)
3641

    
3642

    
3643
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3644
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
3645
  """Legacy function to generate self-signed X509 certificate.
3646

3647
  @type filename: str
3648
  @param filename: path to write certificate to
3649
  @type common_name: string
3650
  @param common_name: commonName value
3651
  @type validity: int
3652
  @param validity: validity of certificate in number of days
3653

3654
  """
3655
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
3656
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3657
  # and node daemon certificates have the proper Subject/Issuer.
3658
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3659
                                                   validity * 24 * 60 * 60)
3660

    
3661
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3662

    
3663

    
3664
class FileLock(object):
  """Utility class for file locks.

  Wraps C{fcntl.flock} on an open file object; locks are released when the
  file is closed (explicitly via L{Close} or implicitly on deletion).

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    """Releases the lock by closing the file on object deletion.

    """
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # hasattr guards against partially-constructed instances in __del__
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      # Single attempt; blocks iff the caller requested blocking mode
      self._Lock(self.fd, flag, timeout)
    else:
      # Retry non-blocking attempts with increasing delay (0.1s, factor
      # 1.2, capped at 1.0s) until the timeout expires
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    """Single flock attempt; raises L{RetryAgain} on EAGAIN in timeout mode.

    """
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      # In timeout mode a busy lock (EAGAIN) just means "try again later"
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
3795

    
3796

    
3797
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    """Accepts a chunk of data, queueing any complete lines.

    """
    pieces = (self._buffer + data).split("\n")
    # Whatever follows the last newline stays buffered
    self._buffer = pieces[-1]
    self._lines.extend(pieces[:-1])

  def flush(self):
    """Passes all queued complete lines to the line function.

    """
    while self._lines:
      line = self._lines.popleft()
      self._line_fn(line.rstrip("\r\n"))

  def close(self):
    """Flushes queued lines, including a trailing unterminated one.

    """
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
3836

    
3837

    
3838
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      handlers = kwargs.get('signal_handlers')
      assert handlers is None or isinstance(handlers, dict), \
             "Wrong signal_handlers parameter in original function call"
      if handlers is None:
        # First decorator in the stack creates the shared dict
        handlers = {}
        kwargs['signal_handlers'] = handlers
      sighandler = SignalHandler(signums)
      try:
        for signum in signums:
          handlers[signum] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous signal handlers
        sighandler.Reset()
    return sig_function
  return wrap
3873

    
3874

    
3875
class SignalWakeupFd(object):
  """Wrapper owning a pipe whose write end is the process' signal wakeup fd.

  The read end is exposed via L{fileno}/L{read}, so callers can include it
  in select/poll sets to detect pending signals; L{Notify} can also write to
  it directly.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported: fall back to a no-op that reports "no previous fd" (-1)
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    # Supported: delegate to the real signal.set_wakeup_fd
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Register the write end as wakeup fd; remember whatever fd was
    # registered before so Reset() can restore it
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # The hasattr guard protects against a partially-constructed instance
    # (__del__ may run even if __init__ failed before setting _previous)
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    # Best-effort restore of the previous wakeup fd
    self.Reset()
3925

    
3926

    
3927
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: Object with a C{Notify} method, notified on signal delivery

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for num in self.signum:
        # Install our handler, remembering the one it replaces
        old_handler = signal.signal(num, self._HandleSignal)
        try:
          self._previous[num] = old_handler
        except:
          # Could not record it: undo this installation before re-raising
          signal.signal(num, old_handler)
          raise
    except:
      # Restore every handler installed so far
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    # Best-effort restore on garbage collection
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for num in list(self._previous):
      signal.signal(num, self._previous[num])
      # Drop the entry only after a successful restore
      del self._previous[num]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    wakeup = self._wakeup
    if wakeup:
      # Notify whoever is interested in signals
      wakeup.Notify()

    handler = self._handler_fn
    if handler:
      handler(signum, frame)
4012

    
4013

    
4014
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Anchor every pattern so it must match the whole field name
    self.items = [re.compile("^%s$" % pattern) for pattern in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items += other_set.items

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for expr in self.items:
      found = expr.match(field)
      if found:
        return found
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [field for field in items if self.Matches(field) is None]
4054

    
4055

    
4056
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  The countdown starts at the first call to L{Remaining}, not at
  construction time.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests
    @raise ValueError: If the timeout is negative

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    # Lazily initialized by the first Remaining() call
    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    @return: C{None} for an unlimited timeout, otherwise the seconds left
      (clamped at zero unless negative values were allowed)

    """
    if self._timeout is None:
      # Unlimited timeout
      return None

    if self._start_time is None:
      # First call: start the countdown now
      self._start_time = self._time_fn()

    left = (self._start_time + self._timeout) - self._time_fn()

    if self._allow_negative:
      return left

    # Ensure timeout is always >= 0
    return max(0.0, left)