Statistics
| Branch: | Tag: | Revision:

root / lib / utils.py @ a535cef7

History | View | Annotate | Download (109.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
# Module-level state and constants

# List of locks held via the lock helpers elsewhere in this module
# (presumably for debugging/cleanup -- confirm against the lock functions)
_locksheld = []
# Characters that are safe to leave unquoted in shell commands
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

#: when True, lock-related debugging is enabled
debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

#: kernel interface providing fresh random UUIDs, one per read
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# NOTE: despite the name, this character class matches all alphanumeric
# characters, not only hexadecimal digits
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

#: service names: 1-128 characters from a restricted set
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

#: canonical 8-4-4-4-12 lowercase-hex UUID format
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                     '[a-f0-9]{4}-[a-f0-9]{12}$')

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2

#: MAC checker regexp
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)

# Timeout actions recorded by _RunCmdPipe and reported via RunResult:
# no timeout hit / SIGTERM sent after timeout / SIGKILL sent after linger
(_TIMEOUT_NONE,
 _TIMEOUT_TERM,
 _TIMEOUT_KILL) = range(3)
102

    
103

    
104
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason, or None
      if the program completed successfully

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
               timeout):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    # Always initialize fail_reason: with __slots__, an unassigned slot
    # raises AttributeError on access, which previously happened for
    # successful commands
    self.fail_reason = None

    if self.failed:
      fail_msgs = []
      if self.signal is not None:
        fail_msgs.append("terminated by signal %s" % self.signal)
      elif self.exit_code is not None:
        fail_msgs.append("exited with exit code %s" % self.exit_code)
      else:
        fail_msgs.append("unable to determine termination reason")

      # Record whether the command was terminated due to a timeout
      if timeout_action == _TIMEOUT_TERM:
        fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
      elif timeout_action == _TIMEOUT_KILL:
        fail_msgs.append(("force termination after timeout of %.2f seconds"
                          " and linger for another %.2f seconds") %
                         (timeout, constants.CHILD_LINGER_TIMEOUT))

      self.fail_reason = CommaJoin(fail_msgs)

      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
165

    
166

    
167
def _BuildCmdEnvironment(env, reset):
168
  """Builds the environment for an external program.
169

170
  """
171
  if reset:
172
    cmd_env = {}
173
  else:
174
    cmd_env = os.environ.copy()
175
    cmd_env["LC_ALL"] = "C"
176

    
177
  if env is not None:
178
    cmd_env.update(env)
179

    
180
  return cmd_env
181

    
182

    
183
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False, timeout=None):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether we pipe stdin, stdout and stderr
                      (default behaviour) or run the command interactive
  @type timeout: int
  @param timeout: If not None, timeout in seconds until child process gets
                  killed
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  # Output capture and interactive stdio passthrough are mutually exclusive
  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")

  # A plain string is run through the shell; a list is exec'ed directly
  if isinstance(cmd, basestring):
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
                                                     interactive, timeout)
    else:
      # File output mode has no timeout support
      timeout_action = _TIMEOUT_NONE
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  # A negative status means the child was terminated by a signal
  # (subprocess convention)
  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
259

    
260

    
261
def SetupDaemonEnv(cwd="/", umask=077):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @param cwd: the directory to which to chdir
  @param umask: the umask to setup

  """
  # Don't keep an arbitrary working directory busy
  os.chdir(cwd)
  os.umask(umask)
  # Become session leader and detach from the controlling terminal
  os.setsid()
274

    
275

    
276
def SetupDaemonFDs(output_file, output_fd):
  """Setups up a daemon's file descriptors.

  At most one of the two parameters may be set; if neither is given,
  all output is discarded to /dev/null.

  @param output_file: if not None, the file to which to redirect
      stdout/stderr
  @param output_fd: if not None, the file descriptor for stdout/stderr

  """
  # check that at most one is defined
  assert [output_file, output_fd].count(None) >= 1

  # Open /dev/null (read-only, only for stdin)
  devnull_fd = os.open(os.devnull, os.O_RDONLY)

  if output_fd is not None:
    # Caller supplied the descriptor; use it as-is
    pass
  elif output_file is not None:
    # Open output file
    try:
      output_fd = os.open(output_file,
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
    except EnvironmentError, err:
      raise Exception("Opening output file failed: %s" % err)
  else:
    # Neither given: discard all output
    output_fd = os.open(os.devnull, os.O_WRONLY)

  # Redirect standard I/O
  os.dup2(devnull_fd, 0)
  os.dup2(output_fd, 1)
  os.dup2(output_fd, 2)
306

    
307

    
308
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  Two pipes are used to communicate with the grandchild: one carries an
  error message (closed-on-exec, so EOF without data means success) and
  one carries the daemon's PID back to the caller.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # 'output' and 'output_fd' are mutually exclusive
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  # Strings are run through the shell; lists are exec'ed directly
  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to
        # arrive) and read up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  # Any data on the error pipe means the daemon failed to start
  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
400

    
401

    
402
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Performs the second fork, sets up file descriptors and the PID file,
  reports the daemon PID through C{pidpipe_write} and finally exec's the
  daemon. Never returns; on any error a message is written to
  C{errpipe_write} and the process exits with status 1.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    SetupDaemonEnv()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      fd_pidfile = WritePidFile(pidfile)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    SetupDaemonFDs(output, fd_output)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    # Replace this process with the daemon; on success the close-on-exec
    # error pipe is closed, signalling success to the parent
    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  os._exit(1) # pylint: disable-msg=W0212
465

    
466

    
467
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is None:
    return

  # Never send an empty message; the reading side expects some payload
  RetryOnSignal(os.write, fd, err or "<unknown error>")
482

    
483

    
484
def _CheckIfAlive(child):
485
  """Raises L{RetryAgain} if child is still alive.
486

487
  @raises RetryAgain: If child is still alive
488

489
  """
490
  if child.poll() is None:
491
    raise RetryAgain()
492

    
493

    
494
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  A timeout is not an error; it is silently ignored and the caller is
  expected to re-check the process state.

  @param child: a C{subprocess.Popen}-like object with a C{poll} method
  @param timeout: maximum time to wait, in seconds (negative values are
      clamped to zero)

  """
  try:
    # Poll with back-off: start at 1.0s, factor 1.2, capped at 5.0s
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
  except RetryTimeout:
    pass
502

    
503

    
504
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after which the program gets terminated
  @rtype: tuple
  @return: (out, err, status, timeout_action); timeout_action is one of
      the module-level C{_TIMEOUT_*} constants

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  if interactive:
    # Interactive mode: inherit the caller's stdio instead of piping
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()

  linger_timeout = None

  if timeout is None:
    poll_timeout = None
  else:
    # Callable returning the remaining seconds until the execution timeout
    poll_timeout = RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))

  timeout_action = _TIMEOUT_NONE

  if not interactive:
    # The child must not wait for input
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    while fdmap:
      if poll_timeout:
        pt = poll_timeout() * 1000
        if pt < 0:
          # Execution timeout expired: send SIGTERM once, then keep
          # draining output during the linger period
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
            linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
          pt = linger_timeout() * 1000
          if pt < 0:
            break
      else:
        pt = None

      pollresult = RetryOnSignal(poller.poll, pt)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status, timeout_action
629

    
630

    
631
def _RunCmdFile(cmd, env, via_shell, output, cwd):
632
  """Run a command and save its output to a file.
633

634
  @type  cmd: string or list
635
  @param cmd: Command to run
636
  @type env: dict
637
  @param env: The environment to use
638
  @type via_shell: bool
639
  @param via_shell: if we should run via the shell
640
  @type output: str
641
  @param output: the filename in which to save the output
642
  @type cwd: string
643
  @param cwd: the working directory for the program
644
  @rtype: int
645
  @return: the exit status
646

647
  """
648
  fh = open(output, "a")
649
  try:
650
    child = subprocess.Popen(cmd, shell=via_shell,
651
                             stderr=subprocess.STDOUT,
652
                             stdout=fh,
653
                             stdin=subprocess.PIPE,
654
                             close_fds=True, env=env,
655
                             cwd=cwd)
656

    
657
    child.stdin.close()
658
    status = child.wait()
659
  finally:
660
    fh.close()
661
  return status
662

    
663

    
664
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    new_flags = old_flags | fcntl.FD_CLOEXEC
  else:
    new_flags = old_flags & ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, new_flags)
681

    
682

    
683
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  current = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    updated = current | os.O_NONBLOCK
  else:
    updated = current & ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, updated)
700

    
701

    
702
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  @param fn: the callable to invoke; positional and keyword arguments
      are passed through
  @return: whatever C{fn} returns
  @raise Exception: any exception raised by C{fn} that is not caused by
      an interrupted system call (EINTR) is re-raised unchanged

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError, err:
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error), err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
717

    
718

    
719
def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    # An unreadable/missing directory is not fatal; just report nothing
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    # Only executable regular files matching the plugin name mask are run
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        # One failing script must not prevent the others from running
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr
754

    
755

    
756
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    # ENOENT: already gone; EISDIR: it's a directory, deliberately ignored
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
771

    
772

    
773
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError, err:
    # Only a missing directory is ignored; ENOTEMPTY etc. propagate
    if err.errno != errno.ENOENT:
      raise
789

    
790

    
791
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    # Any other error (or mkdir disabled) is the caller's problem
    raise
817

    
818

    
819
def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  @type path: string
  @param path: the directory path to create
  @type mode: int
  @param mode: permission mode for newly created directories

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise
833

    
834

    
835
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if not (hasattr(tempfile, "_once_lock") and
          hasattr(tempfile, "_name_sequence")):
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Clearing the sequence forces re-creation (and thus re-seeding)
    # on the next temporary name request
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
856

    
857

    
858
def _FingerprintFile(filename):
859
  """Compute the fingerprint of a file.
860

861
  If the file does not exist, a None will be returned
862
  instead.
863

864
  @type filename: str
865
  @param filename: the filename to checksum
866
  @rtype: str
867
  @return: the hex digest of the sha checksum of the contents
868
      of the file
869

870
  """
871
  if not (os.path.exists(filename) and os.path.isfile(filename)):
872
    return None
873

    
874
  f = open(filename)
875

    
876
  fp = compat.sha1_hash()
877
  while True:
878
    data = f.read(4096)
879
    if not data:
880
      break
881

    
882
    fp.update(data)
883

    
884
  return fp.hexdigest()
885

    
886

    
887
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  # Files without a computable checksum (missing or not regular files)
  # are silently omitted from the result
  return dict((name, cksum)
              for (name, cksum)
              in ((name, _FingerprintFile(name)) for name in files)
              if cksum)
905

    
906

    
907
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  The dictionary is modified in place: values are coerced to the type
  declared for their key, or an exception is raised.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values
  @raise errors.TypeEnforcementError: if a value cannot be coerced
  @raise errors.ProgrammerError: if a declared type is not enforceable

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass type enforcement entirely
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # "maybe string" additionally accepts None
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # A false boolean is treated as the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      # Non-empty strings must spell out a recognized true/false value
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
974

    
975

    
976
def _GetProcStatusPath(pid):
977
  """Returns the path for a PID's proc status file.
978

979
  @type pid: int
980
  @param pid: Process ID
981
  @rtype: string
982

983
  """
984
  return "/proc/%d/status" % pid
985

    
986

    
987
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    # Helper for Retry: stat the proc entry once, translating errors
    # into the retry protocol
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        # the /proc entry is gone, so the process doesn't exist
        return False
      elif err.errno == errno.EINVAL:
        # transient /proc failure, ask Retry to call us again
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    # retries exhausted; re-raise the error which caused the last retry
    err.RaiseInner()


def _ParseSigsetT(sigset):
1023
  """Parse a rendered sigset_t value.
1024

1025
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1026
  function.
1027

1028
  @type sigset: string
1029
  @param sigset: Rendered signal set from /proc/$pid/status
1030
  @rtype: set
1031
  @return: Set of all enabled signal numbers
1032

1033
  """
1034
  result = set()
1035

    
1036
  signum = 0
1037
  for ch in reversed(sigset):
1038
    chv = int(ch, 16)
1039

    
1040
    # The following could be done in a loop, but it's easier to read and
1041
    # understand in the unrolled form
1042
    if chv & 1:
1043
      result.add(signum + 1)
1044
    if chv & 2:
1045
      result.add(signum + 2)
1046
    if chv & 4:
1047
      result.add(signum + 3)
1048
    if chv & 8:
1049
      result.add(signum + 4)
1050

    
1051
    signum += 4
1052

    
1053
  return result
1054

    
1055

    
1056
def _GetProcStatusField(pstatus, field):
1057
  """Retrieves a field from the contents of a proc status file.
1058

1059
  @type pstatus: string
1060
  @param pstatus: Contents of /proc/$pid/status
1061
  @type field: string
1062
  @param field: Name of field whose value should be returned
1063
  @rtype: string
1064

1065
  """
1066
  for line in pstatus.splitlines():
1067
    parts = line.split(":", 1)
1068

    
1069
    if len(parts) < 2 or parts[0] != field:
1070
      continue
1071

    
1072
    return parts[1].strip()
1073

    
1074
  return None
1075

    
1076

    
1077
def IsProcessHandlingSignal(pid, signum, status_path=None):
1078
  """Checks whether a process is handling a signal.
1079

1080
  @type pid: int
1081
  @param pid: Process ID
1082
  @type signum: int
1083
  @param signum: Signal number
1084
  @rtype: bool
1085

1086
  """
1087
  if status_path is None:
1088
    status_path = _GetProcStatusPath(pid)
1089

    
1090
  try:
1091
    proc_status = ReadFile(status_path)
1092
  except EnvironmentError, err:
1093
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
1094
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
1095
      return False
1096
    raise
1097

    
1098
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
1099
  if sigcgt is None:
1100
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
1101

    
1102
  # Now check whether signal is handled
1103
  return signum in _ParseSigsetT(sigcgt)
1104

    
1105

    
1106
def ReadPidFile(pidfile):
1107
  """Read a pid from a file.
1108

1109
  @type  pidfile: string
1110
  @param pidfile: path to the file containing the pid
1111
  @rtype: int
1112
  @return: The process id, if the file exists and contains a valid PID,
1113
           otherwise 0
1114

1115
  """
1116
  try:
1117
    raw_data = ReadOneLineFile(pidfile)
1118
  except EnvironmentError, err:
1119
    if err.errno != errno.ENOENT:
1120
      logging.exception("Can't read pid file")
1121
    return 0
1122

    
1123
  try:
1124
    pid = int(raw_data)
1125
  except (TypeError, ValueError), err:
1126
    logging.info("Can't parse pid file contents", exc_info=True)
1127
    return 0
1128

    
1129
  return pid
1130

    
1131

    
1132
def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running and holds the lock, so the
      # file contents identify a live process
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  # Lock was acquired, meaning no process holds the file: stale PID file
  return None


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # an exact entry always wins
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()

  # the key may be followed only by a dot-separated suffix
  name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)

  prefix_matches = []
  full_matches = []
  for entry in name_list:
    if name_re.match(entry) is None:
      continue
    prefix_matches.append(entry)
    if not case_sensitive and key == entry.upper():
      full_matches.append(entry)

  if len(full_matches) == 1:
    return full_matches[0]
  elif len(prefix_matches) == 1:
    return prefix_matches[0]
  else:
    return None


def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @raise errors.OpPrereqError: if the service name is invalid
  @return: the name, unchanged, if it validates

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = 0 <= numport < (1 << 16)

  if valid:
    return name

  raise errors.OpPrereqError("Invalid service name '%s'" % name,
                             errors.ECODE_INVAL)


def ListVolumeGroups():
1234
  """List volume groups and their size
1235

1236
  @rtype: dict
1237
  @return:
1238
       Dictionary with keys volume name and values
1239
       the size of the volume
1240

1241
  """
1242
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1243
  result = RunCmd(command)
1244
  retval = {}
1245
  if result.failed:
1246
    return retval
1247

    
1248
  for line in result.stdout.splitlines():
1249
    try:
1250
      name, size = line.split()
1251
      size = int(float(size))
1252
    except (IndexError, ValueError), err:
1253
      logging.error("Invalid output from vgs (%s): %s", err, line)
1254
      continue
1255

    
1256
    retval[name] = size
1257

    
1258
  return retval
1259

    
1260

    
1261
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # a network device is a bridge iff sysfs exposes a "bridge" directory
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)


def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  group_pat = r"(\D+|\d+)"
  # one mandatory group followed by up to seven optional ones
  full_re = re.compile("^" + group_pat + (group_pat + "?") * 7 + ".*$")
  nodigit_re = re.compile(r"^\D*$")

  def _Keyify(text):
    """Convert an all-digit group into an integer, leave the rest alone."""
    if text is None or nodigit_re.match(text):
      return text
    return int(text)

  keyed = []
  for name in name_list:
    groups = full_re.match(name).groups()
    keyed.append(([_Keyify(grp) for grp in groups], name))
  keyed.sort()

  return [name for (_, name) in keyed]


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    # conversion failed; hand the original value back
    return val


def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  # whitelist of characters which can never be shell metacharacters
  safe_word_re = "^[-a-zA-Z0-9._+/:%@]+$"
  return re.match(safe_word_re, word) is not None


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument is not shell-safe

  """
  unsafe = [word for word in args if not IsValidShellParam(word)]
  if unsafe:
    raise errors.ProgrammerError("Shell argument '%s' contains"
                                 " invalid characters" % unsafe[0])
  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  if units == 'h':
    # automatic scaling: choose the largest unit that keeps the value
    # readable, and attach the matching suffix
    if value < 1024:
      (units, suffix) = ('m', 'M')
    elif value < (1024 * 1024):
      (units, suffix) = ('g', 'G')
    else:
      (units, suffix) = ('t', 'T')
  else:
    # explicit units carry no suffix
    suffix = ''

  if units == 'm':
    return "%d%s" % (round(value, 0), suffix)
  elif units == 'g':
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
  else:
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB (rounded up to the next multiple of 4).

  @type input_string: str
  @param input_string: the value to parse
  @rtype: int
  @return: the parsed value in MiB
  @raise errors.UnitParseError: when the input is not a valid
      number/unit combination

  """
  m = re.match(r'^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  try:
    value = float(m.groups()[0])
  except ValueError:
    # the "[.\d]+" character class also accepts strings like "1.2.3"
    # which are not valid floats; report these as a parse error instead
    # of leaking a bare ValueError to the caller
    raise errors.UnitParseError("Invalid format")

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    # no unit given, default to MiB
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value


def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: on malformed range definitions

  """
  if not cpu_mask:
    return []

  def _ParseBoundary(text, side):
    """Convert one range boundary to an integer CPU ID."""
    try:
      return int(text)
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for %s boundary of"
                              " CPU ID range: %s" % (side, str(err)))

  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    if len(boundaries) > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    lower = _ParseBoundary(boundaries[0], "lower")
    # for a single ID, boundaries[-1] is the same element as boundaries[0]
    higher = _ParseBoundary(boundaries[-1], "higher")
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))

  return cpu_list


def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  The key is only appended if it is not already present (comparison
  ignores whitespace differences).

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      # EOF reached without finding the key: append it, making sure the
      # last existing line is newline-terminated first
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    # note: the handle is closed even when it was passed in by the caller
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  The file is rewritten through a temporary file in the same directory
  which is then renamed over the original, so readers never see a
  partially-written file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    # clean up the temporary file on any failure, then re-raise
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  Pre-existing entries for the same IP address are dropped and
  replaced by a single new line; the file is rewritten atomically
  through L{WriteFile}.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique (and drop aliases equal to the hostname)
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        # skip existing (non-comment) entries for this IP address
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          continue
        out.write(line)

      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)


def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  The host's short name (first dot-separated component) is added as an
  alias for the full name.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [hostname.split(".")[0]])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            # drop every occurrence of the hostname; keep the entry only
            # if other names remain, otherwise drop the whole line
            while hostname in names:
              names.remove(hostname)
            if names:
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        # comment lines and unrelated entries are copied verbatim
        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
  # also remove the short (first dot-separated component) name
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  @rtype: str
  @return: the local time formatted as C{%Y-%m-%d_%H_%M_%S}

  """
  filename_safe_fmt = "%Y-%m-%d_%H_%M_%S"
  return time.strftime(filename_safe_fmt)


def CreateBackup(file_name):
  """Creates a backup of a file.

  The backup is created in the same directory as the original, with a
  name derived from the original name and the current timestamp.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    # mkstemp guarantees a unique, non-clobbering backup file name
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the value, single-quoted if it contains unsafe characters

  """
  if _re_shell_unquoted.match(value):
    # contains only characters that need no quoting
    return value

  # wrap in single quotes; an embedded single quote becomes '\''
  escaped = value.replace("'", "'\\''")
  return "'" + escaped + "'"


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return " ".join(ShellQuote(arg) for arg in args)


class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # string prepended once per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: file-like object the generated script is written to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent += 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    assert self._indent > 0
    self._indent -= 1

  def Write(self, txt, *args):
    """Write line to output file.

    @type txt: string
    @param txt: line contents, or a %-format template if C{args} is given

    """
    assert self._indent >= 0

    if args:
      body = txt % args
    else:
      body = txt

    self._fh.write(self.INDENT_STR * self._indent)
    self._fh.write(body)
    self._fh.write("\n")


def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise errors.ProgrammerError: if L{path} is not an absolute and
      normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)

  return [name for name in os.listdir(path) if not name.startswith(".")]


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @type user: string or int
  @param user: the user name or numeric user id
  @param default: value to return when the user cannot be found
  @return: the user's home directory, or I{default}

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # unknown user name or uid
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  # the kernel generates a fresh UUID on every read of this file
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @type numbytes: int
  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  # random bytes from the OS, hex-encoded (two hex digits per byte)
  return os.urandom(numbytes).encode('hex')


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: if a directory cannot be created or
      chmod-ed, or if a path exists but is not a directory

  """
  def _EnsureSingleDir(dir_name, dir_mode):
    # Create the directory if missing; an already-existing path is
    # tolerated (EEXIST), since it is validated below
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError as err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    # Enforce the requested permissions even on pre-existing directories
    try:
      os.chmod(dir_name, dir_mode)
    except EnvironmentError as err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)

  for (dir_name, dir_mode) in dirs:
    _EnsureSingleDir(dir_name, dir_mode)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type file_name: str
  @param file_name: path of the file to read
  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  fh = open(file_name, "r")
  try:
    data = fh.read(size)
  finally:
    fh.close()
  return data


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type dry_run: boolean
  @param dry_run: whether to write everything but skip renaming the
      temporary file over the target
  @type backup: boolean
  @param backup: whether to create a backup of an existing target
      before overwriting it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # all content is written to a temporary file in the same directory,
  # which is then renamed over the target (atomic on POSIX filesystems)
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    # ownership/permissions are applied to the temporary file before the
    # content is written, so the target never exists with wrong bits
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # flush to disk before the rename makes the new content visible
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      # the temporary file has become the target, nothing to clean up
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result


def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Exactly one of the path to the file or an open fd must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)
  @raise errors.ProgrammerError: if neither or both arguments are given

  """
  # either both set or both unset is a caller bug
  if (path is None) == (fd is None):
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  if fd is not None:
    st = os.fstat(fd)
  else:
    st = os.stat(path)

  return (st.st_dev, st.st_ino, st.st_mtime)

def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  The device and inode must match exactly; an older (or equal)
  timestamp for C{fi_disk} is accepted.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (our_dev, our_ino, our_mtime) = fi_ours

  if (disk_dev, disk_ino) != (our_dev, our_ino):
    return False

  # the on-disk file may not be newer than what we last wrote
  return disk_mtime <= our_mtime

def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}; if None, the
      modification check is skipped and the file is overwritten
      unconditionally
  @raise errors.LockError: if the on-disk file no longer matches
      C{file_id}, i.e. someone else modified it since it was read

  """
  # O_CREAT so that the lock can be taken even before the file exists
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  try:
    LockFile(fd)
    if file_id is not None:
      # compare the locked file's identity against what the caller saw
      disk_id = GetFileID(fd=fd)
      if not VerifyFileID(disk_id, file_id):
        raise errors.LockError("Cannot overwrite file %s, it has been modified"
                               " since last written" % file_name)
    return WriteFile(file_name, **kwargs)
  finally:
    # closing the descriptor also releases the lock
    os.close(fd)

def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  @raise errors.GenericError: if the file contains no non-empty line,
      or (in strict mode) more than one

  """
  nonempty = [line
              for line in ReadFile(file_name).splitlines()
              if line]
  if not nonempty:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  if strict and len(nonempty) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return nonempty[0]

def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  candidate = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > candidate:
      # there's a hole in the sequence just before this element
      return candidate
    candidate += 1
  return None

def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption (EINTR), which is
  reported as a timeout (None); use L{WaitForFdCondition} to keep
  waiting across interruptions.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # also report exceptional conditions (error/hangup/invalid fd), even
  # though we only register for the caller-requested events below
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    # a signal interruption is treated as "nothing happened"; any
    # other poll error is propagated
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    # at most one fd is registered, so io_events[0] is our fd
    return io_events[0][1]
  else:
    return None

class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """
  def __init__(self, timeout):
    # remaining timeout for the next single wait, updated by Retry
    # via UpdateTimeout
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Do a single wait; ask Retry to call us again if nothing happened."""
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Record the remaining time before the next poll attempt."""
    self.timeout = timeout

def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is None:
    # no deadline: keep doing single waits until an event occurs,
    # silently absorbing signal interruptions
    occurred = None
    while occurred is None:
      occurred = SingleWaitForFdCondition(fdobj, event, None)
    return occurred

  # with a deadline, delegate the re-waiting to Retry, which shrinks
  # the helper's remaining time before every attempt
  helper = FdConditionWaiterHelper(timeout)
  try:
    return Retry(helper.Poll, RETRY_REMAINING_TIME, timeout,
                 args=(fdobj, event), wait_fn=helper.UpdateTimeout)
  except RetryTimeout:
    return None

def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  unique = []
  for item in seq:
    if item not in seen:
      seen.add(item)
      unique.append(item)
  return unique

def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  if _MAC_CHECK.match(mac) is None:
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)
  return mac.lower()

def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration, in seconds
  @rtype: tuple
  @return: (success, message) pair: C{(False, error message)} for a
      negative duration, C{(True, None)} after sleeping otherwise

  """
  if duration >= 0:
    time.sleep(duration)
    return True, None
  return False, "Invalid sleep duration"

def _CloseFDNoErr(fd, retries=5):
2227
  """Close a file descriptor ignoring errors.
2228

2229
  @type fd: int
2230
  @param fd: the file descriptor
2231
  @type retries: int
2232
  @param retries: how many retries to make, in case we get any
2233
      other error than EBADF
2234

2235
  """
2236
  try:
2237
    os.close(fd)
2238
  except OSError, err:
2239
    if err.errno != errno.EBADF:
2240
      if retries > 0:
2241
        _CloseFDNoErr(fd, retries - 1)
2242
    # else either it's closed already or we're out of retries, so we
2243
    # ignore this and go on
2244

    
2245

    
2246
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        # sysconf may return -1 for "undefined"; use a sane default
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  # prefer the hard rlimit; if it's unlimited, fall back to the
  # sysconf-derived default computed above
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    # errors (e.g. fd not open) are deliberately ignored
    _CloseFDNoErr(fd)

def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  Failures from libc (e.g. missing privileges) are only logged, not
  raised. The C{_ctypes} parameter exists for dependency injection in
  unit tests.

  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # non-zero return means the call failed; report the libc errno
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")

def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon, using the classic double-fork
  so the daemon is re-parented to init.

  Only the daemonized grandchild returns from this function; both
  intermediate parents terminate via C{os._exit}.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: (in the daemon process) the write end of the pipe on which
      startup errors can be reported back to the original parent

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      # the daemon keeps only the write end of the pipe
      _CloseFDNoErr(rpipe)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    # the original process keeps only the read end of the pipe
    _CloseFDNoErr(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode) # Exit parent of the first child.

  # only the daemon (second child) reaches this point
  SetupDaemonFDs(logfile, None)
  return wpipe

def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile)

def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the daemon is running (or was started), False on
      failure

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False

def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the stop command succeeded, False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False

def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: string
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  # note: stale content from a longer previous pid can remain after the
  # newline; readers should only use the first line
  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile

def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  # deliberate best-effort cleanup: ignore every failure
  except: # pylint: disable-msg=W0702
    pass

def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  @raise errors.ProgrammerError: if C{pid} is not a strictly positive
      number

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    # os.kill may race with process death; ESRCH is swallowed by
    # IgnoreProcessNotFound
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        # non-blocking reap so our own child doesn't linger as a zombie
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    # caller asked for fire-and-forget: no SIGKILL escalation
    return

  def _CheckProcess():
    """Raises L{RetryAgain} while the process still appears alive."""
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      # e.g. ECHILD: not our child, keep polling via IsProcessAlive
      raise RetryAgain()

    if result_pid > 0:
      # the child has been reaped, so it's really gone
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)

def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for directory in search_path:
    # FIXME: investigate switch to PathJoin
    candidate = os.path.sep.join([directory, name])
    # accept only entries passing the user test which also resolve to
    # exactly the requested basename
    if test(candidate) and os.path.basename(candidate) == name:
      return candidate

  return None

def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None

def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_micro = int(value * 1000000)
  seconds = total_micro // 1000000
  microseconds = total_micro % 1000000

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))

def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert secs >= 0, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  # keep the exact float expression used elsewhere (inverse of SplitTime)
  return float(secs) + (float(usecs) * 0.000001)

class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    @type filename: str
    @param filename: log file to open
    @type mode: str
    @param mode: file open mode (append by default)
    @param encoding: optional encoding passed through to FileHandler

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    # kept open for the whole handler lifetime as the error channel
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass

def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  # build two format strings: "fmt" for file/stderr, "sft" for syslog
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  # the root logger accepts everything; filtering happens per-handler
  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permisssion problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise

def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  @type path: str
  @param path: the path to check
  @rtype: boolean

  """
  if not os.path.isabs(path):
    return False
  # normalization must be a no-op for an already-normalized path
  return os.path.normpath(path) == path

def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths
  @rtype: str
  @return: the joined, normalized absolute path

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  # (an absolute later component would make os.path.join discard root)
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result

def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  handle = open(fname, "r")
  try:
    # seek to (at most) 4KB before the end of the file
    handle.seek(0, 2)
    start = max(0, handle.tell() - 4096)
    handle.seek(start, 0)
    raw_data = handle.read()
  finally:
    handle.close()

  return raw_data.splitlines()[-lines:]

def FormatTimestampWithTZ(secs):
2795
  """Formats a Unix timestamp with the local timezone.
2796

2797
  """
2798
  return time.strftime("%F %T %Z", time.gmtime(secs))
2799

    
2800

    
2801
def _ParseAsn1Generalizedtime(value):
2802
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2803

2804
  @type value: string
2805
  @param value: ASN1 GENERALIZEDTIME timestamp
2806

2807
  """
2808
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2809
  if m:
2810
    # We have an offset
2811
    asn1time = m.group(1)
2812
    hours = int(m.group(2))
2813
    minutes = int(m.group(3))
2814
    utcoffset = (60 * hours) + minutes
2815
  else:
2816
    if not value.endswith("Z"):
2817
      raise ValueError("Missing timezone")
2818
    asn1time = value[:-1]
2819
    utcoffset = 0
2820

    
2821
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2822

    
2823
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2824

    
2825
  return calendar.timegm(tt.utctimetuple())
2826

    
2827

    
2828
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @rtype: tuple
  @return: (not_before, not_after) pair; each element is a Unix
      timestamp, or None if the information is unavailable

  """
  def _GetTimestamp(accessor_name):
    """Extracts and parses one boundary, tolerating old pyOpenSSL."""
    # The get_notBefore and get_notAfter functions are only supported in
    # pyOpenSSL 0.7 and above.
    accessor = getattr(cert, accessor_name, None)
    if accessor is None:
      return None
    asn1_value = accessor()
    if asn1_value is None:
      return None
    return _ParseAsn1Generalizedtime(asn1_value)

  return (_GetTimestamp("get_notBefore"), _GetTimestamp("get_notAfter"))

def _VerifyCertificateInner(expired, not_before, not_after, now,
2865
                            warn_days, error_days):
2866
  """Verifies certificate validity.
2867

2868
  @type expired: bool
2869
  @param expired: Whether pyOpenSSL considers the certificate as expired
2870
  @type not_before: number or None
2871
  @param not_before: Unix timestamp before which certificate is not valid
2872
  @type not_after: number or None
2873
  @param not_after: Unix timestamp after which certificate is invalid
2874
  @type now: number
2875
  @param now: Current time as Unix timestamp
2876
  @type warn_days: number or None
2877
  @param warn_days: How many days before expiration a warning should be reported
2878
  @type error_days: number or None
2879
  @param error_days: How many days before expiration an error should be reported
2880

2881
  """
2882
  if expired:
2883
    msg = "Certificate is expired"
2884

    
2885
    if not_before is not None and not_after is not None:
2886
      msg += (" (valid from %s to %s)" %
2887
              (FormatTimestampWithTZ(not_before),
2888
               FormatTimestampWithTZ(not_after)))
2889
    elif not_before is not None:
2890
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2891
    elif not_after is not None:
2892
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2893

    
2894
    return (CERT_ERROR, msg)
2895

    
2896
  elif not_before is not None and not_before > now:
2897
    return (CERT_WARNING,
2898
            "Certificate not yet valid (valid from %s)" %
2899
            FormatTimestampWithTZ(not_before))
2900

    
2901
  elif not_after is not None:
2902
    remaining_days = int((not_after - now) / (24 * 3600))
2903

    
2904
    msg = "Certificate expires in about %d days" % remaining_days
2905

    
2906
    if error_days is not None and remaining_days <= error_days:
2907
      return (CERT_ERROR, msg)
2908

    
2909
    if warn_days is not None and remaining_days <= warn_days:
2910
      return (CERT_WARNING, msg)
2911

    
2912
  return (None, None)
2913

    
2914

    
2915
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @return: Result of L{_VerifyCertificateInner}

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  now = time.time()

  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 now, warn_days, error_days)
2931

    
2932

    
2933
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format
  @raise errors.GenericError: If the salt contains invalid characters

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  hmac_sig = Sha1Hmac(key, cert_pem, salt=salt)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt, hmac_sig, cert_pem))
2958

    
2959

    
2960
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format with signature header
  @rtype: tuple; (string, string)
  @return: Salt and signature
  @raise errors.GenericError: If no signature header was found

  """
  # Only the header part is searched; scanning stops at the first PEM
  # boundary line ("-----BEGIN ...")
  header_lines = itertools.takewhile(lambda ln: not ln.startswith("---"),
                                     cert_pem.splitlines())

  for line in header_lines:
    found = X509_SIGNATURE.match(line.strip())
    if found:
      return (found.group("salt"), found.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
2974

    
2975

    
2976
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: If the signature doesn't match

  """
  (salt, hmac_sig) = _ExtractX509CertificateSignature(cert_pem)

  # Load certificate
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, hmac_sig, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
2999

    
3000

    
3001
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to compute the digest of
  @type salt: string or None
  @param salt: Optional salt prepended to C{text} before hashing
  @rtype: string
  @return: Hexadecimal digest

  """
  if salt:
    payload = salt + text
  else:
    payload = text

  return hmac.new(key, payload, compat.sha1).hexdigest()
3017

    
3018

    
3019
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text whose digest is being verified
  @type digest: string
  @param digest: Expected digest
  @type salt: string or None
  @param salt: Optional salt used when computing the digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # Case-insensitive comparison of the hexadecimal digests
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
3034

    
3035

    
3036
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if not isinstance(text, str):
    # e.g. unicode input; coerce to ASCII, escaping non-ASCII characters
    text = text.encode('ascii', 'backslashreplace')
  # Collect the pieces and join once instead of repeated "+=", which is
  # quadratic in the worst case
  buf = []
  for char in text:
    c = ord(char)
    if char == '\t':
      buf.append(r'\t')
    elif char == '\n':
      buf.append(r'\n')
    elif char == '\r':
      # this branch used to (wrongly) emit "\'r" instead of "\r"
      buf.append(r'\r')
    elif c < 32 or c >= 127: # non-printable
      buf.append("\\x%02x" % (c & 0xff))
    else:
      buf.append(char)
  return "".join(buf)
3071

    
3072

    
3073
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escape in order to be an element of the
  elements. The escaping rules are (assuming coma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \\, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list of strings
  @return: the list of unescaped components

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
3113

    
3114

    
3115
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join(str(val) for val in names)
3123

    
3124

    
3125
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)
  @return: C{None} if nothing was found

  """
  try:
    # Direct key lookup first
    return (data[name], [])
  except KeyError:
    pass

  for (key, value) in data.items():
    # Regex objects are recognized by their "match" method
    matcher = getattr(key, "match", None)
    if matcher is not None:
      found = matcher(name)
      if found:
        return (value, list(found.groups()))

  return None
3150

    
3151

    
3152
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes, rounded to the nearest whole number

  """
  mebibytes = value / (1024.0 * 1024.0)
  return int(round(mebibytes, 0))
3162

    
3163

    
3164
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  total_bytes = 0

  for (dirpath, _, filenames) in os.walk(path):
    # lstat is used, so symlinks contribute their own size, not the target's
    total_bytes += sum(os.lstat(PathJoin(dirpath, name)).st_size
                       for name in filenames)

  return BytesToMebibyte(total_bytes)
3181

    
3182

    
3183
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  result = []
  for line in ReadFile(filename).splitlines():
    # The fifth field (dump/pass) is left inside "options" untouched; only
    # the first four fields are split out
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    result.append((device, mountpoint, fstype, options))

  return result
3201

    
3202

    
3203
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple of int
  @return: tuple of (total space, free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_frsize is the fundamental block size the counters are expressed in;
  # f_bavail counts blocks available to unprivileged users
  total = BytesToMebibyte(stats.f_blocks * stats.f_frsize)
  free = BytesToMebibyte(stats.f_bavail * stats.f_frsize)

  return (total, free)
3217

    
3218

    
3219
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: If the child died from a signal or exited with
    an unexpected status code

  """
  pid = os.fork()

  if pid == 0:
    # Child: run the function and report the result via the exit code
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      child_status = int(bool(fn(*args)))
      assert child_status in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      child_status = 33

    os._exit(child_status) # pylint: disable-msg=W0212

  # Parent: reap the child (avoiding zombies) and decode its exit status
  (_, wait_status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(wait_status):
    (exitcode, signum) = (None, os.WTERMSIG(wait_status))
  else:
    (exitcode, signum) = (os.WEXITSTATUS(wait_status), None)

  if signum is not None or exitcode not in (0, 1):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3264

    
3265

    
3266
def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @rtype: bool
  @return: Whether process was found

  """
  try:
    fn(*args, **kwargs)
  except EnvironmentError:
    # sys.exc_info() is used instead of "except ..., err" so this code is
    # valid under both Python 2 and 3
    err = sys.exc_info()[1]
    # ESRCH means "no such process"; anything else is propagated
    if err.errno == errno.ESRCH:
      return False
    raise

  return True
3284

    
3285

    
3286
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  @return: C{None} if the call was interrupted (EINTR), otherwise the
    function's return value

  """
  try:
    return fn(*args, **kwargs)
  except EnvironmentError:
    # sys.exc_info() keeps this compatible with both Python 2 and 3 syntax
    if sys.exc_info()[1].errno == errno.EINTR:
      return None
    raise
  except (select.error, socket.error):
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    err = sys.exc_info()[1]
    if err.args and err.args[0] == errno.EINTR:
      return None
    raise
3304

    
3305

    
3306
def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock
  @raise errors.LockError: if another process holds the lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError:
    # sys.exc_info() keeps this valid under both Python 2 and 3
    err = sys.exc_info()[1]
    if err.errno != errno.EAGAIN:
      raise
    raise errors.LockError("File already locked")
3319

    
3320

    
3321
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  # Anything that isn't a number (including None) can't be formatted
  if not isinstance(val, (int, float)):
    return "N/A"
  # these two codes works on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
3334

    
3335

    
3336
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  secs = round(secs, 0)

  parts = []

  if secs > 0:
    # Negative values would be a bit tricky
    for (suffix, period) in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (count, secs) = divmod(secs, period)
      # Once the first unit was emitted, all smaller ones follow, even if zero
      if count or parts:
        parts.append("%d%s" % (count, suffix))

  parts.append("%ds" % secs)

  return " ".join(parts)
3359

    
3360

    
3361
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time
  @rtype: int or None
  @return: Pause end timestamp, or C{None} if there is no current pause

  """
  if now is None:
    now = time.time()

  try:
    raw_value = ReadFile(filename)
  except IOError:
    # A missing file simply means no pause; other errors are propagated
    if sys.exc_info()[1].errno != errno.ENOENT:
      raise
    return None

  try:
    value = int(raw_value)
  except ValueError:
    logging.warning(("Watcher pause file (%s) contains invalid value,"
                     " removing it"), filename)
    RemoveFile(filename)
    return None

  # Remove file if it's outdated
  if now > (value + remove_after):
    RemoveFile(filename)
    return None

  # An already-elapsed pause is reported as no pause, but the file is kept
  # until "remove_after" seconds have passed
  if now > value:
    return None

  return value
3402

    
3403

    
3404
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raises a wrapped exception, or a copy of this timeout.

    """
    inner = self.args[0] if self.args else None
    if isinstance(inner, Exception):
      raise inner
    raise RetryTimeout(*self.args)
3417

    
3418

    
3419
class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout() method can be used to reraise it.

  """
3427

    
3428

    
3429
class _RetryDelayCalculator(object):
3430
  """Calculator for increasing delays.
3431

3432
  """
3433
  __slots__ = [
3434
    "_factor",
3435
    "_limit",
3436
    "_next",
3437
    "_start",
3438
    ]
3439

    
3440
  def __init__(self, start, factor, limit):
3441
    """Initializes this class.
3442

3443
    @type start: float
3444
    @param start: Initial delay
3445
    @type factor: float
3446
    @param factor: Factor for delay increase
3447
    @type limit: float or None
3448
    @param limit: Upper limit for delay or None for no limit
3449

3450
    """
3451
    assert start > 0.0
3452
    assert factor >= 1.0
3453
    assert limit is None or limit >= 0.0
3454

    
3455
    self._start = start
3456
    self._factor = factor
3457
    self._limit = limit
3458

    
3459
    self._next = start
3460

    
3461
  def __call__(self):
3462
    """Returns current delay and calculates the next one.
3463

3464
    """
3465
    current = self._next
3466

    
3467
    # Update for next run
3468
    if self._limit is None or self._next < self._limit:
3469
      self._next = min(self._limit, self._next * self._factor)
3470

    
3471
    return current
3472

    
3473

    
3474
#: Special delay to specify whole remaining timeout
3475
RETRY_REMAINING_TIME = object()
3476

    
3477

    
3478
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  # Normalize the four supported "delay" forms into either a callable or
  # None (None meaning "always wait for the whole remaining time")
  if callable(delay):
    # External function to calculate delay
    calc_delay = delay
  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)
  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None
  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain:
      # sys.exc_info() keeps this valid under both Python 2 and 3 syntax
      retry_args = sys.exc_info()[1].args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
3558

    
3559

    
3560
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed to L{tempfile.mkstemp}; the file descriptor it
  returns is closed before returning, leaving only the path behind.

  @rtype: string
  @return: Path to the temporary file

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(fd)
  return path
3567

    
3568

    
3569
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple; (string, string)
  @return: Private key and certificate, both in PEM format

  """
  # Create private and public key
  pkey = OpenSSL.crypto.PKey()
  pkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  # Issuer equals subject, making the certificate self-signed
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(pkey)
  cert.sign(pkey, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
3597

    
3598

    
3599
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3600
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
3601
  """Legacy function to generate self-signed X509 certificate.
3602

3603
  @type filename: str
3604
  @param filename: path to write certificate to
3605
  @type common_name: string
3606
  @param common_name: commonName value
3607
  @type validity: int
3608
  @param validity: validity of certificate in number of days
3609

3610
  """
3611
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
3612
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3613
  # and node daemon certificates have the proper Subject/Issuer.
3614
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3615
                                                   validity * 24 * 60 * 60)
3616

    
3617
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3618

    
3619

    
3620
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    fd = os.open(filename, os.O_RDWR | os.O_CREAT)
    return cls(os.fdopen(fd, "w+"), filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # getattr protects against partially-constructed instances
    fd = getattr(self, "fd", None)
    if fd:
      fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if timeout is not None or not blocking:
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
      return

    try:
      Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
            args=(self.fd, flag, timeout))
    except RetryTimeout:
      raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError:
      # sys.exc_info() keeps this valid under both Python 2 and 3 syntax
      err = sys.exc_info()[1]
      # EAGAIN with a timeout means "try again via Retry"
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
3751

    
3752

    
3753
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    """Buffers data and extracts complete lines from it.

    """
    pieces = (self._buffer + data).split("\n")
    # The last piece is either empty or an incomplete line; keep it buffered
    self._buffer = pieces.pop()
    self._lines.extend(pieces)

  def flush(self):
    """Invokes the line function for every complete line.

    """
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    """Flushes pending lines, including a trailing incomplete one.

    """
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
3792

    
3793

    
3794
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      handlers = kwargs.get('signal_handlers')
      assert handlers is None or isinstance(handlers, dict), \
             "Wrong signal_handlers parameter in original function call"
      if handlers is None:
        # First decorator in the stack creates the shared dict
        handlers = {}
        kwargs['signal_handlers'] = handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
3829

    
3830

    
3831
class SignalWakeupFd(object):
  """Wakeup file descriptor for signal notification.

  Wraps a pipe whose write end is registered via
  C{signal.set_wakeup_fd} (when available), so that the arrival of a
  signal can be noticed by reading/polling the exposed read end.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported; this stub mimics set_wakeup_fd's return value
    # (-1 meaning "no previous wakeup fd was set")
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    # Supported; delegate to the real signal.set_wakeup_fd
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    Creates the pipe, registers its write end as the wakeup fd and
    remembers the previously registered fd for later restoration.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions exposed directly from the read end
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # hasattr guards against __init__ having failed before _previous
    # was assigned; the None check makes Reset idempotent
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    Writes a single NUL byte, marking the read end readable.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    Restores the previous wakeup fd so a dead object does not keep
    receiving signal notifications.

    """
    self.Reset()

class SignalHandler(object):
  """Generic signal handler class.

  Installs a handler for one or more signals and restores the previous
  handlers when L{Reset} is called or the object is deconstructed. The
  caller can either supply a handler function or poll the L{called}
  attribute to detect whether any of the signals arrived.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: object with a Notify method, poked on each signal

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for sig in self.signum:
        # Install our handler and remember what it replaced
        old_handler = signal.signal(sig, self._HandleSignal)
        try:
          self._previous[sig] = old_handler
        except:
          # Storing failed, put the previous handler back immediately
          signal.signal(sig, old_handler)
          raise
    except:
      # Undo all handlers installed so far
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for sig in list(self._previous.keys()):
      signal.signal(sig, self._previous[sig])
      # Only forget a handler once it was restored successfully
      del self._previous[sig]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)

class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes the field set.

    @param items: field definitions; each one is anchored with ^...$
        and compiled into a regular expression object

    """
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Plain loop instead of itertools.ifilter: behaviorally identical
    # (returns the first successful match), but ifilter only exists in
    # Python 2 and this form also works on Python 3
    for val in self.items:
      m = val.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]

class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  The clock starts on the first call to L{Remaining}; later calls
  report how much of the original timeout is still left.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._allow_negative = allow_negative
    self._timeout = timeout
    self._time_fn = _time_fn
    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    @return: None for an unlimited timeout, otherwise the time left
        (clamped to 0.0 unless negative values were allowed)

    """
    if self._timeout is None:
      return None

    # The first call starts the clock
    if self._start_time is None:
      self._start_time = self._time_fn()

    deadline = self._start_time + self._timeout
    left = deadline - self._time_fn()

    if self._allow_negative:
      return left

    # Clamp to zero when negative values are not wanted
    return max(0.0, left)
