Statistics
| Branch: | Tag: | Revision:

root / lib / utils.py @ c74cda62

History | View | Annotate | Download (110 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
_locksheld = []
67
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
68

    
69
debug_locks = False
70

    
71
#: when set to True, L{RunCmd} is disabled
72
no_fork = False
73

    
74
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
75

    
76
HEX_CHAR_RE = r"[a-zA-Z0-9]"
77
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
78
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
79
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
80
                             HEX_CHAR_RE, HEX_CHAR_RE),
81
                            re.S | re.I)
82

    
83
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
84

    
85
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
86
                     '[a-f0-9]{4}-[a-f0-9]{12}$')
87

    
88
# Certificate verification results
89
(CERT_WARNING,
90
 CERT_ERROR) = range(1, 3)
91

    
92
# Flags for mlockall() (from bits/mman.h)
93
_MCL_CURRENT = 1
94
_MCL_FUTURE = 2
95

    
96
#: MAC checker regexp
97
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
98

    
99
(_TIMEOUT_NONE,
100
 _TIMEOUT_TERM,
101
 _TIMEOUT_KILL) = range(3)
102

    
103

    
104
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason, or None
      if the program completed successfully

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
               timeout):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    # Always initialize this slot; previously it was only assigned on
    # failure, so reading C{fail_reason} on a successful result raised
    # AttributeError (no instance dict due to C{__slots__})
    self.fail_reason = None

    fail_msgs = []
    if self.signal is not None:
      fail_msgs.append("terminated by signal %s" % self.signal)
    elif self.exit_code is not None:
      fail_msgs.append("exited with exit code %s" % self.exit_code)
    else:
      fail_msgs.append("unable to determine termination reason")

    if timeout_action == _TIMEOUT_TERM:
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
    elif timeout_action == _TIMEOUT_KILL:
      fail_msgs.append(("force termination after timeout of %.2f seconds"
                        " and linger for another %.2f seconds") %
                       (timeout, constants.CHILD_LINGER_TIMEOUT))

    if fail_msgs and self.failed:
      self.fail_reason = CommaJoin(fail_msgs)

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
165

    
166

    
167
def _BuildCmdEnvironment(env, reset):
168
  """Builds the environment for an external program.
169

170
  """
171
  if reset:
172
    cmd_env = {}
173
  else:
174
    cmd_env = os.environ.copy()
175
    cmd_env["LC_ALL"] = "C"
176

    
177
  if env is not None:
178
    cmd_env.update(env)
179

    
180
  return cmd_env
181

    
182

    
183
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False, timeout=None):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether we pipe stdin, stdout and stderr
                      (default behaviour) or run the command interactive
  @type timeout: int
  @param timeout: If not None, timeout in seconds until child process gets
                  killed
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  # Capturing output to a file and interactive mode are mutually exclusive
  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")

  # A plain string is run via the shell, a list is executed directly
  if isinstance(cmd, basestring):
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      # Collect stdout/stderr via pipes, honouring the timeout
      out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
                                                     interactive, timeout)
    else:
      # Timeouts are not supported when redirecting output to a file
      timeout_action = _TIMEOUT_NONE
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  # A negative status encodes the number of the terminating signal
  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
259

    
260

    
261
def SetupDaemonEnv(cwd="/", umask=077):
262
  """Setup a daemon's environment.
263

264
  This should be called between the first and second fork, due to
265
  setsid usage.
266

267
  @param cwd: the directory to which to chdir
268
  @param umask: the umask to setup
269

270
  """
271
  os.chdir(cwd)
272
  os.umask(umask)
273
  os.setsid()
274

    
275

    
276
def SetupDaemonFDs(output_file, output_fd):
  """Setups up a daemon's file descriptors.

  Redirects stdin to /dev/null and stdout/stderr either to the given
  file, the given descriptor, or /dev/null if neither is provided.

  @param output_file: if not None, the file to which to redirect
      stdout/stderr
  @param output_fd: if not None, the file descriptor for stdout/stderr

  """
  # check that at most one is defined
  assert [output_file, output_fd].count(None) >= 1

  # Open /dev/null (read-only, only for stdin)
  devnull_fd = os.open(os.devnull, os.O_RDONLY)

  if output_fd is not None:
    # Caller supplied an already-open descriptor, use it as-is
    pass
  elif output_file is not None:
    # Open output file
    try:
      output_fd = os.open(output_file,
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
    except EnvironmentError, err:
      raise Exception("Opening output file failed: %s" % err)
  else:
    # No destination given, discard all output
    output_fd = os.open(os.devnull, os.O_WRONLY)

  # Redirect standard I/O
  os.dup2(devnull_fd, 0)
  os.dup2(output_fd, 1)
  os.dup2(output_fd, 2)
306

    
307

    
308
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # 'output' (a path) and 'output_fd' (an open descriptor) are exclusive
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  # A plain string command is run through the shell
  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          # Parent keeps only the read side; closing the write side lets
          # the read below see EOF once the daemon exec()s
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to
        # arrive) and read up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  # A non-empty message means the grandchild failed before exec()ing
  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
400

    
401

    
402
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Performs the second fork, sets up the daemon environment and file
  descriptors, reports the daemon's PID through L{pidpipe_write} and
  finally exec()s the program. This function never returns: on any
  failure the error text is written to L{errpipe_write} and the
  process exits with status 1.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    SetupDaemonEnv()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      fd_pidfile = WritePidFile(pidfile)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      # The daemon itself must inherit the pidfile descriptor
      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    SetupDaemonFDs(output, fd_output)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    # Without an explicit environment use execvp, otherwise execvpe
    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  # Only reached if the exec() above failed
  os._exit(1) # pylint: disable-msg=W0212
465

    
466

    
467
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is None:
    return

  # Guarantee that at least a placeholder is written
  msg = err or "<unknown error>"

  RetryOnSignal(os.write, fd, msg)
482

    
483

    
484
def _CheckIfAlive(child):
  """Raises L{RetryAgain} while the child has not terminated yet.

  @raises RetryAgain: If child is still alive

  """
  # poll() returns None for a process that has not exited yet
  still_running = child.poll() is None
  if still_running:
    raise RetryAgain()
492

    
493

    
494
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  """
  # Negative timeouts are clamped to zero; expiry of the retry budget
  # is not an error here, the caller decides what to do next
  deadline = max(0, timeout)
  try:
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), deadline, args=[child])
  except RetryTimeout:
    pass
502

    
503

    
504
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after which the program gets terminated
  @rtype: tuple
  @return: (out, err, status, timeout_action)

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  if interactive:
    # Let the child inherit our own stdin/stdout/stderr
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()

  # Set once the execution timeout has expired and SIGTERM was sent
  linger_timeout = None

  if timeout is None:
    poll_timeout = None
  else:
    # Callable returning the remaining time until the timeout
    poll_timeout = RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))

  timeout_action = _TIMEOUT_NONE

  if not interactive:
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    # Use non-blocking reads so a ready-notification without data
    # cannot hang us
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    # Loop until all output file descriptors have reached EOF
    while fdmap:
      if poll_timeout:
        current_timeout = poll_timeout()
        if current_timeout < 0:
          # Execution timeout expired: send SIGTERM once and start the
          # linger timeout, during which remaining output is collected
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
            linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
          lt = linger_timeout()
          if lt < 0:
            break

          pt = max(0, lt)
        else:
          pt = current_timeout
      else:
        pt = None

      pollresult = RetryOnSignal(poller.poll, pt)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        # The I/O loop never hit the timeout (e.g. interactive mode),
        # so SIGTERM was not sent yet
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status, timeout_action
633

    
634

    
635
def _RunCmdFile(cmd, env, via_shell, output, cwd):
636
  """Run a command and save its output to a file.
637

638
  @type  cmd: string or list
639
  @param cmd: Command to run
640
  @type env: dict
641
  @param env: The environment to use
642
  @type via_shell: bool
643
  @param via_shell: if we should run via the shell
644
  @type output: str
645
  @param output: the filename in which to save the output
646
  @type cwd: string
647
  @param cwd: the working directory for the program
648
  @rtype: int
649
  @return: the exit status
650

651
  """
652
  fh = open(output, "a")
653
  try:
654
    child = subprocess.Popen(cmd, shell=via_shell,
655
                             stderr=subprocess.STDOUT,
656
                             stdout=fh,
657
                             stdin=subprocess.PIPE,
658
                             close_fds=True, env=env,
659
                             cwd=cwd)
660

    
661
    child.stdin.close()
662
    status = child.wait()
663
  finally:
664
    fh.close()
665
  return status
666

    
667

    
668
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  # Read-modify-write the descriptor flags
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    new_flags = old_flags | fcntl.FD_CLOEXEC
  else:
    new_flags = old_flags & ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, new_flags)
685

    
686

    
687
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  # Read-modify-write the file status flags
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    new_flags = old_flags | os.O_NONBLOCK
  else:
    new_flags = old_flags & ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, new_flags)
704

    
705

    
706
def RetryOnSignal(fn, *args, **kwargs):
707
  """Calls a function again if it failed due to EINTR.
708

709
  """
710
  while True:
711
    try:
712
      return fn(*args, **kwargs)
713
    except EnvironmentError, err:
714
      if err.errno != errno.EINTR:
715
        raise
716
    except (socket.error, select.error), err:
717
      # In python 2.6 and above select.error is an IOError, so it's handled
718
      # above, in 2.5 and below it's not, and it's handled here.
719
      if not (err.args and err.args[0] == errno.EINTR):
720
        raise
721

    
722

    
723
def RunParts(dir_name, env=None, reset_env=False):
724
  """Run Scripts or programs in a directory
725

726
  @type dir_name: string
727
  @param dir_name: absolute path to a directory
728
  @type env: dict
729
  @param env: The environment to use
730
  @type reset_env: boolean
731
  @param reset_env: whether to reset or keep the default os environment
732
  @rtype: list of tuples
733
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
734

735
  """
736
  rr = []
737

    
738
  try:
739
    dir_contents = ListVisibleFiles(dir_name)
740
  except OSError, err:
741
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
742
    return rr
743

    
744
  for relname in sorted(dir_contents):
745
    fname = PathJoin(dir_name, relname)
746
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
747
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
748
      rr.append((relname, constants.RUNPARTS_SKIP, None))
749
    else:
750
      try:
751
        result = RunCmd([fname], env=env, reset_env=reset_env)
752
      except Exception, err: # pylint: disable-msg=W0703
753
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
754
      else:
755
        rr.append((relname, constants.RUNPARTS_RUN, result))
756

    
757
  return rr
758

    
759

    
760
def RemoveFile(filename):
761
  """Remove a file ignoring some errors.
762

763
  Remove a file, ignoring non-existing ones or directories. Other
764
  errors are passed.
765

766
  @type filename: str
767
  @param filename: the file to be removed
768

769
  """
770
  try:
771
    os.unlink(filename)
772
  except OSError, err:
773
    if err.errno not in (errno.ENOENT, errno.EISDIR):
774
      raise
775

    
776

    
777
def RemoveDir(dirname):
778
  """Remove an empty directory.
779

780
  Remove a directory, ignoring non-existing ones.
781
  Other errors are passed. This includes the case,
782
  where the directory is not empty, so it can't be removed.
783

784
  @type dirname: str
785
  @param dirname: the empty directory to be removed
786

787
  """
788
  try:
789
    os.rmdir(dirname)
790
  except OSError, err:
791
    if err.errno != errno.ENOENT:
792
      raise
793

    
794

    
795
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
796
  """Renames a file.
797

798
  @type old: string
799
  @param old: Original path
800
  @type new: string
801
  @param new: New path
802
  @type mkdir: bool
803
  @param mkdir: Whether to create target directory if it doesn't exist
804
  @type mkdir_mode: int
805
  @param mkdir_mode: Mode for newly created directories
806

807
  """
808
  try:
809
    return os.rename(old, new)
810
  except OSError, err:
811
    # In at least one use case of this function, the job queue, directory
812
    # creation is very rare. Checking for the directory before renaming is not
813
    # as efficient.
814
    if mkdir and err.errno == errno.ENOENT:
815
      # Create directory and try again
816
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
817

    
818
      return os.rename(old, new)
819

    
820
    raise
821

    
822

    
823
def Makedirs(path, mode=0750):
824
  """Super-mkdir; create a leaf directory and all intermediate ones.
825

826
  This is a wrapper around C{os.makedirs} adding error handling not implemented
827
  before Python 2.5.
828

829
  """
830
  try:
831
    os.makedirs(path, mode)
832
  except OSError, err:
833
    # Ignore EEXIST. This is only handled in os.makedirs as included in
834
    # Python 2.5 and above.
835
    if err.errno != errno.EEXIST or not os.path.exists(path):
836
      raise
837

    
838

    
839
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  has_internals = (hasattr(tempfile, "_once_lock") and
                   hasattr(tempfile, "_name_sequence"))
  if not has_internals:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Dropping the sequence forces tempfile to re-create (and re-seed) it
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
860

    
861

    
862
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)
  try:
    fp = compat.sha1_hash()
    # Hash the file in fixed-size chunks to bound memory usage
    while True:
      data = f.read(4096)
      if not data:
        break

      fp.update(data)
  finally:
    # Previously the handle was leaked until garbage collection
    f.close()

  return fp.hexdigest()
889

    
890

    
891
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  fingerprints = {}

  for fname in files:
    digest = _FingerprintFile(fname)
    # Missing files yield None and are left out of the result
    if digest:
      fingerprints[fname] = digest

  return fingerprints
909

    
910

    
911
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  The values are converted in place.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values
  @raise errors.TypeEnforcementError: if a value cannot be converted
      to the requested type

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Whitelisted values are accepted as-is, regardless of their type
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # None is acceptable only for "maybe string" values
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # A False boolean is coerced to the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        # Non-empty strings must spell out the false/true constants
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        # Any other truthy value is normalized to True
        target[key] = True
      else:
        # Falsy values (including the empty string) become False
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
978

    
979

    
980
def _GetProcStatusPath(pid):
981
  """Returns the path for a PID's proc status file.
982

983
  @type pid: int
984
  @param pid: Process ID
985
  @rtype: string
986

987
  """
988
  return "/proc/%d/status" % pid
989

    
990

    
991
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    # Probe existence of the /proc entry; ENOENT/ENOTDIR mean the process
    # is gone, EINVAL is treated as transient (see comment below) and retried.
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        # RetryAgain is defined elsewhere in this module; it signals Retry()
        # to try the function again within the time budget
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    # Retry(fn, delay_spec, timeout, ...) is defined elsewhere in this module;
    # (0.01, 1.5, 0.1) is (start delay, backoff factor, max delay), 0.5s total
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    # Re-raise the last inner exception seen by Retry
    err.RaiseInner()


def _ParseSigsetT(sigset):
1027
  """Parse a rendered sigset_t value.
1028

1029
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1030
  function.
1031

1032
  @type sigset: string
1033
  @param sigset: Rendered signal set from /proc/$pid/status
1034
  @rtype: set
1035
  @return: Set of all enabled signal numbers
1036

1037
  """
1038
  result = set()
1039

    
1040
  signum = 0
1041
  for ch in reversed(sigset):
1042
    chv = int(ch, 16)
1043

    
1044
    # The following could be done in a loop, but it's easier to read and
1045
    # understand in the unrolled form
1046
    if chv & 1:
1047
      result.add(signum + 1)
1048
    if chv & 2:
1049
      result.add(signum + 2)
1050
    if chv & 4:
1051
      result.add(signum + 3)
1052
    if chv & 8:
1053
      result.add(signum + 4)
1054

    
1055
    signum += 4
1056

    
1057
  return result
1058

    
1059

    
1060
def _GetProcStatusField(pstatus, field):
1061
  """Retrieves a field from the contents of a proc status file.
1062

1063
  @type pstatus: string
1064
  @param pstatus: Contents of /proc/$pid/status
1065
  @type field: string
1066
  @param field: Name of field whose value should be returned
1067
  @rtype: string
1068

1069
  """
1070
  for line in pstatus.splitlines():
1071
    parts = line.split(":", 1)
1072

    
1073
    if len(parts) < 2 or parts[0] != field:
1074
      continue
1075

    
1076
    return parts[1].strip()
1077

    
1078
  return None
1079

    
1080

    
1081
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @type status_path: string or None
  @param status_path: alternative path to the status file (mainly for tests);
      defaults to C{/proc/<pid>/status}
  @rtype: bool
  @return: whether the process has a handler installed for the signal

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
      return False
    raise

  # "SigCgt" is the set of caught (handled) signals rendered by the kernel
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    # ReadOneLineFile is defined elsewhere in this module
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    # A missing file is expected (daemon not running); anything else is
    # logged but still treated as "no PID"
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    # NOTE: err is unused here; the traceback is logged via exc_info
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock (LockFile is defined elsewhere in this module)
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      # (100 bytes is more than enough for any PID)
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  # Lock was acquired, hence no daemon holds the file
  return None


def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # An exact hit always wins
  if key in name_list:
    return key

  flags = 0
  if not case_sensitive:
    flags |= re.IGNORECASE
    key = key.upper()

  # Match the key itself or the key followed by a dot-separated suffix
  pattern = re.compile("^%s(\..*)?$" % re.escape(key), flags)

  prefix_matches = []
  full_matches = []
  for candidate in name_list:
    if pattern.match(candidate) is not None:
      prefix_matches.append(candidate)
      if not case_sensitive and key == candidate.upper():
        full_matches.append(candidate)

  # A unique case-insensitive full match beats prefix matches
  if len(full_matches) == 1:
    return full_matches[0]
  if len(prefix_matches) == 1:
    return prefix_matches[0]
  return None


def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @rtype: number or string
  @return: the name, unchanged, if valid
  @raise errors.OpPrereqError: if the name is not a valid service name
      or port number

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name
    # (_VALID_SERVICE_NAME_RE is a module-level pattern defined elsewhere)
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = (numport >= 0 and numport < (1 << 16))

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name


def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  # Sizes are requested in MiB without suffix so they parse as plain floats
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    # Best-effort: an LVM failure yields an empty dict rather than an error
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A bridge device exposes a "bridge" subdirectory in sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)


def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  # Up to eight optional digit/non-digit groups, then the rest of the string
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % ((_SORTER_BASE,) * 8)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")

  def _TryInt(val):
    """Convert an all-digit group to an integer, leave others alone."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    return int(val)

  def _SortKey(name):
    """Build the comparison key: the converted groups, then the name
    itself as a tie-breaker (matching the original decorate-sort order).

    """
    return ([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)

  return sorted(name_list, key=_SortKey)


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    # Conversion failed; fall back to the unconverted value
    return val


def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  # Whitelist of characters known not to be shell metacharacters
  safe_re = re.compile("^[-a-zA-Z0-9._+/:%@]+$")
  return safe_re.match(word) is not None


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument is not shell-safe

  """
  # Reject the whole command if any single argument is unsafe
  for arg in args:
    if not IsValidShellParam(arg):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % arg)

  return template % args


def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)
  @raise errors.ProgrammerError: for an unknown unit character

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  if units == 'h':
    # Automatic scaling: pick the largest suffix keeping the value >= 1,
    # and only in this mode is a suffix letter appended
    if value < 1024:
      units, suffix = 'm', 'M'
    elif value < (1024 * 1024):
      units, suffix = 'g', 'G'
    else:
      units, suffix = 't', 'T'
  else:
    suffix = ''

  if units == 'm':
    return "%d%s" % (round(value, 0), suffix)
  elif units == 'g':
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
  else:
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  @rtype: int
  @return: the value in MiB, rounded up to a multiple of four
  @raise errors.UnitParseError: for an unparseable string or unknown unit

  """
  match = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not match:
    raise errors.UnitParseError("Invalid format")

  (number, unit) = match.groups()
  value = float(number)

  # MiB multipliers for each recognised unit spelling
  multipliers = {
    'm': 1, 'mb': 1, 'mib': 1,
    'g': 1024, 'gb': 1024, 'gib': 1024,
    't': 1024 * 1024, 'tb': 1024 * 1024, 'tib': 1024 * 1024,
    }

  if unit:
    try:
      value *= multipliers[unit.lower()]
    except KeyError:
      raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up to a whole MiB...
  whole = int(value)
  if whole < value:
    whole += 1

  # ... and then to the next multiple of 4
  remainder = whole % 4
  if remainder:
    whole += 4 - remainder

  return whole


def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: for malformed range definitions

  """
  if not cpu_mask:
    return []
  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      # For a single ID (no hyphen), boundaries[-1] is the same element as
      # boundaries[0], so lower == higher and the range below has one item
      higher = int(boundaries[-1])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list


def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        # Key already present, nothing to do
        break
      nl = line.endswith('\n')
    else:
      # Key not found: append it, adding a newline first if the file's
      # last line was not terminated
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    # NOTE(review): this also closes a handle passed in by the caller,
    # not only handles opened here — confirm callers expect that
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  The file is rewritten through a temporary file in the same directory,
  which atomically replaces the original on success.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  # Temp file must live in the same directory for os.rename to be atomic
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    # On any failure, don't leave the temp file behind
    # (RemoveFile is defined elsewhere in this module)
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  Any pre-existing entries for the same IP address are dropped and
  replaced by a single new line.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique
  # (UniqueSequence is defined elsewhere in this module; [1:] drops the
  # hostname itself, leaving only the deduplicated aliases)
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      # Copy all lines except existing entries for this IP
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          continue
        out.write(line)

      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  # WriteFile (defined elsewhere) performs the atomic temp-file replacement
  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)


def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  Registers the host under its full name with the short name as alias.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])


def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            # Strip every occurrence of the hostname; drop the whole line
            # if no other names remain for this IP
            while hostname in names:
              names.remove(hostname)
            if names:
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  # WriteFile (defined elsewhere) performs the atomic temp-file replacement
  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  short_name = hostname.split(".")[0]
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, short_name)


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  """
  return datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")


def CreateBackup(file_name):
  """Creates a backup of a file.

  The backup is created in the same directory, named after the original
  file plus a timestamped, mkstemp-generated suffix.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    # mkstemp both generates a unique name and opens it safely
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  # Strings made only of known-safe characters need no quoting
  # (same pattern as the module-level _re_shell_unquoted)
  if re.match('^[-.,=:/_+@A-Za-z0-9]+$', value):
    return value
  # POSIX single quoting: embedded quotes become '\'' (close, escaped
  # quote, reopen)
  return "'%s'" % value.replace("'", "'\\''")


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join(ShellQuote(arg) for arg in args)


class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # Two spaces per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: file-like object the script lines are written to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent = self._indent + 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    assert self._indent > 0
    self._indent = self._indent - 1

  def Write(self, txt, *args):
    """Write line to output file.

    If positional arguments are given, C{txt} is used as a format string.

    """
    assert self._indent >= 0

    line = self._indent * self.INDENT_STR
    if args:
      line += txt % args
    else:
      line += txt

    self._fh.write(line)
    self._fh.write("\n")


def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  # IsNormAbsPath is defined elsewhere in this module
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  return files


def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @type user: str or int/long
  @param user: user name or uid
  @param default: value returned when the user cannot be found
  @rtype: str or type of default
  @return: the user's home directory, or the default

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # User not present in the password database
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  # The kernel's UUID is 36 chars plus newline; 128 is a safe upper bound
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  # os.urandom provides OS-level randomness; the result has 2*numbytes
  # hex characters
  return os.urandom(numbytes).encode('hex')


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: when a directory cannot be created, its
      permissions cannot be changed, or the path exists but is not a
      directory

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      # Already existing is fine; anything else is fatal
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    # chmod explicitly since mkdir's mode is filtered by the umask and
    # the directory may have pre-existed with different permissions
    try:
      os.chmod(dir_name, dir_mode)
    except EnvironmentError, err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type file_name: str
  @param file_name: the path of the file to read
  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  fh = open(file_name, "r")
  try:
    data = fh.read(size)
  finally:
    fh.close()
  return data


def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content
  @type dry_run: boolean
  @param dry_run: if True, everything is done except the final rename
  @type backup: boolean
  @param backup: if True, an existing target file is backed up first

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of fn/data must be provided
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temp file in the same directory, then atomically rename
  # over the target
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # Ensure contents hit the disk before the rename makes them visible
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      # RemoveFile is defined elsewhere in this module
      RemoveFile(new_name)

  return result


def GetFileID(path=None, fd=None):
  """Computes a file's identity as a (device, inode, mtime) tuple.

  Exactly one of the file path or the file descriptor must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)

  """
  # exactly one of the two arguments must be provided
  if (path is None) == (fd is None):
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  if fd is not None:
    status = os.fstat(fd)
  else:
    status = os.stat(path)

  return (status.st_dev, status.st_ino, status.st_mtime)
1996

    
1997

    
1998
def VerifyFileID(fi_disk, fi_ours):
  """Checks whether two file identities match.

  The device/inode pair must be identical; the on-disk mtime may be
  older than (or equal to) the one we recorded, but not newer.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (our_dev, our_ino, our_mtime) = fi_ours

  if disk_dev != our_dev or disk_ino != our_ino:
    return False

  return disk_mtime <= our_mtime
2015

    
2016

    
2017
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  Holding a lock on the target for the duration of the write ensures
  that cooperating writers serialise their access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}

  """
  lock_fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  try:
    LockFile(lock_fd)
    if file_id is not None:
      # refuse to clobber changes made since our last recorded state
      current_id = GetFileID(fd=lock_fd)
      if not VerifyFileID(current_id, file_id):
        raise errors.LockError("Cannot overwrite file %s, it has been modified"
                               " since last written" % file_name)
    return WriteFile(file_name, **kwargs)
  finally:
    os.close(lock_fd)
2040

    
2041

    
2042
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  """
  all_lines = ReadFile(file_name).splitlines()
  nonempty = [line for line in all_lines if line]

  if not all_lines or not nonempty:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(nonempty) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)

  return nonempty[0]
2058

    
2059

    
2060
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. As
  soon as an index is smaller than the element stored there, that
  index is unused and gets returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for position, value in enumerate(seq):
    assert value >= base, "Passed element is higher than base offset"
    candidate = position + base
    if value > candidate:
      # this index is not occupied by any element
      return candidate
  return None
2086

    
2087

    
2088
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # besides the requested event(s), also recognise exceptional
  # conditions (error/hangup/invalid fd) in the returned bitmask
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    # a signal interruption (EINTR) is treated as "no event";
    # any other poll error is propagated to the caller
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    # return the bitmask of conditions that actually occurred
    return io_events[0][1]
  else:
    return None
2125

    
2126

    
2127
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  Bundles the poll and wait callbacks that let WaitForFdCondition keep
  waiting until its timeout has really expired, even when the
  underlying poll is interrupted.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is not None:
      return occurred
    # nothing happened yet, ask Retry to call us again
    raise RetryAgain()

  def UpdateTimeout(self, timeout):
    self.timeout = timeout
2148

    
2149

    
2150
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is None:
    # no timeout at all: keep polling (across interruptions) until an
    # event actually occurs
    occurred = None
    while occurred is None:
      occurred = SingleWaitForFdCondition(fdobj, event, None)
    return occurred

  # with a timeout, delegate the bookkeeping of remaining time to the
  # generic Retry machinery via the helper object
  helper = FdConditionWaiterHelper(timeout)
  try:
    return Retry(helper.Poll, RETRY_REMAINING_TIME, timeout,
                 args=(fdobj, event), wait_fn=helper.UpdateTimeout)
  except RetryTimeout:
    return None
2177

    
2178

    
2179
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  result = []
  for item in seq:
    if item not in seen:
      seen.add(item)
      result.append(item)
  return result
2192

    
2193

    
2194
def NormalizeAndValidateMac(mac):
  """Validates a MAC address and normalizes it to lower case.

  Only the colon-separated format is accepted; the formal check is
  performed via the module's MAC regular expression.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  if _MAC_CHECK.match(mac):
    return mac.lower()

  raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                             mac, errors.ECODE_INVAL)
2213

    
2214

    
2215
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) for a negative duration,
      (True, None) after sleeping otherwise

  """
  if duration >= 0:
    time.sleep(duration)
    return True, None

  return False, "Invalid sleep duration"
2228

    
2229

    
2230
def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    if err.errno != errno.EBADF:
      # transient failure (e.g. EINTR): retry a bounded number of times
      if retries > 0:
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on
2248

    
2249

    
2250
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Fallback for the maximum number of available file descriptors,
  # refined via sysconf when possible.
  fallback_maxfd = 1024
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      sysconf_max = os.sysconf('SC_OPEN_MAX')
      if sysconf_max >= 0:
        fallback_maxfd = sysconf_max
    except OSError:
      pass

  hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if hard_limit == resource.RLIM_INFINITY:
    hard_limit = fallback_maxfd

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, hard_limit):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
2280

    
2281

    
2282
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @param _ctypes: the ctypes module (parameterized for unit testing)
  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    # best-effort: log and continue without the memory lock
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  # mlockall returns non-zero on failure
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
2314

    
2315

    
2316
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon, using the classic double-fork
  technique.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the write end of the error-reporting pipe (returned only in
      the daemonized grandchild process; both parents call os._exit)

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      # the daemon keeps only the write end of the pipe
      _CloseFDNoErr(rpipe)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    # the original process keeps only the read end of the pipe
    _CloseFDNoErr(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode) # Exit parent of the first child.

  # only the daemonized grandchild reaches this point
  SetupDaemonFDs(logfile, None)
  return wpipe
2363

    
2364

    
2365
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile_base = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile_base)
2376

    
2377

    
2378
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the daemon is running (or was started), False on
      failure

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2389

    
2390

    
2391
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the stop command succeeded, False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2402

    
2403

    
2404
def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: string
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile
2429

    
2430

    
2431
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(DaemonPidFileName(name))
  except: # pylint: disable-msg=W0702
    pass
2446

    
2447

    
2448
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        # non-blocking reap, in case the child already exited
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  # first attempt: the requested signal
  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    """Raises RetryAgain while the process is still alive/unreaped."""
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      # the child has been reaped
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
2509

    
2510

    
2511
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for directory in search_path:
    # FIXME: investigate switch to PathJoin
    candidate = os.path.sep.join([directory, name])
    # accept only entries passing the user test whose basename still
    # resolves to the requested name
    if test(candidate) and os.path.basename(candidate) == name:
      return candidate

  return None
2543

    
2544

    
2545
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname)
  if vgsize is None:
    return "volume group '%s' missing" % vgname

  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))

  return None
2568

    
2569

    
2570
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_micro = int(value * 1000000)
  (seconds, microseconds) = divmod(total_micro, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
2586

    
2587

    
2588
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  # note: the multiplication by 0.000001 is kept as-is so that float
  # rounding matches previous behaviour exactly
  return float(secs) + (float(usecs) * 0.000001)
2604

    
2605

    
2606
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    # kept open for the lifetime of the handler as an error channel
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass
2638

    
2639

    
2640
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  # fmt: format string for file/stderr; sft: format string for syslog
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
2731

    
2732

    
2733
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  """
  if not os.path.isabs(path):
    return False
  return os.path.normpath(path) == path
2740

    
2741

    
2742
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # the first component must already be absolute and normalized
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  joined = os.path.join(*args)
  # the joined result must be normalized too (no backtracking left)
  if not IsNormAbsPath(joined):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # and it must not have escaped the original root
  common = os.path.commonprefix([root, joined])
  if common != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (common, root))
  return joined
2770

    
2771

    
2772
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: only the trailing 4KB of the file are read and parsed, so if
      the lines are very long, fewer than the requested number of
      lines may be returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  stream = open(fname, "r")
  try:
    # position at (at most) 4 KB before the end of the file
    stream.seek(0, 2)
    offset = max(0, stream.tell() - 4096)
    stream.seek(offset, 0)
    tail_data = stream.read()
  finally:
    stream.close()

  return tail_data.splitlines()[-lines:]
2796

    
2797

    
2798
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp with the local timezone.

  @type secs: number
  @param secs: Unix timestamp (seconds since the epoch)
  @rtype: str
  @return: the timestamp rendered as "YYYY-MM-DD HH:MM:SS TZ"

  """
  # time.localtime must be used here: with time.gmtime the %Z
  # specifier would still expand to the *local* timezone name while
  # the time fields were UTC, producing a mislabeled timestamp
  return time.strftime("%F %T %Z", time.localtime(secs))
2803

    
2804

    
2805
def _ParseAsn1Generalizedtime(value):
2806
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2807

2808
  @type value: string
2809
  @param value: ASN1 GENERALIZEDTIME timestamp
2810

2811
  """
2812
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2813
  if m:
2814
    # We have an offset
2815
    asn1time = m.group(1)
2816
    hours = int(m.group(2))
2817
    minutes = int(m.group(3))
2818
    utcoffset = (60 * hours) + minutes
2819
  else:
2820
    if not value.endswith("Z"):
2821
      raise ValueError("Missing timezone")
2822
    asn1time = value[:-1]
2823
    utcoffset = 0
2824

    
2825
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2826

    
2827
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2828

    
2829
  return calendar.timegm(tt.utctimetuple())
2830

    
2831

    
2832
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @return: tuple of (not_before, not_after) Unix timestamps; either
      element is None if the certificate (or the installed pyOpenSSL,
      which grew these accessors only in 0.7) doesn't provide it

  """
  def _ParseOptional(accessor_name):
    """Parse the timestamp from the named accessor, or return None."""
    # older pyOpenSSL versions lack the accessor entirely
    accessor = getattr(cert, accessor_name, None)
    if accessor is None:
      return None
    asn1_value = accessor()
    if asn1_value is None:
      return None
    return _ParseAsn1Generalizedtime(asn1_value)

  return (_ParseOptional("get_notBefore"), _ParseOptional("get_notAfter"))
2866

    
2867

    
2868
def _VerifyCertificateInner(expired, not_before, not_after, now,
2869
                            warn_days, error_days):
2870
  """Verifies certificate validity.
2871

2872
  @type expired: bool
2873
  @param expired: Whether pyOpenSSL considers the certificate as expired
2874
  @type not_before: number or None
2875
  @param not_before: Unix timestamp before which certificate is not valid
2876
  @type not_after: number or None
2877
  @param not_after: Unix timestamp after which certificate is invalid
2878
  @type now: number
2879
  @param now: Current time as Unix timestamp
2880
  @type warn_days: number or None
2881
  @param warn_days: How many days before expiration a warning should be reported
2882
  @type error_days: number or None
2883
  @param error_days: How many days before expiration an error should be reported
2884

2885
  """
2886
  if expired:
2887
    msg = "Certificate is expired"
2888

    
2889
    if not_before is not None and not_after is not None:
2890
      msg += (" (valid from %s to %s)" %
2891
              (FormatTimestampWithTZ(not_before),
2892
               FormatTimestampWithTZ(not_after)))
2893
    elif not_before is not None:
2894
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2895
    elif not_after is not None:
2896
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2897

    
2898
    return (CERT_ERROR, msg)
2899

    
2900
  elif not_before is not None and not_before > now:
2901
    return (CERT_WARNING,
2902
            "Certificate not yet valid (valid from %s)" %
2903
            FormatTimestampWithTZ(not_before))
2904

    
2905
  elif not_after is not None:
2906
    remaining_days = int((not_after - now) / (24 * 3600))
2907

    
2908
    msg = "Certificate expires in about %d days" % remaining_days
2909

    
2910
    if error_days is not None and remaining_days <= error_days:
2911
      return (CERT_ERROR, msg)
2912

    
2913
    if warn_days is not None and remaining_days <= warn_days:
2914
      return (CERT_WARNING, msg)
2915

    
2916
  return (None, None)
2917

    
2918

    
2919
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @return: (status, message) tuple as returned by L{_VerifyCertificateInner}

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  now = time.time()

  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 now, warn_days, error_days)
2935

    
2936

    
2937
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format
  @raise errors.GenericError: If the salt contains characters that would
    corrupt the signature header

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  signature = Sha1Hmac(key, cert_pem, salt=salt)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt, signature, cert_pem))
2962

    
2963

    
2964
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format with a signature header
  @rtype: tuple; (string, string)
  @return: Salt and signature found in the header
  @raise errors.GenericError: If no signature header precedes the PEM data

  """
  # Only inspect the header lines before the first "-----BEGIN ..." marker
  header = itertools.takewhile(lambda line: not line.startswith("---"),
                               cert_pem.splitlines())

  for line in header:
    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
2978

    
2979

    
2980
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: If the signature header is missing or the
    digest doesn't match

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load and re-dump the certificate; this normalizes the PEM data back to
  # the exact form that was produced (and signed) by L{SignX509Certificate}
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
3003

    
3004

    
3005
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to compute the digest of
  @type salt: string or None
  @param salt: If given, prepended to the text before hashing
  @rtype: string
  @return: Hexadecimal digest

  """
  if salt:
    data = salt + text
  else:
    data = text

  return hmac.new(key, data, compat.sha1).hexdigest()
3021

    
3022

    
3023
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text whose digest is being verified
  @type digest: string
  @param digest: Expected digest
  @type salt: string or None
  @param salt: Salt used when the digest was computed
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # Hex digests may differ in case, hence the case-insensitive comparison.
  # NOTE(review): this is not a constant-time comparison; confirm whether
  # timing attacks are a concern for the callers before changing.
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
3038

    
3039

    
3040
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if not isinstance(text, str):
    # unicode input; if str already, we handle it below (equivalent to the
    # old "isinstance(text, unicode)" check for the documented input types,
    # but doesn't reference the py2-only "unicode" name)
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      # FIX: this used to append r"\'r" (backslash, quote, "r") instead of
      # the intended two-character escape for a carriage return
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
3075

    
3076

    
3077
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be an element of the
  elements. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \\, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list of strings
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      # FIX: guard against an empty "slist"; a trailing element ending in
      # an odd number of backslashes used to raise IndexError here
      if num_b % 2 == 1 and slist:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
3117

    
3118

    
3119
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join(str(val) for val in names)
3127

    
3128

    
3129
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list) or None

  """
  # A direct hit takes precedence over pattern matches
  try:
    return (data[name], [])
  except KeyError:
    pass

  for (key, value) in data.items():
    # Anything providing a "match" attribute is treated as a compiled
    # regular expression object
    if hasattr(key, "match"):
      m = key.match(name)
      if m:
        return (value, list(m.groups()))

  return None
3154

    
3155

    
3156
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes, rounded to the nearest whole number

  """
  mebibytes = value / (1024.0 * 1024.0)
  return int(round(mebibytes, 0))
3166

    
3167

    
3168
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  total = 0

  # Sum the sizes of all entries; os.lstat is used, so symlinks are counted
  # by the size of the link itself and not followed
  for (curpath, _, files) in os.walk(path):
    for filename in files:
      total += os.lstat(PathJoin(curpath, filename)).st_size

  return BytesToMebibyte(total)
3185

    
3186

    
3187
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  result = []
  for line in ReadFile(filename).splitlines():
    # Any fields after the options (dump/pass numbers) are ignored
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    result.append((device, mountpoint, fstype, options))

  return result
3205

    
3206

    
3207
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: int
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  # f_frsize is the fundamental block size; f_bavail counts only the blocks
  # available to unprivileged users
  free_mb = BytesToMebibyte(st.f_bavail * st.f_frsize)
  total_mb = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (total_mb, free_mb)
3221

    
3222

    
3223
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: If the child exits with an unexpected status
    code or is killed by a signal

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    # The child must never return into the caller's code, hence os._exit
    # (which skips cleanup handlers) instead of sys.exit
    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  # Anything other than a clean exit with code 0 or 1 (including the
  # error marker 33 set above) is reported as a failure
  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3268

    
3269

    
3270
def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @rtype: bool
  @return: Whether process was found

  """
  try:
    fn(*args, **kwargs)
  except EnvironmentError:
    err = sys.exc_info()[1]
    # A missing process is not an error for our callers
    if err.errno == errno.ESRCH:
      return False
    raise

  return True
3288

    
3289

    
3290
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  @type fn: callable
  @param fn: Function to call; C{args} and C{kwargs} are passed through
  @return: C{fn}'s return value, or None if the call was interrupted by a
    signal (EINTR)

  """
  try:
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
      return None
    else:
      raise
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
      return None
    else:
      raise
3308

    
3309

    
3310
def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock
  @raise errors.LockError: If the file is already locked elsewhere

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError:
    err = sys.exc_info()[1]
    # LOCK_NB makes a contended flock fail immediately with EAGAIN
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise
3323

    
3324

    
3325
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if not isinstance(val, (int, float)):
    # Also covers None and any other invalid input
    return "N/A"
  # The %F and %T format codes work on Linux but are not guaranteed to be
  # available on all platforms
  return time.strftime("%F %T", time.localtime(val))
3338

    
3339

    
3340
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  remaining = round(secs, 0)

  pieces = []
  if remaining > 0:
    # Negative values would be a bit tricky, so they are emitted below as a
    # single "<n>s" element
    for (suffix, length) in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (count, remaining) = divmod(remaining, length)
      # Emit a zero component only once a larger unit has been emitted
      if count or pieces:
        pieces.append("%d%s" % (count, suffix))

  pieces.append("%ds" % remaining)

  return " ".join(pieces)
3363

    
3364

    
3365
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time
  @rtype: int or None
  @return: Timestamp until which the watcher is paused, or None if there is
    no (valid, unexpired) pause in effect; invalid or outdated files are
    removed as a side effect

  """
  if now is None:
    now = time.time()

  # A missing file simply means no pause is in effect
  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      # An expired (but not yet removable) pause is reported as no pause
      elif now > value:
        value = None

  return value
3406

    
3407

    
3408
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raises a wrapped exception, or this timeout as a new instance.

    """
    args = self.args
    if args and isinstance(args[0], Exception):
      # The retried function handed us an exception; propagate it directly
      raise args[0]
    raise RetryTimeout(*args)
3421

    
3422

    
3423
class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout() method can be used to reraise it.

  """
3431

    
3432

    
3433
class _RetryDelayCalculator(object):
3434
  """Calculator for increasing delays.
3435

3436
  """
3437
  __slots__ = [
3438
    "_factor",
3439
    "_limit",
3440
    "_next",
3441
    "_start",
3442
    ]
3443

    
3444
  def __init__(self, start, factor, limit):
3445
    """Initializes this class.
3446

3447
    @type start: float
3448
    @param start: Initial delay
3449
    @type factor: float
3450
    @param factor: Factor for delay increase
3451
    @type limit: float or None
3452
    @param limit: Upper limit for delay or None for no limit
3453

3454
    """
3455
    assert start > 0.0
3456
    assert factor >= 1.0
3457
    assert limit is None or limit >= 0.0
3458

    
3459
    self._start = start
3460
    self._factor = factor
3461
    self._limit = limit
3462

    
3463
    self._next = start
3464

    
3465
  def __call__(self):
3466
    """Returns current delay and calculates the next one.
3467

3468
    """
3469
    current = self._next
3470

    
3471
    # Update for next run
3472
    if self._limit is None or self._next < self._limit:
3473
      self._next = min(self._limit, self._next * self._factor)
3474

    
3475
    return current
3476

    
3477

    
3478
#: Special delay to specify whole remaining timeout
3479
RETRY_REMAINING_TIME = object()
3480

    
3481

    
3482
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @param _time_fn: Time function (exposed for unittesting only)
  @return: Return value of function
  @raise RetryTimeout: When the timeout expires; carries the arguments of
    the last L{RetryAgain} raised by C{fn}

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  # Translate the four supported C{delay} forms into a single
  # "calc_delay" callable (or None for "use all remaining time")
  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      # Preserve the arguments so a later RetryTimeout can carry them
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      # RETRY_REMAINING_TIME: sleep through whatever time is left
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
3562

    
3563

    
3564
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to C{tempfile.mkstemp}.

  @rtype: string
  @return: Path of the newly-created, already-closed temporary file

  """
  (fd, tmpname) = tempfile.mkstemp(*args, **kwargs)
  # Only the name is needed by callers; close our descriptor, ignoring errors
  _CloseFDNoErr(fd)
  return tmpname
3571

    
3572

    
3573
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple; (string, string)
  @return: PEM-encoded private key and PEM-encoded certificate

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  # Valid starting now, for "validity" seconds
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  # Self-signed: issuer equals subject and the certificate is signed with
  # its own key
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
3601

    
3602

    
3603
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3604
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
3605
  """Legacy function to generate self-signed X509 certificate.
3606

3607
  @type filename: str
3608
  @param filename: path to write certificate to
3609
  @type common_name: string
3610
  @param common_name: commonName value
3611
  @type validity: int
3612
  @param validity: validity of certificate in number of days
3613

3614
  """
3615
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
3616
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3617
  # and node daemon certificates have the proper Subject/Issuer.
3618
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3619
                                                   validity * 24 * 60 * 60)
3620

    
3621
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3622

    
3623

    
3624
class FileLock(object):
  """Utility class for file locks.

  Wraps C{fcntl.flock} on a file object, optionally retrying with an
  increasing delay until a timeout expires.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    # Releases the lock (closing the file drops any flock held on it)
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # "fd" may be missing if __init__ failed before assigning it
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.
    @raise errors.LockError: If the lock could not be acquired before the
      timeout expired

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      # Retry with increasing delays of 0.1s * 1.2^n, capped at 1.0s
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    """Performs one flock attempt, raising L{RetryAgain} on contention.

    """
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      # EAGAIN with a timeout just means the lock is still contended;
      # let the Retry loop in L{_flock} try again
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
3755

    
3756

    
3757
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._pending = collections.deque()
    self._partial = ""

  def write(self, data):
    """Adds data, queueing any lines completed by it.

    """
    pieces = (self._partial + data).split("\n")
    # The last piece wasn't terminated by a newline (it may be empty)
    self._partial = pieces.pop()
    self._pending.extend(pieces)

  def flush(self):
    """Invokes the line function for every completed line.

    """
    while self._pending:
      line = self._pending.popleft()
      self._line_fn(line.rstrip("\r\n"))

  def close(self):
    """Flushes completed lines and emits any unterminated remainder.

    """
    self.flush()
    if self._partial:
      # Note: the remainder is emitted without stripping, as before
      self._line_fn(self._partial)
3796

    
3797

    
3798
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      # Reuse an existing handler dict (stacked decorators), otherwise
      # create and pass a fresh one
      handlers = kwargs.get('signal_handlers')
      if handlers is None:
        handlers = {}
        kwargs['signal_handlers'] = handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous signal handlers
        sighandler.Reset()
    return sig_function
  return wrap
3833

    
3834

    
3835
class SignalWakeupFd(object):
  """Wakeup file descriptor based on a pipe.

  When supported, the pipe's write end is registered through
  C{signal.set_wakeup_fd}, so signal delivery makes the read end readable;
  callers can poll on C{fileno()} and drain pending bytes with C{read()}.

  """
  # Class-level feature detection: pick the real implementation when
  # signal.set_wakeup_fd exists, otherwise fall back to a no-op.
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Remember the previously registered wakeup fd so Reset() can restore it
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # The hasattr guard protects __del__ in case __init__ failed before
    # self._previous was assigned
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    # A single NUL byte is enough to mark the read end readable
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: set
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: Optional object whose C{Notify} method is invoked on
        signal delivery (e.g. a wakeup file descriptor wrapper)

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # Maps signal numbers to the handlers that were active before ours was
    # installed; used by L{Reset} to restore the original state
    self._previous = {}
    try:
      # Use a loop variable distinct from the "signum" parameter to avoid
      # shadowing it
      for sig in self.signum:
        # Setup handler
        prev_handler = signal.signal(sig, self._HandleSignal)
        try:
          self._previous[sig] = prev_handler
        except:
          # Restore previous handler
          signal.signal(sig, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # Iterate over a snapshot: entries are deleted from self._previous as
    # they are restored, and mutating a dict while iterating over its items
    # raises RuntimeError on Python 3 (and is fragile in general)
    for signum, prev_handler in list(self._previous.items()):
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes the set with anchored regexes built from C{items}."""
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Explicit first-match loop; itertools.ifilter, used previously, only
    # exists on Python 2 and added nothing over a plain loop
    for pattern in self.items:
      m = pattern.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]


class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests

    """
    object.__init__(self)

    # A timeout of None means "no limit"; anything else must be >= 0
    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    # Lazily initialized on the first L{Remaining} call
    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    """
    if self._timeout is None:
      return None

    # The clock starts ticking on the first call
    if self._start_time is None:
      self._start_time = self._time_fn()

    deadline = self._start_time + self._timeout
    left = deadline - self._time_fn()

    if self._allow_negative:
      return left

    # Never report a negative remaining time
    return max(0.0, left)