Statistics
| Branch: | Tag: | Revision:

root / lib / utils.py @ 0260032c

History | View | Annotate | Download (102.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52

    
53
from cStringIO import StringIO
54

    
55
try:
56
  # pylint: disable-msg=F0401
57
  import ctypes
58
except ImportError:
59
  ctypes = None
60

    
61
from ganeti import errors
62
from ganeti import constants
63
from ganeti import compat
64

    
65

    
66
# NOTE(review): appears to track currently-held locks; not used in this
# chunk — confirm against the locking helpers elsewhere in this module
_locksheld = []

# Matches strings that are safe to leave unquoted on a shell command line
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

# When True, lock debugging is enabled (toggled externally)
debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

# Kernel-provided source of random UUIDs
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# NOTE: despite the name, this character class matches any alphanumeric
# character, not only hexadecimal digits
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
# Matches "<header>: <salt>/<signature>" lines in signed X509 certificates
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

# Service names: 1-128 characters from a restricted alphabet
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Standard 8-4-4-4-12 lowercase-hex UUID format
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                     '[a-f0-9]{4}-[a-f0-9]{12}$')

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2

#: MAC checker regexp (six colon-separated hex octets, case-insensitive)
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
100
class RunResult(object):
  """Outcome of running an external program.

  @type exit_code: int
  @ivar exit_code: the program's exit code, or None if it did not exit
      normally (i.e. was killed by a signal)
  @type signal: int or None
  @ivar signal: number of the signal that terminated the program, or None
      if it exited on its own
  @type stdout: str
  @ivar stdout: captured standard output
  @type stderr: str
  @ivar stderr: captured standard error
  @type failed: boolean
  @ivar failed: whether the program was killed by a signal or returned a
      non-zero exit code
  @ivar fail_reason: human-readable description of the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    # Success means: no signal and a zero exit code
    self.failed = not (signal_ is None and exit_code == 0)

    # A terminating signal takes precedence over the exit code when
    # describing why the command ended; both can be unknown
    if signal_ is not None:
      self.fail_reason = "terminated by signal %s" % signal_
    elif exit_code is not None:
      self.fail_reason = "exited with exit code %s" % exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
149

    
150

    
151
def _BuildCmdEnvironment(env, reset):
152
  """Builds the environment for an external program.
153

154
  """
155
  if reset:
156
    cmd_env = {}
157
  else:
158
    cmd_env = os.environ.copy()
159
    cmd_env["LC_ALL"] = "C"
160

    
161
  if env is not None:
162
    cmd_env.update(env)
163

    
164
  return cmd_env
165

    
166

    
167
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
168
           interactive=False):
169
  """Execute a (shell) command.
170

171
  The command should not read from its standard input, as it will be
172
  closed.
173

174
  @type cmd: string or list
175
  @param cmd: Command to run
176
  @type env: dict
177
  @param env: Additional environment variables
178
  @type output: str
179
  @param output: if desired, the output of the command can be
180
      saved in a file instead of the RunResult instance; this
181
      parameter denotes the file name (if not None)
182
  @type cwd: string
183
  @param cwd: if specified, will be used as the working
184
      directory for the command; the default will be /
185
  @type reset_env: boolean
186
  @param reset_env: whether to reset or keep the default os environment
187
  @type interactive: boolean
188
  @param interactive: weather we pipe stdin, stdout and stderr
189
                      (default behaviour) or run the command interactive
190
  @rtype: L{RunResult}
191
  @return: RunResult instance
192
  @raise errors.ProgrammerError: if we call this when forks are disabled
193

194
  """
195
  if no_fork:
196
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
197

    
198
  if output and interactive:
199
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
200
                                 " not be provided at the same time")
201

    
202
  if isinstance(cmd, basestring):
203
    strcmd = cmd
204
    shell = True
205
  else:
206
    cmd = [str(val) for val in cmd]
207
    strcmd = ShellQuoteArgs(cmd)
208
    shell = False
209

    
210
  if output:
211
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
212
  else:
213
    logging.debug("RunCmd %s", strcmd)
214

    
215
  cmd_env = _BuildCmdEnvironment(env, reset_env)
216

    
217
  try:
218
    if output is None:
219
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd, interactive)
220
    else:
221
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
222
      out = err = ""
223
  except OSError, err:
224
    if err.errno == errno.ENOENT:
225
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
226
                               (strcmd, err))
227
    else:
228
      raise
229

    
230
  if status >= 0:
231
    exitcode = status
232
    signal_ = None
233
  else:
234
    exitcode = None
235
    signal_ = -status
236

    
237
  return RunResult(exitcode, signal_, out, err, strcmd)
238

    
239

    
240
def SetupDaemonEnv(cwd="/", umask=077):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @type cwd: string
  @param cwd: the directory to which to chdir
  @type umask: int
  @param umask: the umask to setup

  """
  os.chdir(cwd)
  os.umask(umask)
  # Become a session leader, detaching from the controlling terminal;
  # this is why the call must happen between the two forks
  os.setsid()
253

    
254

    
255
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # NOTE(review): because of the leading "output and", this check only
  # fires when both 'output' and 'output_fd' are given; passing neither is
  # accepted (the child then falls back to /dev/null) — confirm intent
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  # String commands are run through the shell
  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to arrive) and read
        # up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  # An empty message means the error pipe was closed on a successful
  # exec (the child sets close-on-exec on its write end)
  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
347

    
348

    
349
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Runs in the first child of L{StartDaemon}: daemonizes, forks again,
  optionally creates and locks a PID file, redirects standard I/O and
  finally exec's the daemon. This function never returns; on any failure
  the error is reported through C{errpipe_write} and the process exits
  with status 1.

  @param errpipe_read: parent's end of the error pipe (closed here)
  @param errpipe_write: write end used to report startup errors
  @param pidpipe_read: parent's end of the PID pipe (closed here)
  @param pidpipe_write: write end used to send the daemon's PID back
  @param args: argument list for the daemon
  @param env: environment dict for the daemon, or None to inherit ours
  @param output: path of the output file, if any
  @param fd_output: pre-opened output file descriptor, if any
  @param pidfile: path of the PID file to create and lock, if any

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    SetupDaemonEnv()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      try:
        # TODO: Atomic replace with another locked file instead of writing into
        # it after creating
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

        # Lock the PID file (and fail if not possible to do so). Any code
        # wanting to send a signal to the daemon should try to lock the PID
        # file before reading it. If acquiring the lock succeeds, the daemon is
        # no longer running and the signal should not be sent.
        LockFile(fd_pidfile)

        os.write(fd_pidfile, "%d\n" % os.getpid())
      except Exception, err:
        raise Exception("Creating and locking PID file failed: %s" % err)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      # The lock must survive the exec, hence no close-on-exec
      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    # Open /dev/null
    fd_devnull = os.open(os.devnull, os.O_RDWR)

    # At most one of 'output' and 'fd_output' may be set (checked by caller)
    assert not output or (bool(output) ^ (fd_output is not None))

    if fd_output is not None:
      pass
    elif output:
      # Open output file
      try:
        # TODO: Implement flag to set append=yes/no
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
      except EnvironmentError, err:
        raise Exception("Opening output file failed: %s" % err)
    else:
      fd_output = fd_devnull

    # Redirect standard I/O
    os.dup2(fd_devnull, 0)
    os.dup2(fd_output, 1)
    os.dup2(fd_output, 2)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      buf = str(sys.exc_info()[1])

      RetryOnSignal(os.write, errpipe_write, buf)
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  os._exit(1) # pylint: disable-msg=W0212
447

    
448

    
449
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  # Interactive commands inherit our stdin/stdout/stderr instead of pipes
  if interactive:
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()
  if not interactive:
    # The command must not wait for input
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    # Maps a file descriptor to (accumulating buffer, pipe object)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    # Non-blocking reads let one poll event drain everything available
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    while fdmap:
      pollresult = RetryOnSignal(poller.poll)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status
517

    
518

    
519
def _RunCmdFile(cmd, env, via_shell, output, cwd):
520
  """Run a command and save its output to a file.
521

522
  @type  cmd: string or list
523
  @param cmd: Command to run
524
  @type env: dict
525
  @param env: The environment to use
526
  @type via_shell: bool
527
  @param via_shell: if we should run via the shell
528
  @type output: str
529
  @param output: the filename in which to save the output
530
  @type cwd: string
531
  @param cwd: the working directory for the program
532
  @rtype: int
533
  @return: the exit status
534

535
  """
536
  fh = open(output, "a")
537
  try:
538
    child = subprocess.Popen(cmd, shell=via_shell,
539
                             stderr=subprocess.STDOUT,
540
                             stdout=fh,
541
                             stdin=subprocess.PIPE,
542
                             close_fds=True, env=env,
543
                             cwd=cwd)
544

    
545
    child.stdin.close()
546
    status = child.wait()
547
  finally:
548
    fh.close()
549
  return status
550

    
551

    
552
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: file descriptor to modify
  @type enable: bool
  @param enable: whether the flag should be set or cleared

  """
  # Read-modify-write the descriptor flags
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    new_flags = old_flags | fcntl.FD_CLOEXEC
  else:
    new_flags = old_flags & ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, new_flags)
569

    
570

    
571
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: file descriptor to modify
  @type enable: bool
  @param enable: whether the flag should be set or cleared

  """
  # Read-modify-write the file status flags
  old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    new_flags = old_flags | os.O_NONBLOCK
  else:
    new_flags = old_flags & ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, new_flags)
588

    
589

    
590
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  @param fn: the callable to invoke (called with C{args}/C{kwargs})
  @return: whatever C{fn} returns
  @raise Exception: any error other than EINTR raised by C{fn}

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError, err:
      # Only interrupted system calls are retried
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error), err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
605

    
606

    
607
def RunParts(dir_name, env=None, reset_env=False):
608
  """Run Scripts or programs in a directory
609

610
  @type dir_name: string
611
  @param dir_name: absolute path to a directory
612
  @type env: dict
613
  @param env: The environment to use
614
  @type reset_env: boolean
615
  @param reset_env: whether to reset or keep the default os environment
616
  @rtype: list of tuples
617
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
618

619
  """
620
  rr = []
621

    
622
  try:
623
    dir_contents = ListVisibleFiles(dir_name)
624
  except OSError, err:
625
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
626
    return rr
627

    
628
  for relname in sorted(dir_contents):
629
    fname = PathJoin(dir_name, relname)
630
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
631
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
632
      rr.append((relname, constants.RUNPARTS_SKIP, None))
633
    else:
634
      try:
635
        result = RunCmd([fname], env=env, reset_env=reset_env)
636
      except Exception, err: # pylint: disable-msg=W0703
637
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
638
      else:
639
        rr.append((relname, constants.RUNPARTS_RUN, result))
640

    
641
  return rr
642

    
643

    
644
def RemoveFile(filename):
645
  """Remove a file ignoring some errors.
646

647
  Remove a file, ignoring non-existing ones or directories. Other
648
  errors are passed.
649

650
  @type filename: str
651
  @param filename: the file to be removed
652

653
  """
654
  try:
655
    os.unlink(filename)
656
  except OSError, err:
657
    if err.errno not in (errno.ENOENT, errno.EISDIR):
658
      raise
659

    
660

    
661
def RemoveDir(dirname):
662
  """Remove an empty directory.
663

664
  Remove a directory, ignoring non-existing ones.
665
  Other errors are passed. This includes the case,
666
  where the directory is not empty, so it can't be removed.
667

668
  @type dirname: str
669
  @param dirname: the empty directory to be removed
670

671
  """
672
  try:
673
    os.rmdir(dirname)
674
  except OSError, err:
675
    if err.errno != errno.ENOENT:
676
      raise
677

    
678

    
679
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
680
  """Renames a file.
681

682
  @type old: string
683
  @param old: Original path
684
  @type new: string
685
  @param new: New path
686
  @type mkdir: bool
687
  @param mkdir: Whether to create target directory if it doesn't exist
688
  @type mkdir_mode: int
689
  @param mkdir_mode: Mode for newly created directories
690

691
  """
692
  try:
693
    return os.rename(old, new)
694
  except OSError, err:
695
    # In at least one use case of this function, the job queue, directory
696
    # creation is very rare. Checking for the directory before renaming is not
697
    # as efficient.
698
    if mkdir and err.errno == errno.ENOENT:
699
      # Create directory and try again
700
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
701

    
702
      return os.rename(old, new)
703

    
704
    raise
705

    
706

    
707
def Makedirs(path, mode=0750):
708
  """Super-mkdir; create a leaf directory and all intermediate ones.
709

710
  This is a wrapper around C{os.makedirs} adding error handling not implemented
711
  before Python 2.5.
712

713
  """
714
  try:
715
    os.makedirs(path, mode)
716
  except OSError, err:
717
    # Ignore EEXIST. This is only handled in os.makedirs as included in
718
    # Python 2.5 and above.
719
    if err.errno != errno.EEXIST or not os.path.exists(path):
720
      raise
721

    
722

    
723
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if not (hasattr(tempfile, "_once_lock") and
          hasattr(tempfile, "_name_sequence")):
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Dropping the cached sequence forces tempfile to re-create (and thus
    # re-seed) its random name generator on next use
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
744

    
745

    
746
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file, or None if the file doesn't exist

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)
  try:
    fp = compat.sha1_hash()
    # Hash in fixed-size chunks to keep memory usage bounded
    while True:
      data = f.read(4096)
      if not data:
        break

      fp.update(data)
  finally:
    # Always close the handle, even if a read fails (the original
    # implementation leaked it)
    f.close()

  return fp.hexdigest()
773

    
774

    
775
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  fingerprints = {}

  for fname in files:
    digest = _FingerprintFile(fname)
    # Missing files yield None and are left out of the result
    if digest:
      fingerprints[fname] = digest

  return fingerprints
793

    
794

    
795
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  Values are coerced in place; invalid values raise
  L{errors.TypeEnforcementError}.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass type enforcement entirely
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # VTYPE_MAYBE_STRING additionally accepts None
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # A False boolean is coerced to the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      # Non-empty strings must match VALUE_FALSE/VALUE_TRUE; everything
      # else is coerced via its truth value
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
862

    
863

    
864
def _GetProcStatusPath(pid):
865
  """Returns the path for a PID's proc status file.
866

867
  @type pid: int
868
  @param pid: Process ID
869
  @rtype: string
870

871
  """
872
  return "/proc/%d/status" % pid
873

    
874

    
875
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    # Existence of /proc/<pid>/status is taken as proof of life
    try:
      os.stat(name)
      return True
    except EnvironmentError, err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      elif err.errno == errno.EINVAL:
        # Treated as transient: tell the Retry machinery to call us again
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
    # Re-raise the last error seen inside the retry loop
    err.RaiseInner()
908

    
909

    
910
def _ParseSigsetT(sigset):
911
  """Parse a rendered sigset_t value.
912

913
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
914
  function.
915

916
  @type sigset: string
917
  @param sigset: Rendered signal set from /proc/$pid/status
918
  @rtype: set
919
  @return: Set of all enabled signal numbers
920

921
  """
922
  result = set()
923

    
924
  signum = 0
925
  for ch in reversed(sigset):
926
    chv = int(ch, 16)
927

    
928
    # The following could be done in a loop, but it's easier to read and
929
    # understand in the unrolled form
930
    if chv & 1:
931
      result.add(signum + 1)
932
    if chv & 2:
933
      result.add(signum + 2)
934
    if chv & 4:
935
      result.add(signum + 3)
936
    if chv & 8:
937
      result.add(signum + 4)
938

    
939
    signum += 4
940

    
941
  return result
942

    
943

    
944
def _GetProcStatusField(pstatus, field):
945
  """Retrieves a field from the contents of a proc status file.
946

947
  @type pstatus: string
948
  @param pstatus: Contents of /proc/$pid/status
949
  @type field: string
950
  @param field: Name of field whose value should be returned
951
  @rtype: string
952

953
  """
954
  for line in pstatus.splitlines():
955
    parts = line.split(":", 1)
956

    
957
    if len(parts) < 2 or parts[0] != field:
958
      continue
959

    
960
    return parts[1].strip()
961

    
962
  return None
963

    
964

    
965
def IsProcessHandlingSignal(pid, signum, status_path=None):
966
  """Checks whether a process is handling a signal.
967

968
  @type pid: int
969
  @param pid: Process ID
970
  @type signum: int
971
  @param signum: Signal number
972
  @rtype: bool
973

974
  """
975
  if status_path is None:
976
    status_path = _GetProcStatusPath(pid)
977

    
978
  try:
979
    proc_status = ReadFile(status_path)
980
  except EnvironmentError, err:
981
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
982
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
983
      return False
984
    raise
985

    
986
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
987
  if sigcgt is None:
988
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
989

    
990
  # Now check whether signal is handled
991
  return signum in _ParseSigsetT(sigcgt)
992

    
993

    
994
def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    # A missing pidfile simply means no daemon; any other read error is
    # logged but still reported as "no PID"
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    # Garbage contents are not fatal either, just report no PID
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid
1018

    
1019

    
1020
def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  # Lock could be acquired, hence no daemon holds the file locked
  return None
1049

    
1050

    
1051
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # An exact match always wins
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    # Upper-cased key is used below to detect exact (full) matches
    key = key.upper()

  # Raw string: "\." in a plain literal relied on Python passing unknown
  # escape sequences through, which is deprecated behavior
  name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)

  names_filtered = []
  string_matches = []
  for name in name_list:
    if name_re.match(name) is not None:
      names_filtered.append(name)
      # Track case-insensitive full matches; one of those takes precedence
      # over an ambiguous prefix match
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None
1095

    
1096

    
1097
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric: must look like a valid service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port; must fit the 16-bit range (protocols other than TCP or
    # UDP might need adjustments here)
    valid = 0 <= numport < (1 << 16)

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name
1119

    
1120

    
1121
def ListVolumeGroups():
1122
  """List volume groups and their size
1123

1124
  @rtype: dict
1125
  @return:
1126
       Dictionary with keys volume name and values
1127
       the size of the volume
1128

1129
  """
1130
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1131
  result = RunCmd(command)
1132
  retval = {}
1133
  if result.failed:
1134
    return retval
1135

    
1136
  for line in result.stdout.splitlines():
1137
    try:
1138
      name, size = line.split()
1139
      size = int(float(size))
1140
    except (IndexError, ValueError), err:
1141
      logging.error("Invalid output from vgs (%s): %s", err, line)
1142
      continue
1143

    
1144
    retval[name] = size
1145

    
1146
  return retval
1147

    
1148

    
1149
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A bridge device exposes a "bridge" directory under sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
1159

    
1160

    
1161
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  # Raw strings: "\D"/"\d" in plain literals are invalid escape sequences
  # and only worked because Python passed them through unchanged
  _SORTER_BASE = r"(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % ((_SORTER_BASE, ) * 8)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile(r"^\D*$")

  def _TryInt(val):
    """Converts digit-only groups to int, leaves everything else alone."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    return int(val)

  # Build (key, name) pairs so names sort by their mixed string/number key
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
1196

    
1197

    
1198
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    # Conversion failed, hand back the input unchanged
    return val
1218

    
1219

    
1220
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  # Whitelist of characters known not to be shell metacharacters
  match = re.match("^[-a-zA-Z0-9._+/:%@]+$", word)
  return match is not None
1237

    
1238

    
1239
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  # Reject the whole command if any single argument is unsafe
  unsafe = [word for word in args if not IsValidShellParam(word)]
  if unsafe:
    raise errors.ProgrammerError("Shell argument '%s' contains"
                                 " invalid characters" % unsafe[0])
  return template % args
1259

    
1260

    
1261
def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  # Suffixes are only printed in automatic ('h') mode
  if units == 'm' or (units == 'h' and value < 1024):
    suffix = 'M' if units == 'h' else ''
    return "%d%s" % (round(value, 0), suffix)
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    suffix = 'G' if units == 'h' else ''
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
  else:
    suffix = 'T' if units == 'h' else ''
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1295

    
1296

    
1297
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB, rounded up to the next multiple of 4.

  @raise errors.UnitParseError: when the input cannot be parsed

  """
  # Raw string: "\s"/"\d" in a plain literal are invalid escape sequences
  # that only worked by being passed through unchanged
  m = re.match(r'^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    # No unit given: default to MiB
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
1340

    
1341

    
1342
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs

  """
  if not cpu_mask:
    return []

  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    if len(boundaries) > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      # For a single ID, boundaries[-1] == boundaries[0]
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list
1379

    
1380

    
1381
def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  The key is only appended if no existing line contains the same key
  (comparison ignores whitespace differences). Note that the passed file
  handle (or the one opened from the path) is always closed.

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      # Key was not found (loop finished without "break"); make sure the
      # last existing line is newline-terminated before appending
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()
1412

    
1413

    
1414
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  The file is rewritten through a temporary file in the same directory,
  which then atomically replaces the original (comparison against the key
  ignores whitespace differences).

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        # Atomically replace the original with the filtered copy
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    # Don't leave the temporary file behind on any failure
    RemoveFile(tmpname)
    raise
1445

    
1446

    
1447
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  Any pre-existing entry for the same IP address is replaced; the new
  entry is appended at the end of the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        # Skip any existing (non-comment) entry for the same IP address
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          continue
        out.write(line)

      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1483

    
1484

    
1485
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  Adds the host under both its full name and its short (first label) name.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [hostname.split(".")[0]])
1496

    
1497

    
1498
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        # Only entry lines (IP followed by names) are candidates for editing
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            # Drop every occurrence of the hostname; keep the line only if
            # other names remain for this IP address
            while hostname in names:
              names.remove(hostname)
            if names:
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
1532

    
1533

    
1534
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
  # Also remove the short (first label) form of the name
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname.split(".")[0])
1545

    
1546

    
1547
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat
  them as separators.

  @rtype: str

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1555

    
1556

    
1557
def CreateBackup(file_name):
  """Creates a backup of a file.

  The backup is created in the same directory as the original (so it stays
  on the same filesystem), under a unique timestamped name.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    # mkstemp guarantees a unique, race-free backup file name
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name
1588

    
1589

    
1590
def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    # Contains only characters that are safe without quoting
    return value
  # Wrap in single quotes, escaping any embedded single quote
  escaped = value.replace("'", "'\\''")
  return "'%s'" % escaped
1603

    
1604

    
1605
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join(ShellQuote(arg) for arg in args)
1615

    
1616

    
1617
class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # String prepended once per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: file-like object the script is written to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent += 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    assert self._indent > 0
    self._indent -= 1

  def Write(self, txt, *args):
    """Write line to output file.

    """
    assert self._indent >= 0

    # Optional printf-style arguments are applied before writing
    line = (txt % args) if args else txt

    self._fh.write(self._indent * self.INDENT_STR)
    self._fh.write(line)
    self._fh.write("\n")
1657

    
1658

    
1659
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolue and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)

  # Hidden entries are those whose name starts with a dot
  return [name for name in os.listdir(path) if not name.startswith(".")]
1674

    
1675

    
1676
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @param user: user name (string) or user id (integer)
  @param default: value to return when the user is not found
  @return: the user's home directory, or I{default} on lookup failure
  @raise errors.ProgrammerError: if I{user} is neither a string nor an
      integer

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # User not present in the password database
    return default
  return result.pw_dir
1695

    
1696

    
1697
def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  # The kernel generates a fresh UUID on every read of this file
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1706

    
1707

    
1708
def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  # str.encode('hex') relies on the Python 2-only "hex" codec; formatting
  # the individual bytes works identically on Python 2 and 3 and still
  # returns a plain str (bytearray yields integers on both versions)
  return "".join(["%02x" % byte for byte in bytearray(os.urandom(numbytes))])
1721

    
1722

    
1723
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for (dir_name, dir_mode) in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError as err:
      # An already-existing directory is acceptable; any other failure is not
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    try:
      # Apply the mode even if the directory already existed
      os.chmod(dir_name, dir_mode)
    except EnvironmentError as err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)
1744

    
1745

    
1746
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type file_name: str
  @param file_name: path of the file to read
  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    data = f.read(size)
  finally:
    # Always release the file handle, even if the read fails
    f.close()
  return data
1760

    
1761

    
1762
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of "fn" and "data" must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  # atime and mtime can only be set together
  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temporary file in the same directory so the final rename is
  # atomic (same filesystem)
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    # Ownership and permissions are set before the content is written
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # Flush content to disk before making the file visible
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      # The rename is the atomic "commit" of the new contents
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
1859

    
1860

    
1861
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  """
  # Keep only the non-empty lines
  full_lines = [line for line in ReadFile(file_name).splitlines() if line]
  if not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  if strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]
1877

    
1878

    
1879
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  # Walk the sequence in lockstep with the value each slot should hold;
  # the first element larger than its expected value marks a gap
  for expected, elem in zip(itertools.count(base), seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > expected:
      return expected
  return None
1905

    
1906

    
1907
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # Also accept error/hangup conditions so the caller is always woken up
  # when something went wrong with the descriptor
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    # An interrupted poll is reported like a timeout (no events)
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None
1944

    
1945

    
1946
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """
  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Polls once; raises L{RetryAgain} when no condition occurred."""
    occurred = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if occurred is None:
      raise RetryAgain()
    return occurred

  def UpdateTimeout(self, timeout):
    """Records the remaining timeout for the next poll."""
    self.timeout = timeout
1967

    
1968

    
1969
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is None:
    # Without a timeout, simply keep polling until some condition
    # (or error) finally shows up
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
    return result

  # With a timeout, let Retry re-invoke the poll with the remaining
  # time whenever a signal interrupts it early
  retrywaiter = FdConditionWaiterHelper(timeout)
  try:
    return Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
  except RetryTimeout:
    return None
1996

    
1997

    
1998
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  result = []
  for item in seq:
    # keep only the first occurrence of each element
    if item not in seen:
      seen.add(item)
      result.append(item)
  return result
2011

    
2012

    
2013
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  # Accept only the colon-separated format matched by the module regexp
  if _MAC_CHECK.match(mac):
    return mac.lower()
  raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                             mac, errors.ECODE_INVAL)
2032

    
2033

    
2034
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration, in seconds

  @rtype: tuple of (bool, str or None)
  @return: C{(False, error message)} for a negative duration, otherwise
      C{(True, None)} after having slept for the requested time

  """
  # Fixed defect: the previous docstring claimed a plain boolean return
  # value, while the function has always returned a (status, message)
  # tuple as expected by the opcode result handling.
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None
2047

    
2048

    
2049
def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    # EBADF means the descriptor is already closed (or never existed),
    # which is exactly the "nothing to do" case; any other error is
    # retried a bounded number of times via recursion
    if err.errno != errno.EBADF:
      if retries > 0:
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on
2067

    
2068

    
2069
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Fallback ceiling for the number of file descriptors, used when the
  # system cannot tell us (or reports an unbounded limit)
  default_max = 1024
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      sysconf_max = os.sysconf('SC_OPEN_MAX')
      if sysconf_max >= 0:
        default_max = sysconf_max
    except OSError:
      pass

  # Prefer the hard resource limit; fall back to the sysconf-derived
  # value if the limit is infinite
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if maxfd == resource.RLIM_INFINITY:
    maxfd = default_max

  # Close everything above the standard descriptors, honouring the
  # caller's keep-open list
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
2099

    
2100

    
2101
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @param _ctypes: the ctypes module (parameterized for unit testing;
      None when the import failed at module load time)
  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  # NOTE(review): hard-codes the glibc soname; presumably Linux-only —
  # non-glibc platforms would need a different library name
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  # mlockall returns non-zero on failure; failure is logged but not
  # raised, as memory locking is a best-effort hardening measure
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
2133

    
2134

    
2135
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # Classic double-fork: the first fork returns control to the caller's
  # shell, the second (after SetupDaemonEnv) ensures we can never
  # reacquire a controlling terminal.

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      pass # nothing special to do in the child
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    os._exit(0) # Exit parent of the first child.

  # Close the three standard descriptors and immediately reopen them;
  # since fd numbers are allocated lowest-first, the asserts verify the
  # expected 0/1 assignment
  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0
2174

    
2175

    
2176
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  # Pidfiles live under the Ganeti runtime directory, one per daemon
  pidfile_basename = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile_basename)
2187

    
2188

    
2189
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: bool
  @return: True if the daemon is (now) running, False on failure

  """
  # daemon-util handles both the liveness check and the actual start
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2200

    
2201

    
2202
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: bool
  @return: True if the stop succeeded, False otherwise

  """
  # Delegate the actual stopping to the daemon-util helper script
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2213

    
2214

    
2215
def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pidfilename = DaemonPidFileName(name)
  # Refuse to clobber the pidfile of a still-running instance
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % os.getpid())
2232

    
2233

    
2234
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(DaemonPidFileName(name))
  except: # pylint: disable-msg=W0702
    # best-effort cleanup: a leftover pidfile is harmless
    pass
2249

    
2250

    
2251
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    # Signal only if the process still exists; reap non-blockingly so
    # our own children don't linger as zombies
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    # Returning normally stops the Retry loop; raising RetryAgain
    # schedules another check
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      # Not our child (waitpid failed): keep polling via IsProcessAlive
      raise RetryAgain()

    if result_pid > 0:
      # The child has been reaped
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
2312

    
2313

    
2314
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    candidate = os.path.sep.join([dir_name, name])
    # accept only entries passing the caller's test which still resolve
    # to the requested basename
    if test(candidate) and os.path.basename(candidate) == name:
      return candidate

  return None
2346

    
2347

    
2348
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname)
  # a missing (or None-valued) entry means the VG does not exist
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
2371

    
2372

    
2373
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # Work in whole microseconds to avoid floating-point surprises
  total_microseconds = int(value * 1000000)
  seconds = total_microseconds // 1000000
  microseconds = total_microseconds % 1000000

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
2389

    
2390

    
2391
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  # multiply by the literal 0.000001 (not a division) so the result is
  # bit-for-bit the same as previous versions of this function
  fractional = float(microseconds) * 0.000001
  return float(seconds) + fractional
2407

    
2408

    
2409
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    @type filename: str
    @param filename: log file to open
    @type mode: str
    @param mode: file open mode (append by default)
    @type encoding: str or None
    @param encoding: optional file encoding

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    # kept open for the whole handler lifetime as the error fallback
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass
2441

    
2442

    
2443
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  # fmt is used for file/stderr handlers, sft for syslog (which adds
  # its own timestamp, hence the different layout)
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  # configure the root logger; per-handler levels do the filtering
  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      # without debug, stderr only carries critical messages
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permisssion problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
2534

    
2535

    
2536
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  @type path: str
  @param path: the path to check
  @rtype: bool
  @return: True iff the path is absolute and already in normal form

  """
  if not os.path.isabs(path):
    return False
  # normpath is a no-op exactly when the path is already normalized
  return os.path.normpath(path) == path
2543

    
2544

    
2545
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))

  joined = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(joined):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))

  # check that we're still under the original prefix
  shared_prefix = os.path.commonprefix([root, joined])
  if shared_prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (shared_prefix, root))
  return joined
2573

    
2574

    
2575
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    # seek to the end to learn the size, then back up (at most) 4KB
    fd.seek(0, 2)
    file_size = fd.tell()
    fd.seek(max(0, file_size - 4096), 0)
    raw_data = fd.read()
  finally:
    fd.close()

  return raw_data.splitlines()[-lines:]
2599

    
2600

    
2601
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp with the local timezone.

  @type secs: number
  @param secs: the Unix timestamp to format
  @rtype: str
  @return: the timestamp rendered as "YYYY-MM-DD HH:MM:SS ZONE" in the
      local timezone

  """
  # Fixed defect: this used time.gmtime, which contradicts both the
  # docstring ("local timezone") and the %Z zone-name conversion; the
  # certificate-validity messages built from this are meant to be shown
  # in local time.
  return time.strftime("%F %T %Z", time.localtime(secs))
2606

    
2607

    
2608
def _ParseAsn1Generalizedtime(value):
2609
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2610

2611
  @type value: string
2612
  @param value: ASN1 GENERALIZEDTIME timestamp
2613

2614
  """
2615
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2616
  if m:
2617
    # We have an offset
2618
    asn1time = m.group(1)
2619
    hours = int(m.group(2))
2620
    minutes = int(m.group(3))
2621
    utcoffset = (60 * hours) + minutes
2622
  else:
2623
    if not value.endswith("Z"):
2624
      raise ValueError("Missing timezone")
2625
    asn1time = value[:-1]
2626
    utcoffset = 0
2627

    
2628
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2629

    
2630
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2631

    
2632
  return calendar.timegm(tt.utctimetuple())
2633

    
2634

    
2635
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @rtype: tuple
  @return: (not_before, not_after) as Unix timestamps, either of which
      may be None if the information is unavailable

  """
  def _GetTimestamp(getter_name):
    # The get_notBefore and get_notAfter functions are only supported
    # in pyOpenSSL 0.7 and above, hence the defensive lookup
    getter = getattr(cert, getter_name, None)
    if getter is None:
      return None
    asn1_value = getter()
    if asn1_value is None:
      return None
    return _ParseAsn1Generalizedtime(asn1_value)

  return (_GetTimestamp("get_notBefore"), _GetTimestamp("get_notAfter"))
2669

    
2670

    
2671
def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @rtype: tuple
  @return: (severity, message), where severity is CERT_ERROR,
      CERT_WARNING or None

  """
  if expired:
    # Describe the validity window as precisely as we know it
    if not_before is not None and not_after is not None:
      window = (" (valid from %s to %s)" %
                (FormatTimestampWithTZ(not_before),
                 FormatTimestampWithTZ(not_after)))
    elif not_before is not None:
      window = " (valid from %s)" % FormatTimestampWithTZ(not_before)
    elif not_after is not None:
      window = " (valid until %s)" % FormatTimestampWithTZ(not_after)
    else:
      window = ""
    return (CERT_ERROR, "Certificate is expired" + window)

  if not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTimestampWithTZ(not_before))

  if not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    # error threshold takes precedence over the warning one
    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  return (None, None)
2720

    
2721

    
2722
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (valid_from, valid_until) = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), valid_from, valid_until,
                                 time.time(), warn_days, error_days)
2738

    
2739

    
2740
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
  signature = Sha1Hmac(key, cert_pem, salt=salt)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt, signature, cert_pem))
2765

    
2766

    
2767
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: PEM data with a signature header in front
  @rtype: tuple
  @return: (salt, signature) extracted from the header
  @raise errors.GenericError: if no signature header is found

  """
  # The signature header must appear before the first "---BEGIN..." marker
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      break

    match = X509_SIGNATURE.match(line.strip())
    if match:
      return (match.group("salt"), match.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
2781

    
2782

    
2783
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: if the signature is missing or invalid

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load certificate, then dump it again so the HMAC is computed over a
  # canonical PEM representation rather than the caller's raw input
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
2806

    
2807

    
2808
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: the text to digest
  @type salt: string or None
  @param salt: optional salt prepended to the text before hashing
  @rtype: string
  @return: hexadecimal HMAC-SHA1 digest

  """
  # An empty or None salt means the text is hashed as-is
  if salt:
    payload = salt + text
  else:
    payload = text

  return hmac.new(key, payload, compat.sha1).hexdigest()
2824

    
2825

    
2826
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: the text whose digest is verified
  @type digest: string
  @param digest: Expected digest
  @type salt: string or None
  @param salt: optional salt used when the digest was computed
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # Case-insensitive hex comparison
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
2841

    
2842

    
2843
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      # Fixed defect: this previously appended r'\'r' (the three
      # characters backslash, quote, 'r'), garbling carriage returns
      # instead of escaping them as the two characters \r
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
2878

    
2879

    
2880
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escape in order to be an element of the
  elements. The escaping rules are (assuming coma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        # NOTE: if the input ends with an unescaped trailing backslash,
        # this pop raises IndexError (malformed input)
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
2920

    
2921

    
2922
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  # Stringify each element before joining, so non-string values work too
  return ", ".join(str(name) for name in names)
2930

    
2931

    
2932
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  # Force float division, then round to the nearest whole mebibyte
  mebibytes = value / (1024.0 * 1024.0)
  return int(round(mebibytes, 0))
2942

    
2943

    
2944
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  total = 0

  # Walk the whole tree and add up the size of every regular file;
  # lstat is used so symlinks are not followed
  for (dirpath, _, filenames) in os.walk(path):
    for fname in filenames:
      total += os.lstat(PathJoin(dirpath, fname)).st_size

  return BytesToMebibyte(total)
2961

    
2962

    
2963
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  result = []
  for line in ReadFile(filename).splitlines():
    # Only the first four whitespace-separated fields are of interest
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    result.append((device, mountpoint, fstype, options))

  return result
2981

    
2982

    
2983
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: int
  @return: tuple of (Total space, Free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_frsize is the fundamental block size; f_bavail counts only blocks
  # available to unprivileged users
  total = BytesToMebibyte(stats.f_blocks * stats.f_frsize)
  free = BytesToMebibyte(stats.f_bavail * stats.f_frsize)

  return (total, free)
2997

    
2998

    
2999
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child is killed by a signal or exits
    with a code other than 0 or 1

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    # os._exit (not sys.exit) so the child skips the parent's cleanup
    # handlers and buffered-output flushing
    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3044

    
3045

    
3046
def IgnoreProcessNotFound(fn, *args, **kwargs):
3047
  """Ignores ESRCH when calling a process-related function.
3048

3049
  ESRCH is raised when a process is not found.
3050

3051
  @rtype: bool
3052
  @return: Whether process was found
3053

3054
  """
3055
  try:
3056
    fn(*args, **kwargs)
3057
  except EnvironmentError, err:
3058
    # Ignore ESRCH
3059
    if err.errno == errno.ESRCH:
3060
      return False
3061
    raise
3062

    
3063
  return True
3064

    
3065

    
3066
def IgnoreSignals(fn, *args, **kwargs):
3067
  """Tries to call a function ignoring failures due to EINTR.
3068

3069
  """
3070
  try:
3071
    return fn(*args, **kwargs)
3072
  except EnvironmentError, err:
3073
    if err.errno == errno.EINTR:
3074
      return None
3075
    else:
3076
      raise
3077
  except (select.error, socket.error), err:
3078
    # In python 2.6 and above select.error is an IOError, so it's handled
3079
    # above, in 2.5 and below it's not, and it's handled here.
3080
    if err.args and err.args[0] == errno.EINTR:
3081
      return None
3082
    else:
3083
      raise
3084

    
3085

    
3086
def LockFile(fd):
3087
  """Locks a file using POSIX locks.
3088

3089
  @type fd: int
3090
  @param fd: the file descriptor we need to lock
3091

3092
  """
3093
  try:
3094
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3095
  except IOError, err:
3096
    if err.errno == errno.EAGAIN:
3097
      raise errors.LockError("File already locked")
3098
    raise
3099

    
3100

    
3101
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if not isinstance(val, (int, float)):
    # covers None and any other non-numeric value
    return "N/A"
  # the %F and %T format codes work on Linux, but they are not guaranteed
  # on all platforms
  return time.strftime("%F %T", time.localtime(val))
3114

    
3115

    
3116
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  secs = round(secs, 0)

  parts = []
  if secs > 0:
    # Negative values would be a bit tricky
    units = [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]
    for (suffix, length) in units:
      (whole, secs) = divmod(secs, length)
      # Emit the unit once it (or any larger one) is non-zero
      if whole or parts:
        parts.append("%d%s" % (whole, suffix))

  parts.append("%ds" % secs)

  return " ".join(parts)
3139

    
3140

    
3141
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3142
  """Reads the watcher pause file.
3143

3144
  @type filename: string
3145
  @param filename: Path to watcher pause file
3146
  @type now: None, float or int
3147
  @param now: Current time as Unix timestamp
3148
  @type remove_after: int
3149
  @param remove_after: Remove watcher pause file after specified amount of
3150
    seconds past the pause end time
3151

3152
  """
3153
  if now is None:
3154
    now = time.time()
3155

    
3156
  try:
3157
    value = ReadFile(filename)
3158
  except IOError, err:
3159
    if err.errno != errno.ENOENT:
3160
      raise
3161
    value = None
3162

    
3163
  if value is not None:
3164
    try:
3165
      value = int(value)
3166
    except ValueError:
3167
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3168
                       " removing it"), filename)
3169
      RemoveFile(filename)
3170
      value = None
3171

    
3172
    if value is not None:
3173
      # Remove file if it's outdated
3174
      if now > (value + remove_after):
3175
        RemoveFile(filename)
3176
        value = None
3177

    
3178
      elif now > value:
3179
        value = None
3180

    
3181
  return value
3182

    
3183

    
3184
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raises a wrapped exception, or this timeout with the same args."""
    args = self.args
    if args and isinstance(args[0], Exception):
      raise args[0]
    raise RetryTimeout(*args)
3197

    
3198

    
3199
class RetryAgain(Exception):
  """Retry again.

  Raised by a function running under L{Retry} to request another attempt.
  Any arguments are preserved and, should the retry loop time out, become
  the arguments of the resulting L{RetryTimeout}; an exception passed this
  way can later be re-raised via L{RetryTimeout.RaiseInner}.

  """
3207

    
3208

    
3209
class _RetryDelayCalculator(object):
3210
  """Calculator for increasing delays.
3211

3212
  """
3213
  __slots__ = [
3214
    "_factor",
3215
    "_limit",
3216
    "_next",
3217
    "_start",
3218
    ]
3219

    
3220
  def __init__(self, start, factor, limit):
3221
    """Initializes this class.
3222

3223
    @type start: float
3224
    @param start: Initial delay
3225
    @type factor: float
3226
    @param factor: Factor for delay increase
3227
    @type limit: float or None
3228
    @param limit: Upper limit for delay or None for no limit
3229

3230
    """
3231
    assert start > 0.0
3232
    assert factor >= 1.0
3233
    assert limit is None or limit >= 0.0
3234

    
3235
    self._start = start
3236
    self._factor = factor
3237
    self._limit = limit
3238

    
3239
    self._next = start
3240

    
3241
  def __call__(self):
3242
    """Returns current delay and calculates the next one.
3243

3244
    """
3245
    current = self._next
3246

    
3247
    # Update for next run
3248
    if self._limit is None or self._next < self._limit:
3249
      self._next = min(self._limit, self._next * self._factor)
3250

    
3251
    return current
3252

    
3253

    
3254
#: Special delay to specify whole remaining timeout
3255
RETRY_REMAINING_TIME = object()
3256

    
3257

    
3258
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3259
          _time_fn=time.time):
3260
  """Call a function repeatedly until it succeeds.
3261

3262
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3263
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3264
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
3265

3266
  C{delay} can be one of the following:
3267
    - callable returning the delay length as a float
3268
    - Tuple of (start, factor, limit)
3269
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3270
      useful when overriding L{wait_fn} to wait for an external event)
3271
    - A static delay as a number (int or float)
3272

3273
  @type fn: callable
3274
  @param fn: Function to be called
3275
  @param delay: Either a callable (returning the delay), a tuple of (start,
3276
                factor, limit) (see L{_RetryDelayCalculator}),
3277
                L{RETRY_REMAINING_TIME} or a number (int or float)
3278
  @type timeout: float
3279
  @param timeout: Total timeout
3280
  @type wait_fn: callable
3281
  @param wait_fn: Waiting function
3282
  @return: Return value of function
3283

3284
  """
3285
  assert callable(fn)
3286
  assert callable(wait_fn)
3287
  assert callable(_time_fn)
3288

    
3289
  if args is None:
3290
    args = []
3291

    
3292
  end_time = _time_fn() + timeout
3293

    
3294
  if callable(delay):
3295
    # External function to calculate delay
3296
    calc_delay = delay
3297

    
3298
  elif isinstance(delay, (tuple, list)):
3299
    # Increasing delay with optional upper boundary
3300
    (start, factor, limit) = delay
3301
    calc_delay = _RetryDelayCalculator(start, factor, limit)
3302

    
3303
  elif delay is RETRY_REMAINING_TIME:
3304
    # Always use the remaining time
3305
    calc_delay = None
3306

    
3307
  else:
3308
    # Static delay
3309
    calc_delay = lambda: delay
3310

    
3311
  assert calc_delay is None or callable(calc_delay)
3312

    
3313
  while True:
3314
    retry_args = []
3315
    try:
3316
      # pylint: disable-msg=W0142
3317
      return fn(*args)
3318
    except RetryAgain, err:
3319
      retry_args = err.args
3320
    except RetryTimeout:
3321
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
3322
                                   " handle RetryTimeout")
3323

    
3324
    remaining_time = end_time - _time_fn()
3325

    
3326
    if remaining_time < 0.0:
3327
      # pylint: disable-msg=W0142
3328
      raise RetryTimeout(*retry_args)
3329

    
3330
    assert remaining_time >= 0.0
3331

    
3332
    if calc_delay is None:
3333
      wait_fn(remaining_time)
3334
    else:
3335
      current_delay = calc_delay()
3336
      if current_delay > 0.0:
3337
        wait_fn(current_delay)
3338

    
3339

    
3340
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to L{tempfile.mkstemp}; the file
  descriptor is closed before returning.

  @rtype: string
  @return: path of the newly created temporary file

  """
  (handle, fname) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(handle)
  return fname
3347

    
3348

    
3349
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple
  @return: 2-tuple of (PEM-encoded private key, PEM-encoded certificate)

  """
  # Fresh RSA keypair used both as the certificate key and for signing
  pkey = OpenSSL.crypto.PKey()
  pkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Build the certificate and sign it with its own key (self-signed)
  x509 = OpenSSL.crypto.X509()
  if common_name:
    x509.get_subject().CN = common_name
  x509.set_serial_number(1)
  x509.gmtime_adj_notBefore(0)
  x509.gmtime_adj_notAfter(validity)
  x509.set_issuer(x509.get_subject())
  x509.set_pubkey(pkey)
  x509.sign(pkey, constants.X509_CERT_SIGN_DIGEST)

  pem_key = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey)
  pem_cert = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             x509)

  return (pem_key, pem_cert)
3377

    
3378

    
3379
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3380
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
3381
  """Legacy function to generate self-signed X509 certificate.
3382

3383
  @type filename: str
3384
  @param filename: path to write certificate to
3385
  @type common_name: string
3386
  @param common_name: commonName value
3387
  @type validity: int
3388
  @param validity: validity of certificate in number of days
3389

3390
  """
3391
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
3392
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3393
  # and node daemon certificates have the proper Subject/Issuer.
3394
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3395
                                                   validity * 24 * 60 * 60)
3396

    
3397
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3398

    
3399

    
3400
class FileLock(object):
3401
  """Utility class for file locks.
3402

3403
  """
3404
  def __init__(self, fd, filename):
3405
    """Constructor for FileLock.
3406

3407
    @type fd: file
3408
    @param fd: File object
3409
    @type filename: str
3410
    @param filename: Path of the file opened at I{fd}
3411

3412
    """
3413
    self.fd = fd
3414
    self.filename = filename
3415

    
3416
  @classmethod
3417
  def Open(cls, filename):
3418
    """Creates and opens a file to be used as a file-based lock.
3419

3420
    @type filename: string
3421
    @param filename: path to the file to be locked
3422

3423
    """
3424
    # Using "os.open" is necessary to allow both opening existing file
3425
    # read/write and creating if not existing. Vanilla "open" will truncate an
3426
    # existing file -or- allow creating if not existing.
3427
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
3428
               filename)
3429

    
3430
  def __del__(self):
3431
    self.Close()
3432

    
3433
  def Close(self):
3434
    """Close the file and release the lock.
3435

3436
    """
3437
    if hasattr(self, "fd") and self.fd:
3438
      self.fd.close()
3439
      self.fd = None
3440

    
3441
  def _flock(self, flag, blocking, timeout, errmsg):
3442
    """Wrapper for fcntl.flock.
3443

3444
    @type flag: int
3445
    @param flag: operation flag
3446
    @type blocking: bool
3447
    @param blocking: whether the operation should be done in blocking mode.
3448
    @type timeout: None or float
3449
    @param timeout: for how long the operation should be retried (implies
3450
                    non-blocking mode).
3451
    @type errmsg: string
3452
    @param errmsg: error message in case operation fails.
3453

3454
    """
3455
    assert self.fd, "Lock was closed"
3456
    assert timeout is None or timeout >= 0, \
3457
      "If specified, timeout must be positive"
3458
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
3459

    
3460
    # When a timeout is used, LOCK_NB must always be set
3461
    if not (timeout is None and blocking):
3462
      flag |= fcntl.LOCK_NB
3463

    
3464
    if timeout is None:
3465
      self._Lock(self.fd, flag, timeout)
3466
    else:
3467
      try:
3468
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
3469
              args=(self.fd, flag, timeout))
3470
      except RetryTimeout:
3471
        raise errors.LockError(errmsg)
3472

    
3473
  @staticmethod
3474
  def _Lock(fd, flag, timeout):
3475
    try:
3476
      fcntl.flock(fd, flag)
3477
    except IOError, err:
3478
      if timeout is not None and err.errno == errno.EAGAIN:
3479
        raise RetryAgain()
3480

    
3481
      logging.exception("fcntl.flock failed")
3482
      raise
3483

    
3484
  def Exclusive(self, blocking=False, timeout=None):
3485
    """Locks the file in exclusive mode.
3486

3487
    @type blocking: boolean
3488
    @param blocking: whether to block and wait until we
3489
        can lock the file or return immediately
3490
    @type timeout: int or None
3491
    @param timeout: if not None, the duration to wait for the lock
3492
        (in blocking mode)
3493

3494
    """
3495
    self._flock(fcntl.LOCK_EX, blocking, timeout,
3496
                "Failed to lock %s in exclusive mode" % self.filename)
3497

    
3498
  def Shared(self, blocking=False, timeout=None):
3499
    """Locks the file in shared mode.
3500

3501
    @type blocking: boolean
3502
    @param blocking: whether to block and wait until we
3503
        can lock the file or return immediately
3504
    @type timeout: int or None
3505
    @param timeout: if not None, the duration to wait for the lock
3506
        (in blocking mode)
3507

3508
    """
3509
    self._flock(fcntl.LOCK_SH, blocking, timeout,
3510
                "Failed to lock %s in shared mode" % self.filename)
3511

    
3512
  def Unlock(self, blocking=True, timeout=None):
3513
    """Unlocks the file.
3514

3515
    According to C{flock(2)}, unlocking can also be a nonblocking
3516
    operation::
3517

3518
      To make a non-blocking request, include LOCK_NB with any of the above
3519
      operations.
3520

3521
    @type blocking: boolean
3522
    @param blocking: whether to block and wait until we
3523
        can lock the file or return immediately
3524
    @type timeout: int or None
3525
    @param timeout: if not None, the duration to wait for the lock
3526
        (in blocking mode)
3527

3528
    """
3529
    self._flock(fcntl.LOCK_UN, blocking, timeout,
3530
                "Failed to unlock %s" % self.filename)
3531

    
3532

    
3533
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each complete line, first parameter
      is the line itself
    @param args: Extra arguments passed to L{line_fn} after the line

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._pending = collections.deque()
    self._partial = ""

  def write(self, data):
    """Accepts a chunk of data and queues any complete lines."""
    pieces = (self._partial + data).split("\n")
    # The last piece has no trailing newline yet, keep it buffered
    self._partial = pieces.pop()
    self._pending.extend(pieces)

  def flush(self):
    """Invokes the callback for every queued complete line."""
    while self._pending:
      self._line_fn(self._pending.popleft().rstrip("\r\n"))

  def close(self):
    """Flushes queued lines and emits any trailing partial line."""
    self.flush()
    if self._partial:
      self._line_fn(self._partial)
3572

    
3573

    
3574
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      # Reuse the handler dict from a stacked decorator, if one was passed
      signal_handlers = kwargs.get('signal_handlers')
      if signal_handlers is None:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous signal handlers
        sighandler.Reset()
    return sig_function
  return wrap
3609

    
3610

    
3611
class SignalWakeupFd(object):
  """Wakeup file descriptor for signal delivery.

  Creates a pipe and registers its write end via C{signal.set_wakeup_fd}
  (when the running Python provides it), exposing the read end through
  L{fileno} and L{read} so callers can poll/select on signal arrival.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Keep the previously registered descriptor so it can be restored later
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # hasattr guards against __init__ having failed before _previous was set
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    # A single byte is enough to mark the read end as readable
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
3661

    
3662

    
3663
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: if not None, an object whose C{Notify} method is invoked
        from the signal handler (e.g. a L{SignalWakeupFd})

    """
    assert handler_fn is None or callable(handler_fn)

    # NOTE(review): despite the docstring, a bare int here would make set()
    # raise TypeError; callers appear to pass iterables -- confirm
    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # Maps signal number -> handler that was active before ours was
    # installed; used by Reset() to restore the original configuration
    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # In Python 2, items() returns a list copy, so deleting entries while
    # iterating is safe here
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
3748

    
3749

    
3750
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Each field is anchored so it must match the whole string
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for pattern in self.items:
      match = pattern.match(field)
      if match:
        return match
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [field for field in items if not self.Matches(field)]