
root / lib / utils.py @ 716a32cb


1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52
import struct
53
import IN
54

    
55
from cStringIO import StringIO
56

    
57
try:
58
  import ctypes
59
except ImportError:
60
  ctypes = None
61

    
62
from ganeti import errors
63
from ganeti import constants
64
from ganeti import compat
65

    
66

    
67
_locksheld = []
68
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
69

    
70
debug_locks = False
71

    
72
#: when set to True, L{RunCmd} is disabled
73
no_fork = False
74

    
75
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
76

    
77
HEX_CHAR_RE = r"[a-zA-Z0-9]"
78
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
79
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
80
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
81
                             HEX_CHAR_RE, HEX_CHAR_RE),
82
                            re.S | re.I)
83

    
84
# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
85
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
86
#
87
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
88
# pid_t to "int".
89
#
90
# IEEE Std 1003.1-2008:
91
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
92
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
93
_STRUCT_UCRED = "iII"
94
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)
95

    
96
# Certificate verification results
97
(CERT_WARNING,
98
 CERT_ERROR) = range(1, 3)
99

    
100
# Flags for mlockall() (from bits/mman.h)
101
_MCL_CURRENT = 1
102
_MCL_FUTURE = 2
103

    
104

    
105
class RunResult(object):
106
  """Holds the result of running external programs.
107

108
  @type exit_code: int
109
  @ivar exit_code: the exit code of the program, or None (if the program
110
      didn't exit())
111
  @type signal: int or None
112
  @ivar signal: the signal that caused the program to finish, or None
113
      (if the program wasn't terminated by a signal)
114
  @type stdout: str
115
  @ivar stdout: the standard output of the program
116
  @type stderr: str
117
  @ivar stderr: the standard error of the program
118
  @type failed: boolean
119
  @ivar failed: True in case the program was
120
      terminated by a signal or exited with a non-zero exit code
121
  @ivar fail_reason: a string detailing the termination reason
122

123
  """
124
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
125
               "failed", "fail_reason", "cmd"]
126

    
127

    
128
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
129
    self.cmd = cmd
130
    self.exit_code = exit_code
131
    self.signal = signal_
132
    self.stdout = stdout
133
    self.stderr = stderr
134
    self.failed = (signal_ is not None or exit_code != 0)
135

    
136
    if self.signal is not None:
137
      self.fail_reason = "terminated by signal %s" % self.signal
138
    elif self.exit_code is not None:
139
      self.fail_reason = "exited with exit code %s" % self.exit_code
140
    else:
141
      self.fail_reason = "unable to determine termination reason"
142

    
143
    if self.failed:
144
      logging.debug("Command '%s' failed (%s); output: %s",
145
                    self.cmd, self.fail_reason, self.output)
146

    
147
  def _GetOutput(self):
148
    """Returns the combined stdout and stderr for easier usage.
149

150
    """
151
    return self.stdout + self.stderr
152

    
153
  output = property(_GetOutput, None, None, "Return full output")
154

    
155

    
156
def _BuildCmdEnvironment(env, reset):
157
  """Builds the environment for an external program.
158

159
  """
160
  if reset:
161
    cmd_env = {}
162
  else:
163
    cmd_env = os.environ.copy()
164
    cmd_env["LC_ALL"] = "C"
165

    
166
  if env is not None:
167
    cmd_env.update(env)
168

    
169
  return cmd_env
170

    
171

    
172
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
173
  """Execute a (shell) command.
174

175
  The command should not read from its standard input, as it will be
176
  closed.
177

178
  @type cmd: string or list
179
  @param cmd: Command to run
180
  @type env: dict
181
  @param env: Additional environment variables
182
  @type output: str
183
  @param output: if desired, the output of the command can be
184
      saved in a file instead of the RunResult instance; this
185
      parameter denotes the file name (if not None)
186
  @type cwd: string
187
  @param cwd: if specified, will be used as the working
188
      directory for the command; the default will be /
189
  @type reset_env: boolean
190
  @param reset_env: whether to reset or keep the default os environment
191
  @rtype: L{RunResult}
192
  @return: RunResult instance
193
  @raise errors.ProgrammerError: if we call this when forks are disabled
194

195
  """
196
  if no_fork:
197
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
198

    
199
  if isinstance(cmd, basestring):
200
    strcmd = cmd
201
    shell = True
202
  else:
203
    cmd = [str(val) for val in cmd]
204
    strcmd = ShellQuoteArgs(cmd)
205
    shell = False
206

    
207
  if output:
208
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
209
  else:
210
    logging.debug("RunCmd %s", strcmd)
211

    
212
  cmd_env = _BuildCmdEnvironment(env, reset_env)
213

    
214
  try:
215
    if output is None:
216
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
217
    else:
218
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
219
      out = err = ""
220
  except OSError, err:
221
    if err.errno == errno.ENOENT:
222
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
223
                               (strcmd, err))
224
    else:
225
      raise
226

    
227
  if status >= 0:
228
    exitcode = status
229
    signal_ = None
230
  else:
231
    exitcode = None
232
    signal_ = -status
233

    
234
  return RunResult(exitcode, signal_, out, err, strcmd)
235
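# Illustrative usage sketch (not part of the original module): RunCmd accepts
# either a shell string or an argument list and returns a RunResult; the path
# below is hypothetical.
#
#   result = RunCmd(["ls", "-l", "/var/log"])
#   if result.failed:
#     logging.error("Command failed (%s): %s", result.fail_reason, result.output)
#   else:
#     logging.info("Command output: %s", result.stdout)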

    
236

    
237
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
238
                pidfile=None):
239
  """Start a daemon process after forking twice.
240

241
  @type cmd: string or list
242
  @param cmd: Command to run
243
  @type env: dict
244
  @param env: Additional environment variables
245
  @type cwd: string
246
  @param cwd: Working directory for the program
247
  @type output: string
248
  @param output: Path to file in which to save the output
249
  @type output_fd: int
250
  @param output_fd: File descriptor for output
251
  @type pidfile: string
252
  @param pidfile: Process ID file
253
  @rtype: int
254
  @return: Daemon process ID
255
  @raise errors.ProgrammerError: if we call this when forks are disabled
256

257
  """
258
  if no_fork:
259
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
260
                                 " disabled")
261

    
262
  if output and not (bool(output) ^ (output_fd is not None)):
263
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
264
                                 " specified")
265

    
266
  if isinstance(cmd, basestring):
267
    cmd = ["/bin/sh", "-c", cmd]
268

    
269
  strcmd = ShellQuoteArgs(cmd)
270

    
271
  if output:
272
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
273
  else:
274
    logging.debug("StartDaemon %s", strcmd)
275

    
276
  cmd_env = _BuildCmdEnvironment(env, False)
277

    
278
  # Create pipe for sending PID back
279
  (pidpipe_read, pidpipe_write) = os.pipe()
280
  try:
281
    try:
282
      # Create pipe for sending error messages
283
      (errpipe_read, errpipe_write) = os.pipe()
284
      try:
285
        try:
286
          # First fork
287
          pid = os.fork()
288
          if pid == 0:
289
            try:
290
              # Child process, won't return
291
              _StartDaemonChild(errpipe_read, errpipe_write,
292
                                pidpipe_read, pidpipe_write,
293
                                cmd, cmd_env, cwd,
294
                                output, output_fd, pidfile)
295
            finally:
296
              # Well, maybe child process failed
297
              os._exit(1) # pylint: disable-msg=W0212
298
        finally:
299
          _CloseFDNoErr(errpipe_write)
300

    
301
        # Wait for daemon to be started (or an error message to arrive) and read
302
        # up to 100 KB as an error message
303
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
304
      finally:
305
        _CloseFDNoErr(errpipe_read)
306
    finally:
307
      _CloseFDNoErr(pidpipe_write)
308

    
309
    # Read up to 128 bytes for PID
310
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
311
  finally:
312
    _CloseFDNoErr(pidpipe_read)
313

    
314
  # Try to avoid zombies by waiting for child process
315
  try:
316
    os.waitpid(pid, 0)
317
  except OSError:
318
    pass
319

    
320
  if errormsg:
321
    raise errors.OpExecError("Error when starting daemon process: %r" %
322
                             errormsg)
323

    
324
  try:
325
    return int(pidtext)
326
  except (ValueError, TypeError), err:
327
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
328
                             (pidtext, err))
329
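# Illustrative usage sketch (not part of the original module): starting a
# daemon with its output and PID file redirected; the daemon binary and the
# paths below are hypothetical.
#
#   pid = StartDaemon(["/usr/sbin/example-daemon", "--foreground"],
#                     output="/var/log/example-daemon.log",
#                     pidfile="/var/run/example-daemon.pid")
#   logging.info("Started daemon with PID %s", pid)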

    
330

    
331
def _StartDaemonChild(errpipe_read, errpipe_write,
332
                      pidpipe_read, pidpipe_write,
333
                      args, env, cwd,
334
                      output, fd_output, pidfile):
335
  """Child process for starting daemon.
336

337
  """
338
  try:
339
    # Close parent's side
340
    _CloseFDNoErr(errpipe_read)
341
    _CloseFDNoErr(pidpipe_read)
342

    
343
    # First child process
344
    os.chdir("/")
345
    os.umask(077)
346
    os.setsid()
347

    
348
    # And fork for the second time
349
    pid = os.fork()
350
    if pid != 0:
351
      # Exit first child process
352
      os._exit(0) # pylint: disable-msg=W0212
353

    
354
    # Make sure pipe is closed on execv* (and thereby notifies original process)
355
    SetCloseOnExecFlag(errpipe_write, True)
356

    
357
    # List of file descriptors to be left open
358
    noclose_fds = [errpipe_write]
359

    
360
    # Open PID file
361
    if pidfile:
362
      try:
363
        # TODO: Atomic replace with another locked file instead of writing into
364
        # it after creating
365
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
366

    
367
        # Lock the PID file (and fail if not possible to do so). Any code
368
        # wanting to send a signal to the daemon should try to lock the PID
369
        # file before reading it. If acquiring the lock succeeds, the daemon is
370
        # no longer running and the signal should not be sent.
371
        LockFile(fd_pidfile)
372

    
373
        os.write(fd_pidfile, "%d\n" % os.getpid())
374
      except Exception, err:
375
        raise Exception("Creating and locking PID file failed: %s" % err)
376

    
377
      # Keeping the file open to hold the lock
378
      noclose_fds.append(fd_pidfile)
379

    
380
      SetCloseOnExecFlag(fd_pidfile, False)
381
    else:
382
      fd_pidfile = None
383

    
384
    # Open /dev/null
385
    fd_devnull = os.open(os.devnull, os.O_RDWR)
386

    
387
    assert not output or (bool(output) ^ (fd_output is not None))
388

    
389
    if fd_output is not None:
390
      pass
391
    elif output:
392
      # Open output file
393
      try:
394
        # TODO: Implement flag to set append=yes/no
395
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
396
      except EnvironmentError, err:
397
        raise Exception("Opening output file failed: %s" % err)
398
    else:
399
      fd_output = fd_devnull
400

    
401
    # Redirect standard I/O
402
    os.dup2(fd_devnull, 0)
403
    os.dup2(fd_output, 1)
404
    os.dup2(fd_output, 2)
405

    
406
    # Send daemon PID to parent
407
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
408

    
409
    # Close all file descriptors except stdio and error message pipe
410
    CloseFDs(noclose_fds=noclose_fds)
411

    
412
    # Change working directory
413
    os.chdir(cwd)
414

    
415
    if env is None:
416
      os.execvp(args[0], args)
417
    else:
418
      os.execvpe(args[0], args, env)
419
  except: # pylint: disable-msg=W0702
420
    try:
421
      # Report errors to original process
422
      buf = str(sys.exc_info()[1])
423

    
424
      RetryOnSignal(os.write, errpipe_write, buf)
425
    except: # pylint: disable-msg=W0702
426
      # Ignore errors in error handling
427
      pass
428

    
429
  os._exit(1) # pylint: disable-msg=W0212
430

    
431

    
432
def _RunCmdPipe(cmd, env, via_shell, cwd):
433
  """Run a command and return its output.
434

435
  @type  cmd: string or list
436
  @param cmd: Command to run
437
  @type env: dict
438
  @param env: The environment to use
439
  @type via_shell: bool
440
  @param via_shell: if we should run via the shell
441
  @type cwd: string
442
  @param cwd: the working directory for the program
443
  @rtype: tuple
444
  @return: (out, err, status)
445

446
  """
447
  poller = select.poll()
448
  child = subprocess.Popen(cmd, shell=via_shell,
449
                           stderr=subprocess.PIPE,
450
                           stdout=subprocess.PIPE,
451
                           stdin=subprocess.PIPE,
452
                           close_fds=True, env=env,
453
                           cwd=cwd)
454

    
455
  child.stdin.close()
456
  poller.register(child.stdout, select.POLLIN)
457
  poller.register(child.stderr, select.POLLIN)
458
  out = StringIO()
459
  err = StringIO()
460
  fdmap = {
461
    child.stdout.fileno(): (out, child.stdout),
462
    child.stderr.fileno(): (err, child.stderr),
463
    }
464
  for fd in fdmap:
465
    SetNonblockFlag(fd, True)
466

    
467
  while fdmap:
468
    pollresult = RetryOnSignal(poller.poll)
469

    
470
    for fd, event in pollresult:
471
      if event & select.POLLIN or event & select.POLLPRI:
472
        data = fdmap[fd][1].read()
473
        # no data from read signifies EOF (the same as POLLHUP)
474
        if not data:
475
          poller.unregister(fd)
476
          del fdmap[fd]
477
          continue
478
        fdmap[fd][0].write(data)
479
      if (event & select.POLLNVAL or event & select.POLLHUP or
480
          event & select.POLLERR):
481
        poller.unregister(fd)
482
        del fdmap[fd]
483

    
484
  out = out.getvalue()
485
  err = err.getvalue()
486

    
487
  status = child.wait()
488
  return out, err, status
489

    
490

    
491
def _RunCmdFile(cmd, env, via_shell, output, cwd):
492
  """Run a command and save its output to a file.
493

494
  @type  cmd: string or list
495
  @param cmd: Command to run
496
  @type env: dict
497
  @param env: The environment to use
498
  @type via_shell: bool
499
  @param via_shell: if we should run via the shell
500
  @type output: str
501
  @param output: the filename in which to save the output
502
  @type cwd: string
503
  @param cwd: the working directory for the program
504
  @rtype: int
505
  @return: the exit status
506

507
  """
508
  fh = open(output, "a")
509
  try:
510
    child = subprocess.Popen(cmd, shell=via_shell,
511
                             stderr=subprocess.STDOUT,
512
                             stdout=fh,
513
                             stdin=subprocess.PIPE,
514
                             close_fds=True, env=env,
515
                             cwd=cwd)
516

    
517
    child.stdin.close()
518
    status = child.wait()
519
  finally:
520
    fh.close()
521
  return status
522

    
523

    
524
def SetCloseOnExecFlag(fd, enable):
525
  """Sets or unsets the close-on-exec flag on a file descriptor.
526

527
  @type fd: int
528
  @param fd: File descriptor
529
  @type enable: bool
530
  @param enable: Whether to set or unset it.
531

532
  """
533
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)
534

    
535
  if enable:
536
    flags |= fcntl.FD_CLOEXEC
537
  else:
538
    flags &= ~fcntl.FD_CLOEXEC
539

    
540
  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
541

    
542

    
543
def SetNonblockFlag(fd, enable):
544
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
545

546
  @type fd: int
547
  @param fd: File descriptor
548
  @type enable: bool
549
  @param enable: Whether to set or unset it
550

551
  """
552
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)
553

    
554
  if enable:
555
    flags |= os.O_NONBLOCK
556
  else:
557
    flags &= ~os.O_NONBLOCK
558

    
559
  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
560

    
561

    
562
def RetryOnSignal(fn, *args, **kwargs):
563
  """Calls a function again if it failed due to EINTR.
564

565
  """
566
  while True:
567
    try:
568
      return fn(*args, **kwargs)
569
    except (EnvironmentError, socket.error), err:
570
      if err.errno != errno.EINTR:
571
        raise
572
    except select.error, err:
573
      if not (err.args and err.args[0] == errno.EINTR):
574
        raise
575
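# Illustrative usage sketch (not part of the original module): wrapping a
# system call that may be interrupted by a signal; "fd" stands for an already
# opened file descriptor.
#
#   data = RetryOnSignal(os.read, fd, 4096)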

    
576

    
577
def RunParts(dir_name, env=None, reset_env=False):
578
  """Run Scripts or programs in a directory
579

580
  @type dir_name: string
581
  @param dir_name: absolute path to a directory
582
  @type env: dict
583
  @param env: The environment to use
584
  @type reset_env: boolean
585
  @param reset_env: whether to reset or keep the default os environment
586
  @rtype: list of tuples
587
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
588

589
  """
590
  rr = []
591

    
592
  try:
593
    dir_contents = ListVisibleFiles(dir_name)
594
  except OSError, err:
595
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
596
    return rr
597

    
598
  for relname in sorted(dir_contents):
599
    fname = PathJoin(dir_name, relname)
600
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
601
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
602
      rr.append((relname, constants.RUNPARTS_SKIP, None))
603
    else:
604
      try:
605
        result = RunCmd([fname], env=env, reset_env=reset_env)
606
      except Exception, err: # pylint: disable-msg=W0703
607
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
608
      else:
609
        rr.append((relname, constants.RUNPARTS_RUN, result))
610

    
611
  return rr
612
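# Illustrative usage sketch (not part of the original module): running all
# executable hooks in a directory and inspecting the per-script status; the
# directory path is hypothetical.
#
#   for (name, status, result) in RunParts("/etc/example/hooks.d"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("Hook %s could not be run: %s", name, result)
#     elif status == constants.RUNPARTS_RUN and result.failed:
#       logging.error("Hook %s failed: %s", name, result.fail_reason)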

    
613

    
614
def GetSocketCredentials(sock):
615
  """Returns the credentials of the foreign process connected to a socket.
616

617
  @param sock: Unix socket
618
  @rtype: tuple; (number, number, number)
619
  @return: The PID, UID and GID of the connected foreign process.
620

621
  """
622
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
623
                             _STRUCT_UCRED_SIZE)
624
  return struct.unpack(_STRUCT_UCRED, peercred)
625
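# Illustrative usage sketch (not part of the original module): retrieving the
# peer's credentials on a connected AF_UNIX socket (Linux-specific, as it
# relies on SO_PEERCRED); "server_sock" stands for an already bound and
# listening Unix socket.
#
#   conn, _ = server_sock.accept()
#   (peer_pid, peer_uid, peer_gid) = GetSocketCredentials(conn)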

    
626

    
627
def RemoveFile(filename):
628
  """Remove a file ignoring some errors.
629

630
  Remove a file, ignoring non-existing ones or directories. Other
631
  errors are propagated.
632

633
  @type filename: str
634
  @param filename: the file to be removed
635

636
  """
637
  try:
638
    os.unlink(filename)
639
  except OSError, err:
640
    if err.errno not in (errno.ENOENT, errno.EISDIR):
641
      raise
642

    
643

    
644
def RemoveDir(dirname):
645
  """Remove an empty directory.
646

647
  Remove a directory, ignoring non-existing ones.
648
  Other errors are propagated. This includes the case
649
  where the directory is not empty, so it can't be removed.
650

651
  @type dirname: str
652
  @param dirname: the empty directory to be removed
653

654
  """
655
  try:
656
    os.rmdir(dirname)
657
  except OSError, err:
658
    if err.errno != errno.ENOENT:
659
      raise
660

    
661

    
662
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
663
  """Renames a file.
664

665
  @type old: string
666
  @param old: Original path
667
  @type new: string
668
  @param new: New path
669
  @type mkdir: bool
670
  @param mkdir: Whether to create target directory if it doesn't exist
671
  @type mkdir_mode: int
672
  @param mkdir_mode: Mode for newly created directories
673

674
  """
675
  try:
676
    return os.rename(old, new)
677
  except OSError, err:
678
    # In at least one use case of this function, the job queue, directory
679
    # creation is very rare. Checking for the directory before renaming is not
680
    # as efficient.
681
    if mkdir and err.errno == errno.ENOENT:
682
      # Create directory and try again
683
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
684

    
685
      return os.rename(old, new)
686

    
687
    raise
688

    
689

    
690
def Makedirs(path, mode=0750):
691
  """Super-mkdir; create a leaf directory and all intermediate ones.
692

693
  This is a wrapper around C{os.makedirs} adding error handling not implemented
694
  before Python 2.5.
695

696
  """
697
  try:
698
    os.makedirs(path, mode)
699
  except OSError, err:
700
    # Ignore EEXIST. This is only handled in os.makedirs as included in
701
    # Python 2.5 and above.
702
    if err.errno != errno.EEXIST or not os.path.exists(path):
703
      raise
704

    
705

    
706
def ResetTempfileModule():
707
  """Resets the random name generator of the tempfile module.
708

709
  This function should be called after C{os.fork} in the child process to
710
  ensure it creates a newly seeded random generator. Otherwise it would
711
  generate the same random parts as the parent process. If several processes
712
  race for the creation of a temporary file, this could lead to one not getting
713
  a temporary name.
714

715
  """
716
  # pylint: disable-msg=W0212
717
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
718
    tempfile._once_lock.acquire()
719
    try:
720
      # Reset random name generator
721
      tempfile._name_sequence = None
722
    finally:
723
      tempfile._once_lock.release()
724
  else:
725
    logging.critical("The tempfile module misses at least one of the"
726
                     " '_once_lock' and '_name_sequence' attributes")
727

    
728

    
729
def _FingerprintFile(filename):
730
  """Compute the fingerprint of a file.
731

732
  If the file does not exist, a None will be returned
733
  instead.
734

735
  @type filename: str
736
  @param filename: the filename to checksum
737
  @rtype: str
738
  @return: the hex digest of the sha checksum of the contents
739
      of the file
740

741
  """
742
  if not (os.path.exists(filename) and os.path.isfile(filename)):
743
    return None
744

    
745
  f = open(filename)
746

    
747
  fp = compat.sha1_hash()
748
  while True:
749
    data = f.read(4096)
750
    if not data:
751
      break
752

    
753
    fp.update(data)
754

    
755
  return fp.hexdigest()
756

    
757

    
758
def FingerprintFiles(files):
759
  """Compute fingerprints for a list of files.
760

761
  @type files: list
762
  @param files: the list of filename to fingerprint
763
  @rtype: dict
764
  @return: a dictionary filename: fingerprint, holding only
765
      existing files
766

767
  """
768
  ret = {}
769

    
770
  for filename in files:
771
    cksum = _FingerprintFile(filename)
772
    if cksum:
773
      ret[filename] = cksum
774

    
775
  return ret
776

    
777

    
778
def ForceDictType(target, key_types, allowed_values=None):
779
  """Force the values of a dict to have certain types.
780

781
  @type target: dict
782
  @param target: the dict to update
783
  @type key_types: dict
784
  @param key_types: dict mapping target dict keys to types
785
                    in constants.ENFORCEABLE_TYPES
786
  @type allowed_values: list
787
  @keyword allowed_values: list of specially allowed values
788

789
  """
790
  if allowed_values is None:
791
    allowed_values = []
792

    
793
  if not isinstance(target, dict):
794
    msg = "Expected dictionary, got '%s'" % target
795
    raise errors.TypeEnforcementError(msg)
796

    
797
  for key in target:
798
    if key not in key_types:
799
      msg = "Unknown key '%s'" % key
800
      raise errors.TypeEnforcementError(msg)
801

    
802
    if target[key] in allowed_values:
803
      continue
804

    
805
    ktype = key_types[key]
806
    if ktype not in constants.ENFORCEABLE_TYPES:
807
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
808
      raise errors.ProgrammerError(msg)
809

    
810
    if ktype == constants.VTYPE_STRING:
811
      if not isinstance(target[key], basestring):
812
        if isinstance(target[key], bool) and not target[key]:
813
          target[key] = ''
814
        else:
815
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
816
          raise errors.TypeEnforcementError(msg)
817
    elif ktype == constants.VTYPE_BOOL:
818
      if isinstance(target[key], basestring) and target[key]:
819
        if target[key].lower() == constants.VALUE_FALSE:
820
          target[key] = False
821
        elif target[key].lower() == constants.VALUE_TRUE:
822
          target[key] = True
823
        else:
824
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
825
          raise errors.TypeEnforcementError(msg)
826
      elif target[key]:
827
        target[key] = True
828
      else:
829
        target[key] = False
830
    elif ktype == constants.VTYPE_SIZE:
831
      try:
832
        target[key] = ParseUnit(target[key])
833
      except errors.UnitParseError, err:
834
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
835
              (key, target[key], err)
836
        raise errors.TypeEnforcementError(msg)
837
    elif ktype == constants.VTYPE_INT:
838
      try:
839
        target[key] = int(target[key])
840
      except (ValueError, TypeError):
841
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
842
        raise errors.TypeEnforcementError(msg)
843
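# Illustrative usage sketch (not part of the original module): coercing the
# values of a parameter dictionary in place; assumes constants.VALUE_TRUE is
# the lower-case string "true".
#
#   params = {"count": "3", "enabled": "true"}
#   ForceDictType(params, {"count": constants.VTYPE_INT,
#                          "enabled": constants.VTYPE_BOOL})
#   # params is now {"count": 3, "enabled": True}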

    
844

    
845
def IsProcessAlive(pid):
846
  """Check if a given pid exists on the system.
847

848
  @note: zombie status is not handled, so zombie processes
849
      will be returned as alive
850
  @type pid: int
851
  @param pid: the process ID to check
852
  @rtype: boolean
853
  @return: True if the process exists
854

855
  """
856
  def _TryStat(name):
857
    try:
858
      os.stat(name)
859
      return True
860
    except EnvironmentError, err:
861
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
862
        return False
863
      elif err.errno == errno.EINVAL:
864
        raise RetryAgain(err)
865
      raise
866

    
867
  assert isinstance(pid, int), "pid must be an integer"
868
  if pid <= 0:
869
    return False
870

    
871
  proc_entry = "/proc/%d/status" % pid
872
  # /proc in a multiprocessor environment can have strange behaviors.
873
  # Retry the os.stat a few times until we get a good result.
874
  try:
875
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5, args=[proc_entry])
876
  except RetryTimeout, err:
877
    err.RaiseInner()
878

    
879

    
880
def ReadPidFile(pidfile):
881
  """Read a pid from a file.
882

883
  @type  pidfile: string
884
  @param pidfile: path to the file containing the pid
885
  @rtype: int
886
  @return: The process id, if the file exists and contains a valid PID,
887
           otherwise 0
888

889
  """
890
  try:
891
    raw_data = ReadOneLineFile(pidfile)
892
  except EnvironmentError, err:
893
    if err.errno != errno.ENOENT:
894
      logging.exception("Can't read pid file")
895
    return 0
896

    
897
  try:
898
    pid = int(raw_data)
899
  except (TypeError, ValueError), err:
900
    logging.info("Can't parse pid file contents", exc_info=True)
901
    return 0
902

    
903
  return pid
904
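# Illustrative usage sketch (not part of the original module): combining
# ReadPidFile and IsProcessAlive to check whether a daemon is still running;
# the PID file path is hypothetical.
#
#   pid = ReadPidFile("/var/run/example-daemon.pid")
#   if pid and IsProcessAlive(pid):
#     logging.info("Daemon is running with PID %s", pid)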

    
905

    
906
def ReadLockedPidFile(path):
907
  """Reads a locked PID file.
908

909
  This can be used together with L{StartDaemon}.
910

911
  @type path: string
912
  @param path: Path to PID file
913
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
914

915
  """
916
  try:
917
    fd = os.open(path, os.O_RDONLY)
918
  except EnvironmentError, err:
919
    if err.errno == errno.ENOENT:
920
      # PID file doesn't exist
921
      return None
922
    raise
923

    
924
  try:
925
    try:
926
      # Try to acquire lock
927
      LockFile(fd)
928
    except errors.LockError:
929
      # Couldn't lock, daemon is running
930
      return int(os.read(fd, 100))
931
  finally:
932
    os.close(fd)
933

    
934
  return None
935

    
936

    
937
def MatchNameComponent(key, name_list, case_sensitive=True):
938
  """Try to match a name against a list.
939

940
  This function will try to match a name like test1 against a list
941
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
942
  this list, I{'test1'} as well as I{'test1.example'} will match, but
943
  not I{'test1.ex'}. A multiple match will be considered as no match
944
  at all (e.g. I{'test1'} against C{['test1.example.com',
945
  'test1.example.org']}), except when the key fully matches an entry
946
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
947

948
  @type key: str
949
  @param key: the name to be searched
950
  @type name_list: list
951
  @param name_list: the list of strings against which to search the key
952
  @type case_sensitive: boolean
953
  @param case_sensitive: whether to provide a case-sensitive match
954

955
  @rtype: None or str
956
  @return: None if there is no match I{or} if there are multiple matches,
957
      otherwise the element from the list which matches
958

959
  """
960
  if key in name_list:
961
    return key
962

    
963
  re_flags = 0
964
  if not case_sensitive:
965
    re_flags |= re.IGNORECASE
966
    key = key.upper()
967
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
968
  names_filtered = []
969
  string_matches = []
970
  for name in name_list:
971
    if mo.match(name) is not None:
972
      names_filtered.append(name)
973
      if not case_sensitive and key == name.upper():
974
        string_matches.append(name)
975

    
976
  if len(string_matches) == 1:
977
    return string_matches[0]
978
  if len(names_filtered) == 1:
979
    return names_filtered[0]
980
  return None
981
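# Illustrative examples (not part of the original module) of the matching
# semantics described in the docstring:
#
#   MatchNameComponent("node1", ["node1.example.com", "node2.example.com"])
#   # -> "node1.example.com"
#   MatchNameComponent("node1", ["node1.example.com", "node1.example.org"])
#   # -> None (multiple matches)
#   MatchNameComponent("node1", ["node1", "node1.example.com"])
#   # -> "node1" (exact match wins)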

    
982

    
983
class HostInfo:
984
  """Class implementing resolver and hostname functionality
985

986
  """
987
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")
988

    
989
  def __init__(self, name=None):
990
    """Initialize the host name object.
991

992
    If the name argument is not passed, it will use this system's
993
    name.
994

995
    """
996
    if name is None:
997
      name = self.SysName()
998

    
999
    self.query = name
1000
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
1001
    self.ip = self.ipaddrs[0]
1002

    
1003
  def ShortName(self):
1004
    """Returns the hostname without domain.
1005

1006
    """
1007
    return self.name.split('.')[0]
1008

    
1009
  @staticmethod
1010
  def SysName():
1011
    """Return the current system's name.
1012

1013
    This is simply a wrapper over C{socket.gethostname()}.
1014

1015
    """
1016
    return socket.gethostname()
1017

    
1018
  @staticmethod
1019
  def LookupHostname(hostname):
1020
    """Look up hostname
1021

1022
    @type hostname: str
1023
    @param hostname: hostname to look up
1024

1025
    @rtype: tuple
1026
    @return: a tuple (name, aliases, ipaddrs) as returned by
1027
        C{socket.gethostbyname_ex}
1028
    @raise errors.ResolverError: in case of errors in resolving
1029

1030
    """
1031
    try:
1032
      result = socket.gethostbyname_ex(hostname)
1033
    except socket.gaierror, err:
1034
      # hostname not found in DNS
1035
      raise errors.ResolverError(hostname, err.args[0], err.args[1])
1036

    
1037
    return result
1038

    
1039
  @classmethod
1040
  def NormalizeName(cls, hostname):
1041
    """Validate and normalize the given hostname.
1042

1043
    @attention: the validation is a bit more relaxed than the standards
1044
        require; most importantly, we allow underscores in names
1045
    @raise errors.OpPrereqError: when the name is not valid
1046

1047
    """
1048
    hostname = hostname.lower()
1049
    if (not cls._VALID_NAME_RE.match(hostname) or
1050
        # double-dots, meaning empty label
1051
        ".." in hostname or
1052
        # empty initial label
1053
        hostname.startswith(".")):
1054
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
1055
                                 errors.ECODE_INVAL)
1056
    if hostname.endswith("."):
1057
      hostname = hostname.rstrip(".")
1058
    return hostname
1059

    
1060

    
1061
def GetHostInfo(name=None):
1062
  """Lookup host name and raise an OpPrereqError for failures"""
1063

    
1064
  try:
1065
    return HostInfo(name)
1066
  except errors.ResolverError, err:
1067
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
1068
                               (err[0], err[2]), errors.ECODE_RESOLVER)
1069

    
1070

    
1071
def ListVolumeGroups():
1072
  """List volume groups and their size
1073

1074
  @rtype: dict
1075
  @return:
1076
       Dictionary with volume group names as keys and
1077
       their sizes in MiB as values
1078

1079
  """
1080
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1081
  result = RunCmd(command)
1082
  retval = {}
1083
  if result.failed:
1084
    return retval
1085

    
1086
  for line in result.stdout.splitlines():
1087
    try:
1088
      name, size = line.split()
1089
      size = int(float(size))
1090
    except (IndexError, ValueError), err:
1091
      logging.error("Invalid output from vgs (%s): %s", err, line)
1092
      continue
1093

    
1094
    retval[name] = size
1095

    
1096
  return retval
1097

    
1098

    
1099
def BridgeExists(bridge):
1100
  """Check whether the given bridge exists in the system
1101

1102
  @type bridge: str
1103
  @param bridge: the bridge name to check
1104
  @rtype: boolean
1105
  @return: True if it does
1106

1107
  """
1108
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1109

    
1110

    
1111
def NiceSort(name_list):
1112
  """Sort a list of strings based on digit and non-digit groupings.
1113

1114
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1115
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1116
  'a11']}.
1117

1118
  The sort algorithm breaks each name in groups of either only-digits
1119
  or no-digits. Only the first eight such groups are considered, and
1120
  after that we just use what's left of the string.
1121

1122
  @type name_list: list
1123
  @param name_list: the names to be sorted
1124
  @rtype: list
1125
  @return: a copy of the name list sorted with our algorithm
1126

1127
  """
1128
  _SORTER_BASE = "(\D+|\d+)"
1129
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1130
                                                  _SORTER_BASE, _SORTER_BASE,
1131
                                                  _SORTER_BASE, _SORTER_BASE,
1132
                                                  _SORTER_BASE, _SORTER_BASE)
1133
  _SORTER_RE = re.compile(_SORTER_FULL)
1134
  _SORTER_NODIGIT = re.compile("^\D*$")
1135
  def _TryInt(val):
1136
    """Attempts to convert a variable to integer."""
1137
    if val is None or _SORTER_NODIGIT.match(val):
1138
      return val
1139
    rval = int(val)
1140
    return rval
1141

    
1142
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1143
             for name in name_list]
1144
  to_sort.sort()
1145
  return [tup[1] for tup in to_sort]
1146
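# Illustrative example (not part of the original module) of the ordering
# produced by NiceSort:
#
#   NiceSort(["xen10", "xen2", "xen1", "xen11"])
#   # -> ["xen1", "xen2", "xen10", "xen11"]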

    
1147

    
1148
def TryConvert(fn, val):
1149
  """Try to convert a value ignoring errors.
1150

1151
  This function tries to apply function I{fn} to I{val}. If no
1152
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1153
  the result, else it will return the original value. Any other
1154
  exceptions are propagated to the caller.
1155

1156
  @type fn: callable
1157
  @param fn: function to apply to the value
1158
  @param val: the value to be converted
1159
  @return: The converted value if the conversion was successful,
1160
      otherwise the original value.
1161

1162
  """
1163
  try:
1164
    nv = fn(val)
1165
  except (ValueError, TypeError):
1166
    nv = val
1167
  return nv
1168

    
1169

    
1170
def IsValidIP(ip):
1171
  """Verifies the syntax of an IPv4 address.
1172

1173
  This function checks whether the passed IPv4 address is valid based
1174
  on syntax (not IP range, class calculations, etc.).
1175

1176
  @type ip: str
1177
  @param ip: the address to be checked
1178
  @rtype: a regular expression match object
1179
  @return: a regular expression match object, or None if the
1180
      address is not valid
1181

1182
  """
1183
  unit = "(0|[1-9]\d{0,2})"
1184
  #TODO: convert and return only boolean
1185
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
1186

    
1187

    
1188
def IsValidShellParam(word):
1189
  """Verifies is the given word is safe from the shell's p.o.v.
1190

1191
  This means that we can pass this to a command via the shell and be
1192
  sure that it doesn't alter the command line and is passed as such to
1193
  the actual command.
1194

1195
  Note that we are overly restrictive here, in order to be on the safe
1196
  side.
1197

1198
  @type word: str
1199
  @param word: the word to check
1200
  @rtype: boolean
1201
  @return: True if the word is 'safe'
1202

1203
  """
1204
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1205

    
1206

    
1207
def BuildShellCmd(template, *args):
1208
  """Build a safe shell command line from the given arguments.
1209

1210
  This function will check all arguments in the args list so that they
1211
  are valid shell parameters (i.e. they don't contain shell
1212
  metacharacters). If everything is ok, it will return the result of
1213
  template % args.
1214

1215
  @type template: str
1216
  @param template: the string holding the template for the
1217
      string formatting
1218
  @rtype: str
1219
  @return: the expanded command line
1220

1221
  """
1222
  for word in args:
1223
    if not IsValidShellParam(word):
1224
      raise errors.ProgrammerError("Shell argument '%s' contains"
1225
                                   " invalid characters" % word)
1226
  return template % args
1227

    
1228

    
1229
def FormatUnit(value, units):
1230
  """Formats an incoming number of MiB with the appropriate unit.
1231

1232
  @type value: int
1233
  @param value: integer representing the value in MiB (1 MiB = 1048576 bytes)
1234
  @type units: char
1235
  @param units: the type of formatting we should do:
1236
      - 'h' for automatic scaling
1237
      - 'm' for MiBs
1238
      - 'g' for GiBs
1239
      - 't' for TiBs
1240
  @rtype: str
1241
  @return: the formatted value (with suffix)
1242

1243
  """
1244
  if units not in ('m', 'g', 't', 'h'):
1245
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1246

    
1247
  suffix = ''
1248

    
1249
  if units == 'm' or (units == 'h' and value < 1024):
1250
    if units == 'h':
1251
      suffix = 'M'
1252
    return "%d%s" % (round(value, 0), suffix)
1253

    
1254
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1255
    if units == 'h':
1256
      suffix = 'G'
1257
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1258

    
1259
  else:
1260
    if units == 'h':
1261
      suffix = 'T'
1262
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1263

    
1264

    
1265
def ParseUnit(input_string):
1266
  """Tries to extract number and scale from the given string.
1267

1268
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1269
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1270
  is always an int in MiB.
1271

1272
  """
1273
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1274
  if not m:
1275
    raise errors.UnitParseError("Invalid format")
1276

    
1277
  value = float(m.groups()[0])
1278

    
1279
  unit = m.groups()[1]
1280
  if unit:
1281
    lcunit = unit.lower()
1282
  else:
1283
    lcunit = 'm'
1284

    
1285
  if lcunit in ('m', 'mb', 'mib'):
1286
    # Value already in MiB
1287
    pass
1288

    
1289
  elif lcunit in ('g', 'gb', 'gib'):
1290
    value *= 1024
1291

    
1292
  elif lcunit in ('t', 'tb', 'tib'):
1293
    value *= 1024 * 1024
1294

    
1295
  else:
1296
    raise errors.UnitParseError("Unknown unit: %s" % unit)
1297

    
1298
  # Make sure we round up
1299
  if int(value) < value:
1300
    value += 1
1301

    
1302
  # Round up to the next multiple of 4
1303
  value = int(value)
1304
  if value % 4:
1305
    value += 4 - value % 4
1306

    
1307
  return value
1308
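# Illustrative examples (not part of the original module) of the unit helpers
# defined above:
#
#   ParseUnit("4G")       # -> 4096 (MiB)
#   ParseUnit("750")      # -> 752 (rounded up to a multiple of 4 MiB)
#   FormatUnit(4096, 'h') # -> "4.0G"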

    
1309

    
1310
def AddAuthorizedKey(file_name, key):
1311
  """Adds an SSH public key to an authorized_keys file.
1312

1313
  @type file_name: str
1314
  @param file_name: path to authorized_keys file
1315
  @type key: str
1316
  @param key: string containing key
1317

1318
  """
1319
  key_fields = key.split()
1320

    
1321
  f = open(file_name, 'a+')
1322
  try:
1323
    nl = True
1324
    for line in f:
1325
      # Ignore whitespace changes
1326
      if line.split() == key_fields:
1327
        break
1328
      nl = line.endswith('\n')
1329
    else:
1330
      if not nl:
1331
        f.write("\n")
1332
      f.write(key.rstrip('\r\n'))
1333
      f.write("\n")
1334
      f.flush()
1335
  finally:
1336
    f.close()
1337

    
1338

    
1339
def RemoveAuthorizedKey(file_name, key):
1340
  """Removes an SSH public key from an authorized_keys file.
1341

1342
  @type file_name: str
1343
  @param file_name: path to authorized_keys file
1344
  @type key: str
1345
  @param key: string containing key
1346

1347
  """
1348
  key_fields = key.split()
1349

    
1350
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1351
  try:
1352
    out = os.fdopen(fd, 'w')
1353
    try:
1354
      f = open(file_name, 'r')
1355
      try:
1356
        for line in f:
1357
          # Ignore whitespace changes while comparing lines
1358
          if line.split() != key_fields:
1359
            out.write(line)
1360

    
1361
        out.flush()
1362
        os.rename(tmpname, file_name)
1363
      finally:
1364
        f.close()
1365
    finally:
1366
      out.close()
1367
  except:
1368
    RemoveFile(tmpname)
1369
    raise
1370

    
1371

    
1372
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1373
  """Sets the name of an IP address and hostname in /etc/hosts.
1374

1375
  @type file_name: str
1376
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1377
  @type ip: str
1378
  @param ip: the IP address
1379
  @type hostname: str
1380
  @param hostname: the hostname to be added
1381
  @type aliases: list
1382
  @param aliases: the list of aliases to add for the hostname
1383

1384
  """
1385
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1386
  # Ensure aliases are unique
1387
  aliases = UniqueSequence([hostname] + aliases)[1:]
1388

    
1389
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1390
  try:
1391
    out = os.fdopen(fd, 'w')
1392
    try:
1393
      f = open(file_name, 'r')
1394
      try:
1395
        for line in f:
1396
          fields = line.split()
1397
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1398
            continue
1399
          out.write(line)
1400

    
1401
        out.write("%s\t%s" % (ip, hostname))
1402
        if aliases:
1403
          out.write(" %s" % ' '.join(aliases))
1404
        out.write('\n')
1405

    
1406
        out.flush()
1407
        os.fsync(out)
1408
        os.chmod(tmpname, 0644)
1409
        os.rename(tmpname, file_name)
1410
      finally:
1411
        f.close()
1412
    finally:
1413
      out.close()
1414
  except:
1415
    RemoveFile(tmpname)
1416
    raise
1417

    
1418

    
1419
def AddHostToEtcHosts(hostname):
1420
  """Wrapper around SetEtcHostsEntry.
1421

1422
  @type hostname: str
1423
  @param hostname: a hostname that will be resolved and added to
1424
      L{constants.ETC_HOSTS}
1425

1426
  """
1427
  hi = HostInfo(name=hostname)
1428
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1429

    
1430

    
1431
def RemoveEtcHostsEntry(file_name, hostname):
1432
  """Removes a hostname from /etc/hosts.
1433

1434
  IP addresses without names are removed from the file.
1435

1436
  @type file_name: str
1437
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1438
  @type hostname: str
1439
  @param hostname: the hostname to be removed
1440

1441
  """
1442
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1443
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1444
  try:
1445
    out = os.fdopen(fd, 'w')
1446
    try:
1447
      f = open(file_name, 'r')
1448
      try:
1449
        for line in f:
1450
          fields = line.split()
1451
          if len(fields) > 1 and not fields[0].startswith('#'):
1452
            names = fields[1:]
1453
            if hostname in names:
1454
              while hostname in names:
1455
                names.remove(hostname)
1456
              if names:
1457
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1458
              continue
1459

    
1460
          out.write(line)
1461

    
1462
        out.flush()
1463
        os.fsync(out)
1464
        os.chmod(tmpname, 0644)
1465
        os.rename(tmpname, file_name)
1466
      finally:
1467
        f.close()
1468
    finally:
1469
      out.close()
1470
  except:
1471
    RemoveFile(tmpname)
1472
    raise
1473

    
1474

    
1475
def RemoveHostFromEtcHosts(hostname):
1476
  """Wrapper around RemoveEtcHostsEntry.
1477

1478
  @type hostname: str
1479
  @param hostname: hostname that will be resolved and its
1480
      full and short name will be removed from
1481
      L{constants.ETC_HOSTS}
1482

1483
  """
1484
  hi = HostInfo(name=hostname)
1485
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
1486
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
1487

    
1488

    
1489
def TimestampForFilename():
1490
  """Returns the current time formatted for filenames.
1491

1492
  The format doesn't contain colons, as some shells and applications use them as
1493
  separators.
1494

1495
  """
1496
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1497

    
1498

    
1499
def CreateBackup(file_name):
1500
  """Creates a backup of a file.
1501

1502
  @type file_name: str
1503
  @param file_name: file to be backed up
1504
  @rtype: str
1505
  @return: the path to the newly created backup
1506
  @raise errors.ProgrammerError: for invalid file names
1507

1508
  """
1509
  if not os.path.isfile(file_name):
1510
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1511
                                file_name)
1512

    
1513
  prefix = ("%s.backup-%s." %
1514
            (os.path.basename(file_name), TimestampForFilename()))
1515
  dir_name = os.path.dirname(file_name)
1516

    
1517
  fsrc = open(file_name, 'rb')
1518
  try:
1519
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1520
    fdst = os.fdopen(fd, 'wb')
1521
    try:
1522
      logging.debug("Backing up %s at %s", file_name, backup_name)
1523
      shutil.copyfileobj(fsrc, fdst)
1524
    finally:
1525
      fdst.close()
1526
  finally:
1527
    fsrc.close()
1528

    
1529
  return backup_name
1530

    
1531

    
1532
def ShellQuote(value):
1533
  """Quotes shell argument according to POSIX.
1534

1535
  @type value: str
1536
  @param value: the argument to be quoted
1537
  @rtype: str
1538
  @return: the quoted value
1539

1540
  """
1541
  if _re_shell_unquoted.match(value):
1542
    return value
1543
  else:
1544
    return "'%s'" % value.replace("'", "'\\''")
1545

    
1546

    
1547
def ShellQuoteArgs(args):
1548
  """Quotes a list of shell arguments.
1549

1550
  @type args: list
1551
  @param args: list of arguments to be quoted
1552
  @rtype: str
1553
  @return: the quoted arguments concatenated with spaces
1554

1555
  """
1556
  return ' '.join([ShellQuote(i) for i in args])
1557
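# Illustrative example (not part of the original module): quoting arguments
# before interpolating them into a shell command line:
#
#   ShellQuoteArgs(["echo", "it's here"])
#   # -> echo 'it'\''s here'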

    
1558

    
1559
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
1560
  """Simple ping implementation using TCP connect(2).
1561

1562
  Check if the given IP is reachable by attempting a TCP connect
1563
  to it.
1564

1565
  @type target: str
1566
  @param target: the IP or hostname to ping
1567
  @type port: int
1568
  @param port: the port to connect to
1569
  @type timeout: int
1570
  @param timeout: the timeout on the connection attempt
1571
  @type live_port_needed: boolean
1572
  @param live_port_needed: whether a closed port will cause the
1573
      function to return failure, as if there was a timeout
1574
  @type source: str or None
1575
  @param source: if specified, will cause the connect to be made
1576
      from this specific source address; failures to bind other
1577
      than C{EADDRNOTAVAIL} will be ignored
1578

1579
  """
1580
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1581

    
1582
  success = False
1583

    
1584
  if source is not None:
1585
    try:
1586
      sock.bind((source, 0))
1587
    except socket.error, (errcode, _):
1588
      if errcode == errno.EADDRNOTAVAIL:
1589
        success = False
1590

    
1591
  sock.settimeout(timeout)
1592

    
1593
  try:
1594
    sock.connect((target, port))
1595
    sock.close()
1596
    success = True
1597
  except socket.timeout:
1598
    success = False
1599
  except socket.error, (errcode, _):
1600
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)
1601

    
1602
  return success
1603
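# Illustrative usage sketch (not part of the original module): checking
# whether an SSH port answers within five seconds; the address (from the
# documentation range) is hypothetical.
#
#   if TcpPing("192.0.2.10", 22, timeout=5, live_port_needed=True):
#     logging.info("Port is reachable")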

    
1604

    
1605
def OwnIpAddress(address):
1606
  """Check if the current host has the the given IP address.
1607

1608
  Currently this is done by TCP-pinging the address from the loopback
1609
  address.
1610

1611
  @type address: string
1612
  @param address: the address to check
1613
  @rtype: bool
1614
  @return: True if we own the address
1615

1616
  """
1617
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
1618
                 source=constants.LOCALHOST_IP_ADDRESS)
1619

    
1620

    
1621
def ListVisibleFiles(path):
1622
  """Returns a list of visible files in a directory.
1623

1624
  @type path: str
1625
  @param path: the directory to enumerate
1626
  @rtype: list
1627
  @return: the list of all files not starting with a dot
1628
  @raise errors.ProgrammerError: if L{path} is not an absolute and normalized path
1629

1630
  """
1631
  if not IsNormAbsPath(path):
1632
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1633
                                 " absolute/normalized: '%s'" % path)
1634
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1635
  files.sort()
1636
  return files
1637

    
1638

    
1639
def GetHomeDir(user, default=None):
1640
  """Try to get the homedir of the given user.
1641

1642
  The user can be passed either as a string (denoting the name) or as
1643
  an integer (denoting the user id). If the user is not found, the
1644
  'default' argument is returned, which defaults to None.
1645

1646
  """
1647
  try:
1648
    if isinstance(user, basestring):
1649
      result = pwd.getpwnam(user)
1650
    elif isinstance(user, (int, long)):
1651
      result = pwd.getpwuid(user)
1652
    else:
1653
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1654
                                   type(user))
1655
  except KeyError:
1656
    return default
1657
  return result.pw_dir
1658

    
1659

    
1660
def NewUUID():
1661
  """Returns a random UUID.
1662

1663
  @note: This is a Linux-specific method as it uses the /proc
1664
      filesystem.
1665
  @rtype: str
1666

1667
  """
1668
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1669

    
1670

    
1671
def GenerateSecret(numbytes=20):
1672
  """Generates a random secret.
1673

1674
  This will generate a pseudo-random secret returning a hex string
1675
  (so that it can be used where an ASCII string is needed).
1676

1677
  @param numbytes: the number of bytes which will be represented by the returned
1678
      string (defaulting to 20, the length of a SHA1 hash)
1679
  @rtype: str
1680
  @return: a hex representation of the pseudo-random sequence
1681

1682
  """
1683
  return os.urandom(numbytes).encode('hex')
1684

    
1685

    
1686
def EnsureDirs(dirs):
1687
  """Make required directories, if they don't exist.
1688

1689
  @param dirs: list of tuples (dir_name, dir_mode)
1690
  @type dirs: list of (string, integer)
1691

1692
  """
1693
  for dir_name, dir_mode in dirs:
1694
    try:
1695
      os.mkdir(dir_name, dir_mode)
1696
    except EnvironmentError, err:
1697
      if err.errno != errno.EEXIST:
1698
        raise errors.GenericError("Cannot create needed directory"
1699
                                  " '%s': %s" % (dir_name, err))
1700
    if not os.path.isdir(dir_name):
1701
      raise errors.GenericError("%s is not a directory" % dir_name)
1702

    
1703

    
1704
def ReadFile(file_name, size=-1):
1705
  """Reads a file.
1706

1707
  @type size: int
1708
  @param size: Read at most size bytes (if negative, entire file)
1709
  @rtype: str
1710
  @return: the (possibly partial) content of the file
1711

1712
  """
1713
  f = open(file_name, "r")
1714
  try:
1715
    return f.read(size)
1716
  finally:
1717
    f.close()
1718

    
1719

    
1720
def WriteFile(file_name, fn=None, data=None,
1721
              mode=None, uid=-1, gid=-1,
1722
              atime=None, mtime=None, close=True,
1723
              dry_run=False, backup=False,
1724
              prewrite=None, postwrite=None):
1725
  """(Over)write a file atomically.
1726

1727
  The file_name and either fn (a function taking one argument, the
1728
  file descriptor, and which should write the data to it) or data (the
1729
  contents of the file) must be passed. The other arguments are
1730
  optional and allow setting the file mode, owner and group, and the
1731
  mtime/atime of the file.
1732

1733
  If the function doesn't raise an exception, it has succeeded and the
1734
  target file has the new contents. If the function has raised an
1735
  exception, an existing target file should be unmodified and the
1736
  temporary file should be removed.
1737

1738
  @type file_name: str
1739
  @param file_name: the target filename
1740
  @type fn: callable
1741
  @param fn: content writing function, called with
1742
      file descriptor as parameter
1743
  @type data: str
1744
  @param data: contents of the file
1745
  @type mode: int
1746
  @param mode: file mode
1747
  @type uid: int
1748
  @param uid: the owner of the file
1749
  @type gid: int
1750
  @param gid: the group of the file
1751
  @type atime: int
1752
  @param atime: a custom access time to be set on the file
1753
  @type mtime: int
1754
  @param mtime: a custom modification time to be set on the file
1755
  @type close: boolean
1756
  @param close: whether to close file after writing it
1757
  @type prewrite: callable
1758
  @param prewrite: function to be called before writing content
1759
  @type postwrite: callable
1760
  @param postwrite: function to be called after writing content
1761

1762
  @rtype: None or int
1763
  @return: None if the 'close' parameter evaluates to True,
1764
      otherwise the file descriptor
1765

1766
  @raise errors.ProgrammerError: if any of the arguments are not valid
1767

1768
  """
1769
  if not os.path.isabs(file_name):
1770
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1771
                                 " absolute: '%s'" % file_name)
1772

    
1773
  if [fn, data].count(None) != 1:
1774
    raise errors.ProgrammerError("fn or data required")
1775

    
1776
  if [atime, mtime].count(None) == 1:
1777
    raise errors.ProgrammerError("Both atime and mtime must be either"
1778
                                 " set or None")
1779

    
1780
  if backup and not dry_run and os.path.isfile(file_name):
1781
    CreateBackup(file_name)
1782

    
1783
  dir_name, base_name = os.path.split(file_name)
1784
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1785
  do_remove = True
1786
  # here we need to make sure we remove the temp file, if any error
1787
  # leaves it in place
1788
  try:
1789
    if uid != -1 or gid != -1:
1790
      os.chown(new_name, uid, gid)
1791
    if mode:
1792
      os.chmod(new_name, mode)
1793
    if callable(prewrite):
1794
      prewrite(fd)
1795
    if data is not None:
1796
      os.write(fd, data)
1797
    else:
1798
      fn(fd)
1799
    if callable(postwrite):
1800
      postwrite(fd)
1801
    os.fsync(fd)
1802
    if atime is not None and mtime is not None:
1803
      os.utime(new_name, (atime, mtime))
1804
    if not dry_run:
1805
      os.rename(new_name, file_name)
1806
      do_remove = False
1807
  finally:
1808
    if close:
1809
      os.close(fd)
1810
      result = None
1811
    else:
1812
      result = fd
1813
    if do_remove:
1814
      RemoveFile(new_name)
1815

    
1816
  return result
1817
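# Editor's note: illustrative usage only (the /tmp path is an example, not
# part of the upstream module).  The content is written to a mkstemp() file
# in the target directory and rename()d into place, which is what makes the
# update atomic:
#
#   WriteFile("/tmp/example.conf", data="key = value\n", mode=0644)
#   # alternatively, stream the content through a callback:
#   WriteFile("/tmp/example.conf",
#             fn=lambda fd: os.write(fd, "key = value\n"))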

    
1818

    
1819
def ReadOneLineFile(file_name, strict=False):
1820
  """Return the first non-empty line from a file.
1821

1822
  @type strict: boolean
1823
  @param strict: if True, abort if the file has more than one
1824
      non-empty line
1825

1826
  """
1827
  file_lines = ReadFile(file_name).splitlines()
1828
  full_lines = filter(bool, file_lines)
1829
  if not file_lines or not full_lines:
1830
    raise errors.GenericError("No data in one-liner file %s" % file_name)
1831
  elif strict and len(full_lines) > 1:
1832
    raise errors.GenericError("Too many lines in one-liner file %s" %
1833
                              file_name)
1834
  return full_lines[0]
1835

    
1836

    
1837
def FirstFree(seq, base=0):
1838
  """Returns the first non-existing integer from seq.
1839

1840
  The seq argument should be a sorted list of positive integers. The
1841
  first time the index of an element is smaller than the element
1842
  value, the index will be returned.
1843

1844
  The base argument is used to start at a different offset,
1845
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1846

1847
  Example: C{[0, 1, 3]} will return I{2}.
1848

1849
  @type seq: sequence
1850
  @param seq: the sequence to be analyzed.
1851
  @type base: int
1852
  @param base: use this value as the base index of the sequence
1853
  @rtype: int
1854
  @return: the first non-used index in the sequence
1855

1856
  """
1857
  for idx, elem in enumerate(seq):
1858
    assert elem >= base, "Passed element is higher than base offset"
1859
    if elem > idx + base:
1860
      # idx is not used
1861
      return idx + base
1862
  return None
1863
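# Editor's note: illustrative examples only, not part of the upstream module:
#
#   FirstFree([0, 1, 3])           # -> 2 (first hole in the sequence)
#   FirstFree([3, 4, 6], base=3)   # -> 5
#   FirstFree([0, 1, 2])           # -> None (no hole)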

    
1864

    
1865
def SingleWaitForFdCondition(fdobj, event, timeout):
1866
  """Waits for a condition to occur on the socket.
1867

1868
  Immediately returns at the first interruption.
1869

1870
  @type fdobj: integer or object supporting a fileno() method
1871
  @param fdobj: entity to wait for events on
1872
  @type event: integer
1873
  @param event: ORed condition (see select module)
1874
  @type timeout: float or None
1875
  @param timeout: Timeout in seconds
1876
  @rtype: int or None
1877
  @return: None for timeout, otherwise occurred conditions
1878

1879
  """
1880
  check = (event | select.POLLPRI |
1881
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
1882

    
1883
  if timeout is not None:
1884
    # Poller object expects milliseconds
1885
    timeout *= 1000
1886

    
1887
  poller = select.poll()
1888
  poller.register(fdobj, event)
1889
  try:
1890
    # TODO: If the main thread receives a signal and we have no timeout, we
1891
    # could wait forever. This should check a global "quit" flag or something
1892
    # every so often.
1893
    io_events = poller.poll(timeout)
1894
  except select.error, err:
1895
    if err[0] != errno.EINTR:
1896
      raise
1897
    io_events = []
1898
  if io_events and io_events[0][1] & check:
1899
    return io_events[0][1]
1900
  else:
1901
    return None
1902

    
1903

    
1904
class FdConditionWaiterHelper(object):
1905
  """Retry helper for WaitForFdCondition.
1906

1907
  This class contains the retried and wait functions that make sure
1908
  WaitForFdCondition can continue waiting until the timeout is actually
1909
  expired.
1910

1911
  """
1912

    
1913
  def __init__(self, timeout):
1914
    self.timeout = timeout
1915

    
1916
  def Poll(self, fdobj, event):
1917
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1918
    if result is None:
1919
      raise RetryAgain()
1920
    else:
1921
      return result
1922

    
1923
  def UpdateTimeout(self, timeout):
1924
    self.timeout = timeout
1925

    
1926

    
1927
def WaitForFdCondition(fdobj, event, timeout):
1928
  """Waits for a condition to occur on the socket.
1929

1930
  Retries until the timeout is expired, even if interrupted.
1931

1932
  @type fdobj: integer or object supporting a fileno() method
1933
  @param fdobj: entity to wait for events on
1934
  @type event: integer
1935
  @param event: ORed condition (see select module)
1936
  @type timeout: float or None
1937
  @param timeout: Timeout in seconds
1938
  @rtype: int or None
1939
  @return: None for timeout, otherwise occurred conditions
1940

1941
  """
1942
  if timeout is not None:
1943
    retrywaiter = FdConditionWaiterHelper(timeout)
1944
    try:
1945
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1946
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1947
    except RetryTimeout:
1948
      result = None
1949
  else:
1950
    result = None
1951
    while result is None:
1952
      result = SingleWaitForFdCondition(fdobj, event, timeout)
1953
  return result
1954

    
1955

    
1956
def UniqueSequence(seq):
1957
  """Returns a list with unique elements.
1958

1959
  Element order is preserved.
1960

1961
  @type seq: sequence
1962
  @param seq: the sequence with the source elements
1963
  @rtype: list
1964
  @return: list of unique elements from seq
1965

1966
  """
1967
  seen = set()
1968
  return [i for i in seq if i not in seen and not seen.add(i)]
1969
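# Editor's note: illustrative example only, not part of the upstream module:
#
#   UniqueSequence([1, 2, 1, 3, 2])   # -> [1, 2, 3]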

    
1970

    
1971
def NormalizeAndValidateMac(mac):
1972
  """Normalizes and check if a MAC address is valid.
1973

1974
  Checks whether the supplied MAC address is formally correct, only
1975
  accepts the colon-separated format. The result is normalized to lowercase.
1976

1977
  @type mac: str
1978
  @param mac: the MAC to be validated
1979
  @rtype: str
1980
  @return: returns the normalized and validated MAC.
1981

1982
  @raise errors.OpPrereqError: If the MAC isn't valid
1983

1984
  """
1985
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
1986
  if not mac_check.match(mac):
1987
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
1988
                               mac, errors.ECODE_INVAL)
1989

    
1990
  return mac.lower()
1991
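# Editor's note: illustrative examples only, not part of the upstream module:
#
#   NormalizeAndValidateMac("AA:00:56:C4:11:FF")  # -> 'aa:00:56:c4:11:ff'
#   NormalizeAndValidateMac("aa-00-56-c4-11-ff")  # raises errors.OpPrereqError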

    
1992

    
1993
def TestDelay(duration):
1994
  """Sleep for a fixed amount of time.
1995

1996
  @type duration: float
1997
  @param duration: the sleep duration
1998
  @rtype: tuple; (bool, str or None)
1999
  @return: (False, error message) for a negative duration, (True, None) otherwise
2000

2001
  """
2002
  if duration < 0:
2003
    return False, "Invalid sleep duration"
2004
  time.sleep(duration)
2005
  return True, None
2006

    
2007

    
2008
def _CloseFDNoErr(fd, retries=5):
2009
  """Close a file descriptor ignoring errors.
2010

2011
  @type fd: int
2012
  @param fd: the file descriptor
2013
  @type retries: int
2014
  @param retries: how many retries to make, in case we get any
2015
      other error than EBADF
2016

2017
  """
2018
  try:
2019
    os.close(fd)
2020
  except OSError, err:
2021
    if err.errno != errno.EBADF:
2022
      if retries > 0:
2023
        _CloseFDNoErr(fd, retries - 1)
2024
    # else either it's closed already or we're out of retries, so we
2025
    # ignore this and go on
2026

    
2027

    
2028
def CloseFDs(noclose_fds=None):
2029
  """Close file descriptors.
2030

2031
  This closes all file descriptors above 2 (i.e. except
2032
  stdin/out/err).
2033

2034
  @type noclose_fds: list or None
2035
  @param noclose_fds: if given, it denotes a list of file descriptor
2036
      that should not be closed
2037

2038
  """
2039
  # Default maximum for the number of available file descriptors.
2040
  if 'SC_OPEN_MAX' in os.sysconf_names:
2041
    try:
2042
      MAXFD = os.sysconf('SC_OPEN_MAX')
2043
      if MAXFD < 0:
2044
        MAXFD = 1024
2045
    except OSError:
2046
      MAXFD = 1024
2047
  else:
2048
    MAXFD = 1024
2049
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2050
  if (maxfd == resource.RLIM_INFINITY):
2051
    maxfd = MAXFD
2052

    
2053
  # Iterate through and close all file descriptors (except the standard ones)
2054
  for fd in range(3, maxfd):
2055
    if noclose_fds and fd in noclose_fds:
2056
      continue
2057
    _CloseFDNoErr(fd)
2058

    
2059

    
2060
def Mlockall():
2061
  """Lock current process' virtual address space into RAM.
2062

2063
  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2064
  see mlock(2) for more details. This function requires ctypes module.
2065

2066
  """
2067
  if ctypes is None:
2068
    logging.warning("Cannot set memory lock, ctypes module not found")
2069
    return
2070

    
2071
  libc = ctypes.cdll.LoadLibrary("libc.so.6")
2072
  if libc is None:
2073
    logging.error("Cannot set memory lock, ctypes cannot load libc")
2074
    return
2075

    
2076
  # Some older version of the ctypes module don't have built-in functionality
2077
  # to access the errno global variable, where function error codes are stored.
2078
  # By declaring this variable as a pointer to an integer we can then access
2079
  # its value correctly, should the mlockall call fail, in order to see what
2080
  # the actual error code was.
2081
  # pylint: disable-msg=W0212
2082
  libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int)
2083

    
2084
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2085
    # pylint: disable-msg=W0212
2086
    logging.error("Cannot set memory lock: %s",
2087
                  os.strerror(libc.__errno_location().contents.value))
2088
    return
2089

    
2090
  logging.debug("Memory lock set")
2091

    
2092

    
2093
def Daemonize(logfile):
2094
  """Daemonize the current process.
2095

2096
  This detaches the current process from the controlling terminal and
2097
  runs it in the background as a daemon.
2098

2099
  @type logfile: str
2100
  @param logfile: the logfile to which we should redirect stdout/stderr
2101
  @rtype: int
2102
  @return: the value zero
2103

2104
  """
2105
  # pylint: disable-msg=W0212
2106
  # yes, we really want os._exit
2107
  UMASK = 077
2108
  WORKDIR = "/"
2109

    
2110
  # this might fail
2111
  pid = os.fork()
2112
  if (pid == 0):  # The first child.
2113
    os.setsid()
2114
    # this might fail
2115
    pid = os.fork() # Fork a second child.
2116
    if (pid == 0):  # The second child.
2117
      os.chdir(WORKDIR)
2118
      os.umask(UMASK)
2119
    else:
2120
      # exit() or _exit()?  See below.
2121
      os._exit(0) # Exit parent (the first child) of the second child.
2122
  else:
2123
    os._exit(0) # Exit parent of the first child.
2124

    
2125
  for fd in range(3):
2126
    _CloseFDNoErr(fd)
2127
  i = os.open("/dev/null", os.O_RDONLY) # stdin
2128
  assert i == 0, "Can't close/reopen stdin"
2129
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2130
  assert i == 1, "Can't close/reopen stdout"
2131
  # Duplicate standard output to standard error.
2132
  os.dup2(1, 2)
2133
  return 0
2134

    
2135

    
2136
def DaemonPidFileName(name):
2137
  """Compute a ganeti pid file absolute path
2138

2139
  @type name: str
2140
  @param name: the daemon name
2141
  @rtype: str
2142
  @return: the full path to the pidfile corresponding to the given
2143
      daemon name
2144

2145
  """
2146
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2147

    
2148

    
2149
def EnsureDaemon(name):
2150
  """Check for and start daemon if not alive.
2151

2152
  """
2153
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2154
  if result.failed:
2155
    logging.error("Can't start daemon '%s', failure %s, output: %s",
2156
                  name, result.fail_reason, result.output)
2157
    return False
2158

    
2159
  return True
2160

    
2161

    
2162
def WritePidFile(name):
2163
  """Write the current process pidfile.
2164

2165
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2166

2167
  @type name: str
2168
  @param name: the daemon name to use
2169
  @raise errors.GenericError: if the pid file already exists and
2170
      points to a live process
2171

2172
  """
2173
  pid = os.getpid()
2174
  pidfilename = DaemonPidFileName(name)
2175
  if IsProcessAlive(ReadPidFile(pidfilename)):
2176
    raise errors.GenericError("%s contains a live process" % pidfilename)
2177

    
2178
  WriteFile(pidfilename, data="%d\n" % pid)
2179

    
2180

    
2181
def RemovePidFile(name):
2182
  """Remove the current process pidfile.
2183

2184
  Any errors are ignored.
2185

2186
  @type name: str
2187
  @param name: the daemon name used to derive the pidfile name
2188

2189
  """
2190
  pidfilename = DaemonPidFileName(name)
2191
  # TODO: we could check here that the file contains our pid
2192
  try:
2193
    RemoveFile(pidfilename)
2194
  except: # pylint: disable-msg=W0702
2195
    pass
2196

    
2197

    
2198
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2199
                waitpid=False):
2200
  """Kill a process given by its pid.
2201

2202
  @type pid: int
2203
  @param pid: The PID to terminate.
2204
  @type signal_: int
2205
  @param signal_: The signal to send, by default SIGTERM
2206
  @type timeout: int
2207
  @param timeout: The timeout after which, if the process is still alive,
2208
                  a SIGKILL will be sent. If not positive, no such checking
2209
                  will be done
2210
  @type waitpid: boolean
2211
  @param waitpid: If true, we should waitpid on this process after
2212
      sending signals, since it's our own child and otherwise it
2213
      would remain as zombie
2214

2215
  """
2216
  def _helper(pid, signal_, wait):
2217
    """Simple helper to encapsulate the kill/waitpid sequence"""
2218
    os.kill(pid, signal_)
2219
    if wait:
2220
      try:
2221
        os.waitpid(pid, os.WNOHANG)
2222
      except OSError:
2223
        pass
2224

    
2225
  if pid <= 0:
2226
    # kill with pid=0 == suicide
2227
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2228

    
2229
  if not IsProcessAlive(pid):
2230
    return
2231

    
2232
  _helper(pid, signal_, waitpid)
2233

    
2234
  if timeout <= 0:
2235
    return
2236

    
2237
  def _CheckProcess():
2238
    if not IsProcessAlive(pid):
2239
      return
2240

    
2241
    try:
2242
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2243
    except OSError:
2244
      raise RetryAgain()
2245

    
2246
    if result_pid > 0:
2247
      return
2248

    
2249
    raise RetryAgain()
2250

    
2251
  try:
2252
    # Wait up to $timeout seconds
2253
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2254
  except RetryTimeout:
2255
    pass
2256

    
2257
  if IsProcessAlive(pid):
2258
    # Kill process if it's still alive
2259
    _helper(pid, signal.SIGKILL, waitpid)
2260

    
2261

    
2262
def FindFile(name, search_path, test=os.path.exists):
2263
  """Look for a filesystem object in a given path.
2264

2265
  This is an abstract method to search for filesystem object (files,
2266
  dirs) under a given search path.
2267

2268
  @type name: str
2269
  @param name: the name to look for
2270
  @type search_path: str
2271
  @param search_path: location to start at
2272
  @type test: callable
2273
  @param test: a function taking one argument that should return True
2274
      if a given object is valid; the default value is
2275
      os.path.exists, causing only existing files to be returned
2276
  @rtype: str or None
2277
  @return: full path to the object if found, None otherwise
2278

2279
  """
2280
  # validate the filename mask
2281
  if constants.EXT_PLUGIN_MASK.match(name) is None:
2282
    logging.critical("Invalid value passed for external script name: '%s'",
2283
                     name)
2284
    return None
2285

    
2286
  for dir_name in search_path:
2287
    # FIXME: investigate switch to PathJoin
2288
    item_name = os.path.sep.join([dir_name, name])
2289
    # check the user test and that we're indeed resolving to the given
2290
    # basename
2291
    if test(item_name) and os.path.basename(item_name) == name:
2292
      return item_name
2293
  return None
2294

    
2295

    
2296
def CheckVolumeGroupSize(vglist, vgname, minsize):
2297
  """Checks if the volume group list is valid.
2298

2299
  The function will check if a given volume group is in the list of
2300
  volume groups and has a minimum size.
2301

2302
  @type vglist: dict
2303
  @param vglist: dictionary of volume group names and their size
2304
  @type vgname: str
2305
  @param vgname: the volume group we should check
2306
  @type minsize: int
2307
  @param minsize: the minimum size we accept
2308
  @rtype: None or str
2309
  @return: None for success, otherwise the error message
2310

2311
  """
2312
  vgsize = vglist.get(vgname, None)
2313
  if vgsize is None:
2314
    return "volume group '%s' missing" % vgname
2315
  elif vgsize < minsize:
2316
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2317
            (vgname, minsize, vgsize))
2318
  return None
2319

    
2320

    
2321
def SplitTime(value):
2322
  """Splits time as floating point number into a tuple.
2323

2324
  @param value: Time in seconds
2325
  @type value: int or float
2326
  @return: Tuple containing (seconds, microseconds)
2327

2328
  """
2329
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2330

    
2331
  assert 0 <= seconds, \
2332
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2333
  assert 0 <= microseconds <= 999999, \
2334
    "Microseconds must be 0-999999, but are %s" % microseconds
2335

    
2336
  return (int(seconds), int(microseconds))
2337

    
2338

    
2339
def MergeTime(timetuple):
2340
  """Merges a tuple into time as a floating point number.
2341

2342
  @param timetuple: Time as tuple, (seconds, microseconds)
2343
  @type timetuple: tuple
2344
  @return: Time as a floating point number expressed in seconds
2345

2346
  """
2347
  (seconds, microseconds) = timetuple
2348

    
2349
  assert 0 <= seconds, \
2350
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2351
  assert 0 <= microseconds <= 999999, \
2352
    "Microseconds must be 0-999999, but are %s" % microseconds
2353

    
2354
  return float(seconds) + (float(microseconds) * 0.000001)
2355
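# Editor's note: illustrative round-trip only, not part of the upstream module:
#
#   SplitTime(1234.5)           # -> (1234, 500000)
#   MergeTime((1234, 500000))   # -> 1234.5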

    
2356

    
2357
def GetDaemonPort(daemon_name):
2358
  """Get the daemon port for this cluster.
2359

2360
  Note that this routine does not read a ganeti-specific file, but
2361
  instead uses C{socket.getservbyname} to allow pre-customization of
2362
  this parameter outside of Ganeti.
2363

2364
  @type daemon_name: string
2365
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
2366
  @rtype: int
2367

2368
  """
2369
  if daemon_name not in constants.DAEMONS_PORTS:
2370
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)
2371

    
2372
  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
2373
  try:
2374
    port = socket.getservbyname(daemon_name, proto)
2375
  except socket.error:
2376
    port = default_port
2377

    
2378
  return port
2379

    
2380

    
2381
class LogFileHandler(logging.FileHandler):
2382
  """Log handler that doesn't fallback to stderr.
2383

2384
  When an error occurs while writing on the logfile, logging.FileHandler tries
2385
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
2386
  the logfile. This class avoids failures by reporting errors to /dev/console.
2387

2388
  """
2389
  def __init__(self, filename, mode="a", encoding=None):
2390
    """Open the specified file and use it as the stream for logging.
2391

2392
    Also open /dev/console to report errors while logging.
2393

2394
    """
2395
    logging.FileHandler.__init__(self, filename, mode, encoding)
2396
    self.console = open(constants.DEV_CONSOLE, "a")
2397

    
2398
  def handleError(self, record): # pylint: disable-msg=C0103
2399
    """Handle errors which occur during an emit() call.
2400

2401
    Try to handle errors with FileHandler method, if it fails write to
2402
    /dev/console.
2403

2404
    """
2405
    try:
2406
      logging.FileHandler.handleError(self, record)
2407
    except Exception: # pylint: disable-msg=W0703
2408
      try:
2409
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
2410
      except Exception: # pylint: disable-msg=W0703
2411
        # Log handler tried everything it could, now just give up
2412
        pass
2413

    
2414

    
2415
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2416
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2417
                 console_logging=False):
2418
  """Configures the logging module.
2419

2420
  @type logfile: str
2421
  @param logfile: the filename to which we should log
2422
  @type debug: integer
2423
  @param debug: if greater than zero, enable debug messages, otherwise
2424
      only those at C{INFO} and above level
2425
  @type stderr_logging: boolean
2426
  @param stderr_logging: whether we should also log to the standard error
2427
  @type program: str
2428
  @param program: the name under which we should log messages
2429
  @type multithreaded: boolean
2430
  @param multithreaded: if True, will add the thread name to the log file
2431
  @type syslog: string
2432
  @param syslog: one of 'no', 'yes', 'only':
2433
      - if no, syslog is not used
2434
      - if yes, syslog is used (in addition to file-logging)
2435
      - if only, only syslog is used
2436
  @type console_logging: boolean
2437
  @param console_logging: if True, will use a FileHandler which falls back to
2438
      the system console if logging fails
2439
  @raise EnvironmentError: if we can't open the log file and
2440
      syslog/stderr logging is disabled
2441

2442
  """
2443
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
2444
  sft = program + "[%(process)d]:"
2445
  if multithreaded:
2446
    fmt += "/%(threadName)s"
2447
    sft += " (%(threadName)s)"
2448
  if debug:
2449
    fmt += " %(module)s:%(lineno)s"
2450
    # no debug info for syslog loggers
2451
  fmt += " %(levelname)s %(message)s"
2452
  # yes, we do want the textual level, as remote syslog will probably
2453
  # lose the error level, and it's easier to grep for it
2454
  sft += " %(levelname)s %(message)s"
2455
  formatter = logging.Formatter(fmt)
2456
  sys_fmt = logging.Formatter(sft)
2457

    
2458
  root_logger = logging.getLogger("")
2459
  root_logger.setLevel(logging.NOTSET)
2460

    
2461
  # Remove all previously setup handlers
2462
  for handler in root_logger.handlers:
2463
    handler.close()
2464
    root_logger.removeHandler(handler)
2465

    
2466
  if stderr_logging:
2467
    stderr_handler = logging.StreamHandler()
2468
    stderr_handler.setFormatter(formatter)
2469
    if debug:
2470
      stderr_handler.setLevel(logging.NOTSET)
2471
    else:
2472
      stderr_handler.setLevel(logging.CRITICAL)
2473
    root_logger.addHandler(stderr_handler)
2474

    
2475
  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2476
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
2477
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2478
                                                    facility)
2479
    syslog_handler.setFormatter(sys_fmt)
2480
    # Never enable debug over syslog
2481
    syslog_handler.setLevel(logging.INFO)
2482
    root_logger.addHandler(syslog_handler)
2483

    
2484
  if syslog != constants.SYSLOG_ONLY:
2485
    # this can fail, if the logging directories are not setup or we have
2486
    # a permission problem; in this case, it's best to log but ignore
2487
    # the error if stderr_logging is True, and if false we re-raise the
2488
    # exception since otherwise we could run but without any logs at all
2489
    try:
2490
      if console_logging:
2491
        logfile_handler = LogFileHandler(logfile)
2492
      else:
2493
        logfile_handler = logging.FileHandler(logfile)
2494
      logfile_handler.setFormatter(formatter)
2495
      if debug:
2496
        logfile_handler.setLevel(logging.DEBUG)
2497
      else:
2498
        logfile_handler.setLevel(logging.INFO)
2499
      root_logger.addHandler(logfile_handler)
2500
    except EnvironmentError:
2501
      if stderr_logging or syslog == constants.SYSLOG_YES:
2502
        logging.exception("Failed to enable logging to file '%s'", logfile)
2503
      else:
2504
        # we need to re-raise the exception
2505
        raise
2506

    
2507

    
2508
def IsNormAbsPath(path):
2509
  """Check whether a path is absolute and also normalized
2510

2511
  This prevents things like /dir/../../other/path from being considered valid.
2512

2513
  """
2514
  return os.path.normpath(path) == path and os.path.isabs(path)
2515

    
2516

    
2517
def PathJoin(*args):
2518
  """Safe-join a list of path components.
2519

2520
  Requirements:
2521
      - the first argument must be an absolute path
2522
      - no component in the path must have backtracking (e.g. /../),
2523
        since we check for normalization at the end
2524

2525
  @param args: the path components to be joined
2526
  @raise ValueError: for invalid paths
2527

2528
  """
2529
  # ensure we're having at least one path passed in
2530
  assert args
2531
  # ensure the first component is an absolute and normalized path name
2532
  root = args[0]
2533
  if not IsNormAbsPath(root):
2534
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2535
  result = os.path.join(*args)
2536
  # ensure that the whole path is normalized
2537
  if not IsNormAbsPath(result):
2538
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2539
  # check that we're still under the original prefix
2540
  prefix = os.path.commonprefix([root, result])
2541
  if prefix != root:
2542
    raise ValueError("Error: path joining resulted in different prefix"
2543
                     " (%s != %s)" % (prefix, root))
2544
  return result
2545
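# Editor's note: illustrative behaviour only (the paths are examples, not part
# of the upstream module):
#
#   PathJoin("/var/lib/ganeti", "config.data")    # -> '/var/lib/ganeti/config.data'
#   PathJoin("/var/lib/ganeti", "../etc/passwd")  # raises ValueError
#   PathJoin("relative/dir", "file")              # raises ValueError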

    
2546

    
2547
def TailFile(fname, lines=20):
2548
  """Return the last lines from a file.
2549

2550
  @note: this function will only read and parse the last 4KB of
2551
      the file; if the lines are very long, it could be that less
2552
      than the requested number of lines are returned
2553

2554
  @param fname: the file name
2555
  @type lines: int
2556
  @param lines: the (maximum) number of lines to return
2557

2558
  """
2559
  fd = open(fname, "r")
2560
  try:
2561
    fd.seek(0, 2)
2562
    pos = fd.tell()
2563
    pos = max(0, pos-4096)
2564
    fd.seek(pos, 0)
2565
    raw_data = fd.read()
2566
  finally:
2567
    fd.close()
2568

    
2569
  rows = raw_data.splitlines()
2570
  return rows[-lines:]
2571
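# Editor's note: illustrative usage only (the log path is an example, not part
# of the upstream module):
#
#   for line in TailFile("/var/log/ganeti/node-daemon.log", lines=10):
#     print line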

    
2572

    
2573
def FormatTimestampWithTZ(secs):
2574
  """Formats a Unix timestamp with the local timezone.
2575

2576
  """
2577
  return time.strftime("%F %T %Z", time.gmtime(secs))
2578

    
2579

    
2580
def _ParseAsn1Generalizedtime(value):
2581
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2582

2583
  @type value: string
2584
  @param value: ASN1 GENERALIZEDTIME timestamp
2585

2586
  """
2587
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2588
  if m:
2589
    # We have an offset
2590
    asn1time = m.group(1)
2591
    hours = int(m.group(2))
2592
    minutes = int(m.group(3))
2593
    utcoffset = (60 * hours) + minutes
2594
  else:
2595
    if not value.endswith("Z"):
2596
      raise ValueError("Missing timezone")
2597
    asn1time = value[:-1]
2598
    utcoffset = 0
2599

    
2600
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2601

    
2602
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2603

    
2604
  return calendar.timegm(tt.utctimetuple())
2605

    
2606

    
2607
def GetX509CertValidity(cert):
2608
  """Returns the validity period of the certificate.
2609

2610
  @type cert: OpenSSL.crypto.X509
2611
  @param cert: X509 certificate object
2612

2613
  """
2614
  # The get_notBefore and get_notAfter functions are only supported in
2615
  # pyOpenSSL 0.7 and above.
2616
  try:
2617
    get_notbefore_fn = cert.get_notBefore
2618
  except AttributeError:
2619
    not_before = None
2620
  else:
2621
    not_before_asn1 = get_notbefore_fn()
2622

    
2623
    if not_before_asn1 is None:
2624
      not_before = None
2625
    else:
2626
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2627

    
2628
  try:
2629
    get_notafter_fn = cert.get_notAfter
2630
  except AttributeError:
2631
    not_after = None
2632
  else:
2633
    not_after_asn1 = get_notafter_fn()
2634

    
2635
    if not_after_asn1 is None:
2636
      not_after = None
2637
    else:
2638
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2639

    
2640
  return (not_before, not_after)
2641

    
2642

    
2643
def _VerifyCertificateInner(expired, not_before, not_after, now,
2644
                            warn_days, error_days):
2645
  """Verifies certificate validity.
2646

2647
  @type expired: bool
2648
  @param expired: Whether pyOpenSSL considers the certificate as expired
2649
  @type not_before: number or None
2650
  @param not_before: Unix timestamp before which certificate is not valid
2651
  @type not_after: number or None
2652
  @param not_after: Unix timestamp after which certificate is invalid
2653
  @type now: number
2654
  @param now: Current time as Unix timestamp
2655
  @type warn_days: number or None
2656
  @param warn_days: How many days before expiration a warning should be reported
2657
  @type error_days: number or None
2658
  @param error_days: How many days before expiration an error should be reported
2659

2660
  """
2661
  if expired:
2662
    msg = "Certificate is expired"
2663

    
2664
    if not_before is not None and not_after is not None:
2665
      msg += (" (valid from %s to %s)" %
2666
              (FormatTimestampWithTZ(not_before),
2667
               FormatTimestampWithTZ(not_after)))
2668
    elif not_before is not None:
2669
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2670
    elif not_after is not None:
2671
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2672

    
2673
    return (CERT_ERROR, msg)
2674

    
2675
  elif not_before is not None and not_before > now:
2676
    return (CERT_WARNING,
2677
            "Certificate not yet valid (valid from %s)" %
2678
            FormatTimestampWithTZ(not_before))
2679

    
2680
  elif not_after is not None:
2681
    remaining_days = int((not_after - now) / (24 * 3600))
2682

    
2683
    msg = "Certificate expires in about %d days" % remaining_days
2684

    
2685
    if error_days is not None and remaining_days <= error_days:
2686
      return (CERT_ERROR, msg)
2687

    
2688
    if warn_days is not None and remaining_days <= warn_days:
2689
      return (CERT_WARNING, msg)
2690

    
2691
  return (None, None)
2692

    
2693

    
2694
def VerifyX509Certificate(cert, warn_days, error_days):
2695
  """Verifies a certificate for LUVerifyCluster.
2696

2697
  @type cert: OpenSSL.crypto.X509
2698
  @param cert: X509 certificate object
2699
  @type warn_days: number or None
2700
  @param warn_days: How many days before expiration a warning should be reported
2701
  @type error_days: number or None
2702
  @param error_days: How many days before expiration an error should be reported
2703

2704
  """
2705
  # Depending on the pyOpenSSL version, this can just return (None, None)
2706
  (not_before, not_after) = GetX509CertValidity(cert)
2707

    
2708
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2709
                                 time.time(), warn_days, error_days)
2710

    
2711

    
2712
def SignX509Certificate(cert, key, salt):
2713
  """Sign a X509 certificate.
2714

2715
  An RFC822-like signature header is added in front of the certificate.
2716

2717
  @type cert: OpenSSL.crypto.X509
2718
  @param cert: X509 certificate object
2719
  @type key: string
2720
  @param key: Key for HMAC
2721
  @type salt: string
2722
  @param salt: Salt for HMAC
2723
  @rtype: string
2724
  @return: Serialized and signed certificate in PEM format
2725

2726
  """
2727
  if not VALID_X509_SIGNATURE_SALT.match(salt):
2728
    raise errors.GenericError("Invalid salt: %r" % salt)
2729

    
2730
  # Dumping as PEM here ensures the certificate is in a sane format
2731
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2732

    
2733
  return ("%s: %s/%s\n\n%s" %
2734
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
2735
           Sha1Hmac(key, cert_pem, salt=salt),
2736
           cert_pem))
2737

    
2738

    
2739
def _ExtractX509CertificateSignature(cert_pem):
2740
  """Helper function to extract signature from X509 certificate.
2741

2742
  """
2743
  # Extract signature from original PEM data
2744
  for line in cert_pem.splitlines():
2745
    if line.startswith("---"):
2746
      break
2747

    
2748
    m = X509_SIGNATURE.match(line.strip())
2749
    if m:
2750
      return (m.group("salt"), m.group("sign"))
2751

    
2752
  raise errors.GenericError("X509 certificate signature is missing")
2753

    
2754

    
2755
def LoadSignedX509Certificate(cert_pem, key):
2756
  """Verifies a signed X509 certificate.
2757

2758
  @type cert_pem: string
2759
  @param cert_pem: Certificate in PEM format and with signature header
2760
  @type key: string
2761
  @param key: Key for HMAC
2762
  @rtype: tuple; (OpenSSL.crypto.X509, string)
2763
  @return: X509 certificate object and salt
2764

2765
  """
2766
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2767

    
2768
  # Load certificate
2769
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2770

    
2771
  # Dump again to ensure it's in a sane format
2772
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2773

    
2774
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2775
    raise errors.GenericError("X509 certificate signature is invalid")
2776

    
2777
  return (cert, salt)
2778

    
2779

    
2780
def Sha1Hmac(key, text, salt=None):
2781
  """Calculates the HMAC-SHA1 digest of a text.
2782

2783
  HMAC is defined in RFC2104.
2784

2785
  @type key: string
2786
  @param key: Secret key
2787
  @type text: string
2788

2789
  """
2790
  if salt:
2791
    salted_text = salt + text
2792
  else:
2793
    salted_text = text
2794

    
2795
  return hmac.new(key, salted_text, compat.sha1).hexdigest()
2796

    
2797

    
2798
def VerifySha1Hmac(key, text, digest, salt=None):
2799
  """Verifies the HMAC-SHA1 digest of a text.
2800

2801
  HMAC is defined in RFC2104.
2802

2803
  @type key: string
2804
  @param key: Secret key
2805
  @type text: string
2806
  @type digest: string
2807
  @param digest: Expected digest
2808
  @rtype: bool
2809
  @return: Whether HMAC-SHA1 digest matches
2810

2811
  """
2812
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
2813
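# Editor's note: illustrative round-trip only, not part of the upstream module:
#
#   digest = Sha1Hmac("secret-key", "payload", salt="abcdef")
#   VerifySha1Hmac("secret-key", "payload", digest, salt="abcdef")  # -> True
#   VerifySha1Hmac("wrong-key", "payload", digest, salt="abcdef")   # -> False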

    
2814

    
2815
def SafeEncode(text):
2816
  """Return a 'safe' version of a source string.
2817

2818
  This function mangles the input string and returns a version that
2819
  should be safe to display/encode as ASCII. To this end, we first
2820
  convert it to ASCII using the 'backslashreplace' encoding which
2821
  should get rid of any non-ASCII chars, and then we process it
2822
  through a loop copied from the string repr sources in Python; we
2823
  don't use string_escape anymore since that escapes single quotes and
2824
  backslashes too, and that is too much; and that escaping is not
2825
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2826

2827
  @type text: str or unicode
2828
  @param text: input data
2829
  @rtype: str
2830
  @return: a safe version of text
2831

2832
  """
2833
  if isinstance(text, unicode):
2834
    # only if unicode; if str already, we handle it below
2835
    text = text.encode('ascii', 'backslashreplace')
2836
  resu = ""
2837
  for char in text:
2838
    c = ord(char)
2839
    if char  == '\t':
2840
      resu += r'\t'
2841
    elif char == '\n':
2842
      resu += r'\n'
2843
    elif char == '\r':
2844
      resu += r'\r'
2845
    elif c < 32 or c >= 127: # non-printable
2846
      resu += "\\x%02x" % (c & 0xff)
2847
    else:
2848
      resu += char
2849
  return resu
2850

    
2851

    
2852
def UnescapeAndSplit(text, sep=","):
2853
  """Split and unescape a string based on a given separator.
2854

2855
  This function splits a string based on a separator where the
2856
  separator itself can be escaped in order to be part of an
2857
  element. The escaping rules are (assuming comma being the
2858
  separator):
2859
    - a plain , separates the elements
2860
    - a sequence \\\\, (double backslash plus comma) is handled as a
2861
      backslash plus a separator comma
2862
    - a sequence \, (backslash plus comma) is handled as a
2863
      non-separator comma
2864

2865
  @type text: string
2866
  @param text: the string to split
2867
  @type sep: string
2868
  @param sep: the separator
2869
  @rtype: list
2870
  @return: a list of strings
2871

2872
  """
2873
  # we split the list by sep (with no escaping at this stage)
2874
  slist = text.split(sep)
2875
  # next, we revisit the elements and if any of them ended with an odd
2876
  # number of backslashes, then we join it with the next
2877
  rlist = []
2878
  while slist:
2879
    e1 = slist.pop(0)
2880
    if e1.endswith("\\"):
2881
      num_b = len(e1) - len(e1.rstrip("\\"))
2882
      if num_b % 2 == 1:
2883
        e2 = slist.pop(0)
2884
        # here the backslashes remain (all), and will be reduced in
2885
        # the next step
2886
        rlist.append(e1 + sep + e2)
2887
        continue
2888
    rlist.append(e1)
2889
  # finally, replace backslash-something with something
2890
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
2891
  return rlist
2892
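# Editor's note: illustrative examples only, not part of the upstream module:
#
#   UnescapeAndSplit("a,b,c")      # -> ['a', 'b', 'c']
#   UnescapeAndSplit(r"a\,b,c")    # -> ['a,b', 'c']  (escaped comma kept)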

    
2893

    
2894
def CommaJoin(names):
2895
  """Nicely join a set of identifiers.
2896

2897
  @param names: set, list or tuple
2898
  @return: a string with the formatted results
2899

2900
  """
2901
  return ", ".join([str(val) for val in names])
2902

    
2903

    
2904
def BytesToMebibyte(value):
2905
  """Converts bytes to mebibytes.
2906

2907
  @type value: int
2908
  @param value: Value in bytes
2909
  @rtype: int
2910
  @return: Value in mebibytes
2911

2912
  """
2913
  return int(round(value / (1024.0 * 1024.0), 0))
2914

    
2915

    
2916
def CalculateDirectorySize(path):
2917
  """Calculates the size of a directory recursively.
2918

2919
  @type path: string
2920
  @param path: Path to directory
2921
  @rtype: int
2922
  @return: Size in mebibytes
2923

2924
  """
2925
  size = 0
2926

    
2927
  for (curpath, _, files) in os.walk(path):
2928
    for filename in files:
2929
      st = os.lstat(PathJoin(curpath, filename))
2930
      size += st.st_size
2931

    
2932
  return BytesToMebibyte(size)
2933

    
2934

    
2935
def GetFilesystemStats(path):
2936
  """Returns the total and free space on a filesystem.
2937

2938
  @type path: string
2939
  @param path: Path on filesystem to be examined
2940
  @rtype: int
2941
  @return: tuple of (Total space, Free space) in mebibytes
2942

2943
  """
2944
  st = os.statvfs(path)
2945

    
2946
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
2947
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
2948
  return (tsize, fsize)
2949

    
2950

    
2951
def RunInSeparateProcess(fn, *args):
2952
  """Runs a function in a separate process.
2953

2954
  Note: Only boolean return values are supported.
2955

2956
  @type fn: callable
2957
  @param fn: Function to be called
2958
  @rtype: bool
2959
  @return: Function's result
2960

2961
  """
2962
  pid = os.fork()
2963
  if pid == 0:
2964
    # Child process
2965
    try:
2966
      # In case the function uses temporary files
2967
      ResetTempfileModule()
2968

    
2969
      # Call function
2970
      result = int(bool(fn(*args)))
2971
      assert result in (0, 1)
2972
    except: # pylint: disable-msg=W0702
2973
      logging.exception("Error while calling function in separate process")
2974
      # 0 and 1 are reserved for the return value
2975
      result = 33
2976

    
2977
    os._exit(result) # pylint: disable-msg=W0212
2978

    
2979
  # Parent process
2980

    
2981
  # Avoid zombies and check exit code
2982
  (_, status) = os.waitpid(pid, 0)
2983

    
2984
  if os.WIFSIGNALED(status):
2985
    exitcode = None
2986
    signum = os.WTERMSIG(status)
2987
  else:
2988
    exitcode = os.WEXITSTATUS(status)
2989
    signum = None
2990

    
2991
  if not (exitcode in (0, 1) and signum is None):
2992
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
2993
                              (exitcode, signum))
2994

    
2995
  return bool(exitcode)
2996

    
2997

    
2998
def IgnoreSignals(fn, *args, **kwargs):
2999
  """Tries to call a function ignoring failures due to EINTR.
3000

3001
  """
3002
  try:
3003
    return fn(*args, **kwargs)
3004
  except (EnvironmentError, socket.error), err:
3005
    if err.errno != errno.EINTR:
3006
      raise
3007
  except select.error, err:
3008
    if not (err.args and err.args[0] == errno.EINTR):
3009
      raise
3010

    
3011

    
3012
def LockedMethod(fn):
3013
  """Synchronized object access decorator.
3014

3015
  This decorator is intended to protect access to an object using the
3016
  object's own lock which is hardcoded to '_lock'.
3017

3018
  """
3019
  def _LockDebug(*args, **kwargs):
3020
    if debug_locks:
3021
      logging.debug(*args, **kwargs)
3022

    
3023
  def wrapper(self, *args, **kwargs):
3024
    # pylint: disable-msg=W0212
3025
    assert hasattr(self, '_lock')
3026
    lock = self._lock
3027
    _LockDebug("Waiting for %s", lock)
3028
    lock.acquire()
3029
    try:
3030
      _LockDebug("Acquired %s", lock)
3031
      result = fn(self, *args, **kwargs)
3032
    finally:
3033
      _LockDebug("Releasing %s", lock)
3034
      lock.release()
3035
      _LockDebug("Released %s", lock)
3036
    return result
3037
  return wrapper
3038

    
3039

    
3040
def LockFile(fd):
3041
  """Locks a file using POSIX locks.
3042

3043
  @type fd: int
3044
  @param fd: the file descriptor we need to lock
3045

3046
  """
3047
  try:
3048
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3049
  except IOError, err:
3050
    if err.errno == errno.EAGAIN:
3051
      raise errors.LockError("File already locked")
3052
    raise
3053

    
3054

    
3055
def FormatTime(val):
3056
  """Formats a time value.
3057

3058
  @type val: float or None
3059
  @param val: the timestamp as returned by time.time()
3060
  @return: a string value or N/A if we don't have a valid timestamp
3061

3062
  """
3063
  if val is None or not isinstance(val, (int, float)):
3064
    return "N/A"
3065
  # these two format codes work on Linux, but they are not guaranteed on all
3066
  # platforms
3067
  return time.strftime("%F %T", time.localtime(val))
3068

    
3069

    
3070
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3071
  """Reads the watcher pause file.
3072

3073
  @type filename: string
3074
  @param filename: Path to watcher pause file
3075
  @type now: None, float or int
3076
  @param now: Current time as Unix timestamp
3077
  @type remove_after: int
3078
  @param remove_after: Remove watcher pause file after specified amount of
3079
    seconds past the pause end time
3080

3081
  """
3082
  if now is None:
3083
    now = time.time()
3084

    
3085
  try:
3086
    value = ReadFile(filename)
3087
  except IOError, err:
3088
    if err.errno != errno.ENOENT:
3089
      raise
3090
    value = None
3091

    
3092
  if value is not None:
3093
    try:
3094
      value = int(value)
3095
    except ValueError:
3096
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3097
                       " removing it"), filename)
3098
      RemoveFile(filename)
3099
      value = None
3100

    
3101
    if value is not None:
3102
      # Remove file if it's outdated
3103
      if now > (value + remove_after):
3104
        RemoveFile(filename)
3105
        value = None
3106

    
3107
      elif now > value:
3108
        value = None
3109

    
3110
  return value
3111

    
3112

    
3113
class RetryTimeout(Exception):
3114
  """Retry loop timed out.
3115

3116
  Any arguments which were passed by the retried function to RetryAgain will be
3117
  preserved in RetryTimeout, if it is raised. If such an argument was an exception,
3118
  the RaiseInner helper method will reraise it.
3119

3120
  """
3121
  def RaiseInner(self):
3122
    if self.args and isinstance(self.args[0], Exception):
3123
      raise self.args[0]
3124
    else:
3125
      raise RetryTimeout(*self.args)
3126

    
3127

    
3128
class RetryAgain(Exception):
3129
  """Retry again.
3130

3131
  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
3132
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
3133
  of the RetryTimeout exception can be used to reraise it.
3134

3135
  """
3136

    
3137

    
3138
class _RetryDelayCalculator(object):
3139
  """Calculator for increasing delays.
3140

3141
  """
3142
  __slots__ = [
3143
    "_factor",
3144
    "_limit",
3145
    "_next",
3146
    "_start",
3147
    ]
3148

    
3149
  def __init__(self, start, factor, limit):
3150
    """Initializes this class.
3151

3152
    @type start: float
3153
    @param start: Initial delay
3154
    @type factor: float
3155
    @param factor: Factor for delay increase
3156
    @type limit: float or None
3157
    @param limit: Upper limit for delay or None for no limit
3158

3159
    """
3160
    assert start > 0.0
3161
    assert factor >= 1.0
3162
    assert limit is None or limit >= 0.0
3163

    
3164
    self._start = start
3165
    self._factor = factor
3166
    self._limit = limit
3167

    
3168
    self._next = start
3169

    
3170
  def __call__(self):
3171
    """Returns current delay and calculates the next one.
3172

3173
    """
3174
    current = self._next
3175

    
3176
    # Update for next run
3177
    if self._limit is None or self._next < self._limit:
3178
      self._next = min(self._limit, self._next * self._factor)
3179

    
3180
    return current
3181

    
3182

    
3183
#: Special delay to specify whole remaining timeout
3184
RETRY_REMAINING_TIME = object()
3185

    
3186

    
3187
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3188
          _time_fn=time.time):
3189
  """Call a function repeatedly until it succeeds.
3190

3191
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3192
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3193
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
3194

3195
  C{delay} can be one of the following:
3196
    - callable returning the delay length as a float
3197
    - Tuple of (start, factor, limit)
3198
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3199
      useful when overriding L{wait_fn} to wait for an external event)
3200
    - A static delay as a number (int or float)
3201

3202
  @type fn: callable
3203
  @param fn: Function to be called
3204
  @param delay: Either a callable (returning the delay), a tuple of (start,
3205
                factor, limit) (see L{_RetryDelayCalculator}),
3206
                L{RETRY_REMAINING_TIME} or a number (int or float)
3207
  @type timeout: float
3208
  @param timeout: Total timeout
3209
  @type wait_fn: callable
3210
  @param wait_fn: Waiting function
3211
  @return: Return value of function
3212

3213
  """
3214
  assert callable(fn)
3215
  assert callable(wait_fn)
3216
  assert callable(_time_fn)
3217

    
3218
  if args is None:
3219
    args = []
3220

    
3221
  end_time = _time_fn() + timeout
3222

    
3223
  if callable(delay):
3224
    # External function to calculate delay
3225
    calc_delay = delay
3226

    
3227
  elif isinstance(delay, (tuple, list)):
3228
    # Increasing delay with optional upper boundary
3229
    (start, factor, limit) = delay
3230
    calc_delay = _RetryDelayCalculator(start, factor, limit)
3231

    
3232
  elif delay is RETRY_REMAINING_TIME:
3233
    # Always use the remaining time
3234
    calc_delay = None
3235

    
3236
  else:
3237
    # Static delay
3238
    calc_delay = lambda: delay
3239

    
3240
  assert calc_delay is None or callable(calc_delay)
3241

    
3242
  while True:
3243
    retry_args = []
3244
    try:
3245
      # pylint: disable-msg=W0142
3246
      return fn(*args)
3247
    except RetryAgain, err:
3248
      retry_args = err.args
3249
    except RetryTimeout:
3250
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
3251
                                   " handle RetryTimeout")
3252

    
3253
    remaining_time = end_time - _time_fn()
3254

    
3255
    if remaining_time < 0.0:
3256
      # pylint: disable-msg=W0142
3257
      raise RetryTimeout(*retry_args)
3258

    
3259
    assert remaining_time >= 0.0
3260

    
3261
    if calc_delay is None:
3262
      wait_fn(remaining_time)
3263
    else:
3264
      current_delay = calc_delay()
3265
      if current_delay > 0.0:
3266
        wait_fn(current_delay)
3267
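# Editor's note: illustrative usage only; _CheckDaemonUp is a hypothetical
# caller-supplied function (IsProcessAlive, ReadPidFile and DaemonPidFileName
# are helpers defined in this module):
#
#   def _CheckDaemonUp():
#     if not IsProcessAlive(ReadPidFile(DaemonPidFileName("ganeti-noded"))):
#       raise RetryAgain()
#
#   try:
#     # poll with delays starting at 0.1s, growing 1.5x, capped at 2s,
#     # for at most 30 seconds in total
#     Retry(_CheckDaemonUp, (0.1, 1.5, 2.0), 30.0)
#   except RetryTimeout:
#     logging.error("daemon did not come up within 30 seconds")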

    
3268

    
3269
def GetClosedTempfile(*args, **kwargs):
3270
  """Creates a temporary file and returns its path.
3271

3272
  """
3273
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
3274
  _CloseFDNoErr(fd)
3275
  return path
3276

    
3277

    
3278
def GenerateSelfSignedX509Cert(common_name, validity):
3279
  """Generates a self-signed X509 certificate.
3280

3281
  @type common_name: string
3282
  @param common_name: commonName value
3283
  @type validity: int
3284
  @param validity: Validity for certificate in seconds
3285

3286
  """
3287
  # Create private and public key
3288
  key = OpenSSL.crypto.PKey()
3289
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
3290

    
3291
  # Create self-signed certificate
3292
  cert = OpenSSL.crypto.X509()
3293
  if common_name:
3294
    cert.get_subject().CN = common_name
3295
  cert.set_serial_number(1)
3296
  cert.gmtime_adj_notBefore(0)
3297
  cert.gmtime_adj_notAfter(validity)
3298
  cert.set_issuer(cert.get_subject())
3299
  cert.set_pubkey(key)
3300
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
3301

    
3302
  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
3303
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
3304

    
3305
  return (key_pem, cert_pem)
3306

    
3307

    
3308
def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
3309
  """Legacy function to generate self-signed X509 certificate.
3310

3311
  """
3312
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
3313
                                                   validity * 24 * 60 * 60)
3314

    
3315
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3316

    
3317

    
3318
class FileLock(object):
3319
  """Utility class for file locks.
3320

3321
  """
3322
  def __init__(self, fd, filename):
3323
    """Constructor for FileLock.
3324

3325
    @type fd: file
3326
    @param fd: File object
3327
    @type filename: str
3328
    @param filename: Path of the file opened at I{fd}
3329

3330
    """
3331
    self.fd = fd
3332
    self.filename = filename
3333

    
3334
  @classmethod
3335
  def Open(cls, filename):
3336
    """Creates and opens a file to be used as a file-based lock.
3337

3338
    @type filename: string
3339
    @param filename: path to the file to be locked
3340

3341
    """
3342
    # Using "os.open" is necessary to allow both opening existing file
3343
    # read/write and creating if not existing. Vanilla "open" will truncate an
3344
    # existing file -or- allow creating if not existing.
3345
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
3346
               filename)
3347

    
3348
  def __del__(self):
3349
    self.Close()
3350

    
3351
  def Close(self):
3352
    """Close the file and release the lock.
3353

3354
    """
3355
    if hasattr(self, "fd") and self.fd:
3356
      self.fd.close()
3357
      self.fd = None
3358

    
3359
  def _flock(self, flag, blocking, timeout, errmsg):
3360
    """Wrapper for fcntl.flock.
3361

3362
    @type flag: int
3363
    @param flag: operation flag
3364
    @type blocking: bool
3365
    @param blocking: whether the operation should be done in blocking mode.
3366
    @type timeout: None or float
3367
    @param timeout: for how long the operation should be retried (implies
3368
                    non-blocking mode).
3369
    @type errmsg: string
3370
    @param errmsg: error message in case operation fails.
3371

3372
    """
3373
    assert self.fd, "Lock was closed"
3374
    assert timeout is None or timeout >= 0, \
3375
      "If specified, timeout must be positive"
3376
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
3377

    
3378
    # When a timeout is used, LOCK_NB must always be set
3379
    if not (timeout is None and blocking):
3380
      flag |= fcntl.LOCK_NB
3381

    
3382
    if timeout is None:
3383
      self._Lock(self.fd, flag, timeout)
3384
    else:
3385
      try:
3386
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
3387
              args=(self.fd, flag, timeout))
3388
      except RetryTimeout:
3389
        raise errors.LockError(errmsg)
3390

    
3391
  @staticmethod
3392
  def _Lock(fd, flag, timeout):
3393
    try:
3394
      fcntl.flock(fd, flag)
3395
    except IOError, err:
3396
      if timeout is not None and err.errno == errno.EAGAIN:
3397
        raise RetryAgain()
3398

    
3399
      logging.exception("fcntl.flock failed")
3400
      raise
3401

    
3402

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
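

# Illustrative sketch, not part of the original module: one way the FileLock
# class above might be used to serialize access to a shared resource. The
# lock file path and the ten-second timeout are made-up example values; on
# timeout, _flock() raises errors.LockError as documented above.
def _ExampleFileLockUsage(lock_path="/tmp/example-resource.lock"):
  """Sketch: take an exclusive lock, do some work, then release it."""
  lock = FileLock.Open(lock_path)
  try:
    # Wait up to ten seconds for any other holder to release the lock
    lock.Exclusive(blocking=True, timeout=10)
    try:
      # ... critical section: the shared resource is now ours ...
      pass
    finally:
      lock.Unlock()
  finally:
    # Closing the underlying file also drops any lock still held
    lock.Close()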


class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
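

# Illustrative sketch, not part of the original module: feeding chunked
# output through LineSplitter so a callback only ever sees complete lines.
# The sample chunks and the collecting list are example values.
def _ExampleLineSplitterUsage():
  """Sketch: collect whole lines from arbitrarily split data chunks."""
  collected = []
  splitter = LineSplitter(collected.append)
  # Chunks need not respect line boundaries
  splitter.write("first li")
  splitter.write("ne\r\nsecond line\nthird")
  splitter.flush()
  # Only the two complete lines have been delivered so far
  assert collected == ["first line", "second line"]
  # close() also flushes the trailing partial line
  splitter.close()
  assert collected == ["first line", "second line", "third"]
  return collected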


def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap
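

# Illustrative sketch, not part of the original module: a long-running loop
# using the SignalHandled decorator above to stop cleanly on SIGTERM. The
# iteration count and sleep interval are arbitrary example values.
@SignalHandled([signal.SIGTERM])
def _ExampleSignalHandledLoop(signal_handlers=None):
  """Sketch: poll until the work is done or SIGTERM has been received."""
  sigterm = signal_handlers[signal.SIGTERM]
  for _ in range(100):
    if sigterm.called:
      # SIGTERM arrived; stop cleanly instead of being killed mid-step
      break
    # ... one unit of work per iteration ...
    time.sleep(0.1)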


class SignalWakeupFd(object):
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
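

# Illustrative sketch, not part of the original module: pairing SignalWakeupFd
# with select() so a poll loop wakes up as soon as SIGTERM is delivered
# instead of waiting for the timeout to expire. The five-second timeout is an
# arbitrary example value; SignalHandler is the class defined below.
def _ExampleSignalWakeupFdUsage():
  """Sketch: an interruptible select() loop driven by SIGTERM."""
  wakeup = SignalWakeupFd()
  handler = SignalHandler([signal.SIGTERM], wakeup=wakeup)
  try:
    while not handler.called:
      try:
        # The wakeup fd becomes readable once the signal handler has run
        (readable, _, _) = select.select([wakeup], [], [], 5.0)
      except select.error, err:
        if err.args[0] == errno.EINTR:
          # select() was interrupted by the signal itself; re-check .called
          continue
        raise
      if wakeup in readable:
        # Drain the byte written by SignalHandler via Notify()
        wakeup.read(1)
  finally:
    handler.Reset()
    wakeup.Reset()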


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when the object is
  destroyed or when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @type wakeup: L{SignalWakeupFd}
    @param wakeup: optional object whose C{Notify} method is called whenever
        one of the handled signals arrives

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
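

# Illustrative sketch, not part of the original module: using SignalHandler
# directly with a callback, rather than through the SignalHandled decorator.
# SIGHUP triggering a configuration reload is a made-up example scenario.
def _ExampleSignalHandlerCallback():
  """Sketch: log a message whenever SIGHUP is received."""
  def _OnSignal(signum, _frame):
    logging.info("Received signal %s, reloading configuration", signum)
  handler = SignalHandler([signal.SIGHUP], handler_fn=_OnSignal)
  try:
    # ... main work; _OnSignal runs whenever SIGHUP arrives ...
    time.sleep(1.0)
  finally:
    # Always restore the previous signal disposition
    handler.Reset()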


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
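

# Illustrative sketch, not part of the original module: a FieldSet mixing
# literal names with a parameterized regex field, roughly how a query layer
# might validate requested field names. All field names are example values.
def _ExampleFieldSetUsage():
  """Sketch: match literal and parameterized field names."""
  fields = FieldSet("name", "uuid", r"disk\.size/([0-9]+)")
  # Literal fields match exactly
  assert fields.Matches("name") is not None
  # Regex fields expose their groups through the returned match object
  m = fields.Matches("disk.size/2")
  assert m is not None and m.group(1) == "2"
  # Fields outside the set are reported by NonMatching
  assert fields.NonMatching(["name", "memory"]) == ["memory"]
  return fields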