
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import sys
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import OpenSSL
import datetime
import calendar
import hmac
import collections
import struct
import IN

from cStringIO import StringIO

try:
  # pylint: disable-msg=F0401
  import ctypes
except ImportError:
  ctypes = None

from ganeti import errors
from ganeti import constants
from ganeti import compat


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
#
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
# pid_t to "int".
#
# IEEE Std 1003.1-2008:
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
_STRUCT_UCRED = "iII"
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2
106

    
107

    
108
class RunResult(object):
109
  """Holds the result of running external programs.
110

111
  @type exit_code: int
112
  @ivar exit_code: the exit code of the program, or None (if the program
113
      didn't exit())
114
  @type signal: int or None
115
  @ivar signal: the signal that caused the program to finish, or None
116
      (if the program wasn't terminated by a signal)
117
  @type stdout: str
118
  @ivar stdout: the standard output of the program
119
  @type stderr: str
120
  @ivar stderr: the standard error of the program
121
  @type failed: boolean
122
  @ivar failed: True in case the program was
123
      terminated by a signal or exited with a non-zero exit code
124
  @ivar fail_reason: a string detailing the termination reason
125

126
  """
127
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
128
               "failed", "fail_reason", "cmd"]
129

    
130

    
131
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
132
    self.cmd = cmd
133
    self.exit_code = exit_code
134
    self.signal = signal_
135
    self.stdout = stdout
136
    self.stderr = stderr
137
    self.failed = (signal_ is not None or exit_code != 0)
138

    
139
    if self.signal is not None:
140
      self.fail_reason = "terminated by signal %s" % self.signal
141
    elif self.exit_code is not None:
142
      self.fail_reason = "exited with exit code %s" % self.exit_code
143
    else:
144
      self.fail_reason = "unable to determine termination reason"
145

    
146
    if self.failed:
147
      logging.debug("Command '%s' failed (%s); output: %s",
148
                    self.cmd, self.fail_reason, self.output)
149

    
150
  def _GetOutput(self):
151
    """Returns the combined stdout and stderr for easier usage.
152

153
    """
154
    return self.stdout + self.stderr
155

    
156
  output = property(_GetOutput, None, None, "Return full output")
157

    
158

    
159
def _BuildCmdEnvironment(env, reset):
160
  """Builds the environment for an external program.
161

162
  """
163
  if reset:
164
    cmd_env = {}
165
  else:
166
    cmd_env = os.environ.copy()
167
    cmd_env["LC_ALL"] = "C"
168

    
169
  if env is not None:
170
    cmd_env.update(env)
171

    
172
  return cmd_env
173

    
174

    
175
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
176
  """Execute a (shell) command.
177

178
  The command should not read from its standard input, as it will be
179
  closed.
180

181
  @type cmd: string or list
182
  @param cmd: Command to run
183
  @type env: dict
184
  @param env: Additional environment variables
185
  @type output: str
186
  @param output: if desired, the output of the command can be
187
      saved in a file instead of the RunResult instance; this
188
      parameter denotes the file name (if not None)
189
  @type cwd: string
190
  @param cwd: if specified, will be used as the working
191
      directory for the command; the default will be /
192
  @type reset_env: boolean
193
  @param reset_env: whether to reset or keep the default os environment
194
  @rtype: L{RunResult}
195
  @return: RunResult instance
196
  @raise errors.ProgrammerError: if we call this when forks are disabled
197

198
  """
199
  if no_fork:
200
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
201

    
202
  if isinstance(cmd, basestring):
203
    strcmd = cmd
204
    shell = True
205
  else:
206
    cmd = [str(val) for val in cmd]
207
    strcmd = ShellQuoteArgs(cmd)
208
    shell = False
209

    
210
  if output:
211
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
212
  else:
213
    logging.debug("RunCmd %s", strcmd)
214

    
215
  cmd_env = _BuildCmdEnvironment(env, reset_env)
216

    
217
  try:
218
    if output is None:
219
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
220
    else:
221
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
222
      out = err = ""
223
  except OSError, err:
224
    if err.errno == errno.ENOENT:
225
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
226
                               (strcmd, err))
227
    else:
228
      raise
229

    
230
  if status >= 0:
231
    exitcode = status
232
    signal_ = None
233
  else:
234
    exitcode = None
235
    signal_ = -status
236

    
237
  return RunResult(exitcode, signal_, out, err, strcmd)
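
# Illustrative usage sketch (not part of the original module; the command
# and environment below are made up for the example):
#
#   result = RunCmd(["/bin/ls", "/tmp"], env={"LC_ALL": "C"})
#   if result.failed:
#     logging.error("Command %s failed: %s", result.cmd, result.fail_reason)
#   else:
#     lines = result.stdout.splitlines()
#
# Passing a string instead of a list runs the command through the shell,
# and output=<path> sends stdout+stderr to a file instead of capturing
# them in the RunResult.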
238

    
239

    
240
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
241
                pidfile=None):
242
  """Start a daemon process after forking twice.
243

244
  @type cmd: string or list
245
  @param cmd: Command to run
246
  @type env: dict
247
  @param env: Additional environment variables
248
  @type cwd: string
249
  @param cwd: Working directory for the program
250
  @type output: string
251
  @param output: Path to file in which to save the output
252
  @type output_fd: int
253
  @param output_fd: File descriptor for output
254
  @type pidfile: string
255
  @param pidfile: Process ID file
256
  @rtype: int
257
  @return: Daemon process ID
258
  @raise errors.ProgrammerError: if we call this when forks are disabled
259

260
  """
261
  if no_fork:
262
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
263
                                 " disabled")
264

    
265
  if output and not (bool(output) ^ (output_fd is not None)):
266
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
267
                                 " specified")
268

    
269
  if isinstance(cmd, basestring):
270
    cmd = ["/bin/sh", "-c", cmd]
271

    
272
  strcmd = ShellQuoteArgs(cmd)
273

    
274
  if output:
275
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
276
  else:
277
    logging.debug("StartDaemon %s", strcmd)
278

    
279
  cmd_env = _BuildCmdEnvironment(env, False)
280

    
281
  # Create pipe for sending PID back
282
  (pidpipe_read, pidpipe_write) = os.pipe()
283
  try:
284
    try:
285
      # Create pipe for sending error messages
286
      (errpipe_read, errpipe_write) = os.pipe()
287
      try:
288
        try:
289
          # First fork
290
          pid = os.fork()
291
          if pid == 0:
292
            try:
293
              # Child process, won't return
294
              _StartDaemonChild(errpipe_read, errpipe_write,
295
                                pidpipe_read, pidpipe_write,
296
                                cmd, cmd_env, cwd,
297
                                output, output_fd, pidfile)
298
            finally:
299
              # Well, maybe child process failed
300
              os._exit(1) # pylint: disable-msg=W0212
301
        finally:
302
          _CloseFDNoErr(errpipe_write)
303

    
304
        # Wait for daemon to be started (or an error message to arrive) and read
305
        # up to 100 KB as an error message
306
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
307
      finally:
308
        _CloseFDNoErr(errpipe_read)
309
    finally:
310
      _CloseFDNoErr(pidpipe_write)
311

    
312
    # Read up to 128 bytes for PID
313
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
314
  finally:
315
    _CloseFDNoErr(pidpipe_read)
316

    
317
  # Try to avoid zombies by waiting for child process
318
  try:
319
    os.waitpid(pid, 0)
320
  except OSError:
321
    pass
322

    
323
  if errormsg:
324
    raise errors.OpExecError("Error when starting daemon process: %r" %
325
                             errormsg)
326

    
327
  try:
328
    return int(pidtext)
329
  except (ValueError, TypeError), err:
330
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
331
                             (pidtext, err))
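
# Illustrative usage sketch (not part of the original module; the daemon
# path, log file and PID file are assumptions for the example):
#
#   pid = StartDaemon(["/usr/sbin/mydaemon", "--foreground"],
#                     output="/var/log/mydaemon.log",
#                     pidfile="/var/run/mydaemon.pid")
#   logging.info("daemon started with PID %d", pid)
#
# The returned PID is that of the detached daemon itself (after the double
# fork), i.e. the same value written to the PID file.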
332

    
333

    
334
def _StartDaemonChild(errpipe_read, errpipe_write,
335
                      pidpipe_read, pidpipe_write,
336
                      args, env, cwd,
337
                      output, fd_output, pidfile):
338
  """Child process for starting daemon.
339

340
  """
341
  try:
342
    # Close parent's side
343
    _CloseFDNoErr(errpipe_read)
344
    _CloseFDNoErr(pidpipe_read)
345

    
346
    # First child process
347
    os.chdir("/")
348
    os.umask(077)
349
    os.setsid()
350

    
351
    # And fork for the second time
352
    pid = os.fork()
353
    if pid != 0:
354
      # Exit first child process
355
      os._exit(0) # pylint: disable-msg=W0212
356

    
357
    # Make sure pipe is closed on execv* (and thereby notifies original process)
358
    SetCloseOnExecFlag(errpipe_write, True)
359

    
360
    # List of file descriptors to be left open
361
    noclose_fds = [errpipe_write]
362

    
363
    # Open PID file
364
    if pidfile:
365
      try:
366
        # TODO: Atomic replace with another locked file instead of writing into
367
        # it after creating
368
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
369

    
370
        # Lock the PID file (and fail if not possible to do so). Any code
371
        # wanting to send a signal to the daemon should try to lock the PID
372
        # file before reading it. If acquiring the lock succeeds, the daemon is
373
        # no longer running and the signal should not be sent.
374
        LockFile(fd_pidfile)
375

    
376
        os.write(fd_pidfile, "%d\n" % os.getpid())
377
      except Exception, err:
378
        raise Exception("Creating and locking PID file failed: %s" % err)
379

    
380
      # Keeping the file open to hold the lock
381
      noclose_fds.append(fd_pidfile)
382

    
383
      SetCloseOnExecFlag(fd_pidfile, False)
384
    else:
385
      fd_pidfile = None
386

    
387
    # Open /dev/null
388
    fd_devnull = os.open(os.devnull, os.O_RDWR)
389

    
390
    assert not output or (bool(output) ^ (fd_output is not None))
391

    
392
    if fd_output is not None:
393
      pass
394
    elif output:
395
      # Open output file
396
      try:
397
        # TODO: Implement flag to set append=yes/no
398
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
399
      except EnvironmentError, err:
400
        raise Exception("Opening output file failed: %s" % err)
401
    else:
402
      fd_output = fd_devnull
403

    
404
    # Redirect standard I/O
405
    os.dup2(fd_devnull, 0)
406
    os.dup2(fd_output, 1)
407
    os.dup2(fd_output, 2)
408

    
409
    # Send daemon PID to parent
410
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
411

    
412
    # Close all file descriptors except stdio and error message pipe
413
    CloseFDs(noclose_fds=noclose_fds)
414

    
415
    # Change working directory
416
    os.chdir(cwd)
417

    
418
    if env is None:
419
      os.execvp(args[0], args)
420
    else:
421
      os.execvpe(args[0], args, env)
422
  except: # pylint: disable-msg=W0702
423
    try:
424
      # Report errors to original process
425
      buf = str(sys.exc_info()[1])
426

    
427
      RetryOnSignal(os.write, errpipe_write, buf)
428
    except: # pylint: disable-msg=W0702
429
      # Ignore errors in error handling
430
      pass
431

    
432
  os._exit(1) # pylint: disable-msg=W0212
433

    
434

    
435
def _RunCmdPipe(cmd, env, via_shell, cwd):
436
  """Run a command and return its output.
437

438
  @type  cmd: string or list
439
  @param cmd: Command to run
440
  @type env: dict
441
  @param env: The environment to use
442
  @type via_shell: bool
443
  @param via_shell: if we should run via the shell
444
  @type cwd: string
445
  @param cwd: the working directory for the program
446
  @rtype: tuple
447
  @return: (out, err, status)
448

449
  """
450
  poller = select.poll()
451
  child = subprocess.Popen(cmd, shell=via_shell,
452
                           stderr=subprocess.PIPE,
453
                           stdout=subprocess.PIPE,
454
                           stdin=subprocess.PIPE,
455
                           close_fds=True, env=env,
456
                           cwd=cwd)
457

    
458
  child.stdin.close()
459
  poller.register(child.stdout, select.POLLIN)
460
  poller.register(child.stderr, select.POLLIN)
461
  out = StringIO()
462
  err = StringIO()
463
  fdmap = {
464
    child.stdout.fileno(): (out, child.stdout),
465
    child.stderr.fileno(): (err, child.stderr),
466
    }
467
  for fd in fdmap:
468
    SetNonblockFlag(fd, True)
469

    
470
  while fdmap:
471
    pollresult = RetryOnSignal(poller.poll)
472

    
473
    for fd, event in pollresult:
474
      if event & select.POLLIN or event & select.POLLPRI:
475
        data = fdmap[fd][1].read()
476
        # no data from read signifies EOF (the same as POLLHUP)
477
        if not data:
478
          poller.unregister(fd)
479
          del fdmap[fd]
480
          continue
481
        fdmap[fd][0].write(data)
482
      if (event & select.POLLNVAL or event & select.POLLHUP or
483
          event & select.POLLERR):
484
        poller.unregister(fd)
485
        del fdmap[fd]
486

    
487
  out = out.getvalue()
488
  err = err.getvalue()
489

    
490
  status = child.wait()
491
  return out, err, status
492

    
493

    
494
def _RunCmdFile(cmd, env, via_shell, output, cwd):
495
  """Run a command and save its output to a file.
496

497
  @type  cmd: string or list
498
  @param cmd: Command to run
499
  @type env: dict
500
  @param env: The environment to use
501
  @type via_shell: bool
502
  @param via_shell: if we should run via the shell
503
  @type output: str
504
  @param output: the filename in which to save the output
505
  @type cwd: string
506
  @param cwd: the working directory for the program
507
  @rtype: int
508
  @return: the exit status
509

510
  """
511
  fh = open(output, "a")
512
  try:
513
    child = subprocess.Popen(cmd, shell=via_shell,
514
                             stderr=subprocess.STDOUT,
515
                             stdout=fh,
516
                             stdin=subprocess.PIPE,
517
                             close_fds=True, env=env,
518
                             cwd=cwd)
519

    
520
    child.stdin.close()
521
    status = child.wait()
522
  finally:
523
    fh.close()
524
  return status
525

    
526

    
527
def SetCloseOnExecFlag(fd, enable):
528
  """Sets or unsets the close-on-exec flag on a file descriptor.
529

530
  @type fd: int
531
  @param fd: File descriptor
532
  @type enable: bool
533
  @param enable: Whether to set or unset it.
534

535
  """
536
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)
537

    
538
  if enable:
539
    flags |= fcntl.FD_CLOEXEC
540
  else:
541
    flags &= ~fcntl.FD_CLOEXEC
542

    
543
  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
544

    
545

    
546
def SetNonblockFlag(fd, enable):
547
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
548

549
  @type fd: int
550
  @param fd: File descriptor
551
  @type enable: bool
552
  @param enable: Whether to set or unset it
553

554
  """
555
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)
556

    
557
  if enable:
558
    flags |= os.O_NONBLOCK
559
  else:
560
    flags &= ~os.O_NONBLOCK
561

    
562
  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
563

    
564

    
565
def RetryOnSignal(fn, *args, **kwargs):
566
  """Calls a function again if it failed due to EINTR.
567

568
  """
569
  while True:
570
    try:
571
      return fn(*args, **kwargs)
572
    except EnvironmentError, err:
573
      if err.errno != errno.EINTR:
574
        raise
575
    except (socket.error, select.error), err:
576
      # In python 2.6 and above select.error is an IOError, so it's handled
577
      # above, in 2.5 and below it's not, and it's handled here.
578
      if not (err.args and err.args[0] == errno.EINTR):
579
        raise
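
# Illustrative usage sketch (not part of the original module; "fd" stands
# for any readable file descriptor):
#
#   data = RetryOnSignal(os.read, fd, 4096)
#
# Only EINTR is retried; any other error from the wrapped call is
# re-raised unchanged.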
580

    
581

    
582
def RunParts(dir_name, env=None, reset_env=False):
583
  """Run Scripts or programs in a directory
584

585
  @type dir_name: string
586
  @param dir_name: absolute path to a directory
587
  @type env: dict
588
  @param env: The environment to use
589
  @type reset_env: boolean
590
  @param reset_env: whether to reset or keep the default os environment
591
  @rtype: list of tuples
592
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
593

594
  """
595
  rr = []
596

    
597
  try:
598
    dir_contents = ListVisibleFiles(dir_name)
599
  except OSError, err:
600
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
601
    return rr
602

    
603
  for relname in sorted(dir_contents):
604
    fname = PathJoin(dir_name, relname)
605
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
606
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
607
      rr.append((relname, constants.RUNPARTS_SKIP, None))
608
    else:
609
      try:
610
        result = RunCmd([fname], env=env, reset_env=reset_env)
611
      except Exception, err: # pylint: disable-msg=W0703
612
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
613
      else:
614
        rr.append((relname, constants.RUNPARTS_RUN, result))
615

    
616
  return rr
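
# Illustrative usage sketch (not part of the original module; the hook
# directory is an assumption for the example):
#
#   for (name, status, result) in RunParts("/etc/ganeti/hooks"):
#     if status == constants.RUNPARTS_ERR:
#       logging.error("Could not run %s: %s", name, result)
#     elif status == constants.RUNPARTS_RUN and result.failed:
#       logging.warning("%s failed: %s", name, result.fail_reason)
#
# Entries with status RUNPARTS_SKIP carry None instead of a RunResult.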
617

    
618

    
619
def GetSocketCredentials(sock):
620
  """Returns the credentials of the foreign process connected to a socket.
621

622
  @param sock: Unix socket
623
  @rtype: tuple; (number, number, number)
624
  @return: The PID, UID and GID of the connected foreign process.
625

626
  """
627
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
628
                             _STRUCT_UCRED_SIZE)
629
  return struct.unpack(_STRUCT_UCRED, peercred)
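
# Illustrative usage sketch (not part of the original module; "server_sock"
# stands for an AF_UNIX listening socket created elsewhere):
#
#   conn, _ = server_sock.accept()
#   (pid, uid, gid) = GetSocketCredentials(conn)
#   logging.info("connection from pid %s (uid %s, gid %s)", pid, uid, gid)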
630

    
631

    
632
def RemoveFile(filename):
633
  """Remove a file ignoring some errors.
634

635
  Remove a file, ignoring non-existing ones or directories. Other
636
  errors are passed.
637

638
  @type filename: str
639
  @param filename: the file to be removed
640

641
  """
642
  try:
643
    os.unlink(filename)
644
  except OSError, err:
645
    if err.errno not in (errno.ENOENT, errno.EISDIR):
646
      raise
647

    
648

    
649
def RemoveDir(dirname):
650
  """Remove an empty directory.
651

652
  Remove a directory, ignoring non-existing ones.
653
  Other errors are passed. This includes the case,
654
  where the directory is not empty, so it can't be removed.
655

656
  @type dirname: str
657
  @param dirname: the empty directory to be removed
658

659
  """
660
  try:
661
    os.rmdir(dirname)
662
  except OSError, err:
663
    if err.errno != errno.ENOENT:
664
      raise
665

    
666

    
667
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
668
  """Renames a file.
669

670
  @type old: string
671
  @param old: Original path
672
  @type new: string
673
  @param new: New path
674
  @type mkdir: bool
675
  @param mkdir: Whether to create target directory if it doesn't exist
676
  @type mkdir_mode: int
677
  @param mkdir_mode: Mode for newly created directories
678

679
  """
680
  try:
681
    return os.rename(old, new)
682
  except OSError, err:
683
    # In at least one use case of this function, the job queue, directory
684
    # creation is very rare. Checking for the directory before renaming is not
685
    # as efficient.
686
    if mkdir and err.errno == errno.ENOENT:
687
      # Create directory and try again
688
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
689

    
690
      return os.rename(old, new)
691

    
692
    raise
693

    
694

    
695
def Makedirs(path, mode=0750):
696
  """Super-mkdir; create a leaf directory and all intermediate ones.
697

698
  This is a wrapper around C{os.makedirs} adding error handling not implemented
699
  before Python 2.5.
700

701
  """
702
  try:
703
    os.makedirs(path, mode)
704
  except OSError, err:
705
    # Ignore EEXIST. This is only handled in os.makedirs as included in
706
    # Python 2.5 and above.
707
    if err.errno != errno.EEXIST or not os.path.exists(path):
708
      raise
709

    
710

    
711
def ResetTempfileModule():
712
  """Resets the random name generator of the tempfile module.
713

714
  This function should be called after C{os.fork} in the child process to
715
  ensure it creates a newly seeded random generator. Otherwise it would
716
  generate the same random parts as the parent process. If several processes
717
  race for the creation of a temporary file, this could lead to one not getting
718
  a temporary name.
719

720
  """
721
  # pylint: disable-msg=W0212
722
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
723
    tempfile._once_lock.acquire()
724
    try:
725
      # Reset random name generator
726
      tempfile._name_sequence = None
727
    finally:
728
      tempfile._once_lock.release()
729
  else:
730
    logging.critical("The tempfile module misses at least one of the"
731
                     " '_once_lock' and '_name_sequence' attributes")
732

    
733

    
734
def _FingerprintFile(filename):
735
  """Compute the fingerprint of a file.
736

737
  If the file does not exist, a None will be returned
738
  instead.
739

740
  @type filename: str
741
  @param filename: the filename to checksum
742
  @rtype: str
743
  @return: the hex digest of the sha checksum of the contents
744
      of the file
745

746
  """
747
  if not (os.path.exists(filename) and os.path.isfile(filename)):
748
    return None
749

    
750
  f = open(filename)
751

    
752
  fp = compat.sha1_hash()
753
  while True:
754
    data = f.read(4096)
755
    if not data:
756
      break
757

    
758
    fp.update(data)
759

    
760
  return fp.hexdigest()
761

    
762

    
763
def FingerprintFiles(files):
764
  """Compute fingerprints for a list of files.
765

766
  @type files: list
767
  @param files: the list of filename to fingerprint
768
  @rtype: dict
769
  @return: a dictionary filename: fingerprint, holding only
770
      existing files
771

772
  """
773
  ret = {}
774

    
775
  for filename in files:
776
    cksum = _FingerprintFile(filename)
777
    if cksum:
778
      ret[filename] = cksum
779

    
780
  return ret
781

    
782

    
783
def ForceDictType(target, key_types, allowed_values=None):
784
  """Force the values of a dict to have certain types.
785

786
  @type target: dict
787
  @param target: the dict to update
788
  @type key_types: dict
789
  @param key_types: dict mapping target dict keys to types
790
                    in constants.ENFORCEABLE_TYPES
791
  @type allowed_values: list
792
  @keyword allowed_values: list of specially allowed values
793

794
  """
795
  if allowed_values is None:
796
    allowed_values = []
797

    
798
  if not isinstance(target, dict):
799
    msg = "Expected dictionary, got '%s'" % target
800
    raise errors.TypeEnforcementError(msg)
801

    
802
  for key in target:
803
    if key not in key_types:
804
      msg = "Unknown key '%s'" % key
805
      raise errors.TypeEnforcementError(msg)
806

    
807
    if target[key] in allowed_values:
808
      continue
809

    
810
    ktype = key_types[key]
811
    if ktype not in constants.ENFORCEABLE_TYPES:
812
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
813
      raise errors.ProgrammerError(msg)
814

    
815
    if ktype == constants.VTYPE_STRING:
816
      if not isinstance(target[key], basestring):
817
        if isinstance(target[key], bool) and not target[key]:
818
          target[key] = ''
819
        else:
820
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
821
          raise errors.TypeEnforcementError(msg)
822
    elif ktype == constants.VTYPE_BOOL:
823
      if isinstance(target[key], basestring) and target[key]:
824
        if target[key].lower() == constants.VALUE_FALSE:
825
          target[key] = False
826
        elif target[key].lower() == constants.VALUE_TRUE:
827
          target[key] = True
828
        else:
829
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
830
          raise errors.TypeEnforcementError(msg)
831
      elif target[key]:
832
        target[key] = True
833
      else:
834
        target[key] = False
835
    elif ktype == constants.VTYPE_SIZE:
836
      try:
837
        target[key] = ParseUnit(target[key])
838
      except errors.UnitParseError, err:
839
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
840
              (key, target[key], err)
841
        raise errors.TypeEnforcementError(msg)
842
    elif ktype == constants.VTYPE_INT:
843
      try:
844
        target[key] = int(target[key])
845
      except (ValueError, TypeError):
846
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
847
        raise errors.TypeEnforcementError(msg)
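
# Illustrative usage sketch (not part of the original module; the keys and
# values are made up for the example):
#
#   params = {"auto_balance": "true", "memory": "512M"}
#   ForceDictType(params, {"auto_balance": constants.VTYPE_BOOL,
#                          "memory": constants.VTYPE_SIZE})
#   # params is now {"auto_balance": True, "memory": 512}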
848

    
849

    
850
def _GetProcStatusPath(pid):
851
  """Returns the path for a PID's proc status file.
852

853
  @type pid: int
854
  @param pid: Process ID
855
  @rtype: string
856

857
  """
858
  return "/proc/%d/status" % pid
859

    
860

    
861
def IsProcessAlive(pid):
862
  """Check if a given pid exists on the system.
863

864
  @note: zombie status is not handled, so zombie processes
865
      will be returned as alive
866
  @type pid: int
867
  @param pid: the process ID to check
868
  @rtype: boolean
869
  @return: True if the process exists
870

871
  """
872
  def _TryStat(name):
873
    try:
874
      os.stat(name)
875
      return True
876
    except EnvironmentError, err:
877
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
878
        return False
879
      elif err.errno == errno.EINVAL:
880
        raise RetryAgain(err)
881
      raise
882

    
883
  assert isinstance(pid, int), "pid must be an integer"
884
  if pid <= 0:
885
    return False
886

    
887
  # /proc in a multiprocessor environment can have strange behaviors.
888
  # Retry the os.stat a few times until we get a good result.
889
  try:
890
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
891
                 args=[_GetProcStatusPath(pid)])
892
  except RetryTimeout, err:
893
    err.RaiseInner()
894

    
895

    
896
def _ParseSigsetT(sigset):
897
  """Parse a rendered sigset_t value.
898

899
  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
900
  function.
901

902
  @type sigset: string
903
  @param sigset: Rendered signal set from /proc/$pid/status
904
  @rtype: set
905
  @return: Set of all enabled signal numbers
906

907
  """
908
  result = set()
909

    
910
  signum = 0
911
  for ch in reversed(sigset):
912
    chv = int(ch, 16)
913

    
914
    # The following could be done in a loop, but it's easier to read and
915
    # understand in the unrolled form
916
    if chv & 1:
917
      result.add(signum + 1)
918
    if chv & 2:
919
      result.add(signum + 2)
920
    if chv & 4:
921
      result.add(signum + 3)
922
    if chv & 8:
923
      result.add(signum + 4)
924

    
925
    signum += 4
926

    
927
  return result
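
# Illustrative example (not part of the original module): a SigCgt value of
# "0000000000004002" in /proc/$pid/status decodes to set([2, 15]), i.e. the
# process has handlers installed for SIGINT and SIGTERM:
#
#   _ParseSigsetT("0000000000004002")  # -> set([2, 15])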
928

    
929

    
930
def _GetProcStatusField(pstatus, field):
931
  """Retrieves a field from the contents of a proc status file.
932

933
  @type pstatus: string
934
  @param pstatus: Contents of /proc/$pid/status
935
  @type field: string
936
  @param field: Name of field whose value should be returned
937
  @rtype: string
938

939
  """
940
  for line in pstatus.splitlines():
941
    parts = line.split(":", 1)
942

    
943
    if len(parts) < 2 or parts[0] != field:
944
      continue
945

    
946
    return parts[1].strip()
947

    
948
  return None
949

    
950

    
951
def IsProcessHandlingSignal(pid, signum, status_path=None):
952
  """Checks whether a process is handling a signal.
953

954
  @type pid: int
955
  @param pid: Process ID
956
  @type signum: int
957
  @param signum: Signal number
958
  @rtype: bool
959

960
  """
961
  if status_path is None:
962
    status_path = _GetProcStatusPath(pid)
963

    
964
  try:
965
    proc_status = ReadFile(status_path)
966
  except EnvironmentError, err:
967
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
968
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
969
      return False
970
    raise
971

    
972
  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
973
  if sigcgt is None:
974
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
975

    
976
  # Now check whether signal is handled
977
  return signum in _ParseSigsetT(sigcgt)
978

    
979

    
980
def ReadPidFile(pidfile):
981
  """Read a pid from a file.
982

983
  @type  pidfile: string
984
  @param pidfile: path to the file containing the pid
985
  @rtype: int
986
  @return: The process id, if the file exists and contains a valid PID,
987
           otherwise 0
988

989
  """
990
  try:
991
    raw_data = ReadOneLineFile(pidfile)
992
  except EnvironmentError, err:
993
    if err.errno != errno.ENOENT:
994
      logging.exception("Can't read pid file")
995
    return 0
996

    
997
  try:
998
    pid = int(raw_data)
999
  except (TypeError, ValueError), err:
1000
    logging.info("Can't parse pid file contents", exc_info=True)
1001
    return 0
1002

    
1003
  return pid
1004

    
1005

    
1006
def ReadLockedPidFile(path):
1007
  """Reads a locked PID file.
1008

1009
  This can be used together with L{StartDaemon}.
1010

1011
  @type path: string
1012
  @param path: Path to PID file
1013
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
1014

1015
  """
1016
  try:
1017
    fd = os.open(path, os.O_RDONLY)
1018
  except EnvironmentError, err:
1019
    if err.errno == errno.ENOENT:
1020
      # PID file doesn't exist
1021
      return None
1022
    raise
1023

    
1024
  try:
1025
    try:
1026
      # Try to acquire lock
1027
      LockFile(fd)
1028
    except errors.LockError:
1029
      # Couldn't lock, daemon is running
1030
      return int(os.read(fd, 100))
1031
  finally:
1032
    os.close(fd)
1033

    
1034
  return None
1035

    
1036

    
1037
def MatchNameComponent(key, name_list, case_sensitive=True):
1038
  """Try to match a name against a list.
1039

1040
  This function will try to match a name like test1 against a list
1041
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
1042
  this list, I{'test1'} as well as I{'test1.example'} will match, but
1043
  not I{'test1.ex'}. A multiple match will be considered as no match
1044
  at all (e.g. I{'test1'} against C{['test1.example.com',
1045
  'test1.example.org']}), except when the key fully matches an entry
1046
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
1047

1048
  @type key: str
1049
  @param key: the name to be searched
1050
  @type name_list: list
1051
  @param name_list: the list of strings against which to search the key
1052
  @type case_sensitive: boolean
1053
  @param case_sensitive: whether to provide a case-sensitive match
1054

1055
  @rtype: None or str
1056
  @return: None if there is no match I{or} if there are multiple matches,
1057
      otherwise the element from the list which matches
1058

1059
  """
1060
  if key in name_list:
1061
    return key
1062

    
1063
  re_flags = 0
1064
  if not case_sensitive:
1065
    re_flags |= re.IGNORECASE
1066
    key = key.upper()
1067
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
1068
  names_filtered = []
1069
  string_matches = []
1070
  for name in name_list:
1071
    if mo.match(name) is not None:
1072
      names_filtered.append(name)
1073
      if not case_sensitive and key == name.upper():
1074
        string_matches.append(name)
1075

    
1076
  if len(string_matches) == 1:
1077
    return string_matches[0]
1078
  if len(names_filtered) == 1:
1079
    return names_filtered[0]
1080
  return None
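
# Illustrative examples (not part of the original module; the host names
# are made up):
#
#   MatchNameComponent("node1", ["node1.example.com", "node2.example.com"])
#     -> "node1.example.com"                 (unique prefix match)
#   MatchNameComponent("node1", ["node1.example.com", "node1.example.org"])
#     -> None                                (ambiguous)
#   MatchNameComponent("node1", ["node1", "node1.example.com"])
#     -> "node1"                             (exact match wins)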
1081

    
1082

    
1083
class HostInfo:
1084
  """Class implementing resolver and hostname functionality
1085

1086
  """
1087
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")
1088

    
1089
  def __init__(self, name=None):
1090
    """Initialize the host name object.
1091

1092
    If the name argument is not passed, it will use this system's
1093
    name.
1094

1095
    """
1096
    if name is None:
1097
      name = self.SysName()
1098

    
1099
    self.query = name
1100
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
1101
    self.ip = self.ipaddrs[0]
1102

    
1103
  def ShortName(self):
1104
    """Returns the hostname without domain.
1105

1106
    """
1107
    return self.name.split('.')[0]
1108

    
1109
  @staticmethod
1110
  def SysName():
1111
    """Return the current system's name.
1112

1113
    This is simply a wrapper over C{socket.gethostname()}.
1114

1115
    """
1116
    return socket.gethostname()
1117

    
1118
  @staticmethod
1119
  def LookupHostname(hostname):
1120
    """Look up hostname
1121

1122
    @type hostname: str
1123
    @param hostname: hostname to look up
1124

1125
    @rtype: tuple
1126
    @return: a tuple (name, aliases, ipaddrs) as returned by
1127
        C{socket.gethostbyname_ex}
1128
    @raise errors.ResolverError: in case of errors in resolving
1129

1130
    """
1131
    try:
1132
      result = socket.gethostbyname_ex(hostname)
1133
    except (socket.gaierror, socket.herror, socket.error), err:
1134
      # hostname not found in DNS, or other socket exception in the
1135
      # (code, description format)
1136
      raise errors.ResolverError(hostname, err.args[0], err.args[1])
1137

    
1138
    return result
1139

    
1140
  @classmethod
1141
  def NormalizeName(cls, hostname):
1142
    """Validate and normalize the given hostname.
1143

1144
    @attention: the validation is a bit more relaxed than the standards
1145
        require; most importantly, we allow underscores in names
1146
    @raise errors.OpPrereqError: when the name is not valid
1147

1148
    """
1149
    hostname = hostname.lower()
1150
    if (not cls._VALID_NAME_RE.match(hostname) or
1151
        # double-dots, meaning empty label
1152
        ".." in hostname or
1153
        # empty initial label
1154
        hostname.startswith(".")):
1155
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
1156
                                 errors.ECODE_INVAL)
1157
    if hostname.endswith("."):
1158
      hostname = hostname.rstrip(".")
1159
    return hostname
1160

    
1161

    
1162
def ValidateServiceName(name):
1163
  """Validate the given service name.
1164

1165
  @type name: number or string
1166
  @param name: Service name or port specification
1167

1168
  """
1169
  try:
1170
    numport = int(name)
1171
  except (ValueError, TypeError):
1172
    # Non-numeric service name
1173
    valid = _VALID_SERVICE_NAME_RE.match(name)
1174
  else:
1175
    # Numeric port (protocols other than TCP or UDP might need adjustments
1176
    # here)
1177
    valid = (numport >= 0 and numport < (1 << 16))
1178

    
1179
  if not valid:
1180
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
1181
                               errors.ECODE_INVAL)
1182

    
1183
  return name
1184

    
1185

    
1186
def GetHostInfo(name=None):
1187
  """Lookup host name and raise an OpPrereqError for failures"""
1188

    
1189
  try:
1190
    return HostInfo(name)
1191
  except errors.ResolverError, err:
1192
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
1193
                               (err[0], err[2]), errors.ECODE_RESOLVER)
1194

    
1195

    
1196
def ListVolumeGroups():
1197
  """List volume groups and their size
1198

1199
  @rtype: dict
1200
  @return:
1201
       Dictionary with keys volume name and values
1202
       the size of the volume
1203

1204
  """
1205
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1206
  result = RunCmd(command)
1207
  retval = {}
1208
  if result.failed:
1209
    return retval
1210

    
1211
  for line in result.stdout.splitlines():
1212
    try:
1213
      name, size = line.split()
1214
      size = int(float(size))
1215
    except (IndexError, ValueError), err:
1216
      logging.error("Invalid output from vgs (%s): %s", err, line)
1217
      continue
1218

    
1219
    retval[name] = size
1220

    
1221
  return retval
1222

    
1223

    
1224
def BridgeExists(bridge):
1225
  """Check whether the given bridge exists in the system
1226

1227
  @type bridge: str
1228
  @param bridge: the bridge name to check
1229
  @rtype: boolean
1230
  @return: True if it does
1231

1232
  """
1233
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1234

    
1235

    
1236
def NiceSort(name_list):
1237
  """Sort a list of strings based on digit and non-digit groupings.
1238

1239
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1240
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1241
  'a11']}.
1242

1243
  The sort algorithm breaks each name in groups of either only-digits
1244
  or no-digits. Only the first eight such groups are considered, and
1245
  after that we just use what's left of the string.
1246

1247
  @type name_list: list
1248
  @param name_list: the names to be sorted
1249
  @rtype: list
1250
  @return: a copy of the name list sorted with our algorithm
1251

1252
  """
1253
  _SORTER_BASE = "(\D+|\d+)"
1254
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1255
                                                  _SORTER_BASE, _SORTER_BASE,
1256
                                                  _SORTER_BASE, _SORTER_BASE,
1257
                                                  _SORTER_BASE, _SORTER_BASE)
1258
  _SORTER_RE = re.compile(_SORTER_FULL)
1259
  _SORTER_NODIGIT = re.compile("^\D*$")
1260
  def _TryInt(val):
1261
    """Attempts to convert a variable to integer."""
1262
    if val is None or _SORTER_NODIGIT.match(val):
1263
      return val
1264
    rval = int(val)
1265
    return rval
1266

    
1267
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1268
             for name in name_list]
1269
  to_sort.sort()
1270
  return [tup[1] for tup in to_sort]
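
# Illustrative example (not part of the original module):
#
#   NiceSort(["node10", "node1", "node2", "a"])
#     -> ["a", "node1", "node2", "node10"]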
1271

    
1272

    
1273
def TryConvert(fn, val):
1274
  """Try to convert a value ignoring errors.
1275

1276
  This function tries to apply function I{fn} to I{val}. If no
1277
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1278
  the result, else it will return the original value. Any other
1279
  exceptions are propagated to the caller.
1280

1281
  @type fn: callable
1282
  @param fn: function to apply to the value
1283
  @param val: the value to be converted
1284
  @return: The converted value if the conversion was successful,
1285
      otherwise the original value.
1286

1287
  """
1288
  try:
1289
    nv = fn(val)
1290
  except (ValueError, TypeError):
1291
    nv = val
1292
  return nv
1293

    
1294

    
1295
def IsValidIP(ip):
1296
  """Verifies the syntax of an IPv4 address.
1297

1298
  This function checks if the IPv4 address passed is valid or not based
1299
  on syntax (not IP range, class calculations, etc.).
1300

1301
  @type ip: str
1302
  @param ip: the address to be checked
1303
  @rtype: a regular expression match object
1304
  @return: a regular expression match object, or None if the
1305
      address is not valid
1306

1307
  """
1308
  unit = "(0|[1-9]\d{0,2})"
1309
  #TODO: convert and return only boolean
1310
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
1311

    
1312

    
1313
def IsValidShellParam(word):
1314
  """Verifies is the given word is safe from the shell's p.o.v.
1315

1316
  This means that we can pass this to a command via the shell and be
1317
  sure that it doesn't alter the command line and is passed as such to
1318
  the actual command.
1319

1320
  Note that we are overly restrictive here, in order to be on the safe
1321
  side.
1322

1323
  @type word: str
1324
  @param word: the word to check
1325
  @rtype: boolean
1326
  @return: True if the word is 'safe'
1327

1328
  """
1329
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1330

    
1331

    
1332
def BuildShellCmd(template, *args):
1333
  """Build a safe shell command line from the given arguments.
1334

1335
  This function will check all arguments in the args list so that they
1336
  are valid shell parameters (i.e. they don't contain shell
1337
  metacharacters). If everything is ok, it will return the result of
1338
  template % args.
1339

1340
  @type template: str
1341
  @param template: the string holding the template for the
1342
      string formatting
1343
  @rtype: str
1344
  @return: the expanded command line
1345

1346
  """
1347
  for word in args:
1348
    if not IsValidShellParam(word):
1349
      raise errors.ProgrammerError("Shell argument '%s' contains"
1350
                                   " invalid characters" % word)
1351
  return template % args
1352

    
1353

    
1354
def FormatUnit(value, units):
1355
  """Formats an incoming number of MiB with the appropriate unit.
1356

1357
  @type value: int
1358
  @param value: integer representing the value in MiB (1048576)
1359
  @type units: char
1360
  @param units: the type of formatting we should do:
1361
      - 'h' for automatic scaling
1362
      - 'm' for MiBs
1363
      - 'g' for GiBs
1364
      - 't' for TiBs
1365
  @rtype: str
1366
  @return: the formatted value (with suffix)
1367

1368
  """
1369
  if units not in ('m', 'g', 't', 'h'):
1370
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1371

    
1372
  suffix = ''
1373

    
1374
  if units == 'm' or (units == 'h' and value < 1024):
1375
    if units == 'h':
1376
      suffix = 'M'
1377
    return "%d%s" % (round(value, 0), suffix)
1378

    
1379
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1380
    if units == 'h':
1381
      suffix = 'G'
1382
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1383

    
1384
  else:
1385
    if units == 'h':
1386
      suffix = 'T'
1387
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1388

    
1389

    
1390
def ParseUnit(input_string):
1391
  """Tries to extract number and scale from the given string.
1392

1393
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1394
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1395
  is always an int in MiB.
1396

1397
  """
1398
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1399
  if not m:
1400
    raise errors.UnitParseError("Invalid format")
1401

    
1402
  value = float(m.groups()[0])
1403

    
1404
  unit = m.groups()[1]
1405
  if unit:
1406
    lcunit = unit.lower()
1407
  else:
1408
    lcunit = 'm'
1409

    
1410
  if lcunit in ('m', 'mb', 'mib'):
1411
    # Value already in MiB
1412
    pass
1413

    
1414
  elif lcunit in ('g', 'gb', 'gib'):
1415
    value *= 1024
1416

    
1417
  elif lcunit in ('t', 'tb', 'tib'):
1418
    value *= 1024 * 1024
1419

    
1420
  else:
1421
    raise errors.UnitParseError("Unknown unit: %s" % unit)
1422

    
1423
  # Make sure we round up
1424
  if int(value) < value:
1425
    value += 1
1426

    
1427
  # Round up to the next multiple of 4
1428
  value = int(value)
1429
  if value % 4:
1430
    value += 4 - value % 4
1431

    
1432
  return value
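
# Illustrative examples (not part of the original module); the result is
# MiB, rounded up to a multiple of 4:
#
#   ParseUnit("512")   -> 512
#   ParseUnit("0.5g")  -> 512
#   ParseUnit("1.3G")  -> 1332
#   ParseUnit("4t")    -> 4194304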
1433

    
1434

    
1435
def AddAuthorizedKey(file_name, key):
1436
  """Adds an SSH public key to an authorized_keys file.
1437

1438
  @type file_name: str
1439
  @param file_name: path to authorized_keys file
1440
  @type key: str
1441
  @param key: string containing key
1442

1443
  """
1444
  key_fields = key.split()
1445

    
1446
  f = open(file_name, 'a+')
1447
  try:
1448
    nl = True
1449
    for line in f:
1450
      # Ignore whitespace changes
1451
      if line.split() == key_fields:
1452
        break
1453
      nl = line.endswith('\n')
1454
    else:
1455
      if not nl:
1456
        f.write("\n")
1457
      f.write(key.rstrip('\r\n'))
1458
      f.write("\n")
1459
      f.flush()
1460
  finally:
1461
    f.close()
1462

    
1463

    
1464
def RemoveAuthorizedKey(file_name, key):
1465
  """Removes an SSH public key from an authorized_keys file.
1466

1467
  @type file_name: str
1468
  @param file_name: path to authorized_keys file
1469
  @type key: str
1470
  @param key: string containing key
1471

1472
  """
1473
  key_fields = key.split()
1474

    
1475
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1476
  try:
1477
    out = os.fdopen(fd, 'w')
1478
    try:
1479
      f = open(file_name, 'r')
1480
      try:
1481
        for line in f:
1482
          # Ignore whitespace changes while comparing lines
1483
          if line.split() != key_fields:
1484
            out.write(line)
1485

    
1486
        out.flush()
1487
        os.rename(tmpname, file_name)
1488
      finally:
1489
        f.close()
1490
    finally:
1491
      out.close()
1492
  except:
1493
    RemoveFile(tmpname)
1494
    raise
1495

    
1496

    
1497
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1498
  """Sets the name of an IP address and hostname in /etc/hosts.
1499

1500
  @type file_name: str
1501
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1502
  @type ip: str
1503
  @param ip: the IP address
1504
  @type hostname: str
1505
  @param hostname: the hostname to be added
1506
  @type aliases: list
1507
  @param aliases: the list of aliases to add for the hostname
1508

1509
  """
1510
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1511
  # Ensure aliases are unique
1512
  aliases = UniqueSequence([hostname] + aliases)[1:]
1513

    
1514
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1515
  try:
1516
    out = os.fdopen(fd, 'w')
1517
    try:
1518
      f = open(file_name, 'r')
1519
      try:
1520
        for line in f:
1521
          fields = line.split()
1522
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1523
            continue
1524
          out.write(line)
1525

    
1526
        out.write("%s\t%s" % (ip, hostname))
1527
        if aliases:
1528
          out.write(" %s" % ' '.join(aliases))
1529
        out.write('\n')
1530

    
1531
        out.flush()
1532
        os.fsync(out)
1533
        os.chmod(tmpname, 0644)
1534
        os.rename(tmpname, file_name)
1535
      finally:
1536
        f.close()
1537
    finally:
1538
      out.close()
1539
  except:
1540
    RemoveFile(tmpname)
1541
    raise
1542

    
1543

    
1544
def AddHostToEtcHosts(hostname):
1545
  """Wrapper around SetEtcHostsEntry.
1546

1547
  @type hostname: str
1548
  @param hostname: a hostname that will be resolved and added to
1549
      L{constants.ETC_HOSTS}
1550

1551
  """
1552
  hi = HostInfo(name=hostname)
1553
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1554

    
1555

    
1556
def RemoveEtcHostsEntry(file_name, hostname):
1557
  """Removes a hostname from /etc/hosts.
1558

1559
  IP addresses without names are removed from the file.
1560

1561
  @type file_name: str
1562
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1563
  @type hostname: str
1564
  @param hostname: the hostname to be removed
1565

1566
  """
1567
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1568
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1569
  try:
1570
    out = os.fdopen(fd, 'w')
1571
    try:
1572
      f = open(file_name, 'r')
1573
      try:
1574
        for line in f:
1575
          fields = line.split()
1576
          if len(fields) > 1 and not fields[0].startswith('#'):
1577
            names = fields[1:]
1578
            if hostname in names:
1579
              while hostname in names:
1580
                names.remove(hostname)
1581
              if names:
1582
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1583
              continue
1584

    
1585
          out.write(line)
1586

    
1587
        out.flush()
1588
        os.fsync(out)
1589
        os.chmod(tmpname, 0644)
1590
        os.rename(tmpname, file_name)
1591
      finally:
1592
        f.close()
1593
    finally:
1594
      out.close()
1595
  except:
1596
    RemoveFile(tmpname)
1597
    raise
1598

    
1599

    
1600
def RemoveHostFromEtcHosts(hostname):
1601
  """Wrapper around RemoveEtcHostsEntry.
1602

1603
  @type hostname: str
1604
  @param hostname: hostname that will be resolved and its
1605
      full and short name will be removed from
1606
      L{constants.ETC_HOSTS}
1607

1608
  """
1609
  hi = HostInfo(name=hostname)
1610
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
1611
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
1612

    
1613

    
1614
def TimestampForFilename():
1615
  """Returns the current time formatted for filenames.
1616

1617
  The format doesn't contain colons as some shells and applications treat
  them as separators.
1619

1620
  """
1621
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1622

    
1623

    
1624
def CreateBackup(file_name):
1625
  """Creates a backup of a file.
1626

1627
  @type file_name: str
1628
  @param file_name: file to be backed up
1629
  @rtype: str
1630
  @return: the path to the newly created backup
1631
  @raise errors.ProgrammerError: for invalid file names
1632

1633
  """
1634
  if not os.path.isfile(file_name):
1635
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1636
                                file_name)
1637

    
1638
  prefix = ("%s.backup-%s." %
1639
            (os.path.basename(file_name), TimestampForFilename()))
1640
  dir_name = os.path.dirname(file_name)
1641

    
1642
  fsrc = open(file_name, 'rb')
1643
  try:
1644
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1645
    fdst = os.fdopen(fd, 'wb')
1646
    try:
1647
      logging.debug("Backing up %s at %s", file_name, backup_name)
1648
      shutil.copyfileobj(fsrc, fdst)
1649
    finally:
1650
      fdst.close()
1651
  finally:
1652
    fsrc.close()
1653

    
1654
  return backup_name
1655

    
1656

    
1657
def ShellQuote(value):
1658
  """Quotes shell argument according to POSIX.
1659

1660
  @type value: str
1661
  @param value: the argument to be quoted
1662
  @rtype: str
1663
  @return: the quoted value
1664

1665
  """
1666
  if _re_shell_unquoted.match(value):
1667
    return value
1668
  else:
1669
    return "'%s'" % value.replace("'", "'\\''")
1670

    
1671

    
1672
def ShellQuoteArgs(args):
1673
  """Quotes a list of shell arguments.
1674

1675
  @type args: list
1676
  @param args: list of arguments to be quoted
1677
  @rtype: str
1678
  @return: the quoted arguments concatenated with spaces
1679

1680
  """
1681
  return ' '.join([ShellQuote(i) for i in args])
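
# Illustrative examples (not part of the original module):
#
#   ShellQuote("file.txt")          -> "file.txt"   (no quoting needed)
#   ShellQuote("it's")              -> "'it'\''s'"  (shell-safe quoting)
#   ShellQuoteArgs(["echo", "a b"]) -> "echo 'a b'"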
1682

    
1683

    
1684
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
1685
  """Simple ping implementation using TCP connect(2).
1686

1687
  Check if the given IP is reachable by attempting a TCP connect
1688
  to it.
1689

1690
  @type target: str
1691
  @param target: the IP or hostname to ping
1692
  @type port: int
1693
  @param port: the port to connect to
1694
  @type timeout: int
1695
  @param timeout: the timeout on the connection attempt
1696
  @type live_port_needed: boolean
1697
  @param live_port_needed: whether a closed port will cause the
1698
      function to return failure, as if there was a timeout
1699
  @type source: str or None
1700
  @param source: if specified, will cause the connect to be made
1701
      from this specific source address; failures to bind other
1702
      than C{EADDRNOTAVAIL} will be ignored
1703

1704
  """
1705
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1706

    
1707
  success = False
1708

    
1709
  if source is not None:
1710
    try:
1711
      sock.bind((source, 0))
1712
    except socket.error, (errcode, _):
1713
      if errcode == errno.EADDRNOTAVAIL:
1714
        success = False
1715

    
1716
  sock.settimeout(timeout)
1717

    
1718
  try:
1719
    sock.connect((target, port))
1720
    sock.close()
1721
    success = True
1722
  except socket.timeout:
1723
    success = False
1724
  except socket.error, (errcode, _):
1725
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)
1726

    
1727
  return success
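
# Illustrative usage sketch (not part of the original module; the address
# and port are made up for the example):
#
#   if TcpPing("192.0.2.10", 1811, timeout=5, live_port_needed=True):
#     ...
#
# With live_port_needed=False a refused connection still counts as success,
# i.e. only reachability of the host is checked.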
1728

    
1729

    
1730
def OwnIpAddress(address):
1731
  """Check if the current host has the the given IP address.
1732

1733
  Currently this is done by TCP-pinging the address from the loopback
1734
  address.
1735

1736
  @type address: string
1737
  @param address: the address to check
1738
  @rtype: bool
1739
  @return: True if we own the address
1740

1741
  """
1742
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
1743
                 source=constants.LOCALHOST_IP_ADDRESS)
1744

    
1745

    
1746
def ListVisibleFiles(path):
1747
  """Returns a list of visible files in a directory.
1748

1749
  @type path: str
1750
  @param path: the directory to enumerate
1751
  @rtype: list
1752
  @return: the list of all files not starting with a dot
1753
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1754

1755
  """
1756
  if not IsNormAbsPath(path):
1757
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1758
                                 " absolute/normalized: '%s'" % path)
1759
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1760
  return files
1761

    
1762

    
1763
def GetHomeDir(user, default=None):
1764
  """Try to get the homedir of the given user.
1765

1766
  The user can be passed either as a string (denoting the name) or as
1767
  an integer (denoting the user id). If the user is not found, the
1768
  'default' argument is returned, which defaults to None.
1769

1770
  """
1771
  try:
1772
    if isinstance(user, basestring):
1773
      result = pwd.getpwnam(user)
1774
    elif isinstance(user, (int, long)):
1775
      result = pwd.getpwuid(user)
1776
    else:
1777
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1778
                                   type(user))
1779
  except KeyError:
1780
    return default
1781
  return result.pw_dir
1782

    
1783

    
1784
def NewUUID():
1785
  """Returns a random UUID.
1786

1787
  @note: This is a Linux-specific method as it uses the /proc
1788
      filesystem.
1789
  @rtype: str
1790

1791
  """
1792
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1793

    
1794

    
1795
def GenerateSecret(numbytes=20):
1796
  """Generates a random secret.
1797

1798
  This will generate a pseudo-random secret returning a hex string
1799
  (so that it can be used where an ASCII string is needed).
1800

1801
  @param numbytes: the number of bytes which will be represented by the returned
1802
      string (defaulting to 20, the length of a SHA1 hash)
1803
  @rtype: str
1804
  @return: a hex representation of the pseudo-random sequence
1805

1806
  """
1807
  return os.urandom(numbytes).encode('hex')
1808

    
1809

    
1810
def EnsureDirs(dirs):
1811
  """Make required directories, if they don't exist.
1812

1813
  @param dirs: list of tuples (dir_name, dir_mode)
1814
  @type dirs: list of (string, integer)
1815

1816
  """
1817
  for dir_name, dir_mode in dirs:
1818
    try:
1819
      os.mkdir(dir_name, dir_mode)
1820
    except EnvironmentError, err:
1821
      if err.errno != errno.EEXIST:
1822
        raise errors.GenericError("Cannot create needed directory"
1823
                                  " '%s': %s" % (dir_name, err))
1824
    try:
1825
      os.chmod(dir_name, dir_mode)
1826
    except EnvironmentError, err:
1827
      raise errors.GenericError("Cannot change directory permissions on"
1828
                                " '%s': %s" % (dir_name, err))
1829
    if not os.path.isdir(dir_name):
1830
      raise errors.GenericError("%s is not a directory" % dir_name)
1831

    
1832

    
1833
def ReadFile(file_name, size=-1):
1834
  """Reads a file.
1835

1836
  @type size: int
1837
  @param size: Read at most size bytes (if negative, entire file)
1838
  @rtype: str
1839
  @return: the (possibly partial) content of the file
1840

1841
  """
1842
  f = open(file_name, "r")
1843
  try:
1844
    return f.read(size)
1845
  finally:
1846
    f.close()
1847

    
1848

    
1849
def WriteFile(file_name, fn=None, data=None,
1850
              mode=None, uid=-1, gid=-1,
1851
              atime=None, mtime=None, close=True,
1852
              dry_run=False, backup=False,
1853
              prewrite=None, postwrite=None):
1854
  """(Over)write a file atomically.
1855

1856
  The file_name and either fn (a function taking one argument, the
1857
  file descriptor, and which should write the data to it) or data (the
1858
  contents of the file) must be passed. The other arguments are
1859
  optional and allow setting the file mode, owner and group, and the
1860
  mtime/atime of the file.
1861

1862
  If the function doesn't raise an exception, it has succeeded and the
1863
  target file has the new contents. If the function has raised an
1864
  exception, an existing target file should be unmodified and the
1865
  temporary file should be removed.
1866

1867
  @type file_name: str
1868
  @param file_name: the target filename
1869
  @type fn: callable
1870
  @param fn: content writing function, called with
1871
      file descriptor as parameter
1872
  @type data: str
1873
  @param data: contents of the file
1874
  @type mode: int
1875
  @param mode: file mode
1876
  @type uid: int
1877
  @param uid: the owner of the file
1878
  @type gid: int
1879
  @param gid: the group of the file
1880
  @type atime: int
1881
  @param atime: a custom access time to be set on the file
1882
  @type mtime: int
1883
  @param mtime: a custom modification time to be set on the file
1884
  @type close: boolean
1885
  @param close: whether to close file after writing it
1886
  @type prewrite: callable
1887
  @param prewrite: function to be called before writing content
1888
  @type postwrite: callable
1889
  @param postwrite: function to be called after writing content
1890

1891
  @rtype: None or int
1892
  @return: None if the 'close' parameter evaluates to True,
1893
      otherwise the file descriptor
1894

1895
  @raise errors.ProgrammerError: if any of the arguments are not valid
1896

1897
  """
1898
  if not os.path.isabs(file_name):
1899
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1900
                                 " absolute: '%s'" % file_name)
1901

    
1902
  if [fn, data].count(None) != 1:
1903
    raise errors.ProgrammerError("fn or data required")
1904

    
1905
  if [atime, mtime].count(None) == 1:
1906
    raise errors.ProgrammerError("Both atime and mtime must be either"
1907
                                 " set or None")
1908

    
1909
  if backup and not dry_run and os.path.isfile(file_name):
1910
    CreateBackup(file_name)
1911

    
1912
  dir_name, base_name = os.path.split(file_name)
1913
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1914
  do_remove = True
1915
  # here we need to make sure we remove the temp file, if any error
1916
  # leaves it in place
1917
  try:
1918
    if uid != -1 or gid != -1:
1919
      os.chown(new_name, uid, gid)
1920
    if mode:
1921
      os.chmod(new_name, mode)
1922
    if callable(prewrite):
1923
      prewrite(fd)
1924
    if data is not None:
1925
      os.write(fd, data)
1926
    else:
1927
      fn(fd)
1928
    if callable(postwrite):
1929
      postwrite(fd)
1930
    os.fsync(fd)
1931
    if atime is not None and mtime is not None:
1932
      os.utime(new_name, (atime, mtime))
1933
    if not dry_run:
1934
      os.rename(new_name, file_name)
1935
      do_remove = False
1936
  finally:
1937
    if close:
1938
      os.close(fd)
1939
      result = None
1940
    else:
1941
      result = fd
1942
    if do_remove:
1943
      RemoveFile(new_name)
1944

    
1945
  return result
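

# Illustrative sketch, not part of the Ganeti API: atomically (over)writing a
# small status file with restrictive permissions. The path is hypothetical.
def _ExampleWriteFile():
  """Writes 'ok' plus a newline to a status file with mode 0600."""
  WriteFile("/var/run/example.status", data="ok\n", mode=0600)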


def ReadOneLineFile(file_name, strict=False):
1949
  """Return the first non-empty line from a file.
1950

1951
  @type strict: boolean
1952
  @param strict: if True, abort if the file has more than one
1953
      non-empty line
1954

1955
  """
1956
  file_lines = ReadFile(file_name).splitlines()
1957
  full_lines = filter(bool, file_lines)
1958
  if not file_lines or not full_lines:
1959
    raise errors.GenericError("No data in one-liner file %s" % file_name)
1960
  elif strict and len(full_lines) > 1:
1961
    raise errors.GenericError("Too many lines in one-liner file %s" %
1962
                              file_name)
1963
  return full_lines[0]
1964

    
1965

    
1966
def FirstFree(seq, base=0):
1967
  """Returns the first non-existing integer from seq.
1968

1969
  The seq argument should be a sorted list of positive integers. The
1970
  first time the index of an element is smaller than the element
1971
  value, the index will be returned.
1972

1973
  The base argument is used to start at a different offset,
1974
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1975

1976
  Example: C{[0, 1, 3]} will return I{2}.
1977

1978
  @type seq: sequence
1979
  @param seq: the sequence to be analyzed.
1980
  @type base: int
1981
  @param base: use this value as the base index of the sequence
1982
  @rtype: int
1983
  @return: the first non-used index in the sequence
1984

1985
  """
1986
  for idx, elem in enumerate(seq):
1987
    assert elem >= base, "Passed element is higher than base offset"
1988
    if elem > idx + base:
1989
      # idx is not used
1990
      return idx + base
1991
  return None
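

# Illustrative sketch, not part of the Ganeti API: FirstFree() is typically
# used to pick the first unused index (e.g. a free minor number) from a
# sorted list of used indices.
def _ExampleFirstFree():
  """Returns (2, 5, None) for the three sample sequences below."""
  return (FirstFree([0, 1, 3]),          # index 2 is the first gap
          FirstFree([3, 4, 6], base=3),  # with base=3, index 5 is free
          FirstFree([0, 1, 2]))          # no gap, hence None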


def SingleWaitForFdCondition(fdobj, event, timeout):
1995
  """Waits for a condition to occur on the socket.
1996

1997
  Immediately returns at the first interruption.
1998

1999
  @type fdobj: integer or object supporting a fileno() method
2000
  @param fdobj: entity to wait for events on
2001
  @type event: integer
2002
  @param event: ORed condition (see select module)
2003
  @type timeout: float or None
2004
  @param timeout: Timeout in seconds
2005
  @rtype: int or None
2006
  @return: None for timeout, otherwise occurred conditions
2007

2008
  """
2009
  check = (event | select.POLLPRI |
2010
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
2011

    
2012
  if timeout is not None:
2013
    # Poller object expects milliseconds
2014
    timeout *= 1000
2015

    
2016
  poller = select.poll()
2017
  poller.register(fdobj, event)
2018
  try:
2019
    # TODO: If the main thread receives a signal and we have no timeout, we
2020
    # could wait forever. This should check a global "quit" flag or something
2021
    # every so often.
2022
    io_events = poller.poll(timeout)
2023
  except select.error, err:
2024
    if err[0] != errno.EINTR:
2025
      raise
2026
    io_events = []
2027
  if io_events and io_events[0][1] & check:
2028
    return io_events[0][1]
2029
  else:
2030
    return None
2031

    
2032

    
2033
class FdConditionWaiterHelper(object):
2034
  """Retry helper for WaitForFdCondition.
2035

2036
  This class contains the retried and wait functions that make sure
2037
  WaitForFdCondition can continue waiting until the timeout is actually
2038
  expired.
2039

2040
  """
2041

    
2042
  def __init__(self, timeout):
2043
    self.timeout = timeout
2044

    
2045
  def Poll(self, fdobj, event):
2046
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
2047
    if result is None:
2048
      raise RetryAgain()
2049
    else:
2050
      return result
2051

    
2052
  def UpdateTimeout(self, timeout):
2053
    self.timeout = timeout
2054

    
2055

    
2056
def WaitForFdCondition(fdobj, event, timeout):
2057
  """Waits for a condition to occur on the socket.
2058

2059
  Retries until the timeout is expired, even if interrupted.
2060

2061
  @type fdobj: integer or object supporting a fileno() method
2062
  @param fdobj: entity to wait for events on
2063
  @type event: integer
2064
  @param event: ORed condition (see select module)
2065
  @type timeout: float or None
2066
  @param timeout: Timeout in seconds
2067
  @rtype: int or None
2068
  @return: None for timeout, otherwise occurred conditions
2069

2070
  """
2071
  if timeout is not None:
2072
    retrywaiter = FdConditionWaiterHelper(timeout)
2073
    try:
2074
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
2075
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
2076
    except RetryTimeout:
2077
      result = None
2078
  else:
2079
    result = None
2080
    while result is None:
2081
      result = SingleWaitForFdCondition(fdobj, event, timeout)
2082
  return result
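

# Illustrative sketch, not part of the Ganeti API: waiting up to two seconds
# for a socket (or any object with a fileno() method) to become readable.
def _ExampleWaitForReadable(sock):
  """Returns the event mask if C{sock} became readable, None on timeout."""
  return WaitForFdCondition(sock, select.POLLIN, 2.0)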


def UniqueSequence(seq):
2086
  """Returns a list with unique elements.
2087

2088
  Element order is preserved.
2089

2090
  @type seq: sequence
2091
  @param seq: the sequence with the source elements
2092
  @rtype: list
2093
  @return: list of unique elements from seq
2094

2095
  """
2096
  seen = set()
2097
  return [i for i in seq if i not in seen and not seen.add(i)]
2098

    
2099

    
2100
def NormalizeAndValidateMac(mac):
2101
  """Normalizes and check if a MAC address is valid.
2102

2103
  Checks whether the supplied MAC address is formally correct, only
2104
  accepts colon separated format. Normalize it to all lower.
2105

2106
  @type mac: str
2107
  @param mac: the MAC to be validated
2108
  @rtype: str
2109
  @return: returns the normalized and validated MAC.
2110

2111
  @raise errors.OpPrereqError: If the MAC isn't valid
2112

2113
  """
2114
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
2115
  if not mac_check.match(mac):
2116
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
2117
                               mac, errors.ECODE_INVAL)
2118

    
2119
  return mac.lower()
2120

    
2121

    
2122
def TestDelay(duration):
2123
  """Sleep for a fixed amount of time.
2124

2125
  @type duration: float
2126
  @param duration: the sleep duration
2127
  @rtype: tuple
  @return: (False, error message) for a negative duration, (True, None)
      otherwise
2129

2130
  """
2131
  if duration < 0:
2132
    return False, "Invalid sleep duration"
2133
  time.sleep(duration)
2134
  return True, None
2135

    
2136

    
2137
def _CloseFDNoErr(fd, retries=5):
2138
  """Close a file descriptor ignoring errors.
2139

2140
  @type fd: int
2141
  @param fd: the file descriptor
2142
  @type retries: int
2143
  @param retries: how many retries to make, in case we get any
2144
      other error than EBADF
2145

2146
  """
2147
  try:
2148
    os.close(fd)
2149
  except OSError, err:
2150
    if err.errno != errno.EBADF:
2151
      if retries > 0:
2152
        _CloseFDNoErr(fd, retries - 1)
2153
    # else either it's closed already or we're out of retries, so we
2154
    # ignore this and go on
2155

    
2156

    
2157
def CloseFDs(noclose_fds=None):
2158
  """Close file descriptors.
2159

2160
  This closes all file descriptors above 2 (i.e. except
2161
  stdin/out/err).
2162

2163
  @type noclose_fds: list or None
2164
  @param noclose_fds: if given, it denotes a list of file descriptors
2165
      that should not be closed
2166

2167
  """
2168
  # Default maximum for the number of available file descriptors.
2169
  if 'SC_OPEN_MAX' in os.sysconf_names:
2170
    try:
2171
      MAXFD = os.sysconf('SC_OPEN_MAX')
2172
      if MAXFD < 0:
2173
        MAXFD = 1024
2174
    except OSError:
2175
      MAXFD = 1024
2176
  else:
2177
    MAXFD = 1024
2178
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2179
  if (maxfd == resource.RLIM_INFINITY):
2180
    maxfd = MAXFD
2181

    
2182
  # Iterate through and close all file descriptors (except the standard ones)
2183
  for fd in range(3, maxfd):
2184
    if noclose_fds and fd in noclose_fds:
2185
      continue
2186
    _CloseFDNoErr(fd)
2187

    
2188

    
2189
def Mlockall():
2190
  """Lock current process' virtual address space into RAM.
2191

2192
  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2193
  see mlock(2) for more details. This function requires the ctypes module.
2194

2195
  """
2196
  if ctypes is None:
2197
    logging.warning("Cannot set memory lock, ctypes module not found")
2198
    return
2199

    
2200
  libc = ctypes.cdll.LoadLibrary("libc.so.6")
2201
  if libc is None:
2202
    logging.error("Cannot set memory lock, ctypes cannot load libc")
2203
    return
2204

    
2205
  # Some older version of the ctypes module don't have built-in functionality
2206
  # to access the errno global variable, where function error codes are stored.
2207
  # By declaring this variable as a pointer to an integer we can then access
2208
  # its value correctly, should the mlockall call fail, in order to see what
2209
  # the actual error code was.
2210
  # pylint: disable-msg=W0212
2211
  libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int)
2212

    
2213
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2214
    # pylint: disable-msg=W0212
2215
    logging.error("Cannot set memory lock: %s",
2216
                  os.strerror(libc.__errno_location().contents.value))
2217
    return
2218

    
2219
  logging.debug("Memory lock set")
2220

    
2221

    
2222
def Daemonize(logfile, run_uid, run_gid):
2223
  """Daemonize the current process.
2224

2225
  This detaches the current process from the controlling terminal and
2226
  runs it in the background as a daemon.
2227

2228
  @type logfile: str
2229
  @param logfile: the logfile to which we should redirect stdout/stderr
2230
  @type run_uid: int
2231
  @param run_uid: Run the child under this uid
2232
  @type run_gid: int
2233
  @param run_gid: Run the child under this gid
2234
  @rtype: int
2235
  @return: the value zero
2236

2237
  """
2238
  # pylint: disable-msg=W0212
2239
  # yes, we really want os._exit
2240
  UMASK = 077
2241
  WORKDIR = "/"
2242

    
2243
  # this might fail
2244
  pid = os.fork()
2245
  if (pid == 0):  # The first child.
2246
    os.setsid()
2247
    # FIXME: When removing again and moving to start-stop-daemon privilege drop
2248
    #        make sure to check for config permission and bail out when invoked
2249
    #        with wrong user.
2250
    os.setgid(run_gid)
2251
    os.setuid(run_uid)
2252
    # this might fail
2253
    pid = os.fork() # Fork a second child.
2254
    if (pid == 0):  # The second child.
2255
      os.chdir(WORKDIR)
2256
      os.umask(UMASK)
2257
    else:
2258
      # exit() or _exit()?  See below.
2259
      os._exit(0) # Exit parent (the first child) of the second child.
2260
  else:
2261
    os._exit(0) # Exit parent of the first child.
2262

    
2263
  for fd in range(3):
2264
    _CloseFDNoErr(fd)
2265
  i = os.open("/dev/null", os.O_RDONLY) # stdin
2266
  assert i == 0, "Can't close/reopen stdin"
2267
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2268
  assert i == 1, "Can't close/reopen stdout"
2269
  # Duplicate standard output to standard error.
2270
  os.dup2(1, 2)
2271
  return 0
2272

    
2273

    
2274
def DaemonPidFileName(name):
2275
  """Compute a ganeti pid file absolute path
2276

2277
  @type name: str
2278
  @param name: the daemon name
2279
  @rtype: str
2280
  @return: the full path to the pidfile corresponding to the given
2281
      daemon name
2282

2283
  """
2284
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2285

    
2286

    
2287
def EnsureDaemon(name):
2288
  """Check for and start daemon if not alive.
2289

2290
  """
2291
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2292
  if result.failed:
2293
    logging.error("Can't start daemon '%s', failure %s, output: %s",
2294
                  name, result.fail_reason, result.output)
2295
    return False
2296

    
2297
  return True
2298

    
2299

    
2300
def StopDaemon(name):
2301
  """Stop daemon
2302

2303
  """
2304
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2305
  if result.failed:
2306
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
2307
                  name, result.fail_reason, result.output)
2308
    return False
2309

    
2310
  return True
2311

    
2312

    
2313
def WritePidFile(name):
2314
  """Write the current process pidfile.
2315

2316
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2317

2318
  @type name: str
2319
  @param name: the daemon name to use
2320
  @raise errors.GenericError: if the pid file already exists and
2321
      points to a live process
2322

2323
  """
2324
  pid = os.getpid()
2325
  pidfilename = DaemonPidFileName(name)
2326
  if IsProcessAlive(ReadPidFile(pidfilename)):
2327
    raise errors.GenericError("%s contains a live process" % pidfilename)
2328

    
2329
  WriteFile(pidfilename, data="%d\n" % pid)
2330

    
2331

    
2332
def RemovePidFile(name):
2333
  """Remove the current process pidfile.
2334

2335
  Any errors are ignored.
2336

2337
  @type name: str
2338
  @param name: the daemon name used to derive the pidfile name
2339

2340
  """
2341
  pidfilename = DaemonPidFileName(name)
2342
  # TODO: we could check here that the file contains our pid
2343
  try:
2344
    RemoveFile(pidfilename)
2345
  except: # pylint: disable-msg=W0702
2346
    pass
2347

    
2348

    
2349
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2350
                waitpid=False):
2351
  """Kill a process given by its pid.
2352

2353
  @type pid: int
2354
  @param pid: The PID to terminate.
2355
  @type signal_: int
2356
  @param signal_: The signal to send, by default SIGTERM
2357
  @type timeout: int
2358
  @param timeout: The timeout after which, if the process is still alive,
2359
                  a SIGKILL will be sent. If not positive, no such checking
2360
                  will be done
2361
  @type waitpid: boolean
2362
  @param waitpid: If true, we should waitpid on this process after
2363
      sending signals, since it's our own child and otherwise it
2364
      would remain as zombie
2365

2366
  """
2367
  def _helper(pid, signal_, wait):
2368
    """Simple helper to encapsulate the kill/waitpid sequence"""
2369
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2370
      try:
2371
        os.waitpid(pid, os.WNOHANG)
2372
      except OSError:
2373
        pass
2374

    
2375
  if pid <= 0:
2376
    # kill with pid=0 == suicide
2377
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2378

    
2379
  if not IsProcessAlive(pid):
2380
    return
2381

    
2382
  _helper(pid, signal_, waitpid)
2383

    
2384
  if timeout <= 0:
2385
    return
2386

    
2387
  def _CheckProcess():
2388
    if not IsProcessAlive(pid):
2389
      return
2390

    
2391
    try:
2392
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2393
    except OSError:
2394
      raise RetryAgain()
2395

    
2396
    if result_pid > 0:
2397
      return
2398

    
2399
    raise RetryAgain()
2400

    
2401
  try:
2402
    # Wait up to $timeout seconds
2403
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2404
  except RetryTimeout:
2405
    pass
2406

    
2407
  if IsProcessAlive(pid):
2408
    # Kill process if it's still alive
2409
    _helper(pid, signal.SIGKILL, waitpid)
2410

    
2411

    
2412
def FindFile(name, search_path, test=os.path.exists):
2413
  """Look for a filesystem object in a given path.
2414

2415
  This is a generic function to search for filesystem objects (files,
  dirs) under a given search path.
2417

2418
  @type name: str
2419
  @param name: the name to look for
2420
  @type search_path: list
  @param search_path: directories in which to search
2422
  @type test: callable
2423
  @param test: a function taking one argument that should return True
2424
      if a given object is valid; the default value is
2425
      os.path.exists, causing only existing files to be returned
2426
  @rtype: str or None
2427
  @return: full path to the object if found, None otherwise
2428

2429
  """
2430
  # validate the filename mask
2431
  if constants.EXT_PLUGIN_MASK.match(name) is None:
2432
    logging.critical("Invalid value passed for external script name: '%s'",
2433
                     name)
2434
    return None
2435

    
2436
  for dir_name in search_path:
2437
    # FIXME: investigate switch to PathJoin
2438
    item_name = os.path.sep.join([dir_name, name])
2439
    # check the user test and that we're indeed resolving to the given
2440
    # basename
2441
    if test(item_name) and os.path.basename(item_name) == name:
2442
      return item_name
2443
  return None
2444

    
2445

    
2446
def CheckVolumeGroupSize(vglist, vgname, minsize):
2447
  """Checks if the volume group list is valid.
2448

2449
  The function will check if a given volume group is in the list of
2450
  volume groups and has a minimum size.
2451

2452
  @type vglist: dict
2453
  @param vglist: dictionary of volume group names and their size
2454
  @type vgname: str
2455
  @param vgname: the volume group we should check
2456
  @type minsize: int
2457
  @param minsize: the minimum size we accept
2458
  @rtype: None or str
2459
  @return: None for success, otherwise the error message
2460

2461
  """
2462
  vgsize = vglist.get(vgname, None)
2463
  if vgsize is None:
2464
    return "volume group '%s' missing" % vgname
2465
  elif vgsize < minsize:
2466
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2467
            (vgname, minsize, vgsize))
2468
  return None
2469

    
2470

    
2471
def SplitTime(value):
2472
  """Splits time as floating point number into a tuple.
2473

2474
  @param value: Time in seconds
2475
  @type value: int or float
2476
  @return: Tuple containing (seconds, microseconds)
2477

2478
  """
2479
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2480

    
2481
  assert 0 <= seconds, \
2482
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2483
  assert 0 <= microseconds <= 999999, \
2484
    "Microseconds must be 0-999999, but are %s" % microseconds
2485

    
2486
  return (int(seconds), int(microseconds))
2487

    
2488

    
2489
def MergeTime(timetuple):
2490
  """Merges a tuple into time as a floating point number.
2491

2492
  @param timetuple: Time as tuple, (seconds, microseconds)
2493
  @type timetuple: tuple
2494
  @return: Time as a floating point number expressed in seconds
2495

2496
  """
2497
  (seconds, microseconds) = timetuple
2498

    
2499
  assert 0 <= seconds, \
2500
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2501
  assert 0 <= microseconds <= 999999, \
2502
    "Microseconds must be 0-999999, but are %s" % microseconds
2503

    
2504
  return float(seconds) + (float(microseconds) * 0.000001)
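

# Illustrative sketch, not part of the Ganeti API: SplitTime() and MergeTime()
# are inverses (up to microsecond rounding), e.g. for storing timestamps as
# (seconds, microseconds) pairs.
def _ExampleSplitMergeTime():
  """Returns ((1234, 500000), 1234.5) for the sample timestamp below."""
  parts = SplitTime(1234.5)
  return (parts, MergeTime(parts))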


def GetDaemonPort(daemon_name):
2508
  """Get the daemon port for this cluster.
2509

2510
  Note that this routine does not read a ganeti-specific file, but
2511
  instead uses C{socket.getservbyname} to allow pre-customization of
2512
  this parameter outside of Ganeti.
2513

2514
  @type daemon_name: string
2515
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
2516
  @rtype: int
2517

2518
  """
2519
  if daemon_name not in constants.DAEMONS_PORTS:
2520
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)
2521

    
2522
  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
2523
  try:
2524
    port = socket.getservbyname(daemon_name, proto)
2525
  except socket.error:
2526
    port = default_port
2527

    
2528
  return port
2529

    
2530

    
2531
class LogFileHandler(logging.FileHandler):
2532
  """Log handler that doesn't fallback to stderr.
2533

2534
  When an error occurs while writing on the logfile, logging.FileHandler tries
2535
  to log on stderr. This doesn't work in Ganeti since stderr is redirected to
  the logfile. This class avoids such failures by reporting errors to
  /dev/console.
2537

2538
  """
2539
  def __init__(self, filename, mode="a", encoding=None):
2540
    """Open the specified file and use it as the stream for logging.
2541

2542
    Also open /dev/console to report errors while logging.
2543

2544
    """
2545
    logging.FileHandler.__init__(self, filename, mode, encoding)
2546
    self.console = open(constants.DEV_CONSOLE, "a")
2547

    
2548
  def handleError(self, record): # pylint: disable-msg=C0103
2549
    """Handle errors which occur during an emit() call.
2550

2551
    Try to handle errors with FileHandler method, if it fails write to
2552
    /dev/console.
2553

2554
    """
2555
    try:
2556
      logging.FileHandler.handleError(self, record)
2557
    except Exception: # pylint: disable-msg=W0703
2558
      try:
2559
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
2560
      except Exception: # pylint: disable-msg=W0703
2561
        # Log handler tried everything it could, now just give up
2562
        pass
2563

    
2564

    
2565
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2566
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2567
                 console_logging=False):
2568
  """Configures the logging module.
2569

2570
  @type logfile: str
2571
  @param logfile: the filename to which we should log
2572
  @type debug: integer
2573
  @param debug: if greater than zero, enable debug messages, otherwise
2574
      only those at C{INFO} and above level
2575
  @type stderr_logging: boolean
2576
  @param stderr_logging: whether we should also log to the standard error
2577
  @type program: str
2578
  @param program: the name under which we should log messages
2579
  @type multithreaded: boolean
2580
  @param multithreaded: if True, will add the thread name to the log file
2581
  @type syslog: string
2582
  @param syslog: one of 'no', 'yes', 'only':
2583
      - if no, syslog is not used
2584
      - if yes, syslog is used (in addition to file-logging)
2585
      - if only, only syslog is used
2586
  @type console_logging: boolean
2587
  @param console_logging: if True, will use a FileHandler which falls back to
2588
      the system console if logging fails
2589
  @raise EnvironmentError: if we can't open the log file and
2590
      syslog/stderr logging is disabled
2591

2592
  """
2593
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
2594
  sft = program + "[%(process)d]:"
2595
  if multithreaded:
2596
    fmt += "/%(threadName)s"
2597
    sft += " (%(threadName)s)"
2598
  if debug:
2599
    fmt += " %(module)s:%(lineno)s"
2600
    # no debug info for syslog loggers
2601
  fmt += " %(levelname)s %(message)s"
2602
  # yes, we do want the textual level, as remote syslog will probably
2603
  # lose the error level, and it's easier to grep for it
2604
  sft += " %(levelname)s %(message)s"
2605
  formatter = logging.Formatter(fmt)
2606
  sys_fmt = logging.Formatter(sft)
2607

    
2608
  root_logger = logging.getLogger("")
2609
  root_logger.setLevel(logging.NOTSET)
2610

    
2611
  # Remove all previously setup handlers
2612
  for handler in root_logger.handlers:
2613
    handler.close()
2614
    root_logger.removeHandler(handler)
2615

    
2616
  if stderr_logging:
2617
    stderr_handler = logging.StreamHandler()
2618
    stderr_handler.setFormatter(formatter)
2619
    if debug:
2620
      stderr_handler.setLevel(logging.NOTSET)
2621
    else:
2622
      stderr_handler.setLevel(logging.CRITICAL)
2623
    root_logger.addHandler(stderr_handler)
2624

    
2625
  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2626
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
2627
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2628
                                                    facility)
2629
    syslog_handler.setFormatter(sys_fmt)
2630
    # Never enable debug over syslog
2631
    syslog_handler.setLevel(logging.INFO)
2632
    root_logger.addHandler(syslog_handler)
2633

    
2634
  if syslog != constants.SYSLOG_ONLY:
2635
    # this can fail, if the logging directories are not setup or we have
2636
    # a permisssion problem; in this case, it's best to log but ignore
2637
    # the error if stderr_logging is True, and if false we re-raise the
2638
    # exception since otherwise we could run but without any logs at all
2639
    try:
2640
      if console_logging:
2641
        logfile_handler = LogFileHandler(logfile)
2642
      else:
2643
        logfile_handler = logging.FileHandler(logfile)
2644
      logfile_handler.setFormatter(formatter)
2645
      if debug:
2646
        logfile_handler.setLevel(logging.DEBUG)
2647
      else:
2648
        logfile_handler.setLevel(logging.INFO)
2649
      root_logger.addHandler(logfile_handler)
2650
    except EnvironmentError:
2651
      if stderr_logging or syslog == constants.SYSLOG_YES:
2652
        logging.exception("Failed to enable logging to file '%s'", logfile)
2653
      else:
2654
        # we need to re-raise the exception
2655
        raise
2656

    
2657

    
2658
def IsNormAbsPath(path):
2659
  """Check whether a path is absolute and also normalized
2660

2661
  This avoids things like /dir/../../other/path to be valid.
2662

2663
  """
2664
  return os.path.normpath(path) == path and os.path.isabs(path)
2665

    
2666

    
2667
def PathJoin(*args):
2668
  """Safe-join a list of path components.
2669

2670
  Requirements:
2671
      - the first argument must be an absolute path
2672
      - no component in the path must have backtracking (e.g. /../),
2673
        since we check for normalization at the end
2674

2675
  @param args: the path components to be joined
2676
  @raise ValueError: for invalid paths
2677

2678
  """
2679
  # ensure we're having at least one path passed in
2680
  assert args
2681
  # ensure the first component is an absolute and normalized path name
2682
  root = args[0]
2683
  if not IsNormAbsPath(root):
2684
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2685
  result = os.path.join(*args)
2686
  # ensure that the whole path is normalized
2687
  if not IsNormAbsPath(result):
2688
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2689
  # check that we're still under the original prefix
2690
  prefix = os.path.commonprefix([root, result])
2691
  if prefix != root:
2692
    raise ValueError("Error: path joining resulted in different prefix"
2693
                     " (%s != %s)" % (prefix, root))
2694
  return result
2695

    
2696

    
2697
def TailFile(fname, lines=20):
2698
  """Return the last lines from a file.
2699

2700
  @note: this function will only read and parse the last 4KB of
2701
      the file; if the lines are very long, it could be that less
2702
      than the requested number of lines are returned
2703

2704
  @param fname: the file name
2705
  @type lines: int
2706
  @param lines: the (maximum) number of lines to return
2707

2708
  """
2709
  fd = open(fname, "r")
2710
  try:
2711
    fd.seek(0, 2)
2712
    pos = fd.tell()
2713
    pos = max(0, pos-4096)
2714
    fd.seek(pos, 0)
2715
    raw_data = fd.read()
2716
  finally:
2717
    fd.close()
2718

    
2719
  rows = raw_data.splitlines()
2720
  return rows[-lines:]
2721

    
2722

    
2723
def FormatTimestampWithTZ(secs):
2724
  """Formats a Unix timestamp with the local timezone.
2725

2726
  """
2727
  return time.strftime("%F %T %Z", time.gmtime(secs))
2728

    
2729

    
2730
def _ParseAsn1Generalizedtime(value):
2731
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2732

2733
  @type value: string
2734
  @param value: ASN1 GENERALIZEDTIME timestamp
2735

2736
  """
2737
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2738
  if m:
2739
    # We have an offset
2740
    asn1time = m.group(1)
2741
    hours = int(m.group(2))
2742
    minutes = int(m.group(3))
2743
    utcoffset = (60 * hours) + minutes
2744
  else:
2745
    if not value.endswith("Z"):
2746
      raise ValueError("Missing timezone")
2747
    asn1time = value[:-1]
2748
    utcoffset = 0
2749

    
2750
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2751

    
2752
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2753

    
2754
  return calendar.timegm(tt.utctimetuple())
2755

    
2756

    
2757
def GetX509CertValidity(cert):
2758
  """Returns the validity period of the certificate.
2759

2760
  @type cert: OpenSSL.crypto.X509
2761
  @param cert: X509 certificate object
2762

2763
  """
2764
  # The get_notBefore and get_notAfter functions are only supported in
2765
  # pyOpenSSL 0.7 and above.
2766
  try:
2767
    get_notbefore_fn = cert.get_notBefore
2768
  except AttributeError:
2769
    not_before = None
2770
  else:
2771
    not_before_asn1 = get_notbefore_fn()
2772

    
2773
    if not_before_asn1 is None:
2774
      not_before = None
2775
    else:
2776
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2777

    
2778
  try:
2779
    get_notafter_fn = cert.get_notAfter
2780
  except AttributeError:
2781
    not_after = None
2782
  else:
2783
    not_after_asn1 = get_notafter_fn()
2784

    
2785
    if not_after_asn1 is None:
2786
      not_after = None
2787
    else:
2788
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2789

    
2790
  return (not_before, not_after)
2791

    
2792

    
2793
def _VerifyCertificateInner(expired, not_before, not_after, now,
2794
                            warn_days, error_days):
2795
  """Verifies certificate validity.
2796

2797
  @type expired: bool
2798
  @param expired: Whether pyOpenSSL considers the certificate as expired
2799
  @type not_before: number or None
2800
  @param not_before: Unix timestamp before which certificate is not valid
2801
  @type not_after: number or None
2802
  @param not_after: Unix timestamp after which certificate is invalid
2803
  @type now: number
2804
  @param now: Current time as Unix timestamp
2805
  @type warn_days: number or None
2806
  @param warn_days: How many days before expiration a warning should be reported
2807
  @type error_days: number or None
2808
  @param error_days: How many days before expiration an error should be reported
2809

2810
  """
2811
  if expired:
2812
    msg = "Certificate is expired"
2813

    
2814
    if not_before is not None and not_after is not None:
2815
      msg += (" (valid from %s to %s)" %
2816
              (FormatTimestampWithTZ(not_before),
2817
               FormatTimestampWithTZ(not_after)))
2818
    elif not_before is not None:
2819
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2820
    elif not_after is not None:
2821
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2822

    
2823
    return (CERT_ERROR, msg)
2824

    
2825
  elif not_before is not None and not_before > now:
2826
    return (CERT_WARNING,
2827
            "Certificate not yet valid (valid from %s)" %
2828
            FormatTimestampWithTZ(not_before))
2829

    
2830
  elif not_after is not None:
2831
    remaining_days = int((not_after - now) / (24 * 3600))
2832

    
2833
    msg = "Certificate expires in about %d days" % remaining_days
2834

    
2835
    if error_days is not None and remaining_days <= error_days:
2836
      return (CERT_ERROR, msg)
2837

    
2838
    if warn_days is not None and remaining_days <= warn_days:
2839
      return (CERT_WARNING, msg)
2840

    
2841
  return (None, None)
2842

    
2843

    
2844
def VerifyX509Certificate(cert, warn_days, error_days):
2845
  """Verifies a certificate for LUVerifyCluster.
2846

2847
  @type cert: OpenSSL.crypto.X509
2848
  @param cert: X509 certificate object
2849
  @type warn_days: number or None
2850
  @param warn_days: How many days before expiration a warning should be reported
2851
  @type error_days: number or None
2852
  @param error_days: How many days before expiration an error should be reported
2853

2854
  """
2855
  # Depending on the pyOpenSSL version, this can just return (None, None)
2856
  (not_before, not_after) = GetX509CertValidity(cert)
2857

    
2858
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2859
                                 time.time(), warn_days, error_days)
2860

    
2861

    
2862
def SignX509Certificate(cert, key, salt):
2863
  """Sign a X509 certificate.
2864

2865
  An RFC822-like signature header is added in front of the certificate.
2866

2867
  @type cert: OpenSSL.crypto.X509
2868
  @param cert: X509 certificate object
2869
  @type key: string
2870
  @param key: Key for HMAC
2871
  @type salt: string
2872
  @param salt: Salt for HMAC
2873
  @rtype: string
2874
  @return: Serialized and signed certificate in PEM format
2875

2876
  """
2877
  if not VALID_X509_SIGNATURE_SALT.match(salt):
2878
    raise errors.GenericError("Invalid salt: %r" % salt)
2879

    
2880
  # Dumping as PEM here ensures the certificate is in a sane format
2881
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2882

    
2883
  return ("%s: %s/%s\n\n%s" %
2884
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
2885
           Sha1Hmac(key, cert_pem, salt=salt),
2886
           cert_pem))
2887

    
2888

    
2889
def _ExtractX509CertificateSignature(cert_pem):
2890
  """Helper function to extract signature from X509 certificate.
2891

2892
  """
2893
  # Extract signature from original PEM data
2894
  for line in cert_pem.splitlines():
2895
    if line.startswith("---"):
2896
      break
2897

    
2898
    m = X509_SIGNATURE.match(line.strip())
2899
    if m:
2900
      return (m.group("salt"), m.group("sign"))
2901

    
2902
  raise errors.GenericError("X509 certificate signature is missing")
2903

    
2904

    
2905
def LoadSignedX509Certificate(cert_pem, key):
2906
  """Verifies a signed X509 certificate.
2907

2908
  @type cert_pem: string
2909
  @param cert_pem: Certificate in PEM format and with signature header
2910
  @type key: string
2911
  @param key: Key for HMAC
2912
  @rtype: tuple; (OpenSSL.crypto.X509, string)
2913
  @return: X509 certificate object and salt
2914

2915
  """
2916
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2917

    
2918
  # Load certificate
2919
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2920

    
2921
  # Dump again to ensure it's in a sane format
2922
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2923

    
2924
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2925
    raise errors.GenericError("X509 certificate signature is invalid")
2926

    
2927
  return (cert, salt)
2928

    
2929

    
2930
def Sha1Hmac(key, text, salt=None):
2931
  """Calculates the HMAC-SHA1 digest of a text.
2932

2933
  HMAC is defined in RFC2104.
2934

2935
  @type key: string
2936
  @param key: Secret key
2937
  @type text: string
2938

2939
  """
2940
  if salt:
2941
    salted_text = salt + text
2942
  else:
2943
    salted_text = text
2944

    
2945
  return hmac.new(key, salted_text, compat.sha1).hexdigest()
2946

    
2947

    
2948
def VerifySha1Hmac(key, text, digest, salt=None):
2949
  """Verifies the HMAC-SHA1 digest of a text.
2950

2951
  HMAC is defined in RFC2104.
2952

2953
  @type key: string
2954
  @param key: Secret key
2955
  @type text: string
2956
  @type digest: string
2957
  @param digest: Expected digest
2958
  @rtype: bool
2959
  @return: Whether HMAC-SHA1 digest matches
2960

2961
  """
2962
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
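

# Illustrative sketch, not part of the Ganeti API: computing and verifying a
# salted HMAC-SHA1 digest with a shared key, as used for the X509 signature
# header above. Key, text and salt are made up.
def _ExampleSha1Hmac():
  """Returns True: a freshly computed digest always verifies."""
  digest = Sha1Hmac("secret-key", "some text", salt="abc123")
  return VerifySha1Hmac("secret-key", "some text", digest, salt="abc123")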


def SafeEncode(text):
2966
  """Return a 'safe' version of a source string.
2967

2968
  This function mangles the input string and returns a version that
2969
  should be safe to display/encode as ASCII. To this end, we first
2970
  convert it to ASCII using the 'backslashreplace' encoding which
2971
  should get rid of any non-ASCII chars, and then we process it
2972
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
2974
  backslashes too, and that is too much; and that escaping is not
2975
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2976

2977
  @type text: str or unicode
2978
  @param text: input data
2979
  @rtype: str
2980
  @return: a safe version of text
2981

2982
  """
2983
  if isinstance(text, unicode):
2984
    # only if unicode; if str already, we handle it below
2985
    text = text.encode('ascii', 'backslashreplace')
2986
  resu = ""
2987
  for char in text:
2988
    c = ord(char)
2989
    if char == '\t':
2990
      resu += r'\t'
2991
    elif char == '\n':
2992
      resu += r'\n'
2993
    elif char == '\r':
2994
      resu += r'\r'
2995
    elif c < 32 or c >= 127: # non-printable
2996
      resu += "\\x%02x" % (c & 0xff)
2997
    else:
2998
      resu += char
2999
  return resu
3000

    
3001

    
3002
def UnescapeAndSplit(text, sep=","):
3003
  """Split and unescape a string based on a given separator.
3004

3005
  This function splits a string based on a separator where the
  separator itself can be escaped in order to be part of an
  element. The escaping rules are (assuming comma is the
  separator):
3009
    - a plain , separates the elements
3010
    - a sequence \\\\, (double backslash plus comma) is handled as a
3011
      backslash plus a separator comma
3012
    - a sequence \, (backslash plus comma) is handled as a
3013
      non-separator comma
3014

3015
  @type text: string
3016
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
3020
  @return: a list of strings
3021

3022
  """
3023
  # we split the list by sep (with no escaping at this stage)
3024
  slist = text.split(sep)
3025
  # next, we revisit the elements and if any of them ended with an odd
3026
  # number of backslashes, then we join it with the next
3027
  rlist = []
3028
  while slist:
3029
    e1 = slist.pop(0)
3030
    if e1.endswith("\\"):
3031
      num_b = len(e1) - len(e1.rstrip("\\"))
3032
      if num_b % 2 == 1:
3033
        e2 = slist.pop(0)
3034
        # here the backslashes remain (all), and will be reduced in
3035
        # the next step
3036
        rlist.append(e1 + sep + e2)
3037
        continue
3038
    rlist.append(e1)
3039
  # finally, replace backslash-something with something
3040
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
3041
  return rlist
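

# Illustrative sketch, not part of the Ganeti API: how the escaping rules
# above work out for a small sample string.
def _ExampleUnescapeAndSplit():
  """Splits a sample string in which one comma is escaped."""
  # yields ["a", "b,c", "d"]: the backslash protects the comma after "b"
  return UnescapeAndSplit("a,b\\,c,d")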


def CommaJoin(names):
3045
  """Nicely join a set of identifiers.
3046

3047
  @param names: set, list or tuple
3048
  @return: a string with the formatted results
3049

3050
  """
3051
  return ", ".join([str(val) for val in names])
3052

    
3053

    
3054
def BytesToMebibyte(value):
3055
  """Converts bytes to mebibytes.
3056

3057
  @type value: int
3058
  @param value: Value in bytes
3059
  @rtype: int
3060
  @return: Value in mebibytes
3061

3062
  """
3063
  return int(round(value / (1024.0 * 1024.0), 0))
3064

    
3065

    
3066
def CalculateDirectorySize(path):
3067
  """Calculates the size of a directory recursively.
3068

3069
  @type path: string
3070
  @param path: Path to directory
3071
  @rtype: int
3072
  @return: Size in mebibytes
3073

3074
  """
3075
  size = 0
3076

    
3077
  for (curpath, _, files) in os.walk(path):
3078
    for filename in files:
3079
      st = os.lstat(PathJoin(curpath, filename))
3080
      size += st.st_size
3081

    
3082
  return BytesToMebibyte(size)
3083

    
3084

    
3085
def GetFilesystemStats(path):
3086
  """Returns the total and free space on a filesystem.
3087

3088
  @type path: string
3089
  @param path: Path on filesystem to be examined
3090
  @rtype: int
3091
  @return: tuple of (Total space, Free space) in mebibytes
3092

3093
  """
3094
  st = os.statvfs(path)
3095

    
3096
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
3097
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
3098
  return (tsize, fsize)
3099

    
3100

    
3101
def RunInSeparateProcess(fn, *args):
3102
  """Runs a function in a separate process.
3103

3104
  Note: Only boolean return values are supported.
3105

3106
  @type fn: callable
3107
  @param fn: Function to be called
3108
  @rtype: bool
3109
  @return: Function's result
3110

3111
  """
3112
  pid = os.fork()
3113
  if pid == 0:
3114
    # Child process
3115
    try:
3116
      # In case the function uses temporary files
3117
      ResetTempfileModule()
3118

    
3119
      # Call function
3120
      result = int(bool(fn(*args)))
3121
      assert result in (0, 1)
3122
    except: # pylint: disable-msg=W0702
3123
      logging.exception("Error while calling function in separate process")
3124
      # 0 and 1 are reserved for the return value
3125
      result = 33
3126

    
3127
    os._exit(result) # pylint: disable-msg=W0212
3128

    
3129
  # Parent process
3130

    
3131
  # Avoid zombies and check exit code
3132
  (_, status) = os.waitpid(pid, 0)
3133

    
3134
  if os.WIFSIGNALED(status):
3135
    exitcode = None
3136
    signum = os.WTERMSIG(status)
3137
  else:
3138
    exitcode = os.WEXITSTATUS(status)
3139
    signum = None
3140

    
3141
  if not (exitcode in (0, 1) and signum is None):
3142
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
3143
                              (exitcode, signum))
3144

    
3145
  return bool(exitcode)
3146

    
3147

    
3148
def IgnoreProcessNotFound(fn, *args, **kwargs):
3149
  """Ignores ESRCH when calling a process-related function.
3150

3151
  ESRCH is raised when a process is not found.
3152

3153
  @rtype: bool
3154
  @return: Whether process was found
3155

3156
  """
3157
  try:
3158
    fn(*args, **kwargs)
3159
  except EnvironmentError, err:
3160
    # Ignore ESRCH
3161
    if err.errno == errno.ESRCH:
3162
      return False
3163
    raise
3164

    
3165
  return True
3166

    
3167

    
3168
def IgnoreSignals(fn, *args, **kwargs):
3169
  """Tries to call a function ignoring failures due to EINTR.
3170

3171
  """
3172
  try:
3173
    return fn(*args, **kwargs)
3174
  except EnvironmentError, err:
3175
    if err.errno == errno.EINTR:
3176
      return None
3177
    else:
3178
      raise
3179
  except (select.error, socket.error), err:
3180
    # In python 2.6 and above select.error is an IOError, so it's handled
3181
    # above, in 2.5 and below it's not, and it's handled here.
3182
    if err.args and err.args[0] == errno.EINTR:
3183
      return None
3184
    else:
3185
      raise
3186

    
3187

    
3188
def LockFile(fd):
3189
  """Locks a file using POSIX locks.
3190

3191
  @type fd: int
3192
  @param fd: the file descriptor we need to lock
3193

3194
  """
3195
  try:
3196
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3197
  except IOError, err:
3198
    if err.errno == errno.EAGAIN:
3199
      raise errors.LockError("File already locked")
3200
    raise
3201

    
3202

    
3203
def FormatTime(val):
3204
  """Formats a time value.
3205

3206
  @type val: float or None
3207
  @param val: the timestamp as returned by time.time()
3208
  @return: a string value or N/A if we don't have a valid timestamp
3209

3210
  """
3211
  if val is None or not isinstance(val, (int, float)):
3212
    return "N/A"
3213
  # these two format codes work on Linux, but they are not guaranteed on all
  # platforms
3215
  return time.strftime("%F %T", time.localtime(val))
3216

    
3217

    
3218
def FormatSeconds(secs):
3219
  """Formats seconds for easier reading.
3220

3221
  @type secs: number
3222
  @param secs: Number of seconds
3223
  @rtype: string
3224
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")
3225

3226
  """
3227
  parts = []
3228

    
3229
  secs = round(secs, 0)
3230

    
3231
  if secs > 0:
3232
    # Negative values would be a bit tricky
3233
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
3234
      (complete, secs) = divmod(secs, one)
3235
      if complete or parts:
3236
        parts.append("%d%s" % (complete, unit))
3237

    
3238
  parts.append("%ds" % secs)
3239

    
3240
  return " ".join(parts)


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3244
  """Reads the watcher pause file.
3245

3246
  @type filename: string
3247
  @param filename: Path to watcher pause file
3248
  @type now: None, float or int
3249
  @param now: Current time as Unix timestamp
3250
  @type remove_after: int
3251
  @param remove_after: Remove watcher pause file after specified amount of
3252
    seconds past the pause end time
3253

3254
  """
3255
  if now is None:
3256
    now = time.time()
3257

    
3258
  try:
3259
    value = ReadFile(filename)
3260
  except IOError, err:
3261
    if err.errno != errno.ENOENT:
3262
      raise
3263
    value = None
3264

    
3265
  if value is not None:
3266
    try:
3267
      value = int(value)
3268
    except ValueError:
3269
      logging.warning(("Watcher pause file (%s) contains invalid value,"
3270
                       " removing it"), filename)
3271
      RemoveFile(filename)
3272
      value = None
3273

    
3274
    if value is not None:
3275
      # Remove file if it's outdated
3276
      if now > (value + remove_after):
3277
        RemoveFile(filename)
3278
        value = None
3279

    
3280
      elif now > value:
3281
        value = None
3282

    
3283
  return value
3284

    
3285

    
3286
class RetryTimeout(Exception):
3287
  """Retry loop timed out.
3288

3289
  Any arguments which were passed by the retried function to RetryAgain will be
3290
  preserved in RetryTimeout, if it is raised. If such argument was an exception
3291
  the RaiseInner helper method will reraise it.
3292

3293
  """
3294
  def RaiseInner(self):
3295
    if self.args and isinstance(self.args[0], Exception):
3296
      raise self.args[0]
3297
    else:
3298
      raise RetryTimeout(*self.args)
3299

    
3300

    
3301
class RetryAgain(Exception):
3302
  """Retry again.
3303

3304
  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
3305
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout exception can be used to reraise it.
3307

3308
  """
3309

    
3310

    
3311
class _RetryDelayCalculator(object):
3312
  """Calculator for increasing delays.
3313

3314
  """
3315
  __slots__ = [
3316
    "_factor",
3317
    "_limit",
3318
    "_next",
3319
    "_start",
3320
    ]
3321

    
3322
  def __init__(self, start, factor, limit):
3323
    """Initializes this class.
3324

3325
    @type start: float
3326
    @param start: Initial delay
3327
    @type factor: float
3328
    @param factor: Factor for delay increase
3329
    @type limit: float or None
3330
    @param limit: Upper limit for delay or None for no limit
3331

3332
    """
3333
    assert start > 0.0
3334
    assert factor >= 1.0
3335
    assert limit is None or limit >= 0.0
3336

    
3337
    self._start = start
3338
    self._factor = factor
3339
    self._limit = limit
3340

    
3341
    self._next = start
3342

    
3343
  def __call__(self):
3344
    """Returns current delay and calculates the next one.
3345

3346
    """
3347
    current = self._next
3348

    
3349
    # Update for next run
3350
    if self._limit is None or self._next < self._limit:
3351
      self._next = min(self._limit, self._next * self._factor)
3352

    
3353
    return current
3354

    
3355

    
3356
#: Special delay to specify whole remaining timeout
3357
RETRY_REMAINING_TIME = object()
3358

    
3359

    
3360
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3361
          _time_fn=time.time):
3362
  """Call a function repeatedly until it succeeds.
3363

3364
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3365
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3366
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
3367

3368
  C{delay} can be one of the following:
3369
    - callable returning the delay length as a float
3370
    - Tuple of (start, factor, limit)
3371
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3372
      useful when overriding L{wait_fn} to wait for an external event)
3373
    - A static delay as a number (int or float)
3374

3375
  @type fn: callable
3376
  @param fn: Function to be called
3377
  @param delay: Either a callable (returning the delay), a tuple of (start,
3378
                factor, limit) (see L{_RetryDelayCalculator}),
3379
                L{RETRY_REMAINING_TIME} or a number (int or float)
3380
  @type timeout: float
3381
  @param timeout: Total timeout
3382
  @type wait_fn: callable
3383
  @param wait_fn: Waiting function
3384
  @return: Return value of function
3385

3386
  """
3387
  assert callable(fn)
3388
  assert callable(wait_fn)
3389
  assert callable(_time_fn)
3390

    
3391
  if args is None:
3392
    args = []
3393

    
3394
  end_time = _time_fn() + timeout
3395

    
3396
  if callable(delay):
3397
    # External function to calculate delay
3398
    calc_delay = delay
3399

    
3400
  elif isinstance(delay, (tuple, list)):
3401
    # Increasing delay with optional upper boundary
3402
    (start, factor, limit) = delay
3403
    calc_delay = _RetryDelayCalculator(start, factor, limit)
3404

    
3405
  elif delay is RETRY_REMAINING_TIME:
3406
    # Always use the remaining time
3407
    calc_delay = None
3408

    
3409
  else:
3410
    # Static delay
3411
    calc_delay = lambda: delay
3412

    
3413
  assert calc_delay is None or callable(calc_delay)
3414

    
3415
  while True:
3416
    retry_args = []
3417
    try:
3418
      # pylint: disable-msg=W0142
3419
      return fn(*args)
3420
    except RetryAgain, err:
3421
      retry_args = err.args
3422
    except RetryTimeout:
3423
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
3424
                                   " handle RetryTimeout")
3425

    
3426
    remaining_time = end_time - _time_fn()
3427

    
3428
    if remaining_time < 0.0:
3429
      # pylint: disable-msg=W0142
3430
      raise RetryTimeout(*retry_args)
3431

    
3432
    assert remaining_time >= 0.0
3433

    
3434
    if calc_delay is None:
3435
      wait_fn(remaining_time)
3436
    else:
3437
      current_delay = calc_delay()
3438
      if current_delay > 0.0:
3439
        wait_fn(current_delay)
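

# Illustrative sketch, not part of the Ganeti API: polling for a file to
# appear, retrying every 0.5 seconds for at most 10 seconds in total. The
# path is hypothetical; a caller would normally handle L{RetryTimeout}.
def _ExampleRetryForFile(path="/var/run/example.flag"):
  """Returns None once C{path} exists, raises L{RetryTimeout} otherwise."""
  def _Check():
    if not os.path.exists(path):
      raise RetryAgain()
  return Retry(_Check, 0.5, 10.0)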


def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(fd)
  return path


def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple
  @return: tuple containing the PEM-encoded private key and certificate

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)


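# Illustrative usage sketch (not part of the original module): generating a
# certificate valid for one day and parsing it back with pyOpenSSL. The host
# name is hypothetical.
#
#   (key_pem, cert_pem) = GenerateSelfSignedX509Cert("node1.example.com",
#                                                    24 * 60 * 60)
#   cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
#                                          cert_pem)
#   assert cert.get_subject().CN == "node1.example.com"
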
def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
  """Legacy function to generate a self-signed X509 certificate.

  @type filename: str
  @param filename: path to write the private key and certificate to
  @type validity: int
  @param validity: validity of the certificate in days

  """
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
                                                   validity * 24 * 60 * 60)

  WriteFile(filename, mode=0400, data=key_pem + cert_pem)


class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" would either
    # truncate an existing file or refuse to create a missing one, depending
    # on the mode.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must not be negative"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can unlock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the unlock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)


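# Illustrative usage sketch (not part of the original module): taking and
# releasing an exclusive lock. The lock file path is hypothetical.
#
#   lock = FileLock.Open("/var/lock/example.lock")
#   try:
#     lock.Exclusive(blocking=True)
#     # ... critical section ...
#     lock.Unlock()
#   finally:
#     lock.Close()
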
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)


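# Illustrative usage sketch (not part of the original module): collecting
# complete lines from arbitrarily sized chunks.
#
#   lines = []
#   splitter = LineSplitter(lines.append)
#   splitter.write("first\nsec")
#   splitter.write("ond\nlast")
#   splitter.flush()   # lines == ["first", "second"]
#   splitter.close()   # lines == ["first", "second", "last"]
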
def SignalHandled(signums):
  """Signal handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap


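# Illustrative usage sketch (not part of the original module): a function
# decorated so that SIGTERM is intercepted while it runs. _DoWork is a
# hypothetical work function.
#
#   @SignalHandled([signal.SIGTERM])
#   def _MainLoop(signal_handlers=None):
#     handler = signal_handlers[signal.SIGTERM]
#     while not handler.called:
#       _DoWork()
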
class SignalWakeupFd(object):
  """Wrapper around a pipe used to wake up a process when a signal arrives.

  Where supported, the pipe's write end is registered via
  C{signal.set_wakeup_fd}; L{Notify} can also write to it explicitly. The
  read end is exposed through the C{fileno} and C{read} attributes so it can
  be polled, e.g. with C{select}.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()


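# Illustrative usage sketch (not part of the original module): writing to the
# wakeup file descriptor and draining it again via select.
#
#   wakeup = SignalWakeupFd()
#   wakeup.Notify()
#   (rlist, _, _) = select.select([wakeup.fileno()], [], [], 0)
#   assert rlist and wakeup.read(1) == "\0"
#   wakeup.Reset()
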
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when destroyed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @type wakeup: None or L{SignalWakeupFd}
    @param wakeup: Wakeup file descriptor object to notify when one of the
        signals arrives

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)


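# Illustrative usage sketch (not part of the original module): running until
# SIGINT has been delivered once. _DoWork is a hypothetical work function.
#
#   handler = SignalHandler([signal.SIGINT])
#   try:
#     while not handler.called:
#       _DoWork()
#   finally:
#     handler.Reset()
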
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
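# Illustrative usage sketch (not part of the original module): mixing static
# field names with a regex-based one.
#
#   fields = FieldSet("name", "mode", r"disk\.size/([0-9]+)")
#   assert fields.Matches("disk.size/2")
#   assert fields.NonMatching(["name", "size"]) == ["size"]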