Statistics
| Branch: | Tag: | Revision:

root / lib / utils.py @ 3718bf6d

History | View | Annotate | Download (95.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti utility module.
23

24
This module holds functions that can be used in both daemons (all) and
25
the command line scripts.
26

27
"""
28

    
29

    
30
import os
31
import sys
32
import time
33
import subprocess
34
import re
35
import socket
36
import tempfile
37
import shutil
38
import errno
39
import pwd
40
import itertools
41
import select
42
import fcntl
43
import resource
44
import logging
45
import logging.handlers
46
import signal
47
import OpenSSL
48
import datetime
49
import calendar
50
import hmac
51
import collections
52
import struct
53
import IN
54

    
55
from cStringIO import StringIO
56

    
57
try:
58
  from hashlib import sha1
59
except ImportError:
60
  import sha as sha1
61

    
62
from ganeti import errors
63
from ganeti import constants
64

    
65

    
66
_locksheld = []
67
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
68

    
69
debug_locks = False
70

    
71
#: when set to True, L{RunCmd} is disabled
72
no_fork = False
73

    
74
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
75

    
76
HEX_CHAR_RE = r"[a-zA-Z0-9]"
77
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
78
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
79
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
80
                             HEX_CHAR_RE, HEX_CHAR_RE),
81
                            re.S | re.I)
82

    
83
# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
84
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
85
#
86
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
87
# pid_t to "int".
88
#
89
# IEEE Std 1003.1-2008:
90
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
91
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
92
_STRUCT_UCRED = "iII"
93
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)
94

    
95
# Certificate verification results
96
(CERT_WARNING,
97
 CERT_ERROR) = range(1, 3)
98

    
99

    
100
class RunResult(object):
101
  """Holds the result of running external programs.
102

103
  @type exit_code: int
104
  @ivar exit_code: the exit code of the program, or None (if the program
105
      didn't exit())
106
  @type signal: int or None
107
  @ivar signal: the signal that caused the program to finish, or None
108
      (if the program wasn't terminated by a signal)
109
  @type stdout: str
110
  @ivar stdout: the standard output of the program
111
  @type stderr: str
112
  @ivar stderr: the standard error of the program
113
  @type failed: boolean
114
  @ivar failed: True in case the program was
115
      terminated by a signal or exited with a non-zero exit code
116
  @ivar fail_reason: a string detailing the termination reason
117

118
  """
119
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
120
               "failed", "fail_reason", "cmd"]
121

    
122

    
123
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
124
    self.cmd = cmd
125
    self.exit_code = exit_code
126
    self.signal = signal_
127
    self.stdout = stdout
128
    self.stderr = stderr
129
    self.failed = (signal_ is not None or exit_code != 0)
130

    
131
    if self.signal is not None:
132
      self.fail_reason = "terminated by signal %s" % self.signal
133
    elif self.exit_code is not None:
134
      self.fail_reason = "exited with exit code %s" % self.exit_code
135
    else:
136
      self.fail_reason = "unable to determine termination reason"
137

    
138
    if self.failed:
139
      logging.debug("Command '%s' failed (%s); output: %s",
140
                    self.cmd, self.fail_reason, self.output)
141

    
142
  def _GetOutput(self):
143
    """Returns the combined stdout and stderr for easier usage.
144

145
    """
146
    return self.stdout + self.stderr
147

    
148
  output = property(_GetOutput, None, None, "Return full output")
149

    
150

    
151
def _BuildCmdEnvironment(env, reset):
152
  """Builds the environment for an external program.
153

154
  """
155
  if reset:
156
    cmd_env = {}
157
  else:
158
    cmd_env = os.environ.copy()
159
    cmd_env["LC_ALL"] = "C"
160

    
161
  if env is not None:
162
    cmd_env.update(env)
163

    
164
  return cmd_env
165

    
166

    
167
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
168
  """Execute a (shell) command.
169

170
  The command should not read from its standard input, as it will be
171
  closed.
172

173
  @type cmd: string or list
174
  @param cmd: Command to run
175
  @type env: dict
176
  @param env: Additional environment variables
177
  @type output: str
178
  @param output: if desired, the output of the command can be
179
      saved in a file instead of the RunResult instance; this
180
      parameter denotes the file name (if not None)
181
  @type cwd: string
182
  @param cwd: if specified, will be used as the working
183
      directory for the command; the default will be /
184
  @type reset_env: boolean
185
  @param reset_env: whether to reset or keep the default os environment
186
  @rtype: L{RunResult}
187
  @return: RunResult instance
188
  @raise errors.ProgrammerError: if we call this when forks are disabled
189

190
  """
191
  if no_fork:
192
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
193

    
194
  if isinstance(cmd, basestring):
195
    strcmd = cmd
196
    shell = True
197
  else:
198
    cmd = [str(val) for val in cmd]
199
    strcmd = ShellQuoteArgs(cmd)
200
    shell = False
201

    
202
  if output:
203
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
204
  else:
205
    logging.debug("RunCmd %s", strcmd)
206

    
207
  cmd_env = _BuildCmdEnvironment(env, reset_env)
208

    
209
  try:
210
    if output is None:
211
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
212
    else:
213
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
214
      out = err = ""
215
  except OSError, err:
216
    if err.errno == errno.ENOENT:
217
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
218
                               (strcmd, err))
219
    else:
220
      raise
221

    
222
  if status >= 0:
223
    exitcode = status
224
    signal_ = None
225
  else:
226
    exitcode = None
227
    signal_ = -status
228

    
229
  return RunResult(exitcode, signal_, out, err, strcmd)
230

    
231

    
232
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
233
                pidfile=None):
234
  """Start a daemon process after forking twice.
235

236
  @type cmd: string or list
237
  @param cmd: Command to run
238
  @type env: dict
239
  @param env: Additional environment variables
240
  @type cwd: string
241
  @param cwd: Working directory for the program
242
  @type output: string
243
  @param output: Path to file in which to save the output
244
  @type output_fd: int
245
  @param output_fd: File descriptor for output
246
  @type pidfile: string
247
  @param pidfile: Process ID file
248
  @rtype: int
249
  @return: Daemon process ID
250
  @raise errors.ProgrammerError: if we call this when forks are disabled
251

252
  """
253
  if no_fork:
254
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
255
                                 " disabled")
256

    
257
  if output and not (bool(output) ^ (output_fd is not None)):
258
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
259
                                 " specified")
260

    
261
  if isinstance(cmd, basestring):
262
    cmd = ["/bin/sh", "-c", cmd]
263

    
264
  strcmd = ShellQuoteArgs(cmd)
265

    
266
  if output:
267
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
268
  else:
269
    logging.debug("StartDaemon %s", strcmd)
270

    
271
  cmd_env = _BuildCmdEnvironment(env, False)
272

    
273
  # Create pipe for sending PID back
274
  (pidpipe_read, pidpipe_write) = os.pipe()
275
  try:
276
    try:
277
      # Create pipe for sending error messages
278
      (errpipe_read, errpipe_write) = os.pipe()
279
      try:
280
        try:
281
          # First fork
282
          pid = os.fork()
283
          if pid == 0:
284
            try:
285
              # Child process, won't return
286
              _StartDaemonChild(errpipe_read, errpipe_write,
287
                                pidpipe_read, pidpipe_write,
288
                                cmd, cmd_env, cwd,
289
                                output, output_fd, pidfile)
290
            finally:
291
              # Well, maybe child process failed
292
              os._exit(1) # pylint: disable-msg=W0212
293
        finally:
294
          _CloseFDNoErr(errpipe_write)
295

    
296
        # Wait for daemon to be started (or an error message to arrive) and read
297
        # up to 100 KB as an error message
298
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
299
      finally:
300
        _CloseFDNoErr(errpipe_read)
301
    finally:
302
      _CloseFDNoErr(pidpipe_write)
303

    
304
    # Read up to 128 bytes for PID
305
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
306
  finally:
307
    _CloseFDNoErr(pidpipe_read)
308

    
309
  # Try to avoid zombies by waiting for child process
310
  try:
311
    os.waitpid(pid, 0)
312
  except OSError:
313
    pass
314

    
315
  if errormsg:
316
    raise errors.OpExecError("Error when starting daemon process: %r" %
317
                             errormsg)
318

    
319
  try:
320
    return int(pidtext)
321
  except (ValueError, TypeError), err:
322
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
323
                             (pidtext, err))
324

    
325

    
326
def _StartDaemonChild(errpipe_read, errpipe_write,
327
                      pidpipe_read, pidpipe_write,
328
                      args, env, cwd,
329
                      output, fd_output, pidfile):
330
  """Child process for starting daemon.
331

332
  """
333
  try:
334
    # Close parent's side
335
    _CloseFDNoErr(errpipe_read)
336
    _CloseFDNoErr(pidpipe_read)
337

    
338
    # First child process
339
    os.chdir("/")
340
    os.umask(077)
341
    os.setsid()
342

    
343
    # And fork for the second time
344
    pid = os.fork()
345
    if pid != 0:
346
      # Exit first child process
347
      os._exit(0) # pylint: disable-msg=W0212
348

    
349
    # Make sure pipe is closed on execv* (and thereby notifies original process)
350
    SetCloseOnExecFlag(errpipe_write, True)
351

    
352
    # List of file descriptors to be left open
353
    noclose_fds = [errpipe_write]
354

    
355
    # Open PID file
356
    if pidfile:
357
      try:
358
        # TODO: Atomic replace with another locked file instead of writing into
359
        # it after creating
360
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
361

    
362
        # Lock the PID file (and fail if not possible to do so). Any code
363
        # wanting to send a signal to the daemon should try to lock the PID
364
        # file before reading it. If acquiring the lock succeeds, the daemon is
365
        # no longer running and the signal should not be sent.
366
        LockFile(fd_pidfile)
367

    
368
        os.write(fd_pidfile, "%d\n" % os.getpid())
369
      except Exception, err:
370
        raise Exception("Creating and locking PID file failed: %s" % err)
371

    
372
      # Keeping the file open to hold the lock
373
      noclose_fds.append(fd_pidfile)
374

    
375
      SetCloseOnExecFlag(fd_pidfile, False)
376
    else:
377
      fd_pidfile = None
378

    
379
    # Open /dev/null
380
    fd_devnull = os.open(os.devnull, os.O_RDWR)
381

    
382
    assert not output or (bool(output) ^ (fd_output is not None))
383

    
384
    if fd_output is not None:
385
      pass
386
    elif output:
387
      # Open output file
388
      try:
389
        # TODO: Implement flag to set append=yes/no
390
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
391
      except EnvironmentError, err:
392
        raise Exception("Opening output file failed: %s" % err)
393
    else:
394
      fd_output = fd_devnull
395

    
396
    # Redirect standard I/O
397
    os.dup2(fd_devnull, 0)
398
    os.dup2(fd_output, 1)
399
    os.dup2(fd_output, 2)
400

    
401
    # Send daemon PID to parent
402
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
403

    
404
    # Close all file descriptors except stdio and error message pipe
405
    CloseFDs(noclose_fds=noclose_fds)
406

    
407
    # Change working directory
408
    os.chdir(cwd)
409

    
410
    if env is None:
411
      os.execvp(args[0], args)
412
    else:
413
      os.execvpe(args[0], args, env)
414
  except: # pylint: disable-msg=W0702
415
    try:
416
      # Report errors to original process
417
      buf = str(sys.exc_info()[1])
418

    
419
      RetryOnSignal(os.write, errpipe_write, buf)
420
    except: # pylint: disable-msg=W0702
421
      # Ignore errors in error handling
422
      pass
423

    
424
  os._exit(1) # pylint: disable-msg=W0212
425

    
426

    
427
def _RunCmdPipe(cmd, env, via_shell, cwd):
428
  """Run a command and return its output.
429

430
  @type  cmd: string or list
431
  @param cmd: Command to run
432
  @type env: dict
433
  @param env: The environment to use
434
  @type via_shell: bool
435
  @param via_shell: if we should run via the shell
436
  @type cwd: string
437
  @param cwd: the working directory for the program
438
  @rtype: tuple
439
  @return: (out, err, status)
440

441
  """
442
  poller = select.poll()
443
  child = subprocess.Popen(cmd, shell=via_shell,
444
                           stderr=subprocess.PIPE,
445
                           stdout=subprocess.PIPE,
446
                           stdin=subprocess.PIPE,
447
                           close_fds=True, env=env,
448
                           cwd=cwd)
449

    
450
  child.stdin.close()
451
  poller.register(child.stdout, select.POLLIN)
452
  poller.register(child.stderr, select.POLLIN)
453
  out = StringIO()
454
  err = StringIO()
455
  fdmap = {
456
    child.stdout.fileno(): (out, child.stdout),
457
    child.stderr.fileno(): (err, child.stderr),
458
    }
459
  for fd in fdmap:
460
    SetNonblockFlag(fd, True)
461

    
462
  while fdmap:
463
    pollresult = RetryOnSignal(poller.poll)
464

    
465
    for fd, event in pollresult:
466
      if event & select.POLLIN or event & select.POLLPRI:
467
        data = fdmap[fd][1].read()
468
        # no data from read signifies EOF (the same as POLLHUP)
469
        if not data:
470
          poller.unregister(fd)
471
          del fdmap[fd]
472
          continue
473
        fdmap[fd][0].write(data)
474
      if (event & select.POLLNVAL or event & select.POLLHUP or
475
          event & select.POLLERR):
476
        poller.unregister(fd)
477
        del fdmap[fd]
478

    
479
  out = out.getvalue()
480
  err = err.getvalue()
481

    
482
  status = child.wait()
483
  return out, err, status
484

    
485

    
486
def _RunCmdFile(cmd, env, via_shell, output, cwd):
487
  """Run a command and save its output to a file.
488

489
  @type  cmd: string or list
490
  @param cmd: Command to run
491
  @type env: dict
492
  @param env: The environment to use
493
  @type via_shell: bool
494
  @param via_shell: if we should run via the shell
495
  @type output: str
496
  @param output: the filename in which to save the output
497
  @type cwd: string
498
  @param cwd: the working directory for the program
499
  @rtype: int
500
  @return: the exit status
501

502
  """
503
  fh = open(output, "a")
504
  try:
505
    child = subprocess.Popen(cmd, shell=via_shell,
506
                             stderr=subprocess.STDOUT,
507
                             stdout=fh,
508
                             stdin=subprocess.PIPE,
509
                             close_fds=True, env=env,
510
                             cwd=cwd)
511

    
512
    child.stdin.close()
513
    status = child.wait()
514
  finally:
515
    fh.close()
516
  return status
517

    
518

    
519
def SetCloseOnExecFlag(fd, enable):
520
  """Sets or unsets the close-on-exec flag on a file descriptor.
521

522
  @type fd: int
523
  @param fd: File descriptor
524
  @type enable: bool
525
  @param enable: Whether to set or unset it.
526

527
  """
528
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)
529

    
530
  if enable:
531
    flags |= fcntl.FD_CLOEXEC
532
  else:
533
    flags &= ~fcntl.FD_CLOEXEC
534

    
535
  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
536

    
537

    
538
def SetNonblockFlag(fd, enable):
539
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
540

541
  @type fd: int
542
  @param fd: File descriptor
543
  @type enable: bool
544
  @param enable: Whether to set or unset it
545

546
  """
547
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)
548

    
549
  if enable:
550
    flags |= os.O_NONBLOCK
551
  else:
552
    flags &= ~os.O_NONBLOCK
553

    
554
  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
555

    
556

    
557
def RetryOnSignal(fn, *args, **kwargs):
558
  """Calls a function again if it failed due to EINTR.
559

560
  """
561
  while True:
562
    try:
563
      return fn(*args, **kwargs)
564
    except EnvironmentError, err:
565
      if err.errno != errno.EINTR:
566
        raise
567
    except select.error, err:
568
      if not (err.args and err.args[0] == errno.EINTR):
569
        raise
570

    
571

    
572
def RunParts(dir_name, env=None, reset_env=False):
573
  """Run Scripts or programs in a directory
574

575
  @type dir_name: string
576
  @param dir_name: absolute path to a directory
577
  @type env: dict
578
  @param env: The environment to use
579
  @type reset_env: boolean
580
  @param reset_env: whether to reset or keep the default os environment
581
  @rtype: list of tuples
582
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)
583

584
  """
585
  rr = []
586

    
587
  try:
588
    dir_contents = ListVisibleFiles(dir_name)
589
  except OSError, err:
590
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
591
    return rr
592

    
593
  for relname in sorted(dir_contents):
594
    fname = PathJoin(dir_name, relname)
595
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
596
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
597
      rr.append((relname, constants.RUNPARTS_SKIP, None))
598
    else:
599
      try:
600
        result = RunCmd([fname], env=env, reset_env=reset_env)
601
      except Exception, err: # pylint: disable-msg=W0703
602
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
603
      else:
604
        rr.append((relname, constants.RUNPARTS_RUN, result))
605

    
606
  return rr
607

    
608

    
609
def GetSocketCredentials(sock):
610
  """Returns the credentials of the foreign process connected to a socket.
611

612
  @param sock: Unix socket
613
  @rtype: tuple; (number, number, number)
614
  @return: The PID, UID and GID of the connected foreign process.
615

616
  """
617
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
618
                             _STRUCT_UCRED_SIZE)
619
  return struct.unpack(_STRUCT_UCRED, peercred)
620

    
621

    
622
def RemoveFile(filename):
623
  """Remove a file ignoring some errors.
624

625
  Remove a file, ignoring non-existing ones or directories. Other
626
  errors are passed.
627

628
  @type filename: str
629
  @param filename: the file to be removed
630

631
  """
632
  try:
633
    os.unlink(filename)
634
  except OSError, err:
635
    if err.errno not in (errno.ENOENT, errno.EISDIR):
636
      raise
637

    
638

    
639
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
640
  """Renames a file.
641

642
  @type old: string
643
  @param old: Original path
644
  @type new: string
645
  @param new: New path
646
  @type mkdir: bool
647
  @param mkdir: Whether to create target directory if it doesn't exist
648
  @type mkdir_mode: int
649
  @param mkdir_mode: Mode for newly created directories
650

651
  """
652
  try:
653
    return os.rename(old, new)
654
  except OSError, err:
655
    # In at least one use case of this function, the job queue, directory
656
    # creation is very rare. Checking for the directory before renaming is not
657
    # as efficient.
658
    if mkdir and err.errno == errno.ENOENT:
659
      # Create directory and try again
660
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
661

    
662
      return os.rename(old, new)
663

    
664
    raise
665

    
666

    
667
def Makedirs(path, mode=0750):
668
  """Super-mkdir; create a leaf directory and all intermediate ones.
669

670
  This is a wrapper around C{os.makedirs} adding error handling not implemented
671
  before Python 2.5.
672

673
  """
674
  try:
675
    os.makedirs(path, mode)
676
  except OSError, err:
677
    # Ignore EEXIST. This is only handled in os.makedirs as included in
678
    # Python 2.5 and above.
679
    if err.errno != errno.EEXIST or not os.path.exists(path):
680
      raise
681

    
682

    
683
def ResetTempfileModule():
684
  """Resets the random name generator of the tempfile module.
685

686
  This function should be called after C{os.fork} in the child process to
687
  ensure it creates a newly seeded random generator. Otherwise it would
688
  generate the same random parts as the parent process. If several processes
689
  race for the creation of a temporary file, this could lead to one not getting
690
  a temporary name.
691

692
  """
693
  # pylint: disable-msg=W0212
694
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
695
    tempfile._once_lock.acquire()
696
    try:
697
      # Reset random name generator
698
      tempfile._name_sequence = None
699
    finally:
700
      tempfile._once_lock.release()
701
  else:
702
    logging.critical("The tempfile module misses at least one of the"
703
                     " '_once_lock' and '_name_sequence' attributes")
704

    
705

    
706
def _FingerprintFile(filename):
707
  """Compute the fingerprint of a file.
708

709
  If the file does not exist, a None will be returned
710
  instead.
711

712
  @type filename: str
713
  @param filename: the filename to checksum
714
  @rtype: str
715
  @return: the hex digest of the sha checksum of the contents
716
      of the file
717

718
  """
719
  if not (os.path.exists(filename) and os.path.isfile(filename)):
720
    return None
721

    
722
  f = open(filename)
723

    
724
  if callable(sha1):
725
    fp = sha1()
726
  else:
727
    fp = sha1.new()
728
  while True:
729
    data = f.read(4096)
730
    if not data:
731
      break
732

    
733
    fp.update(data)
734

    
735
  return fp.hexdigest()
736

    
737

    
738
def FingerprintFiles(files):
739
  """Compute fingerprints for a list of files.
740

741
  @type files: list
742
  @param files: the list of filename to fingerprint
743
  @rtype: dict
744
  @return: a dictionary filename: fingerprint, holding only
745
      existing files
746

747
  """
748
  ret = {}
749

    
750
  for filename in files:
751
    cksum = _FingerprintFile(filename)
752
    if cksum:
753
      ret[filename] = cksum
754

    
755
  return ret
756

    
757

    
758
def ForceDictType(target, key_types, allowed_values=None):
759
  """Force the values of a dict to have certain types.
760

761
  @type target: dict
762
  @param target: the dict to update
763
  @type key_types: dict
764
  @param key_types: dict mapping target dict keys to types
765
                    in constants.ENFORCEABLE_TYPES
766
  @type allowed_values: list
767
  @keyword allowed_values: list of specially allowed values
768

769
  """
770
  if allowed_values is None:
771
    allowed_values = []
772

    
773
  if not isinstance(target, dict):
774
    msg = "Expected dictionary, got '%s'" % target
775
    raise errors.TypeEnforcementError(msg)
776

    
777
  for key in target:
778
    if key not in key_types:
779
      msg = "Unknown key '%s'" % key
780
      raise errors.TypeEnforcementError(msg)
781

    
782
    if target[key] in allowed_values:
783
      continue
784

    
785
    ktype = key_types[key]
786
    if ktype not in constants.ENFORCEABLE_TYPES:
787
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
788
      raise errors.ProgrammerError(msg)
789

    
790
    if ktype == constants.VTYPE_STRING:
791
      if not isinstance(target[key], basestring):
792
        if isinstance(target[key], bool) and not target[key]:
793
          target[key] = ''
794
        else:
795
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
796
          raise errors.TypeEnforcementError(msg)
797
    elif ktype == constants.VTYPE_BOOL:
798
      if isinstance(target[key], basestring) and target[key]:
799
        if target[key].lower() == constants.VALUE_FALSE:
800
          target[key] = False
801
        elif target[key].lower() == constants.VALUE_TRUE:
802
          target[key] = True
803
        else:
804
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
805
          raise errors.TypeEnforcementError(msg)
806
      elif target[key]:
807
        target[key] = True
808
      else:
809
        target[key] = False
810
    elif ktype == constants.VTYPE_SIZE:
811
      try:
812
        target[key] = ParseUnit(target[key])
813
      except errors.UnitParseError, err:
814
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
815
              (key, target[key], err)
816
        raise errors.TypeEnforcementError(msg)
817
    elif ktype == constants.VTYPE_INT:
818
      try:
819
        target[key] = int(target[key])
820
      except (ValueError, TypeError):
821
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
822
        raise errors.TypeEnforcementError(msg)
823

    
824

    
825
def IsProcessAlive(pid):
826
  """Check if a given pid exists on the system.
827

828
  @note: zombie status is not handled, so zombie processes
829
      will be returned as alive
830
  @type pid: int
831
  @param pid: the process ID to check
832
  @rtype: boolean
833
  @return: True if the process exists
834

835
  """
836
  if pid <= 0:
837
    return False
838

    
839
  try:
840
    os.stat("/proc/%d/status" % pid)
841
    return True
842
  except EnvironmentError, err:
843
    if err.errno in (errno.ENOENT, errno.ENOTDIR):
844
      return False
845
    raise
846

    
847

    
848
def ReadPidFile(pidfile):
849
  """Read a pid from a file.
850

851
  @type  pidfile: string
852
  @param pidfile: path to the file containing the pid
853
  @rtype: int
854
  @return: The process id, if the file exists and contains a valid PID,
855
           otherwise 0
856

857
  """
858
  try:
859
    raw_data = ReadFile(pidfile)
860
  except EnvironmentError, err:
861
    if err.errno != errno.ENOENT:
862
      logging.exception("Can't read pid file")
863
    return 0
864

    
865
  try:
866
    pid = int(raw_data)
867
  except (TypeError, ValueError), err:
868
    logging.info("Can't parse pid file contents", exc_info=True)
869
    return 0
870

    
871
  return pid
872

    
873

    
874
def ReadLockedPidFile(path):
875
  """Reads a locked PID file.
876

877
  This can be used together with L{StartDaemon}.
878

879
  @type path: string
880
  @param path: Path to PID file
881
  @return: PID as integer or, if file was unlocked or couldn't be opened, None
882

883
  """
884
  try:
885
    fd = os.open(path, os.O_RDONLY)
886
  except EnvironmentError, err:
887
    if err.errno == errno.ENOENT:
888
      # PID file doesn't exist
889
      return None
890
    raise
891

    
892
  try:
893
    try:
894
      # Try to acquire lock
895
      LockFile(fd)
896
    except errors.LockError:
897
      # Couldn't lock, daemon is running
898
      return int(os.read(fd, 100))
899
  finally:
900
    os.close(fd)
901

    
902
  return None
903

    
904

    
905
def MatchNameComponent(key, name_list, case_sensitive=True):
906
  """Try to match a name against a list.
907

908
  This function will try to match a name like test1 against a list
909
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
910
  this list, I{'test1'} as well as I{'test1.example'} will match, but
911
  not I{'test1.ex'}. A multiple match will be considered as no match
912
  at all (e.g. I{'test1'} against C{['test1.example.com',
913
  'test1.example.org']}), except when the key fully matches an entry
914
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
915

916
  @type key: str
917
  @param key: the name to be searched
918
  @type name_list: list
919
  @param name_list: the list of strings against which to search the key
920
  @type case_sensitive: boolean
921
  @param case_sensitive: whether to provide a case-sensitive match
922

923
  @rtype: None or str
924
  @return: None if there is no match I{or} if there are multiple matches,
925
      otherwise the element from the list which matches
926

927
  """
928
  if key in name_list:
929
    return key
930

    
931
  re_flags = 0
932
  if not case_sensitive:
933
    re_flags |= re.IGNORECASE
934
    key = key.upper()
935
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
936
  names_filtered = []
937
  string_matches = []
938
  for name in name_list:
939
    if mo.match(name) is not None:
940
      names_filtered.append(name)
941
      if not case_sensitive and key == name.upper():
942
        string_matches.append(name)
943

    
944
  if len(string_matches) == 1:
945
    return string_matches[0]
946
  if len(names_filtered) == 1:
947
    return names_filtered[0]
948
  return None
949

    
950

    
951
class HostInfo:
952
  """Class implementing resolver and hostname functionality
953

954
  """
955
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")
956

    
957
  def __init__(self, name=None):
958
    """Initialize the host name object.
959

960
    If the name argument is not passed, it will use this system's
961
    name.
962

963
    """
964
    if name is None:
965
      name = self.SysName()
966

    
967
    self.query = name
968
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
969
    self.ip = self.ipaddrs[0]
970

    
971
  def ShortName(self):
972
    """Returns the hostname without domain.
973

974
    """
975
    return self.name.split('.')[0]
976

    
977
  @staticmethod
978
  def SysName():
979
    """Return the current system's name.
980

981
    This is simply a wrapper over C{socket.gethostname()}.
982

983
    """
984
    return socket.gethostname()
985

    
986
  @staticmethod
987
  def LookupHostname(hostname):
988
    """Look up hostname
989

990
    @type hostname: str
991
    @param hostname: hostname to look up
992

993
    @rtype: tuple
994
    @return: a tuple (name, aliases, ipaddrs) as returned by
995
        C{socket.gethostbyname_ex}
996
    @raise errors.ResolverError: in case of errors in resolving
997

998
    """
999
    try:
1000
      result = socket.gethostbyname_ex(hostname)
1001
    except socket.gaierror, err:
1002
      # hostname not found in DNS
1003
      raise errors.ResolverError(hostname, err.args[0], err.args[1])
1004

    
1005
    return result
1006

    
1007
  @classmethod
1008
  def NormalizeName(cls, hostname):
1009
    """Validate and normalize the given hostname.
1010

1011
    @attention: the validation is a bit more relaxed than the standards
1012
        require; most importantly, we allow underscores in names
1013
    @raise errors.OpPrereqError: when the name is not valid
1014

1015
    """
1016
    hostname = hostname.lower()
1017
    if (not cls._VALID_NAME_RE.match(hostname) or
1018
        # double-dots, meaning empty label
1019
        ".." in hostname or
1020
        # empty initial label
1021
        hostname.startswith(".")):
1022
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
1023
                                 errors.ECODE_INVAL)
1024
    if hostname.endswith("."):
1025
      hostname = hostname.rstrip(".")
1026
    return hostname
1027

    
1028

    
1029
def GetHostInfo(name=None):
1030
  """Lookup host name and raise an OpPrereqError for failures"""
1031

    
1032
  try:
1033
    return HostInfo(name)
1034
  except errors.ResolverError, err:
1035
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
1036
                               (err[0], err[2]), errors.ECODE_RESOLVER)
1037

    
1038

    
1039
def ListVolumeGroups():
1040
  """List volume groups and their size
1041

1042
  @rtype: dict
1043
  @return:
1044
       Dictionary with keys volume name and values
1045
       the size of the volume
1046

1047
  """
1048
  command = "vgs --noheadings --units m --nosuffix -o name,size"
1049
  result = RunCmd(command)
1050
  retval = {}
1051
  if result.failed:
1052
    return retval
1053

    
1054
  for line in result.stdout.splitlines():
1055
    try:
1056
      name, size = line.split()
1057
      size = int(float(size))
1058
    except (IndexError, ValueError), err:
1059
      logging.error("Invalid output from vgs (%s): %s", err, line)
1060
      continue
1061

    
1062
    retval[name] = size
1063

    
1064
  return retval
1065

    
1066

    
1067
def BridgeExists(bridge):
1068
  """Check whether the given bridge exists in the system
1069

1070
  @type bridge: str
1071
  @param bridge: the bridge name to check
1072
  @rtype: boolean
1073
  @return: True if it does
1074

1075
  """
1076
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1077

    
1078

    
1079
def NiceSort(name_list):
1080
  """Sort a list of strings based on digit and non-digit groupings.
1081

1082
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1083
  will sort the list in the logical order C{['a1', 'a2', 'a10',
1084
  'a11']}.
1085

1086
  The sort algorithm breaks each name in groups of either only-digits
1087
  or no-digits. Only the first eight such groups are considered, and
1088
  after that we just use what's left of the string.
1089

1090
  @type name_list: list
1091
  @param name_list: the names to be sorted
1092
  @rtype: list
1093
  @return: a copy of the name list sorted with our algorithm
1094

1095
  """
1096
  _SORTER_BASE = "(\D+|\d+)"
1097
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1098
                                                  _SORTER_BASE, _SORTER_BASE,
1099
                                                  _SORTER_BASE, _SORTER_BASE,
1100
                                                  _SORTER_BASE, _SORTER_BASE)
1101
  _SORTER_RE = re.compile(_SORTER_FULL)
1102
  _SORTER_NODIGIT = re.compile("^\D*$")
1103
  def _TryInt(val):
1104
    """Attempts to convert a variable to integer."""
1105
    if val is None or _SORTER_NODIGIT.match(val):
1106
      return val
1107
    rval = int(val)
1108
    return rval
1109

    
1110
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1111
             for name in name_list]
1112
  to_sort.sort()
1113
  return [tup[1] for tup in to_sort]
1114

    
1115

    
1116
def TryConvert(fn, val):
1117
  """Try to convert a value ignoring errors.
1118

1119
  This function tries to apply function I{fn} to I{val}. If no
1120
  C{ValueError} or C{TypeError} exceptions are raised, it will return
1121
  the result, else it will return the original value. Any other
1122
  exceptions are propagated to the caller.
1123

1124
  @type fn: callable
1125
  @param fn: function to apply to the value
1126
  @param val: the value to be converted
1127
  @return: The converted value if the conversion was successful,
1128
      otherwise the original value.
1129

1130
  """
1131
  try:
1132
    nv = fn(val)
1133
  except (ValueError, TypeError):
1134
    nv = val
1135
  return nv
1136

    
1137

    
1138
def IsValidIP(ip):
1139
  """Verifies the syntax of an IPv4 address.
1140

1141
  This function checks if the IPv4 address passes is valid or not based
1142
  on syntax (not IP range, class calculations, etc.).
1143

1144
  @type ip: str
1145
  @param ip: the address to be checked
1146
  @rtype: a regular expression match object
1147
  @return: a regular expression match object, or None if the
1148
      address is not valid
1149

1150
  """
1151
  unit = "(0|[1-9]\d{0,2})"
1152
  #TODO: convert and return only boolean
1153
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
1154

    
1155

    
1156
def IsValidShellParam(word):
1157
  """Verifies is the given word is safe from the shell's p.o.v.
1158

1159
  This means that we can pass this to a command via the shell and be
1160
  sure that it doesn't alter the command line and is passed as such to
1161
  the actual command.
1162

1163
  Note that we are overly restrictive here, in order to be on the safe
1164
  side.
1165

1166
  @type word: str
1167
  @param word: the word to check
1168
  @rtype: boolean
1169
  @return: True if the word is 'safe'
1170

1171
  """
1172
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1173

    
1174

    
1175
def BuildShellCmd(template, *args):
1176
  """Build a safe shell command line from the given arguments.
1177

1178
  This function will check all arguments in the args list so that they
1179
  are valid shell parameters (i.e. they don't contain shell
1180
  metacharacters). If everything is ok, it will return the result of
1181
  template % args.
1182

1183
  @type template: str
1184
  @param template: the string holding the template for the
1185
      string formatting
1186
  @rtype: str
1187
  @return: the expanded command line
1188

1189
  """
1190
  for word in args:
1191
    if not IsValidShellParam(word):
1192
      raise errors.ProgrammerError("Shell argument '%s' contains"
1193
                                   " invalid characters" % word)
1194
  return template % args
1195

    
1196

    
1197
def FormatUnit(value, units):
1198
  """Formats an incoming number of MiB with the appropriate unit.
1199

1200
  @type value: int
1201
  @param value: integer representing the value in MiB (1048576)
1202
  @type units: char
1203
  @param units: the type of formatting we should do:
1204
      - 'h' for automatic scaling
1205
      - 'm' for MiBs
1206
      - 'g' for GiBs
1207
      - 't' for TiBs
1208
  @rtype: str
1209
  @return: the formatted value (with suffix)
1210

1211
  """
1212
  if units not in ('m', 'g', 't', 'h'):
1213
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1214

    
1215
  suffix = ''
1216

    
1217
  if units == 'm' or (units == 'h' and value < 1024):
1218
    if units == 'h':
1219
      suffix = 'M'
1220
    return "%d%s" % (round(value, 0), suffix)
1221

    
1222
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1223
    if units == 'h':
1224
      suffix = 'G'
1225
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1226

    
1227
  else:
1228
    if units == 'h':
1229
      suffix = 'T'
1230
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1231

    
1232

    
1233
def ParseUnit(input_string):
1234
  """Tries to extract number and scale from the given string.
1235

1236
  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1237
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1238
  is always an int in MiB.
1239

1240
  """
1241
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1242
  if not m:
1243
    raise errors.UnitParseError("Invalid format")
1244

    
1245
  value = float(m.groups()[0])
1246

    
1247
  unit = m.groups()[1]
1248
  if unit:
1249
    lcunit = unit.lower()
1250
  else:
1251
    lcunit = 'm'
1252

    
1253
  if lcunit in ('m', 'mb', 'mib'):
1254
    # Value already in MiB
1255
    pass
1256

    
1257
  elif lcunit in ('g', 'gb', 'gib'):
1258
    value *= 1024
1259

    
1260
  elif lcunit in ('t', 'tb', 'tib'):
1261
    value *= 1024 * 1024
1262

    
1263
  else:
1264
    raise errors.UnitParseError("Unknown unit: %s" % unit)
1265

    
1266
  # Make sure we round up
1267
  if int(value) < value:
1268
    value += 1
1269

    
1270
  # Round up to the next multiple of 4
1271
  value = int(value)
1272
  if value % 4:
1273
    value += 4 - value % 4
1274

    
1275
  return value
1276

    
1277

    
1278
def AddAuthorizedKey(file_name, key):
1279
  """Adds an SSH public key to an authorized_keys file.
1280

1281
  @type file_name: str
1282
  @param file_name: path to authorized_keys file
1283
  @type key: str
1284
  @param key: string containing key
1285

1286
  """
1287
  key_fields = key.split()
1288

    
1289
  f = open(file_name, 'a+')
1290
  try:
1291
    nl = True
1292
    for line in f:
1293
      # Ignore whitespace changes
1294
      if line.split() == key_fields:
1295
        break
1296
      nl = line.endswith('\n')
1297
    else:
1298
      if not nl:
1299
        f.write("\n")
1300
      f.write(key.rstrip('\r\n'))
1301
      f.write("\n")
1302
      f.flush()
1303
  finally:
1304
    f.close()
1305

    
1306

    
1307
def RemoveAuthorizedKey(file_name, key):
1308
  """Removes an SSH public key from an authorized_keys file.
1309

1310
  @type file_name: str
1311
  @param file_name: path to authorized_keys file
1312
  @type key: str
1313
  @param key: string containing key
1314

1315
  """
1316
  key_fields = key.split()
1317

    
1318
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1319
  try:
1320
    out = os.fdopen(fd, 'w')
1321
    try:
1322
      f = open(file_name, 'r')
1323
      try:
1324
        for line in f:
1325
          # Ignore whitespace changes while comparing lines
1326
          if line.split() != key_fields:
1327
            out.write(line)
1328

    
1329
        out.flush()
1330
        os.rename(tmpname, file_name)
1331
      finally:
1332
        f.close()
1333
    finally:
1334
      out.close()
1335
  except:
1336
    RemoveFile(tmpname)
1337
    raise
1338

    
1339

    
1340
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1341
  """Sets the name of an IP address and hostname in /etc/hosts.
1342

1343
  @type file_name: str
1344
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1345
  @type ip: str
1346
  @param ip: the IP address
1347
  @type hostname: str
1348
  @param hostname: the hostname to be added
1349
  @type aliases: list
1350
  @param aliases: the list of aliases to add for the hostname
1351

1352
  """
1353
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1354
  # Ensure aliases are unique
1355
  aliases = UniqueSequence([hostname] + aliases)[1:]
1356

    
1357
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1358
  try:
1359
    out = os.fdopen(fd, 'w')
1360
    try:
1361
      f = open(file_name, 'r')
1362
      try:
1363
        for line in f:
1364
          fields = line.split()
1365
          if fields and not fields[0].startswith('#') and ip == fields[0]:
1366
            continue
1367
          out.write(line)
1368

    
1369
        out.write("%s\t%s" % (ip, hostname))
1370
        if aliases:
1371
          out.write(" %s" % ' '.join(aliases))
1372
        out.write('\n')
1373

    
1374
        out.flush()
1375
        os.fsync(out)
1376
        os.chmod(tmpname, 0644)
1377
        os.rename(tmpname, file_name)
1378
      finally:
1379
        f.close()
1380
    finally:
1381
      out.close()
1382
  except:
1383
    RemoveFile(tmpname)
1384
    raise
1385

    
1386

    
1387
def AddHostToEtcHosts(hostname):
1388
  """Wrapper around SetEtcHostsEntry.
1389

1390
  @type hostname: str
1391
  @param hostname: a hostname that will be resolved and added to
1392
      L{constants.ETC_HOSTS}
1393

1394
  """
1395
  hi = HostInfo(name=hostname)
1396
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1397

    
1398

    
1399
def RemoveEtcHostsEntry(file_name, hostname):
1400
  """Removes a hostname from /etc/hosts.
1401

1402
  IP addresses without names are removed from the file.
1403

1404
  @type file_name: str
1405
  @param file_name: path to the file to modify (usually C{/etc/hosts})
1406
  @type hostname: str
1407
  @param hostname: the hostname to be removed
1408

1409
  """
1410
  # FIXME: use WriteFile + fn rather than duplicating its efforts
1411
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1412
  try:
1413
    out = os.fdopen(fd, 'w')
1414
    try:
1415
      f = open(file_name, 'r')
1416
      try:
1417
        for line in f:
1418
          fields = line.split()
1419
          if len(fields) > 1 and not fields[0].startswith('#'):
1420
            names = fields[1:]
1421
            if hostname in names:
1422
              while hostname in names:
1423
                names.remove(hostname)
1424
              if names:
1425
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
1426
              continue
1427

    
1428
          out.write(line)
1429

    
1430
        out.flush()
1431
        os.fsync(out)
1432
        os.chmod(tmpname, 0644)
1433
        os.rename(tmpname, file_name)
1434
      finally:
1435
        f.close()
1436
    finally:
1437
      out.close()
1438
  except:
1439
    RemoveFile(tmpname)
1440
    raise
1441

    
1442

    
1443
def RemoveHostFromEtcHosts(hostname):
1444
  """Wrapper around RemoveEtcHostsEntry.
1445

1446
  @type hostname: str
1447
  @param hostname: hostname that will be resolved and its
1448
      full and shot name will be removed from
1449
      L{constants.ETC_HOSTS}
1450

1451
  """
1452
  hi = HostInfo(name=hostname)
1453
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
1454
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
1455

    
1456

    
1457
def TimestampForFilename():
1458
  """Returns the current time formatted for filenames.
1459

1460
  The format doesn't contain colons as some shells and applications them as
1461
  separators.
1462

1463
  """
1464
  return time.strftime("%Y-%m-%d_%H_%M_%S")
1465

    
1466

    
1467
def CreateBackup(file_name):
1468
  """Creates a backup of a file.
1469

1470
  @type file_name: str
1471
  @param file_name: file to be backed up
1472
  @rtype: str
1473
  @return: the path to the newly created backup
1474
  @raise errors.ProgrammerError: for invalid file names
1475

1476
  """
1477
  if not os.path.isfile(file_name):
1478
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1479
                                file_name)
1480

    
1481
  prefix = ("%s.backup-%s." %
1482
            (os.path.basename(file_name), TimestampForFilename()))
1483
  dir_name = os.path.dirname(file_name)
1484

    
1485
  fsrc = open(file_name, 'rb')
1486
  try:
1487
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1488
    fdst = os.fdopen(fd, 'wb')
1489
    try:
1490
      logging.debug("Backing up %s at %s", file_name, backup_name)
1491
      shutil.copyfileobj(fsrc, fdst)
1492
    finally:
1493
      fdst.close()
1494
  finally:
1495
    fsrc.close()
1496

    
1497
  return backup_name
1498

    
1499

    
1500
def ShellQuote(value):
1501
  """Quotes shell argument according to POSIX.
1502

1503
  @type value: str
1504
  @param value: the argument to be quoted
1505
  @rtype: str
1506
  @return: the quoted value
1507

1508
  """
1509
  if _re_shell_unquoted.match(value):
1510
    return value
1511
  else:
1512
    return "'%s'" % value.replace("'", "'\\''")
1513

    
1514

    
1515
def ShellQuoteArgs(args):
1516
  """Quotes a list of shell arguments.
1517

1518
  @type args: list
1519
  @param args: list of arguments to be quoted
1520
  @rtype: str
1521
  @return: the quoted arguments concatenated with spaces
1522

1523
  """
1524
  return ' '.join([ShellQuote(i) for i in args])
1525

    
1526

    
1527
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
1528
  """Simple ping implementation using TCP connect(2).
1529

1530
  Check if the given IP is reachable by doing attempting a TCP connect
1531
  to it.
1532

1533
  @type target: str
1534
  @param target: the IP or hostname to ping
1535
  @type port: int
1536
  @param port: the port to connect to
1537
  @type timeout: int
1538
  @param timeout: the timeout on the connection attempt
1539
  @type live_port_needed: boolean
1540
  @param live_port_needed: whether a closed port will cause the
1541
      function to return failure, as if there was a timeout
1542
  @type source: str or None
1543
  @param source: if specified, will cause the connect to be made
1544
      from this specific source address; failures to bind other
1545
      than C{EADDRNOTAVAIL} will be ignored
1546

1547
  """
1548
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1549

    
1550
  success = False
1551

    
1552
  if source is not None:
1553
    try:
1554
      sock.bind((source, 0))
1555
    except socket.error, (errcode, _):
1556
      if errcode == errno.EADDRNOTAVAIL:
1557
        success = False
1558

    
1559
  sock.settimeout(timeout)
1560

    
1561
  try:
1562
    sock.connect((target, port))
1563
    sock.close()
1564
    success = True
1565
  except socket.timeout:
1566
    success = False
1567
  except socket.error, (errcode, _):
1568
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)
1569

    
1570
  return success
1571

    
1572

    
1573
def OwnIpAddress(address):
1574
  """Check if the current host has the the given IP address.
1575

1576
  Currently this is done by TCP-pinging the address from the loopback
1577
  address.
1578

1579
  @type address: string
1580
  @param address: the address to check
1581
  @rtype: bool
1582
  @return: True if we own the address
1583

1584
  """
1585
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
1586
                 source=constants.LOCALHOST_IP_ADDRESS)
1587

    
1588

    
1589
def ListVisibleFiles(path):
1590
  """Returns a list of visible files in a directory.
1591

1592
  @type path: str
1593
  @param path: the directory to enumerate
1594
  @rtype: list
1595
  @return: the list of all files not starting with a dot
1596
  @raise ProgrammerError: if L{path} is not an absolue and normalized path
1597

1598
  """
1599
  if not IsNormAbsPath(path):
1600
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1601
                                 " absolute/normalized: '%s'" % path)
1602
  files = [i for i in os.listdir(path) if not i.startswith(".")]
1603
  files.sort()
1604
  return files
1605

    
1606

    
1607
def GetHomeDir(user, default=None):
1608
  """Try to get the homedir of the given user.
1609

1610
  The user can be passed either as a string (denoting the name) or as
1611
  an integer (denoting the user id). If the user is not found, the
1612
  'default' argument is returned, which defaults to None.
1613

1614
  """
1615
  try:
1616
    if isinstance(user, basestring):
1617
      result = pwd.getpwnam(user)
1618
    elif isinstance(user, (int, long)):
1619
      result = pwd.getpwuid(user)
1620
    else:
1621
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1622
                                   type(user))
1623
  except KeyError:
1624
    return default
1625
  return result.pw_dir
1626

    
1627

    
1628
def NewUUID():
1629
  """Returns a random UUID.
1630

1631
  @note: This is a Linux-specific method as it uses the /proc
1632
      filesystem.
1633
  @rtype: str
1634

1635
  """
1636
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1637

    
1638

    
1639
def GenerateSecret(numbytes=20):
1640
  """Generates a random secret.
1641

1642
  This will generate a pseudo-random secret returning an hex string
1643
  (so that it can be used where an ASCII string is needed).
1644

1645
  @param numbytes: the number of bytes which will be represented by the returned
1646
      string (defaulting to 20, the length of a SHA1 hash)
1647
  @rtype: str
1648
  @return: an hex representation of the pseudo-random sequence
1649

1650
  """
1651
  return os.urandom(numbytes).encode('hex')
1652

    
1653

    
1654
def EnsureDirs(dirs):
1655
  """Make required directories, if they don't exist.
1656

1657
  @param dirs: list of tuples (dir_name, dir_mode)
1658
  @type dirs: list of (string, integer)
1659

1660
  """
1661
  for dir_name, dir_mode in dirs:
1662
    try:
1663
      os.mkdir(dir_name, dir_mode)
1664
    except EnvironmentError, err:
1665
      if err.errno != errno.EEXIST:
1666
        raise errors.GenericError("Cannot create needed directory"
1667
                                  " '%s': %s" % (dir_name, err))
1668
    if not os.path.isdir(dir_name):
1669
      raise errors.GenericError("%s is not a directory" % dir_name)
1670

    
1671

    
1672
def ReadFile(file_name, size=-1):
1673
  """Reads a file.
1674

1675
  @type size: int
1676
  @param size: Read at most size bytes (if negative, entire file)
1677
  @rtype: str
1678
  @return: the (possibly partial) content of the file
1679

1680
  """
1681
  f = open(file_name, "r")
1682
  try:
1683
    return f.read(size)
1684
  finally:
1685
    f.close()
1686

    
1687

    
1688
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result


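# Illustrative usage sketch (not part of the original module): WriteFile takes
# either the complete contents via "data" or a writer callback via "fn"; the
# paths below are hypothetical.
#
#   WriteFile("/tmp/example.txt", data="hello\n", mode=0644, backup=True)
#   WriteFile("/tmp/example.bin",
#             fn=lambda fd: os.write(fd, "raw payload"))

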
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used
      return idx + base
  return None


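# Illustrative sketch (not part of the original module): FirstFree returns the
# first "hole" in a sorted integer sequence, e.g.
#
#   FirstFree([0, 1, 3])          # -> 2
#   FirstFree([3, 4, 6], base=3)  # -> 5
#   FirstFree([0, 1, 2])          # -> None (no hole)

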
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
      raise
    io_events = []
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None


class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout has actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout


def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result


def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  return [i for i in seq if i not in seen and not seen.add(i)]


def NormalizeAndValidateMac(mac):
  """Normalizes and checks whether a MAC address is valid.

  Checks whether the supplied MAC address is formally correct; only
  the colon-separated format is accepted. The address is normalized
  to all-lowercase.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
  if not mac_check.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  return mac.lower()


def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) for a negative duration, (True, None)
      otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None


def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    if err.errno != errno.EBADF:
      if retries > 0:
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on


def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptors
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)


def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit
  UMASK = 077
  WORKDIR = "/"

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    os.setsid()
    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      os.chdir(WORKDIR)
      os.umask(UMASK)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    os._exit(0) # Exit parent of the first child.

  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0


def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True


def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pid = os.getpid()
  pidfilename = DaemonPidFileName(name)
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % pid)


def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    pass


def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)


def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: list of str
  @param search_path: the directories in which to search
  @type test: callable
  @param test: a function taking one argument that should return True
      if a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None


def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None


def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))


def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return float(seconds) + (float(microseconds) * 0.000001)


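# Illustrative sketch (not part of the original module): SplitTime and
# MergeTime are inverses of each other (modulo float rounding), e.g.
#
#   SplitTime(1234.56)           # -> (1234, 560000)
#   MergeTime((1234, 560000))    # -> 1234.56

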
def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  try:
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    port = default_port

  return port


def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if False we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise


def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This prevents paths like /dir/../../other/path from being considered
  valid.

  """
  return os.path.normpath(path) == path and os.path.isabs(path)


def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure at least one path is passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result


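# Illustrative sketch (not part of the original module): PathJoin rejects
# components that escape the original prefix, e.g.
#
#   PathJoin("/var/lib/ganeti", "config.data")
#   # -> "/var/lib/ganeti/config.data"
#   PathJoin("/var/lib/ganeti", "../etc/passwd")
#   # raises ValueError

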
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos-4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]


def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp with the local timezone.

  """
  return time.strftime("%F %T %Z", time.localtime(secs))


def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())


def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)


def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  if expired:
    msg = "Certificate is expired"

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTimestampWithTZ(not_before),
               FormatTimestampWithTZ(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)

    return (CERT_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTimestampWithTZ(not_before))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  return (None, None)


def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 time.time(), warn_days, error_days)


def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
           cert_pem))


def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      break

    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")


def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load certificate
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)


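# Illustrative round-trip sketch (not part of the original module); the common
# name, HMAC key and salt below are hypothetical values:
#
#   (_, cert_pem) = GenerateSelfSignedX509Cert("node1.example.com", 3600)
#   cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
#                                          cert_pem)
#   signed_pem = SignX509Certificate(cert, "hmac-key", "0123abcd")
#   (cert2, salt) = LoadSignedX509Certificate(signed_pem, "hmac-key")

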
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to calculate the digest of

  """
  if salt:
    salted_text = salt + text
  else:
    salted_text = text

  return hmac.new(key, salted_text, sha1).hexdigest()


def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @type digest: string
  @param digest: Expected digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()


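# Illustrative sketch (not part of the original module); key and salt are
# hypothetical values:
#
#   digest = Sha1Hmac("secret", "some text", salt="abc123")
#   VerifySha1Hmac("secret", "some text", digest, salt="abc123")   # -> True
#   VerifySha1Hmac("secret", "other text", digest, salt="abc123")  # -> False

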
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in Python; we
  don't use string_escape anymore since that escapes single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu


def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to appear inside an
  element. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist


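# Illustrative sketch (not part of the original module): the separator can be
# escaped with a backslash, e.g.
#
#   UnescapeAndSplit("a,b,c")     # -> ["a", "b", "c"]
#   UnescapeAndSplit(r"a\,b,c")   # -> ["a,b", "c"]
#   UnescapeAndSplit(r"a\\,b,c")  # -> ["a\\", "b", "c"]

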
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])


def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  return int(round(value / (1024.0 * 1024.0), 0))


def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)


def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)


def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)


def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper


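# Illustrative sketch (not part of the original module): the class below is
# hypothetical and assumes the caller imports the threading module; every
# decorated method then runs under the object's own _lock.
#
#   class Counter(object):
#     def __init__(self):
#       self._lock = threading.Lock()
#       self.value = 0
#
#     @LockedMethod
#     def Increment(self):
#       self.value += 1

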
def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise


def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two format codes work on Linux, but they are not guaranteed on
  # all platforms
  return time.strftime("%F %T", time.localtime(val))


def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None

      elif now > value:
        value = None

  return value


class RetryTimeout(Exception):
  """Retry loop timed out.

  """


class RetryAgain(Exception):
  """Retry again.

  """


class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  __slots__ = [
    "_factor",
    "_limit",
    "_next",
    "_start",
    ]

  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    assert start > 0.0
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._start = start
    self._factor = factor
    self._limit = limit

    self._next = start

  def __call__(self):
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run; without an upper limit the delay simply keeps
    # growing by the configured factor
    if self._limit is None:
      self._next *= self._factor
    elif self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)

    return current


#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()


def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain:
      pass
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      raise RetryTimeout()

    assert remaining_time >= 0.0

    if calc_delay is None:
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)


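# Illustrative sketch (not part of the original module): a caller polling a
# hypothetical _IsReady() predicate could retry for up to 10 seconds, starting
# with 0.1s between attempts and backing off up to 1s:
#
#   def _CheckReady():
#     if not _IsReady():
#       raise RetryAgain()
#
#   try:
#     Retry(_CheckReady, (0.1, 1.5, 1.0), 10.0)
#   except RetryTimeout:
#     logging.error("Still not ready after 10 seconds")

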
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(fd)
  return path


def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)


def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
  """Legacy function to generate self-signed X509 certificate.

  """
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
                                                   validity * 24 * 60 * 60)

  WriteFile(filename, mode=0400, data=key_pem + cert_pem)


class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)


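# Illustrative sketch (not part of the original module); the lock file path is
# hypothetical:
#
#   lock = FileLock.Open("/var/lock/ganeti-example.lock")
#   try:
#     lock.Exclusive(blocking=True, timeout=10)
#     pass # critical section goes here
#   finally:
#     lock.Unlock()
#     lock.Close()

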
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)


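# Illustrative sketch (not part of the original module):
#
#   collected = []
#   splitter = LineSplitter(collected.append)
#   splitter.write("first\nsec")
#   splitter.write("ond\nlast")
#   splitter.close()
#   # collected == ["first", "second", "last"]

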
def SignalHandled(signums):
  """Signal handling decorator.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        sighandler.Reset()
    return sig_function
  return wrap


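# Illustrative sketch (not part of the original module): the decorated function
# and DoOneIteration below are hypothetical.
#
#   @SignalHandled([signal.SIGTERM])
#   def MainLoop(signal_handlers=None):
#     while not signal_handlers[signal.SIGTERM].called:
#       DoOneIteration()

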
class SignalWakeupFd(object):
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()


class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)


class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static strings or regex objects
    - checking if a whole list of strings matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]