root / lib / utils.py @ b9768937


#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""


import os
import sys
import time
import subprocess
import re
import socket
import tempfile
import shutil
import errno
import pwd
import itertools
import select
import fcntl
import resource
import logging
import logging.handlers
import signal
import OpenSSL
import datetime
import calendar
import hmac
import collections
import struct
import IN

from cStringIO import StringIO

try:
  from hashlib import sha1
except ImportError:
  import sha as sha1

from ganeti import errors
from ganeti import constants


_locksheld = []
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

# Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
# struct ucred { pid_t pid; uid_t uid; gid_t gid; };
#
# The GNU C Library defines gid_t and uid_t to be "unsigned int" and
# pid_t to "int".
#
# IEEE Std 1003.1-2008:
# "nlink_t, uid_t, gid_t, and id_t shall be integer types"
# "blksize_t, pid_t, and ssize_t shall be signed integer types"
_STRUCT_UCRED = "iII"
_STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)

class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]


  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")


def _BuildCmdEnvironment(env, reset):
  """Builds the environment for an external program.

  """
  if reset:
    cmd_env = {}
  else:
    cmd_env = os.environ.copy()
    cmd_env["LC_ALL"] = "C"

  if env is not None:
    cmd_env.update(env)

  return cmd_env

def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if isinstance(cmd, basestring):
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)

def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to arrive) and read
        # up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))

def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    os.chdir("/")
    os.umask(077)
    os.setsid()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      try:
        # TODO: Atomic replace with another locked file instead of writing into
        # it after creating
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

        # Lock the PID file (and fail if not possible to do so). Any code
        # wanting to send a signal to the daemon should try to lock the PID
        # file before reading it. If acquiring the lock succeeds, the daemon is
        # no longer running and the signal should not be sent.
        LockFile(fd_pidfile)

        os.write(fd_pidfile, "%d\n" % os.getpid())
      except Exception, err:
        raise Exception("Creating and locking PID file failed: %s" % err)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    # Open /dev/null
    fd_devnull = os.open(os.devnull, os.O_RDWR)

    assert not output or (bool(output) ^ (fd_output is not None))

    if fd_output is not None:
      pass
    elif output:
      # Open output file
      try:
        # TODO: Implement flag to set append=yes/no
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
      except EnvironmentError, err:
        raise Exception("Opening output file failed: %s" % err)
    else:
      fd_output = fd_devnull

    # Redirect standard I/O
    os.dup2(fd_devnull, 0)
    os.dup2(fd_output, 1)
    os.dup2(fd_output, 2)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      buf = str(sys.exc_info()[1])

      RetryOnSignal(os.write, errpipe_write, buf)
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  os._exit(1) # pylint: disable-msg=W0212

def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,
                           cwd=cwd)

  child.stdin.close()
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
  out = StringIO()
  err = StringIO()
  fdmap = {
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    }
  for fd in fdmap:
    SetNonblockFlag(fd, True)

  while fdmap:
    pollresult = RetryOnSignal(poller.poll)

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        if not data:
          poller.unregister(fd)
          del fdmap[fd]
          continue
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)
        del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status


def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: int
  @return: the exit status

  """
  fh = open(output, "a")
  try:
    child = subprocess.Popen(cmd, shell=via_shell,
                             stderr=subprocess.STDOUT,
                             stdout=fh,
                             stdin=subprocess.PIPE,
                             close_fds=True, env=env,
                             cwd=cwd)

    child.stdin.close()
    status = child.wait()
  finally:
    fh.close()
  return status

def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)


def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)


def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError, err:
      if err.errno != errno.EINTR:
        raise
    except select.error, err:
      if not (err.args and err.args[0] == errno.EINTR):
        raise

def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError, err:
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception, err: # pylint: disable-msg=W0703
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr


def GetSocketCredentials(sock):
  """Returns the credentials of the foreign process connected to a socket.

  @param sock: Unix socket
  @rtype: tuple; (number, number, number)
  @return: The PID, UID and GID of the connected foreign process.

  """
  peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
                             _STRUCT_UCRED_SIZE)
  return struct.unpack(_STRUCT_UCRED, peercred)

def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError, err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise


def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError, err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise


def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  """
  try:
    os.makedirs(path, mode)
  except OSError, err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise


def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")

def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents
      of the file

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)

  if callable(sha1):
    fp = sha1()
  else:
    fp = sha1.new()
  while True:
    data = f.read(4096)
    if not data:
      break

    fp.update(data)

  return fp.hexdigest()


def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filenames to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    if cksum:
      ret[filename] = cksum

  return ret

def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
                    in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype == constants.VTYPE_STRING:
      if not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)

def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  if pid <= 0:
    return False

  try:
    os.stat("/proc/%d/status" % pid)
    return True
  except EnvironmentError, err:
    if err.errno in (errno.ENOENT, errno.ENOTDIR):
      return False
    raise


def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadFile(pidfile)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError), err:
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid


def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  return None

def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None


class HostInfo:
  """Class implementing resolver and hostname functionality

  """
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")

  def __init__(self, name=None):
    """Initialize the host name object.

    If the name argument is not passed, it will use this system's
    name.

    """
    if name is None:
      name = self.SysName()

    self.query = name
    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
    self.ip = self.ipaddrs[0]

  def ShortName(self):
    """Returns the hostname without domain.

    """
    return self.name.split('.')[0]

  @staticmethod
  def SysName():
    """Return the current system's name.

    This is simply a wrapper over C{socket.gethostname()}.

    """
    return socket.gethostname()

  @staticmethod
  def LookupHostname(hostname):
    """Look up hostname

    @type hostname: str
    @param hostname: hostname to look up

    @rtype: tuple
    @return: a tuple (name, aliases, ipaddrs) as returned by
        C{socket.gethostbyname_ex}
    @raise errors.ResolverError: in case of errors in resolving

    """
    try:
      result = socket.gethostbyname_ex(hostname)
    except socket.gaierror, err:
      # hostname not found in DNS
      raise errors.ResolverError(hostname, err.args[0], err.args[1])

    return result

  @classmethod
  def NormalizeName(cls, hostname):
    """Validate and normalize the given hostname.

    @attention: the validation is a bit more relaxed than the standards
        require; most importantly, we allow underscores in names
    @raise errors.OpPrereqError: when the name is not valid

    """
    hostname = hostname.lower()
    if (not cls._VALID_NAME_RE.match(hostname) or
        # double-dots, meaning empty label
        ".." in hostname or
        # empty initial label
        hostname.startswith(".")):
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
                                 errors.ECODE_INVAL)
    if hostname.endswith("."):
      hostname = hostname.rstrip(".")
    return hostname

def GetHostInfo(name=None):
  """Lookup host name and raise an OpPrereqError for failures"""

  try:
    return HostInfo(name)
  except errors.ResolverError, err:
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
                               (err[0], err[2]), errors.ECODE_RESOLVER)


def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval


def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)


def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")
  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    rval = int(val)
    return rval

  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]


def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv

def IsValidIP(ip):
  """Verifies the syntax of an IPv4 address.

  This function checks if the IPv4 address passed is valid or not based
  on syntax (not IP range, class calculations, etc.).

  @type ip: str
  @param ip: the address to be checked
  @rtype: a regular expression match object
  @return: a regular expression match object, or None if the
      address is not valid

  """
  unit = "(0|[1-9]\d{0,2})"
  #TODO: convert and return only boolean
  return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)


def IsValidShellParam(word):
  """Verifies if the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))


def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args

def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)


def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value

def AddAuthorizedKey(file_name, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  f = open(file_name, 'a+')
  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()


def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if fields and not fields[0].startswith('#') and ip == fields[0]:
            continue
          out.write(line)

        out.write("%s\t%s" % (ip, hostname))
        if aliases:
          out.write(" %s" % ' '.join(aliases))
        out.write('\n')

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])

def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if len(fields) > 1 and not fields[0].startswith('#'):
            names = fields[1:]
            if hostname in names:
              while hostname in names:
                names.remove(hostname)
              if names:
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
              continue

          out.write(line)

        out.flush()
        os.fsync(out)
        os.chmod(tmpname, 0644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise


def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())


def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat
  them as separators.

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")

def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name


def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    return value
  else:
    return "'%s'" % value.replace("'", "'\\''")


def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join([ShellQuote(i) for i in args])

def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP or hostname to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored

  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error, (errcode, _):
      if errcode == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    success = False
  except socket.error, (errcode, _):
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)

  return success


def OwnIpAddress(address):
  """Check if the current host has the given IP address.

  Currently this is done by TCP-pinging the address from the loopback
  address.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
                 source=constants.LOCALHOST_IP_ADDRESS)


def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  files.sort()
  return files

def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    return default
  return result.pw_dir


def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")


def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  return os.urandom(numbytes).encode('hex')


def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)


def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    return f.read(size)
  finally:
    f.close()

def WriteFile(file_name, fn=None, data=None,
1685
              mode=None, uid=-1, gid=-1,
1686
              atime=None, mtime=None, close=True,
1687
              dry_run=False, backup=False,
1688
              prewrite=None, postwrite=None):
1689
  """(Over)write a file atomically.
1690

1691
  The file_name and either fn (a function taking one argument, the
1692
  file descriptor, and which should write the data to it) or data (the
1693
  contents of the file) must be passed. The other arguments are
1694
  optional and allow setting the file mode, owner and group, and the
1695
  mtime/atime of the file.
1696

1697
  If the function doesn't raise an exception, it has succeeded and the
1698
  target file has the new contents. If the function has raised an
1699
  exception, an existing target file should be unmodified and the
1700
  temporary file should be removed.
1701

1702
  @type file_name: str
1703
  @param file_name: the target filename
1704
  @type fn: callable
1705
  @param fn: content writing function, called with
1706
      file descriptor as parameter
1707
  @type data: str
1708
  @param data: contents of the file
1709
  @type mode: int
1710
  @param mode: file mode
1711
  @type uid: int
1712
  @param uid: the owner of the file
1713
  @type gid: int
1714
  @param gid: the group of the file
1715
  @type atime: int
1716
  @param atime: a custom access time to be set on the file
1717
  @type mtime: int
1718
  @param mtime: a custom modification time to be set on the file
1719
  @type close: boolean
1720
  @param close: whether to close file after writing it
1721
  @type prewrite: callable
1722
  @param prewrite: function to be called before writing content
1723
  @type postwrite: callable
1724
  @param postwrite: function to be called after writing content
1725

1726
  @rtype: None or int
1727
  @return: None if the 'close' parameter evaluates to True,
1728
      otherwise the file descriptor
1729

1730
  @raise errors.ProgrammerError: if any of the arguments are not valid
1731

1732
  """
1733
  if not os.path.isabs(file_name):
1734
    raise errors.ProgrammerError("Path passed to WriteFile is not"
1735
                                 " absolute: '%s'" % file_name)
1736

    
1737
  if [fn, data].count(None) != 1:
1738
    raise errors.ProgrammerError("fn or data required")
1739

    
1740
  if [atime, mtime].count(None) == 1:
1741
    raise errors.ProgrammerError("Both atime and mtime must be either"
1742
                                 " set or None")
1743

    
1744
  if backup and not dry_run and os.path.isfile(file_name):
1745
    CreateBackup(file_name)
1746

    
1747
  dir_name, base_name = os.path.split(file_name)
1748
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1749
  do_remove = True
1750
  # here we need to make sure we remove the temp file, if any error
1751
  # leaves it in place
1752
  try:
1753
    if uid != -1 or gid != -1:
1754
      os.chown(new_name, uid, gid)
1755
    if mode:
1756
      os.chmod(new_name, mode)
1757
    if callable(prewrite):
1758
      prewrite(fd)
1759
    if data is not None:
1760
      os.write(fd, data)
1761
    else:
1762
      fn(fd)
1763
    if callable(postwrite):
1764
      postwrite(fd)
1765
    os.fsync(fd)
1766
    if atime is not None and mtime is not None:
1767
      os.utime(new_name, (atime, mtime))
1768
    if not dry_run:
1769
      os.rename(new_name, file_name)
1770
      do_remove = False
1771
  finally:
1772
    if close:
1773
      os.close(fd)
1774
      result = None
1775
    else:
1776
      result = fd
1777
    if do_remove:
1778
      RemoveFile(new_name)
1779

    
1780
  return result
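# Usage sketch (illustrative only; the path below is hypothetical): atomically
# replace a small status file, backing up any existing version first.
#   WriteFile("/var/lib/ganeti/example-status",   # hypothetical file name
#             data="running\n", mode=0644, backup=True)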
1781

    
1782

    
1783
def FirstFree(seq, base=0):
1784
  """Returns the first non-existing integer from seq.
1785

1786
  The seq argument should be a sorted list of positive integers. The
1787
  first time the index of an element is smaller than the element
1788
  value, the index will be returned.
1789

1790
  The base argument is used to start at a different offset,
1791
  i.e. C{[3, 4, 6]} with I{base=3} will return 5.
1792

1793
  Example: C{[0, 1, 3]} will return I{2}.
1794

1795
  @type seq: sequence
1796
  @param seq: the sequence to be analyzed.
1797
  @type base: int
1798
  @param base: use this value as the base index of the sequence
1799
  @rtype: int
1800
  @return: the first non-used index in the sequence
1801

1802
  """
1803
  for idx, elem in enumerate(seq):
1804
    assert elem >= base, "Passed element is higher than base offset"
1805
    if elem > idx + base:
1806
      # idx is not used
1807
      return idx + base
1808
  return None
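# Illustrative examples (not part of the original module): the first unused
# index is returned, or None when the sequence has no holes.
#   FirstFree([0, 1, 3])          # -> 2
#   FirstFree([3, 4, 6], base=3)  # -> 5
#   FirstFree([0, 1, 2])          # -> None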
1809

    
1810

    
1811
def SingleWaitForFdCondition(fdobj, event, timeout):
1812
  """Waits for a condition to occur on the socket.
1813

1814
  Immediately returns at the first interruption.
1815

1816
  @type fdobj: integer or object supporting a fileno() method
1817
  @param fdobj: entity to wait for events on
1818
  @type event: integer
1819
  @param event: ORed condition (see select module)
1820
  @type timeout: float or None
1821
  @param timeout: Timeout in seconds
1822
  @rtype: int or None
1823
  @return: None for timeout, otherwise the conditions that occurred
1824

1825
  """
1826
  check = (event | select.POLLPRI |
1827
           select.POLLNVAL | select.POLLHUP | select.POLLERR)
1828

    
1829
  if timeout is not None:
1830
    # Poller object expects milliseconds
1831
    timeout *= 1000
1832

    
1833
  poller = select.poll()
1834
  poller.register(fdobj, event)
1835
  try:
1836
    # TODO: If the main thread receives a signal and we have no timeout, we
1837
    # could wait forever. This should check a global "quit" flag or something
1838
    # every so often.
1839
    io_events = poller.poll(timeout)
1840
  except select.error, err:
1841
    if err[0] != errno.EINTR:
1842
      raise
1843
    io_events = []
1844
  if io_events and io_events[0][1] & check:
1845
    return io_events[0][1]
1846
  else:
1847
    return None
1848

    
1849

    
1850
class FdConditionWaiterHelper(object):
1851
  """Retry helper for WaitForFdCondition.
1852

1853
  This class contains the retry and wait functions that make sure
1854
  WaitForFdCondition can continue waiting until the timeout is actually
1855
  expired.
1856

1857
  """
1858

    
1859
  def __init__(self, timeout):
1860
    self.timeout = timeout
1861

    
1862
  def Poll(self, fdobj, event):
1863
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
1864
    if result is None:
1865
      raise RetryAgain()
1866
    else:
1867
      return result
1868

    
1869
  def UpdateTimeout(self, timeout):
1870
    self.timeout = timeout
1871

    
1872

    
1873
def WaitForFdCondition(fdobj, event, timeout):
1874
  """Waits for a condition to occur on the socket.
1875

1876
  Retries until the timeout is expired, even if interrupted.
1877

1878
  @type fdobj: integer or object supporting a fileno() method
1879
  @param fdobj: entity to wait for events on
1880
  @type event: integer
1881
  @param event: ORed condition (see select module)
1882
  @type timeout: float or None
1883
  @param timeout: Timeout in seconds
1884
  @rtype: int or None
1885
  @return: None for timeout, otherwise the conditions that occurred
1886

1887
  """
1888
  if timeout is not None:
1889
    retrywaiter = FdConditionWaiterHelper(timeout)
1890
    try:
1891
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1892
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1893
    except RetryTimeout:
1894
      result = None
1895
  else:
1896
    result = None
1897
    while result is None:
1898
      result = SingleWaitForFdCondition(fdobj, event, timeout)
1899
  return result
1900

    
1901

    
1902
def UniqueSequence(seq):
1903
  """Returns a list with unique elements.
1904

1905
  Element order is preserved.
1906

1907
  @type seq: sequence
1908
  @param seq: the sequence with the source elements
1909
  @rtype: list
1910
  @return: list of unique elements from seq
1911

1912
  """
1913
  seen = set()
1914
  return [i for i in seq if i not in seen and not seen.add(i)]
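# Example (illustrative only): duplicates are dropped, first occurrences keep
# their order.
#   UniqueSequence([1, 2, 1, 3, 2])  # -> [1, 2, 3]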
1915

    
1916

    
1917
def NormalizeAndValidateMac(mac):
1918
  """Normalizes and check if a MAC address is valid.
1919

1920
  Checks whether the supplied MAC address is formally correct, only
1921
  accepts the colon-separated format. The result is normalized to lowercase.
1922

1923
  @type mac: str
1924
  @param mac: the MAC to be validated
1925
  @rtype: str
1926
  @return: returns the normalized and validated MAC.
1927

1928
  @raise errors.OpPrereqError: If the MAC isn't valid
1929

1930
  """
1931
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
1932
  if not mac_check.match(mac):
1933
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
1934
                               mac, errors.ECODE_INVAL)
1935

    
1936
  return mac.lower()
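# Example (illustrative only): a well-formed address is returned lower-cased,
# anything else raises errors.OpPrereqError.
#   NormalizeAndValidateMac("AA:00:11:22:33:44")  # -> "aa:00:11:22:33:44"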
1937

    
1938

    
1939
def TestDelay(duration):
1940
  """Sleep for a fixed amount of time.
1941

1942
  @type duration: float
1943
  @param duration: the sleep duration
1944
  @rtype: tuple
  @return: (False, error message) for a negative duration,
      (True, None) otherwise
1946

1947
  """
1948
  if duration < 0:
1949
    return False, "Invalid sleep duration"
1950
  time.sleep(duration)
1951
  return True, None
1952

    
1953

    
1954
def _CloseFDNoErr(fd, retries=5):
1955
  """Close a file descriptor ignoring errors.
1956

1957
  @type fd: int
1958
  @param fd: the file descriptor
1959
  @type retries: int
1960
  @param retries: how many retries to make, in case we get any
1961
      other error than EBADF
1962

1963
  """
1964
  try:
1965
    os.close(fd)
1966
  except OSError, err:
1967
    if err.errno != errno.EBADF:
1968
      if retries > 0:
1969
        _CloseFDNoErr(fd, retries - 1)
1970
    # else either it's closed already or we're out of retries, so we
1971
    # ignore this and go on
1972

    
1973

    
1974
def CloseFDs(noclose_fds=None):
1975
  """Close file descriptors.
1976

1977
  This closes all file descriptors above 2 (i.e. except
1978
  stdin/out/err).
1979

1980
  @type noclose_fds: list or None
1981
  @param noclose_fds: if given, it denotes a list of file descriptors
1982
      that should not be closed
1983

1984
  """
1985
  # Default maximum for the number of available file descriptors.
1986
  if 'SC_OPEN_MAX' in os.sysconf_names:
1987
    try:
1988
      MAXFD = os.sysconf('SC_OPEN_MAX')
1989
      if MAXFD < 0:
1990
        MAXFD = 1024
1991
    except OSError:
1992
      MAXFD = 1024
1993
  else:
1994
    MAXFD = 1024
1995
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
1996
  if (maxfd == resource.RLIM_INFINITY):
1997
    maxfd = MAXFD
1998

    
1999
  # Iterate through and close all file descriptors (except the standard ones)
2000
  for fd in range(3, maxfd):
2001
    if noclose_fds and fd in noclose_fds:
2002
      continue
2003
    _CloseFDNoErr(fd)
2004

    
2005

    
2006
def Daemonize(logfile):
2007
  """Daemonize the current process.
2008

2009
  This detaches the current process from the controlling terminal and
2010
  runs it in the background as a daemon.
2011

2012
  @type logfile: str
2013
  @param logfile: the logfile to which we should redirect stdout/stderr
2014
  @rtype: int
2015
  @return: the value zero
2016

2017
  """
2018
  # pylint: disable-msg=W0212
2019
  # yes, we really want os._exit
2020
  UMASK = 077
2021
  WORKDIR = "/"
2022

    
2023
  # this might fail
2024
  pid = os.fork()
2025
  if (pid == 0):  # The first child.
2026
    os.setsid()
2027
    # this might fail
2028
    pid = os.fork() # Fork a second child.
2029
    if (pid == 0):  # The second child.
2030
      os.chdir(WORKDIR)
2031
      os.umask(UMASK)
2032
    else:
2033
      # exit() or _exit()?  See below.
2034
      os._exit(0) # Exit parent (the first child) of the second child.
2035
  else:
2036
    os._exit(0) # Exit parent of the first child.
2037

    
2038
  for fd in range(3):
2039
    _CloseFDNoErr(fd)
2040
  i = os.open("/dev/null", os.O_RDONLY) # stdin
2041
  assert i == 0, "Can't close/reopen stdin"
2042
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2043
  assert i == 1, "Can't close/reopen stdout"
2044
  # Duplicate standard output to standard error.
2045
  os.dup2(1, 2)
2046
  return 0
2047

    
2048

    
2049
def DaemonPidFileName(name):
2050
  """Compute a ganeti pid file absolute path
2051

2052
  @type name: str
2053
  @param name: the daemon name
2054
  @rtype: str
2055
  @return: the full path to the pidfile corresponding to the given
2056
      daemon name
2057

2058
  """
2059
  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2060

    
2061

    
2062
def EnsureDaemon(name):
2063
  """Check for and start daemon if not alive.
2064

2065
  """
2066
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2067
  if result.failed:
2068
    logging.error("Can't start daemon '%s', failure %s, output: %s",
2069
                  name, result.fail_reason, result.output)
2070
    return False
2071

    
2072
  return True
2073

    
2074

    
2075
def WritePidFile(name):
2076
  """Write the current process pidfile.
2077

2078
  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2079

2080
  @type name: str
2081
  @param name: the daemon name to use
2082
  @raise errors.GenericError: if the pid file already exists and
2083
      points to a live process
2084

2085
  """
2086
  pid = os.getpid()
2087
  pidfilename = DaemonPidFileName(name)
2088
  if IsProcessAlive(ReadPidFile(pidfilename)):
2089
    raise errors.GenericError("%s contains a live process" % pidfilename)
2090

    
2091
  WriteFile(pidfilename, data="%d\n" % pid)
2092

    
2093

    
2094
def RemovePidFile(name):
2095
  """Remove the current process pidfile.
2096

2097
  Any errors are ignored.
2098

2099
  @type name: str
2100
  @param name: the daemon name used to derive the pidfile name
2101

2102
  """
2103
  pidfilename = DaemonPidFileName(name)
2104
  # TODO: we could check here that the file contains our pid
2105
  try:
2106
    RemoveFile(pidfilename)
2107
  except: # pylint: disable-msg=W0702
2108
    pass
2109

    
2110

    
2111
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2112
                waitpid=False):
2113
  """Kill a process given by its pid.
2114

2115
  @type pid: int
2116
  @param pid: The PID to terminate.
2117
  @type signal_: int
2118
  @param signal_: The signal to send, by default SIGTERM
2119
  @type timeout: int
2120
  @param timeout: The timeout after which, if the process is still alive,
2121
                  a SIGKILL will be sent. If not positive, no such checking
2122
                  will be done
2123
  @type waitpid: boolean
2124
  @param waitpid: If true, we should waitpid on this process after
2125
      sending signals, since it's our own child and otherwise it
2126
      would remain as zombie
2127

2128
  """
2129
  def _helper(pid, signal_, wait):
2130
    """Simple helper to encapsulate the kill/waitpid sequence"""
2131
    os.kill(pid, signal_)
2132
    if wait:
2133
      try:
2134
        os.waitpid(pid, os.WNOHANG)
2135
      except OSError:
2136
        pass
2137

    
2138
  if pid <= 0:
2139
    # kill with pid=0 == suicide
2140
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2141

    
2142
  if not IsProcessAlive(pid):
2143
    return
2144

    
2145
  _helper(pid, signal_, waitpid)
2146

    
2147
  if timeout <= 0:
2148
    return
2149

    
2150
  def _CheckProcess():
2151
    if not IsProcessAlive(pid):
2152
      return
2153

    
2154
    try:
2155
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2156
    except OSError:
2157
      raise RetryAgain()
2158

    
2159
    if result_pid > 0:
2160
      return
2161

    
2162
    raise RetryAgain()
2163

    
2164
  try:
2165
    # Wait up to $timeout seconds
2166
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2167
  except RetryTimeout:
2168
    pass
2169

    
2170
  if IsProcessAlive(pid):
2171
    # Kill process if it's still alive
2172
    _helper(pid, signal.SIGKILL, waitpid)
2173

    
2174

    
2175
def FindFile(name, search_path, test=os.path.exists):
2176
  """Look for a filesystem object in a given path.
2177

2178
  This is an abstract method to search for filesystem object (files,
2179
  dirs) under a given search path.
2180

2181
  @type name: str
2182
  @param name: the name to look for
2183
  @type search_path: str
2184
  @param search_path: location to start at
2185
  @type test: callable
2186
  @param test: a function taking one argument that should return True
2187
      if a given object is valid; the default value is
2188
      os.path.exists, causing only existing files to be returned
2189
  @rtype: str or None
2190
  @return: full path to the object if found, None otherwise
2191

2192
  """
2193
  # validate the filename mask
2194
  if constants.EXT_PLUGIN_MASK.match(name) is None:
2195
    logging.critical("Invalid value passed for external script name: '%s'",
2196
                     name)
2197
    return None
2198

    
2199
  for dir_name in search_path:
2200
    # FIXME: investigate switch to PathJoin
2201
    item_name = os.path.sep.join([dir_name, name])
2202
    # check the user test and that we're indeed resolving to the given
2203
    # basename
2204
    if test(item_name) and os.path.basename(item_name) == name:
2205
      return item_name
2206
  return None
2207

    
2208

    
2209
def CheckVolumeGroupSize(vglist, vgname, minsize):
2210
  """Checks if the volume group list is valid.
2211

2212
  The function will check if a given volume group is in the list of
2213
  volume groups and has a minimum size.
2214

2215
  @type vglist: dict
2216
  @param vglist: dictionary of volume group names and their size
2217
  @type vgname: str
2218
  @param vgname: the volume group we should check
2219
  @type minsize: int
2220
  @param minsize: the minimum size we accept
2221
  @rtype: None or str
2222
  @return: None for success, otherwise the error message
2223

2224
  """
2225
  vgsize = vglist.get(vgname, None)
2226
  if vgsize is None:
2227
    return "volume group '%s' missing" % vgname
2228
  elif vgsize < minsize:
2229
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2230
            (vgname, minsize, vgsize))
2231
  return None
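# Example (illustrative only, with made-up volume group data in MiB):
#   CheckVolumeGroupSize({"xenvg": 20480}, "xenvg", 10240)    # -> None (OK)
#   CheckVolumeGroupSize({"xenvg": 20480}, "missing", 10240)  # -> error message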
2232

    
2233

    
2234
def SplitTime(value):
2235
  """Splits time as floating point number into a tuple.
2236

2237
  @param value: Time in seconds
2238
  @type value: int or float
2239
  @return: Tuple containing (seconds, microseconds)
2240

2241
  """
2242
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2243

    
2244
  assert 0 <= seconds, \
2245
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2246
  assert 0 <= microseconds <= 999999, \
2247
    "Microseconds must be 0-999999, but are %s" % microseconds
2248

    
2249
  return (int(seconds), int(microseconds))
2250

    
2251

    
2252
def MergeTime(timetuple):
2253
  """Merges a tuple into time as a floating point number.
2254

2255
  @param timetuple: Time as tuple, (seconds, microseconds)
2256
  @type timetuple: tuple
2257
  @return: Time as a floating point number expressed in seconds
2258

2259
  """
2260
  (seconds, microseconds) = timetuple
2261

    
2262
  assert 0 <= seconds, \
2263
    "Seconds must be larger than or equal to 0, but are %s" % seconds
2264
  assert 0 <= microseconds <= 999999, \
2265
    "Microseconds must be 0-999999, but are %s" % microseconds
2266

    
2267
  return float(seconds) + (float(microseconds) * 0.000001)
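# Round-trip sketch (illustrative only): SplitTime and MergeTime are inverses
# up to microsecond resolution (and the usual floating point rounding).
#   SplitTime(1234.5678)       # -> (1234, 567800)
#   MergeTime((1234, 567800))  # -> approximately 1234.5678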
2268

    
2269

    
2270
def GetDaemonPort(daemon_name):
2271
  """Get the daemon port for this cluster.
2272

2273
  Note that this routine does not read a ganeti-specific file, but
2274
  instead uses C{socket.getservbyname} to allow pre-customization of
2275
  this parameter outside of Ganeti.
2276

2277
  @type daemon_name: string
2278
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
2279
  @rtype: int
2280

2281
  """
2282
  if daemon_name not in constants.DAEMONS_PORTS:
2283
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)
2284

    
2285
  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
2286
  try:
2287
    port = socket.getservbyname(daemon_name, proto)
2288
  except socket.error:
2289
    port = default_port
2290

    
2291
  return port
2292

    
2293

    
2294
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2295
                 multithreaded=False, syslog=constants.SYSLOG_USAGE):
2296
  """Configures the logging module.
2297

2298
  @type logfile: str
2299
  @param logfile: the filename to which we should log
2300
  @type debug: integer
2301
  @param debug: if greater than zero, enable debug messages, otherwise
2302
      only those at C{INFO} and above level
2303
  @type stderr_logging: boolean
2304
  @param stderr_logging: whether we should also log to the standard error
2305
  @type program: str
2306
  @param program: the name under which we should log messages
2307
  @type multithreaded: boolean
2308
  @param multithreaded: if True, will add the thread name to the log file
2309
  @type syslog: string
2310
  @param syslog: one of 'no', 'yes', 'only':
2311
      - if no, syslog is not used
2312
      - if yes, syslog is used (in addition to file-logging)
2313
      - if only, only syslog is used
2314
  @raise EnvironmentError: if we can't open the log file and
2315
      syslog/stderr logging is disabled
2316

2317
  """
2318
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
2319
  sft = program + "[%(process)d]:"
2320
  if multithreaded:
2321
    fmt += "/%(threadName)s"
2322
    sft += " (%(threadName)s)"
2323
  if debug:
2324
    fmt += " %(module)s:%(lineno)s"
2325
    # no debug info for syslog loggers
2326
  fmt += " %(levelname)s %(message)s"
2327
  # yes, we do want the textual level, as remote syslog will probably
2328
  # lose the error level, and it's easier to grep for it
2329
  sft += " %(levelname)s %(message)s"
2330
  formatter = logging.Formatter(fmt)
2331
  sys_fmt = logging.Formatter(sft)
2332

    
2333
  root_logger = logging.getLogger("")
2334
  root_logger.setLevel(logging.NOTSET)
2335

    
2336
  # Remove all previously setup handlers
2337
  for handler in root_logger.handlers:
2338
    handler.close()
2339
    root_logger.removeHandler(handler)
2340

    
2341
  if stderr_logging:
2342
    stderr_handler = logging.StreamHandler()
2343
    stderr_handler.setFormatter(formatter)
2344
    if debug:
2345
      stderr_handler.setLevel(logging.NOTSET)
2346
    else:
2347
      stderr_handler.setLevel(logging.CRITICAL)
2348
    root_logger.addHandler(stderr_handler)
2349

    
2350
  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2351
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
2352
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2353
                                                    facility)
2354
    syslog_handler.setFormatter(sys_fmt)
2355
    # Never enable debug over syslog
2356
    syslog_handler.setLevel(logging.INFO)
2357
    root_logger.addHandler(syslog_handler)
2358

    
2359
  if syslog != constants.SYSLOG_ONLY:
2360
    # this can fail, if the logging directories are not setup or we have
2361
    # a permission problem; in this case, it's best to log but ignore
2362
    # the error if stderr_logging is True, and if false we re-raise the
2363
    # exception since otherwise we could run but without any logs at all
2364
    try:
2365
      logfile_handler = logging.FileHandler(logfile)
2366
      logfile_handler.setFormatter(formatter)
2367
      if debug:
2368
        logfile_handler.setLevel(logging.DEBUG)
2369
      else:
2370
        logfile_handler.setLevel(logging.INFO)
2371
      root_logger.addHandler(logfile_handler)
2372
    except EnvironmentError:
2373
      if stderr_logging or syslog == constants.SYSLOG_YES:
2374
        logging.exception("Failed to enable logging to file '%s'", logfile)
2375
      else:
2376
        # we need to re-raise the exception
2377
        raise
2378

    
2379

    
2380
def IsNormAbsPath(path):
2381
  """Check whether a path is absolute and also normalized
2382

2383
  This prevents paths like /dir/../../other/path from being considered valid.
2384

2385
  """
2386
  return os.path.normpath(path) == path and os.path.isabs(path)
2387

    
2388

    
2389
def PathJoin(*args):
2390
  """Safe-join a list of path components.
2391

2392
  Requirements:
2393
      - the first argument must be an absolute path
2394
      - no component in the path must have backtracking (e.g. /../),
2395
        since we check for normalization at the end
2396

2397
  @param args: the path components to be joined
2398
  @raise ValueError: for invalid paths
2399

2400
  """
2401
  # ensure we're having at least one path passed in
2402
  assert args
2403
  # ensure the first component is an absolute and normalized path name
2404
  root = args[0]
2405
  if not IsNormAbsPath(root):
2406
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2407
  result = os.path.join(*args)
2408
  # ensure that the whole path is normalized
2409
  if not IsNormAbsPath(result):
2410
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2411
  # check that we're still under the original prefix
2412
  prefix = os.path.commonprefix([root, result])
2413
  if prefix != root:
2414
    raise ValueError("Error: path joining resulted in different prefix"
2415
                     " (%s != %s)" % (prefix, root))
2416
  return result
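# Example (illustrative only): the joined path must stay under the first
# component, otherwise ValueError is raised.
#   PathJoin("/var/lib", "ganeti", "config")  # -> "/var/lib/ganeti/config"
#   PathJoin("/var/lib", "../etc")            # raises ValueError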
2417

    
2418

    
2419
def TailFile(fname, lines=20):
2420
  """Return the last lines from a file.
2421

2422
  @note: this function will only read and parse the last 4KB of
2423
      the file; if the lines are very long, it could be that less
2424
      than the requested number of lines are returned
2425

2426
  @param fname: the file name
2427
  @type lines: int
2428
  @param lines: the (maximum) number of lines to return
2429

2430
  """
2431
  fd = open(fname, "r")
2432
  try:
2433
    fd.seek(0, 2)
2434
    pos = fd.tell()
2435
    pos = max(0, pos-4096)
2436
    fd.seek(pos, 0)
2437
    raw_data = fd.read()
2438
  finally:
2439
    fd.close()
2440

    
2441
  rows = raw_data.splitlines()
2442
  return rows[-lines:]
2443

    
2444

    
2445
def _ParseAsn1Generalizedtime(value):
2446
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2447

2448
  @type value: string
2449
  @param value: ASN1 GENERALIZEDTIME timestamp
2450

2451
  """
2452
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2453
  if m:
2454
    # We have an offset
2455
    asn1time = m.group(1)
2456
    hours = int(m.group(2))
2457
    minutes = int(m.group(3))
2458
    utcoffset = (60 * hours) + minutes
2459
  else:
2460
    if not value.endswith("Z"):
2461
      raise ValueError("Missing timezone")
2462
    asn1time = value[:-1]
2463
    utcoffset = 0
2464

    
2465
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2466

    
2467
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2468

    
2469
  return calendar.timegm(tt.utctimetuple())
2470

    
2471

    
2472
def GetX509CertValidity(cert):
2473
  """Returns the validity period of the certificate.
2474

2475
  @type cert: OpenSSL.crypto.X509
2476
  @param cert: X509 certificate object
2477

2478
  """
2479
  # The get_notBefore and get_notAfter functions are only supported in
2480
  # pyOpenSSL 0.7 and above.
2481
  try:
2482
    get_notbefore_fn = cert.get_notBefore
2483
  except AttributeError:
2484
    not_before = None
2485
  else:
2486
    not_before_asn1 = get_notbefore_fn()
2487

    
2488
    if not_before_asn1 is None:
2489
      not_before = None
2490
    else:
2491
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2492

    
2493
  try:
2494
    get_notafter_fn = cert.get_notAfter
2495
  except AttributeError:
2496
    not_after = None
2497
  else:
2498
    not_after_asn1 = get_notafter_fn()
2499

    
2500
    if not_after_asn1 is None:
2501
      not_after = None
2502
    else:
2503
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2504

    
2505
  return (not_before, not_after)
2506

    
2507

    
2508
def SignX509Certificate(cert, key, salt):
2509
  """Sign a X509 certificate.
2510

2511
  An RFC822-like signature header is added in front of the certificate.
2512

2513
  @type cert: OpenSSL.crypto.X509
2514
  @param cert: X509 certificate object
2515
  @type key: string
2516
  @param key: Key for HMAC
2517
  @type salt: string
2518
  @param salt: Salt for HMAC
2519
  @rtype: string
2520
  @return: Serialized and signed certificate in PEM format
2521

2522
  """
2523
  if not VALID_X509_SIGNATURE_SALT.match(salt):
2524
    raise errors.GenericError("Invalid salt: %r" % salt)
2525

    
2526
  # Dumping as PEM here ensures the certificate is in a sane format
2527
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2528

    
2529
  return ("%s: %s/%s\n\n%s" %
2530
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
2531
           hmac.new(key, salt + cert_pem, sha1).hexdigest(),
2532
           cert_pem))
2533

    
2534

    
2535
def _ExtractX509CertificateSignature(cert_pem):
2536
  """Helper function to extract signature from X509 certificate.
2537

2538
  """
2539
  # Extract signature from original PEM data
2540
  for line in cert_pem.splitlines():
2541
    if line.startswith("---"):
2542
      break
2543

    
2544
    m = X509_SIGNATURE.match(line.strip())
2545
    if m:
2546
      return (m.group("salt"), m.group("sign"))
2547

    
2548
  raise errors.GenericError("X509 certificate signature is missing")
2549

    
2550

    
2551
def LoadSignedX509Certificate(cert_pem, key):
2552
  """Verifies a signed X509 certificate.
2553

2554
  @type cert_pem: string
2555
  @param cert_pem: Certificate in PEM format and with signature header
2556
  @type key: string
2557
  @param key: Key for HMAC
2558
  @rtype: tuple; (OpenSSL.crypto.X509, string)
2559
  @return: X509 certificate object and salt
2560

2561
  """
2562
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2563

    
2564
  # Load certificate
2565
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2566

    
2567
  # Dump again to ensure it's in a sane format
2568
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2569

    
2570
  if signature != hmac.new(key, salt + sane_pem, sha1).hexdigest():
2571
    raise errors.GenericError("X509 certificate signature is invalid")
2572

    
2573
  return (cert, salt)
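# Verification sketch (illustrative only, hypothetical values): "cert" is an
# OpenSSL.crypto.X509 object and "secret" is the HMAC key shared with the
# verifier.
#   signed_pem = SignX509Certificate(cert, secret, "abc123")
#   (cert2, salt) = LoadSignedX509Certificate(signed_pem, secret)
#   assert salt == "abc123"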
2574

    
2575

    
2576
def SafeEncode(text):
2577
  """Return a 'safe' version of a source string.
2578

2579
  This function mangles the input string and returns a version that
2580
  should be safe to display/encode as ASCII. To this end, we first
2581
  convert it to ASCII using the 'backslashreplace' encoding which
2582
  should get rid of any non-ASCII chars, and then we process it
2583
  through a loop copied from the string repr sources in the python; we
2584
  don't use string_escape anymore since that escape single quotes and
2585
  backslashes too, and that is too much; and that escaping is not
2586
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2587

2588
  @type text: str or unicode
2589
  @param text: input data
2590
  @rtype: str
2591
  @return: a safe version of text
2592

2593
  """
2594
  if isinstance(text, unicode):
2595
    # only if unicode; if str already, we handle it below
2596
    text = text.encode('ascii', 'backslashreplace')
2597
  resu = ""
2598
  for char in text:
2599
    c = ord(char)
2600
    if char == '\t':
2601
      resu += r'\t'
2602
    elif char == '\n':
2603
      resu += r'\n'
2604
    elif char == '\r':
2605
      resu += r'\r'
2606
    elif c < 32 or c >= 127: # non-printable
2607
      resu += "\\x%02x" % (c & 0xff)
2608
    else:
2609
      resu += char
2610
  return resu
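# Examples (illustrative only): control characters and non-ASCII bytes are
# escaped, printable ASCII is left alone.
#   SafeEncode("ok\tvalue\n")  # -> 'ok\\tvalue\\n'
#   SafeEncode("caf\xe9")      # -> 'caf\\xe9'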
2611

    
2612

    
2613
def UnescapeAndSplit(text, sep=","):
2614
  """Split and unescape a string based on a given separator.
2615

2616
  This function splits a string based on a separator where the
2617
  separator itself can be escaped in order to be part of an
  element. The escaping rules are (assuming comma is the
2619
  separator):
2620
    - a plain , separates the elements
2621
    - a sequence \\\\, (double backslash plus comma) is handled as a
2622
      backslash plus a separator comma
2623
    - a sequence \, (backslash plus comma) is handled as a
2624
      non-separator comma
2625

2626
  @type text: string
2627
  @param text: the string to split
2628
  @type sep: string
2629
  @param sep: the separator
  @rtype: list
2631
  @return: a list of strings
2632

2633
  """
2634
  # we split the list by sep (with no escaping at this stage)
2635
  slist = text.split(sep)
2636
  # next, we revisit the elements and if any of them ended with an odd
2637
  # number of backslashes, then we join it with the next
2638
  rlist = []
2639
  while slist:
2640
    e1 = slist.pop(0)
2641
    if e1.endswith("\\"):
2642
      num_b = len(e1) - len(e1.rstrip("\\"))
2643
      if num_b % 2 == 1:
2644
        e2 = slist.pop(0)
2645
        # here the backslashes remain (all), and will be reduced in
2646
        # the next step
2647
        rlist.append(e1 + sep + e2)
2648
        continue
2649
    rlist.append(e1)
2650
  # finally, replace backslash-something with something
2651
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
2652
  return rlist
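# Examples (illustrative only): "\," keeps a comma inside an element, while
# "\\," is an escaped backslash followed by a real separator.
#   UnescapeAndSplit(r"a,b\,c,d")  # -> ["a", "b,c", "d"]
#   UnescapeAndSplit(r"a\\,b")     # -> ["a\\", "b"]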
2653

    
2654

    
2655
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join([str(val) for val in names])
2663

    
2664

    
2665
def BytesToMebibyte(value):
2666
  """Converts bytes to mebibytes.
2667

2668
  @type value: int
2669
  @param value: Value in bytes
2670
  @rtype: int
2671
  @return: Value in mebibytes
2672

2673
  """
2674
  return int(round(value / (1024.0 * 1024.0), 0))
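# Examples (illustrative only): values are rounded to the nearest MiB.
#   BytesToMebibyte(1024 * 1024)  # -> 1
#   BytesToMebibyte(1536 * 1024)  # -> 2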
2675

    
2676

    
2677
def CalculateDirectorySize(path):
2678
  """Calculates the size of a directory recursively.
2679

2680
  @type path: string
2681
  @param path: Path to directory
2682
  @rtype: int
2683
  @return: Size in mebibytes
2684

2685
  """
2686
  size = 0
2687

    
2688
  for (curpath, _, files) in os.walk(path):
2689
    for filename in files:
2690
      st = os.lstat(PathJoin(curpath, filename))
2691
      size += st.st_size
2692

    
2693
  return BytesToMebibyte(size)
2694

    
2695

    
2696
def GetFilesystemStats(path):
2697
  """Returns the total and free space on a filesystem.
2698

2699
  @type path: string
2700
  @param path: Path on filesystem to be examined
2701
  @rtype: tuple
2702
  @return: tuple of (Total space, Free space) in mebibytes
2703

2704
  """
2705
  st = os.statvfs(path)
2706

    
2707
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
2708
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
2709
  return (tsize, fsize)
2710

    
2711

    
2712
def RunInSeparateProcess(fn, *args):
2713
  """Runs a function in a separate process.
2714

2715
  Note: Only boolean return values are supported.
2716

2717
  @type fn: callable
2718
  @param fn: Function to be called
2719
  @rtype: bool
2720
  @return: Function's result
2721

2722
  """
2723
  pid = os.fork()
2724
  if pid == 0:
2725
    # Child process
2726
    try:
2727
      # In case the function uses temporary files
2728
      ResetTempfileModule()
2729

    
2730
      # Call function
2731
      result = int(bool(fn(*args)))
2732
      assert result in (0, 1)
2733
    except: # pylint: disable-msg=W0702
2734
      logging.exception("Error while calling function in separate process")
2735
      # 0 and 1 are reserved for the return value
2736
      result = 33
2737

    
2738
    os._exit(result) # pylint: disable-msg=W0212
2739

    
2740
  # Parent process
2741

    
2742
  # Avoid zombies and check exit code
2743
  (_, status) = os.waitpid(pid, 0)
2744

    
2745
  if os.WIFSIGNALED(status):
2746
    exitcode = None
2747
    signum = os.WTERMSIG(status)
2748
  else:
2749
    exitcode = os.WEXITSTATUS(status)
2750
    signum = None
2751

    
2752
  if not (exitcode in (0, 1) and signum is None):
2753
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
2754
                              (exitcode, signum))
2755

    
2756
  return bool(exitcode)
2757

    
2758

    
2759
def LockedMethod(fn):
2760
  """Synchronized object access decorator.
2761

2762
  This decorator is intended to protect access to an object using the
2763
  object's own lock which is hardcoded to '_lock'.
2764

2765
  """
2766
  def _LockDebug(*args, **kwargs):
2767
    if debug_locks:
2768
      logging.debug(*args, **kwargs)
2769

    
2770
  def wrapper(self, *args, **kwargs):
2771
    # pylint: disable-msg=W0212
2772
    assert hasattr(self, '_lock')
2773
    lock = self._lock
2774
    _LockDebug("Waiting for %s", lock)
2775
    lock.acquire()
2776
    try:
2777
      _LockDebug("Acquired %s", lock)
2778
      result = fn(self, *args, **kwargs)
2779
    finally:
2780
      _LockDebug("Releasing %s", lock)
2781
      lock.release()
2782
      _LockDebug("Released %s", lock)
2783
    return result
2784
  return wrapper
2785

    
2786

    
2787
def LockFile(fd):
2788
  """Locks a file using POSIX locks.
2789

2790
  @type fd: int
2791
  @param fd: the file descriptor we need to lock
2792

2793
  """
2794
  try:
2795
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
2796
  except IOError, err:
2797
    if err.errno == errno.EAGAIN:
2798
      raise errors.LockError("File already locked")
2799
    raise
2800

    
2801

    
2802
def FormatTime(val):
2803
  """Formats a time value.
2804

2805
  @type val: float or None
2806
  @param val: the timestamp as returned by time.time()
2807
  @return: a string value or N/A if we don't have a valid timestamp
2808

2809
  """
2810
  if val is None or not isinstance(val, (int, float)):
2811
    return "N/A"
2812
  # these two format codes work on Linux, but they are not guaranteed on all
2813
  # platforms
2814
  return time.strftime("%F %T", time.localtime(val))
2815

    
2816

    
2817
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
2818
  """Reads the watcher pause file.
2819

2820
  @type filename: string
2821
  @param filename: Path to watcher pause file
2822
  @type now: None, float or int
2823
  @param now: Current time as Unix timestamp
2824
  @type remove_after: int
2825
  @param remove_after: Remove watcher pause file after specified amount of
2826
    seconds past the pause end time
2827

2828
  """
2829
  if now is None:
2830
    now = time.time()
2831

    
2832
  try:
2833
    value = ReadFile(filename)
2834
  except IOError, err:
2835
    if err.errno != errno.ENOENT:
2836
      raise
2837
    value = None
2838

    
2839
  if value is not None:
2840
    try:
2841
      value = int(value)
2842
    except ValueError:
2843
      logging.warning(("Watcher pause file (%s) contains invalid value,"
2844
                       " removing it"), filename)
2845
      RemoveFile(filename)
2846
      value = None
2847

    
2848
    if value is not None:
2849
      # Remove file if it's outdated
2850
      if now > (value + remove_after):
2851
        RemoveFile(filename)
2852
        value = None
2853

    
2854
      elif now > value:
2855
        value = None
2856

    
2857
  return value
2858

    
2859

    
2860
class RetryTimeout(Exception):
2861
  """Retry loop timed out.
2862

2863
  """
2864

    
2865

    
2866
class RetryAgain(Exception):
2867
  """Retry again.
2868

2869
  """
2870

    
2871

    
2872
class _RetryDelayCalculator(object):
2873
  """Calculator for increasing delays.
2874

2875
  """
2876
  __slots__ = [
2877
    "_factor",
2878
    "_limit",
2879
    "_next",
2880
    "_start",
2881
    ]
2882

    
2883
  def __init__(self, start, factor, limit):
2884
    """Initializes this class.
2885

2886
    @type start: float
2887
    @param start: Initial delay
2888
    @type factor: float
2889
    @param factor: Factor for delay increase
2890
    @type limit: float or None
2891
    @param limit: Upper limit for delay or None for no limit
2892

2893
    """
2894
    assert start > 0.0
2895
    assert factor >= 1.0
2896
    assert limit is None or limit >= 0.0
2897

    
2898
    self._start = start
2899
    self._factor = factor
2900
    self._limit = limit
2901

    
2902
    self._next = start
2903

    
2904
  def __call__(self):
2905
    """Returns current delay and calculates the next one.
2906

2907
    """
2908
    current = self._next
2909

    
2910
    # Update for next run
2911
    if self._limit is None or self._next < self._limit:
2912
      self._next = min(self._limit, self._next * self._factor)
2913

    
2914
    return current
2915

    
2916

    
2917
#: Special delay to specify whole remaining timeout
2918
RETRY_REMAINING_TIME = object()
2919

    
2920

    
2921
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
2922
          _time_fn=time.time):
2923
  """Call a function repeatedly until it succeeds.
2924

2925
  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
2926
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
2927
  total of C{timeout} seconds, this function throws L{RetryTimeout}.
2928

2929
  C{delay} can be one of the following:
2930
    - callable returning the delay length as a float
2931
    - Tuple of (start, factor, limit)
2932
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
2933
      useful when overriding L{wait_fn} to wait for an external event)
2934
    - A static delay as a number (int or float)
2935

2936
  @type fn: callable
2937
  @param fn: Function to be called
2938
  @param delay: Either a callable (returning the delay), a tuple of (start,
2939
                factor, limit) (see L{_RetryDelayCalculator}),
2940
                L{RETRY_REMAINING_TIME} or a number (int or float)
2941
  @type timeout: float
2942
  @param timeout: Total timeout
2943
  @type wait_fn: callable
2944
  @param wait_fn: Waiting function
2945
  @return: Return value of function
2946

2947
  """
2948
  assert callable(fn)
2949
  assert callable(wait_fn)
2950
  assert callable(_time_fn)
2951

    
2952
  if args is None:
2953
    args = []
2954

    
2955
  end_time = _time_fn() + timeout
2956

    
2957
  if callable(delay):
2958
    # External function to calculate delay
2959
    calc_delay = delay
2960

    
2961
  elif isinstance(delay, (tuple, list)):
2962
    # Increasing delay with optional upper boundary
2963
    (start, factor, limit) = delay
2964
    calc_delay = _RetryDelayCalculator(start, factor, limit)
2965

    
2966
  elif delay is RETRY_REMAINING_TIME:
2967
    # Always use the remaining time
2968
    calc_delay = None
2969

    
2970
  else:
2971
    # Static delay
2972
    calc_delay = lambda: delay
2973

    
2974
  assert calc_delay is None or callable(calc_delay)
2975

    
2976
  while True:
2977
    try:
2978
      # pylint: disable-msg=W0142
2979
      return fn(*args)
2980
    except RetryAgain:
2981
      pass
2982
    except RetryTimeout:
2983
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
2984
                                   " handle RetryTimeout")
2985

    
2986
    remaining_time = end_time - _time_fn()
2987

    
2988
    if remaining_time < 0.0:
2989
      raise RetryTimeout()
2990

    
2991
    assert remaining_time >= 0.0
2992

    
2993
    if calc_delay is None:
2994
      wait_fn(remaining_time)
2995
    else:
2996
      current_delay = calc_delay()
2997
      if current_delay > 0.0:
2998
        wait_fn(current_delay)
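# Usage sketch (illustrative only; _IsReady is a hypothetical helper): poll an
# external condition with an increasing delay, giving up after 30 seconds.
#   def _CheckReady():
#     if not _IsReady():
#       raise RetryAgain()
#   try:
#     Retry(_CheckReady, (0.1, 1.5, 5.0), 30.0)
#   except RetryTimeout:
#     pass  # handle the timeout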
2999

    
3000

    
3001
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(fd)
  return path
3008

    
3009

    
3010
def GenerateSelfSignedX509Cert(common_name, validity):
3011
  """Generates a self-signed X509 certificate.
3012

3013
  @type common_name: string
3014
  @param common_name: commonName value
3015
  @type validity: int
3016
  @param validity: Validity for certificate in seconds
3017

3018
  """
3019
  # Create private and public key
3020
  key = OpenSSL.crypto.PKey()
3021
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
3022

    
3023
  # Create self-signed certificate
3024
  cert = OpenSSL.crypto.X509()
3025
  if common_name:
3026
    cert.get_subject().CN = common_name
3027
  cert.set_serial_number(1)
3028
  cert.gmtime_adj_notBefore(0)
3029
  cert.gmtime_adj_notAfter(validity)
3030
  cert.set_issuer(cert.get_subject())
3031
  cert.set_pubkey(key)
3032
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
3033

    
3034
  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
3035
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
3036

    
3037
  return (key_pem, cert_pem)
3038

    
3039

    
3040
def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
3041
  """Legacy function to generate self-signed X509 certificate.
3042

3043
  """
3044
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
3045
                                                   validity * 24 * 60 * 60)
3046

    
3047
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3048

    
3049

    
3050
class FileLock(object):
3051
  """Utility class for file locks.
3052

3053
  """
3054
  def __init__(self, fd, filename):
3055
    """Constructor for FileLock.
3056

3057
    @type fd: file
3058
    @param fd: File object
3059
    @type filename: str
3060
    @param filename: Path of the file opened at I{fd}
3061

3062
    """
3063
    self.fd = fd
3064
    self.filename = filename
3065

    
3066
  @classmethod
3067
  def Open(cls, filename):
3068
    """Creates and opens a file to be used as a file-based lock.
3069

3070
    @type filename: string
3071
    @param filename: path to the file to be locked
3072

3073
    """
3074
    # Using "os.open" is necessary to allow both opening existing file
3075
    # read/write and creating if not existing. Vanilla "open" will truncate an
3076
    # existing file -or- allow creating if not existing.
3077
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
3078
               filename)
3079

    
3080
  def __del__(self):
3081
    self.Close()
3082

    
3083
  def Close(self):
3084
    """Close the file and release the lock.
3085

3086
    """
3087
    if hasattr(self, "fd") and self.fd:
3088
      self.fd.close()
3089
      self.fd = None
3090

    
3091
  def _flock(self, flag, blocking, timeout, errmsg):
3092
    """Wrapper for fcntl.flock.
3093

3094
    @type flag: int
3095
    @param flag: operation flag
3096
    @type blocking: bool
3097
    @param blocking: whether the operation should be done in blocking mode.
3098
    @type timeout: None or float
3099
    @param timeout: for how long the operation should be retried (implies
3100
                    non-blocking mode).
3101
    @type errmsg: string
3102
    @param errmsg: error message in case operation fails.
3103

3104
    """
3105
    assert self.fd, "Lock was closed"
3106
    assert timeout is None or timeout >= 0, \
3107
      "If specified, timeout must be positive"
3108
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
3109

    
3110
    # When a timeout is used, LOCK_NB must always be set
3111
    if not (timeout is None and blocking):
3112
      flag |= fcntl.LOCK_NB
3113

    
3114
    if timeout is None:
3115
      self._Lock(self.fd, flag, timeout)
3116
    else:
3117
      try:
3118
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
3119
              args=(self.fd, flag, timeout))
3120
      except RetryTimeout:
3121
        raise errors.LockError(errmsg)
3122

    
3123
  @staticmethod
3124
  def _Lock(fd, flag, timeout):
3125
    try:
3126
      fcntl.flock(fd, flag)
3127
    except IOError, err:
3128
      if timeout is not None and err.errno == errno.EAGAIN:
3129
        raise RetryAgain()
3130

    
3131
      logging.exception("fcntl.flock failed")
3132
      raise
3133

    
3134
  def Exclusive(self, blocking=False, timeout=None):
3135
    """Locks the file in exclusive mode.
3136

3137
    @type blocking: boolean
3138
    @param blocking: whether to block and wait until we
3139
        can lock the file or return immediately
3140
    @type timeout: int or None
3141
    @param timeout: if not None, the duration to wait for the lock
3142
        (in blocking mode)
3143

3144
    """
3145
    self._flock(fcntl.LOCK_EX, blocking, timeout,
3146
                "Failed to lock %s in exclusive mode" % self.filename)
3147

    
3148
  def Shared(self, blocking=False, timeout=None):
3149
    """Locks the file in shared mode.
3150

3151
    @type blocking: boolean
3152
    @param blocking: whether to block and wait until we
3153
        can lock the file or return immediately
3154
    @type timeout: int or None
3155
    @param timeout: if not None, the duration to wait for the lock
3156
        (in blocking mode)
3157

3158
    """
3159
    self._flock(fcntl.LOCK_SH, blocking, timeout,
3160
                "Failed to lock %s in shared mode" % self.filename)
3161

    
3162
  def Unlock(self, blocking=True, timeout=None):
3163
    """Unlocks the file.
3164

3165
    According to C{flock(2)}, unlocking can also be a nonblocking
3166
    operation::
3167

3168
      To make a non-blocking request, include LOCK_NB with any of the above
3169
      operations.
3170

3171
    @type blocking: boolean
3172
    @param blocking: whether to block and wait until we
3173
        can lock the file or return immediately
3174
    @type timeout: int or None
3175
    @param timeout: if not None, the duration to wait for the lock
3176
        (in blocking mode)
3177

3178
    """
3179
    self._flock(fcntl.LOCK_UN, blocking, timeout,
3180
                "Failed to unlock %s" % self.filename)
3181

    
3182

    
3183
class LineSplitter:
3184
  """Splits data chunks into lines separated by newline.
3185

3186
  Instances provide a file-like interface.
3187

3188
  """
3189
  def __init__(self, line_fn, *args):
3190
    """Initializes this class.
3191

3192
    @type line_fn: callable
3193
    @param line_fn: Function called for each line, first parameter is line
3194
    @param args: Extra arguments for L{line_fn}
3195

3196
    """
3197
    assert callable(line_fn)
3198

    
3199
    if args:
3200
      # Python 2.4 doesn't have functools.partial yet
3201
      self._line_fn = \
3202
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
3203
    else:
3204
      self._line_fn = line_fn
3205

    
3206
    self._lines = collections.deque()
3207
    self._buffer = ""
3208

    
3209
  def write(self, data):
3210
    parts = (self._buffer + data).split("\n")
3211
    self._buffer = parts.pop()
3212
    self._lines.extend(parts)
3213

    
3214
  def flush(self):
3215
    while self._lines:
3216
      self._line_fn(self._lines.popleft().rstrip("\r\n"))
3217

    
3218
  def close(self):
3219
    self.flush()
3220
    if self._buffer:
3221
      self._line_fn(self._buffer)
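# Usage sketch (illustrative only): arbitrary chunks go in, complete lines come
# out through the callback.
#   collected = []
#   splitter = LineSplitter(collected.append)
#   splitter.write("foo\nba")
#   splitter.write("r\n")
#   splitter.close()  # collected == ["foo", "bar"]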
3222

    
3223

    
3224
def SignalHandled(signums):
3225
  """Signal Handled decoration.
3226

3227
  This special decorator installs a signal handler and then calls the target
3228
  function. The function must accept a 'signal_handlers' keyword argument,
3229
  which will contain a dict indexed by signal number, with SignalHandler
3230
  objects as values.
3231

3232
  The decorator can be safely stacked with itself, to handle multiple signals
3233
  with different handlers.
3234

3235
  @type signums: list
3236
  @param signums: signals to intercept
3237

3238
  """
3239
  def wrap(fn):
3240
    def sig_function(*args, **kwargs):
3241
      assert 'signal_handlers' not in kwargs or \
3242
             kwargs['signal_handlers'] is None or \
3243
             isinstance(kwargs['signal_handlers'], dict), \
3244
             "Wrong signal_handlers parameter in original function call"
3245
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
3246
        signal_handlers = kwargs['signal_handlers']
3247
      else:
3248
        signal_handlers = {}
3249
        kwargs['signal_handlers'] = signal_handlers
3250
      sighandler = SignalHandler(signums)
3251
      try:
3252
        for sig in signums:
3253
          signal_handlers[sig] = sighandler
3254
        return fn(*args, **kwargs)
3255
      finally:
3256
        sighandler.Reset()
3257
    return sig_function
3258
  return wrap
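# Usage sketch (illustrative only; the decorated function is hypothetical): the
# wrapper passes the installed handlers via the "signal_handlers" keyword.
#   @SignalHandled([signal.SIGTERM])
#   def _MainLoop(signal_handlers=None):
#     while not signal_handlers[signal.SIGTERM].called:
#       pass  # ... do work ...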
3259

    
3260

    
3261
class SignalWakeupFd(object):
3262
  try:
3263
    # This is only supported in Python 2.5 and above (some distributions
3264
    # backported it to Python 2.4)
3265
    _set_wakeup_fd_fn = signal.set_wakeup_fd
3266
  except AttributeError:
3267
    # Not supported
3268
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
3269
      return -1
3270
  else:
3271
    def _SetWakeupFd(self, fd):
3272
      return self._set_wakeup_fd_fn(fd)
3273

    
3274
  def __init__(self):
3275
    """Initializes this class.
3276

3277
    """
3278
    (read_fd, write_fd) = os.pipe()
3279

    
3280
    # Once these succeeded, the file descriptors will be closed automatically.
3281
    # Buffer size 0 is important, otherwise .read() with a specified length
3282
    # might buffer data and the file descriptors won't be marked readable.
3283
    self._read_fh = os.fdopen(read_fd, "r", 0)
3284
    self._write_fh = os.fdopen(write_fd, "w", 0)
3285

    
3286
    self._previous = self._SetWakeupFd(self._write_fh.fileno())
3287

    
3288
    # Utility functions
3289
    self.fileno = self._read_fh.fileno
3290
    self.read = self._read_fh.read
3291

    
3292
  def Reset(self):
3293
    """Restores the previous wakeup file descriptor.
3294

3295
    """
3296
    if hasattr(self, "_previous") and self._previous is not None:
3297
      self._SetWakeupFd(self._previous)
3298
      self._previous = None
3299

    
3300
  def Notify(self):
3301
    """Notifies the wakeup file descriptor.
3302

3303
    """
3304
    self._write_fh.write("\0")
3305

    
3306
  def __del__(self):
3307
    """Called before object deletion.
3308

3309
    """
3310
    self.Reset()
3311

    
3312

    
3313
class SignalHandler(object):
3314
  """Generic signal handler class.
3315

3316
  It automatically restores the original handler when deconstructed or
3317
  when L{Reset} is called. You can either pass your own handler
3318
  function in or query the L{called} attribute to detect whether the
3319
  signal was sent.
3320

3321
  @type signum: list
3322
  @ivar signum: the signals we handle
3323
  @type called: boolean
3324
  @ivar called: tracks whether any of the signals have been raised
3325

3326
  """
3327
  def __init__(self, signum, handler_fn=None, wakeup=None):
3328
    """Constructs a new SignalHandler instance.
3329

3330
    @type signum: int or list of ints
3331
    @param signum: Single signal number or set of signal numbers
3332
    @type handler_fn: callable
3333
    @param handler_fn: Signal handling function
3334

3335
    """
3336
    assert handler_fn is None or callable(handler_fn)
3337

    
3338
    self.signum = set(signum)
3339
    self.called = False
3340

    
3341
    self._handler_fn = handler_fn
3342
    self._wakeup = wakeup
3343

    
3344
    self._previous = {}
3345
    try:
3346
      for signum in self.signum:
3347
        # Setup handler
3348
        prev_handler = signal.signal(signum, self._HandleSignal)
3349
        try:
3350
          self._previous[signum] = prev_handler
3351
        except:
3352
          # Restore previous handler
3353
          signal.signal(signum, prev_handler)
3354
          raise
3355
    except:
3356
      # Reset all handlers
3357
      self.Reset()
3358
      # Here we have a race condition: a handler may have already been called,
3359
      # but there's not much we can do about it at this point.
3360
      raise
3361

    
3362
  def __del__(self):
3363
    self.Reset()
3364

    
3365
  def Reset(self):
3366
    """Restore previous handler.
3367

3368
    This will reset all the signals to their previous handlers.
3369

3370
    """
3371
    for signum, prev_handler in self._previous.items():
3372
      signal.signal(signum, prev_handler)
3373
      # If successful, remove from dict
3374
      del self._previous[signum]
3375

    
3376
  def Clear(self):
3377
    """Unsets the L{called} flag.
3378

3379
    This function can be used in case a signal may arrive several times.
3380

3381
    """
3382
    self.called = False
3383

    
3384
  def _HandleSignal(self, signum, frame):
3385
    """Actual signal handling function.
3386

3387
    """
3388
    # This is not nice and not absolutely atomic, but it appears to be the only
3389
    # solution in Python -- there are no atomic types.
3390
    self.called = True
3391

    
3392
    if self._wakeup:
3393
      # Notify whoever is interested in signals
3394
      self._wakeup.Notify()
3395

    
3396
    if self._handler_fn:
3397
      self._handler_fn(signum, frame)
3398

    
3399

    
3400
class FieldSet(object):
3401
  """A simple field set.
3402

3403
  Among the features are:
3404
    - checking if a string is among a list of static string or regex objects
3405
    - checking if a whole list of string matches
3406
    - returning the matching groups from a regex match
3407

3408
  Internally, all fields are held as regular expression objects.
3409

3410
  """
3411
  def __init__(self, *items):
3412
    self.items = [re.compile("^%s$" % value) for value in items]
3413

    
3414
  def Extend(self, other_set):
3415
    """Extend the field set with the items from another one"""
3416
    self.items.extend(other_set.items)
3417

    
3418
  def Matches(self, field):
3419
    """Checks if a field matches the current set
3420

3421
    @type field: str
3422
    @param field: the string to match
3423
    @return: either None or a regular expression match object
3424

3425
    """
3426
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
3427
      return m
3428
    return None
3429

    
3430
  def NonMatching(self, items):
3431
    """Returns the list of fields not matching the current set
3432

3433
    @type items: list
3434
    @param items: the list of fields to check
3435
    @rtype: list
3436
    @return: list of non-matching fields
3437

3438
    """
3439
    return [val for val in items if not self.Matches(val)]
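# Usage sketch (illustrative only): static names and regular expressions can be
# mixed; Matches returns a regex match object or None.
#   fs = FieldSet("name", "size", r"disk\.(\d+)")
#   fs.Matches("disk.0").group(1)    # -> "0"
#   fs.NonMatching(["name", "foo"])  # -> ["foo"]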