4 # Copyright (C) 2006, 2007, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti utility module.
24 This module holds functions that can be used in both daemons (all) and
25 the command line scripts.
45 import logging.handlers
53 from cStringIO import StringIO
56 # pylint: disable-msg=F0401
61 from ganeti import errors
62 from ganeti import constants
63 from ganeti import compat
# Characters considered safe to pass to a shell without quoting.
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

#: when set to True, L{RunCmd} is disabled
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# NOTE(review): despite the name this class also matches g-z/G-Z, not only
# hexadecimal digits — confirm whether [a-fA-F0-9] was intended.
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
# NOTE(review): the closing arguments/paren of this re.compile call are not
# visible in this excerpt.
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),

# Service names: letters, digits, dot, dash, underscore; at most 128 chars.
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Canonical lowercase-hex UUID format (8-4-4-4-12).
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                     '[a-f0-9]{4}-[a-f0-9]{12}$')

# Certificate verification results
# NOTE(review): the opening "(CERT_WARNING," of this tuple unpack is missing
# from this excerpt.
CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
# Case-insensitive MAC address check (six colon-separated hex octets).
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)

# NOTE(review): the opening "(_TIMEOUT_NONE, _TIMEOUT_TERM," of this tuple
# unpack is missing from this excerpt.
_TIMEOUT_KILL) = range(3)
# NOTE(review): several lines of this class are missing from this excerpt
# (assignments of stdout/stderr/cmd, the fail_msgs initialisation and the
# closing paren of __init__'s signature); remaining code kept verbatim.
class RunResult(object):
  """Holds the result of running external programs.

  @ivar exit_code: the exit code of the program, or None (if the program
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @ivar stdout: the standard output of the program
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  # NOTE(review): signature truncated here — the trailing "timeout):" part is
  # not visible in this excerpt.
  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
    self.exit_code = exit_code
    self.signal = signal_

    # Failure means either a signal-based termination or a non-zero exit code.
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      fail_msgs.append("terminated by signal %s" % self.signal)
    elif self.exit_code is not None:
      fail_msgs.append("exited with exit code %s" % self.exit_code)
      # NOTE(review): the "else:" introducing this fallback appears missing.
      fail_msgs.append("unable to determine termination reason")

    if timeout_action == _TIMEOUT_TERM:
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
    elif timeout_action == _TIMEOUT_KILL:
      fail_msgs.append(("force termination after timeout of %.2f seconds"
                        " and linger for another %.2f seconds") %
                       (timeout, constants.CHILD_LINGER_TIMEOUT))

    if fail_msgs and self.failed:
      self.fail_reason = CommaJoin(fail_msgs)

      # Only failed commands are logged here, and their full output included.
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
# NOTE(review): the `reset` handling, the merge of `env` and the final
# return of cmd_env are not visible in this excerpt.
def _BuildCmdEnvironment(env, reset):
  """Builds the environment for an external program.

  """
  # Start from a copy of the current environment and force a neutral
  # locale so tool output is predictable/parseable.
  cmd_env = os.environ.copy()
  cmd_env["LC_ALL"] = "C"
# NOTE(review): this excerpt is missing the no_fork guard header, the
# string-vs-list branch, the try/except around child execution and the
# exit-code/signal decoding; remaining code kept verbatim.
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False, timeout=None):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @param env: Additional environment variables
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether we pipe stdin, stdout and stderr
      (default behaviour) or run the command interactive
  @param timeout: If not None, timeout in seconds until child process gets
      killed
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  # NOTE(review): presumably guarded by an "if no_fork:" check, missing here.
  raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")

  if isinstance(cmd, basestring):
  cmd = [str(val) for val in cmd]
  strcmd = ShellQuoteArgs(cmd)

  logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
                                                 interactive, timeout)
  # When output goes to a file there is nothing to time out or capture.
  timeout_action = _TIMEOUT_NONE
  status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)

  # Translate a missing executable into a friendlier error.
  if err.errno == errno.ENOENT:
    raise errors.OpExecError("Can't execute '%s': not found (%s)" %

  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
# NOTE(review): the function body (chdir/umask/setsid calls) is not visible
# in this excerpt.
def SetupDaemonEnv(cwd="/", umask=077):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @param cwd: the directory to which to chdir
  @param umask: the umask to setup

  """
# NOTE(review): several lines are missing from this excerpt (the body of the
# "output_fd is not None" branch, the try: header and the else: arm before
# the /dev/null fallback); remaining code kept verbatim.
def SetupDaemonFDs(output_file, output_fd):
  """Sets up a daemon's file descriptors.

  @param output_file: if not None, the file to which to redirect
      stdout/stderr
  @param output_fd: if not None, the file descriptor for stdout/stderr

  """
  # check that at most one is defined
  assert [output_file, output_fd].count(None) >= 1

  # Open /dev/null (read-only, only for stdin)
  devnull_fd = os.open(os.devnull, os.O_RDONLY)

  if output_fd is not None:
  elif output_file is not None:
      output_fd = os.open(output_file,
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
    except EnvironmentError, err:
      raise Exception("Opening output file failed: %s" % err)
    # Fallback: discard stdout/stderr.
    output_fd = os.open(os.devnull, os.O_WRONLY)

  # Redirect standard I/O
  os.dup2(devnull_fd, 0)
  os.dup2(output_fd, 1)
  os.dup2(output_fd, 2)
# NOTE(review): this excerpt is missing the no_fork guard header, the
# trailing "pidfile=None):" of the signature, the fork() call and several
# try/finally lines; remaining code kept verbatim.
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @param env: Additional environment variables
  @param cwd: Working directory for the program
  @param output: Path to file in which to save the output
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"

  # Exactly one of 'output' and 'output_fd' may be given.
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"

  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()

  # Create pipe for sending error messages
  (errpipe_read, errpipe_write) = os.pipe()

  # Child process, won't return
  _StartDaemonChild(errpipe_read, errpipe_write,
                    pidpipe_read, pidpipe_write,
                    output, output_fd, pidfile)

  # Well, maybe child process failed
  os._exit(1) # pylint: disable-msg=W0212

  _CloseFDNoErr(errpipe_write)

  # Wait for daemon to be started (or an error message to
  # arrive) and read up to 100 KB as an error message
  errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)

  _CloseFDNoErr(errpipe_read)

  _CloseFDNoErr(pidpipe_write)

  # Read up to 128 bytes for PID
  pidtext = RetryOnSignal(os.read, pidpipe_read, 128)

  _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  raise errors.OpExecError("Error when starting daemon process: %r" %

  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
# NOTE(review): this excerpt is missing the trailing "args, env," part of the
# signature, the fork() calls, the try: headers and the pidfile conditional;
# remaining code kept verbatim.
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  """
  # Close parent's side
  _CloseFDNoErr(errpipe_read)
  _CloseFDNoErr(pidpipe_read)

  # First child process

  # And fork for the second time

  # Exit first child process
  os._exit(0) # pylint: disable-msg=W0212

  # Make sure pipe is closed on execv* (and thereby notifies
  SetCloseOnExecFlag(errpipe_write, True)

  # List of file descriptors to be left open
  noclose_fds = [errpipe_write]

  fd_pidfile = WritePidFile(pidfile)

  # Keeping the file open to hold the lock
  noclose_fds.append(fd_pidfile)

  SetCloseOnExecFlag(fd_pidfile, False)

  SetupDaemonFDs(output, fd_output)

  # Send daemon PID to parent
  RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

  # Close all file descriptors except stdio and error message pipe
  CloseFDs(noclose_fds=noclose_fds)

  # Change working directory

  os.execvp(args[0], args)
  os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    # Report errors to original process
    WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
  except: # pylint: disable-msg=W0702
    # Ignore errors in error handling

  os._exit(1) # pylint: disable-msg=W0212
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  # Honour the documented "if not None" contract; as excerpted, the guard
  # was missing and err was unconditionally clobbered.
  if fd is None:
    return

  # Never send an empty message — the parent treats any data as an error.
  if not err:
    err = "<unknown error>"

  RetryOnSignal(os.write, fd, err)
def _CheckIfAlive(child):
  """Raises L{RetryAgain} if child is still alive.

  @raises RetryAgain: If child is still alive

  """
  # poll() returns None while the process is running; signal the Retry
  # machinery to try again (the raise was missing in this excerpt).
  if child.poll() is None:
    raise RetryAgain()
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  """
  # Poll the child at 1.0s intervals (backing off by 1.2x, capped at 5.0s)
  # until it exits or the overall timeout elapses.
  # NOTE(review): presumably wrapped in try/except RetryTimeout in the full
  # source — the surrounding lines are not visible in this excerpt.
  Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
# NOTE(review): this excerpt is missing the interactive/non-interactive
# branch headers, parts of the Popen call, the StringIO buffers, the fdmap
# dict header, the main poll loop header and the linger-timeout arithmetic;
# remaining code kept verbatim.
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type cmd: string or list
  @param cmd: Command to run
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @param timeout: Timeout after the programm gets terminated
  @return: (out, err, status)

  """
  poller = select.poll()

  # Non-interactive: capture all three standard streams.
  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  # Interactive: inherit the caller's streams.
  stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           close_fds=True, env=env,

  linger_timeout = None

  # Remaining() yields seconds left until the deadline.
  poll_timeout = RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %

  timeout_action = _TIMEOUT_NONE

  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    SetNonblockFlag(fd, True)

  current_timeout = poll_timeout()
  if current_timeout < 0:
    if linger_timeout is None:
      logging.warning(msg_timeout)
      if child.poll() is None:
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
      linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
    lt = linger_timeout()

  pollresult = RetryOnSignal(poller.poll, pt)

  for fd, event in pollresult:
    if event & select.POLLIN or event & select.POLLPRI:
      data = fdmap[fd][1].read()
      # no data from read signifies EOF (the same as POLLHUP)
        poller.unregister(fd)
      fdmap[fd][0].write(data)
    if (event & select.POLLNVAL or event & select.POLLHUP or
        event & select.POLLERR):
      poller.unregister(fd)

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
      lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  status = child.wait()
  return out, err, status, timeout_action
# NOTE(review): the try/finally around the child (and the fh.close()) and
# the stdout=fh argument are not visible in this excerpt.
def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type cmd: string or list
  @param cmd: Command to run
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @param output: the filename in which to save the output
  @param cwd: the working directory for the program
  @return: the exit status

  """
  # Append mode so repeated runs accumulate output rather than truncating.
  fh = open(output, "a")

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.STDOUT,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,

  status = child.wait()
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  # Restore the conditional: as excerpted both the set and the clear
  # operation ran unconditionally, so the flag always ended up cleared.
  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  # Restore the conditional: as excerpted both the set and the clear
  # operation ran unconditionally, so the flag always ended up cleared.
  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# NOTE(review): the "while True:"/"try:" headers and the re-raise statements
# are not visible in this excerpt; remaining code kept verbatim.
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  """
  return fn(*args, **kwargs)
  except EnvironmentError, err:
    # Only retry on EINTR; everything else is propagated.
    if err.errno != errno.EINTR:
  except (socket.error, select.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if not (err.args and err.args[0] == errno.EINTR):
# NOTE(review): this excerpt is missing the "rr = []" initialisation, the
# try/except around the directory listing, the "continue" after the SKIP
# append, the "try:" before RunCmd and the final "return rr"; remaining
# code kept verbatim.
def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  dir_contents = ListVisibleFiles(dir_name)
  logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    # Only executable regular files matching the plugin-name mask are run.
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
      result = RunCmd([fname], env=env, reset_env=reset_env)
    except Exception, err: # pylint: disable-msg=W0703
      rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      rr.append((relname, constants.RUNPARTS_RUN, result))
# NOTE(review): the try/os.unlink lines and the re-raise are not visible
# in this excerpt.
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @param filename: the file to be removed

  """
  # ENOENT (already gone) and EISDIR (it's a directory) are ignored.
  if err.errno not in (errno.ENOENT, errno.EISDIR):
# NOTE(review): the try/os.rmdir lines and the re-raise are not visible
# in this excerpt.
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @param dirname: the empty directory to be removed

  """
  # Only "does not exist" is silently ignored.
  if err.errno != errno.ENOENT:
# NOTE(review): the try/except around the first rename and the re-raise for
# other errors are not visible in this excerpt.
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
  """Renames a file, optionally creating the target directory.

  @param old: Original path
  @param new: New path
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  return os.rename(old, new)

  # In at least one use case of this function, the job queue, directory
  # creation is very rare. Checking for the directory before renaming is not
  # worth the extra stat call.
  if mkdir and err.errno == errno.ENOENT:
    # Create directory and try again
    Makedirs(os.path.dirname(new), mode=mkdir_mode)

    return os.rename(old, new)
# NOTE(review): the try/except around os.makedirs and the re-raise are not
# visible in this excerpt.
def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not implemented
  before Python 2.5.

  """
  os.makedirs(path, mode)

  # Ignore EEXIST. This is only handled in os.makedirs as included in
  # Python 2.5 and above.
  if err.errno != errno.EEXIST or not os.path.exists(path):
# NOTE(review): the try/finally around the lock and the else: arm before the
# critical log call are not visible in this excerpt.
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    # Reset random name generator
    tempfile._name_sequence = None
    tempfile._once_lock.release()
  logging.critical("The tempfile module misses at least one of the"
                   " '_once_lock' and '_name_sequence' attributes")
# NOTE(review): the "return None" for missing files, the file-open and the
# chunked read/update loop are not visible in this excerpt.
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @param filename: the filename to checksum
  @return: the hex digest of the sha checksum of the contents

  """
  # Only regular, existing files are fingerprinted.
  if not (os.path.exists(filename) and os.path.isfile(filename)):

  fp = compat.sha1_hash()

  return fp.hexdigest()
# NOTE(review): the "ret = {}" initialisation, the None-check before storing
# and the final "return ret" are not visible in this excerpt.
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @param files: the list of filename to fingerprint
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  for filename in files:
    cksum = _FingerprintFile(filename)
    # Files that could not be fingerprinted are skipped.
    ret[filename] = cksum
# NOTE(review): this excerpt is missing the "for key in target:" loop header,
# several "continue"/assignment lines in the per-type branches and the else:
# arms; remaining code kept verbatim.
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
      in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass type enforcement entirely.
    if target[key] in allowed_values:

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # None is acceptable for "maybe string" values.
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
      elif not isinstance(target[key], basestring):
        if isinstance(target[key], bool) and not target[key]:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      # String booleans are parsed against the canonical true/false values.
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
        elif target[key].lower() == constants.VALUE_TRUE:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_SIZE:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
980 def _GetProcStatusPath(pid):
981 """Returns the path for a PID's proc status file.
984 @param pid: Process ID
988 return "/proc/%d/status" % pid
# NOTE(review): this excerpt is missing the nested "def _TryStat(name):"
# helper header (with its os.stat call and returns), the early return for a
# non-positive pid, the try: header and the final return after the
# RetryTimeout handler; remaining code kept verbatim.
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @param pid: the process ID to check
  @return: True if the process exists

  """
    except EnvironmentError, err:
      # Process (or its /proc entry) is gone.
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
      elif err.errno == errno.EINVAL:
        raise RetryAgain(err)

  assert isinstance(pid, int), "pid must be an integer"

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
               args=[_GetProcStatusPath(pid)])
  except RetryTimeout, err:
# NOTE(review): this excerpt is missing the result-set/signum initialisation,
# the per-character hex conversion and the bit-test "if" headers guarding
# each result.add, plus the signum increment and final return.
def _ParseSigsetT(sigset):
  """Parse a rendered sigset_t value.

  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
  function.

  @type sigset: string
  @param sigset: Rendered signal set from /proc/$pid/status
  @return: Set of all enabled signal numbers

  """
  # The kernel renders the set most-significant-nibble first, hence reversed.
  for ch in reversed(sigset):

    # The following could be done in a loop, but it's easier to read and
    # understand in the unrolled form
      result.add(signum + 1)
      result.add(signum + 2)
      result.add(signum + 3)
      result.add(signum + 4)
1060 def _GetProcStatusField(pstatus, field):
1061 """Retrieves a field from the contents of a proc status file.
1063 @type pstatus: string
1064 @param pstatus: Contents of /proc/$pid/status
1066 @param field: Name of field whose value should be returned
1070 for line in pstatus.splitlines():
1071 parts = line.split(":", 1)
1073 if len(parts) < 2 or parts[0] != field:
1076 return parts[1].strip()
# NOTE(review): this excerpt is missing the try: header around ReadFile, the
# "return False" inside the except branch, the re-raise and the "if sigcgt
# is None:" guard before the RuntimeError; remaining code kept verbatim.
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @param pid: Process ID
  @param signum: Signal number

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  proc_status = ReadFile(status_path)
  except EnvironmentError, err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):

  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)
# NOTE(review): this excerpt is missing the try: headers, the "return 0"
# fallbacks and the int() conversion of raw_data; remaining code kept
# verbatim.
def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type pidfile: string
  @param pidfile: path to the file containing the pid
  @return: The process id, if the file exists and contains a valid PID,
      otherwise 0

  """
  raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError, err:
    # A missing pidfile is normal; anything else is logged.
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")

  except (TypeError, ValueError), err:
    logging.info("Can't parse pid file contents", exc_info=True)
# NOTE(review): this excerpt is missing the try: headers, the re-raise, the
# lock-acquisition call itself and the try/finally closing the descriptor;
# remaining code kept verbatim.
def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist

  # Try to acquire lock
  except errors.LockError:
    # Couldn't lock, daemon is running
    return int(os.read(fd, 100))
# NOTE(review): this excerpt is missing the "return key" after the exact-match
# check, the re_flags/key-uppercasing initialisation, the names_filtered and
# string_matches list initialisations and the final "return None".
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # Exact membership short-circuits the prefix matching below.
  if key in name_list:

  if not case_sensitive:
    re_flags |= re.IGNORECASE

  # Match "key" itself or "key." followed by anything.
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)

  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  # A unique full (string) match wins over a unique prefix match.
  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
# NOTE(review): this excerpt is missing the try:/int() conversion, the else:
# arm for numeric ports, the "if not valid:" guard and the final return;
# remaining code kept verbatim.
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification

  """
  except (ValueError, TypeError):
    # Non-numeric service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = (numport >= 0 and numport < (1 << 16))

  raise errors.OpPrereqError("Invalid service name '%s'" % name,
# NOTE(review): this excerpt is missing the result-dict initialisation, the
# failed-command early return, the try: around the split/conversion and the
# dict assignment plus final return; remaining code kept verbatim.
def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
      Dictionary with keys volume name and values
      the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)

  for line in result.stdout.splitlines():
      name, size = line.split()
      # Sizes are reported in MiB with a fractional part; truncate to int.
      size = int(float(size))
    except (IndexError, ValueError), err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: string
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge device exposes a "bridge" subdirectory under sysfs; the
  # docstring was unterminated in the excerpt, swallowing this return.
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")

  # Restored helper: in the excerpt its "def" header and return statements
  # were missing, leaving `val` unbound.
  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    else:
      return int(val)

  # Pair each name with its tuple of (string|int) group keys, sort by the
  # keys, then strip the keys again.
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  # Restored try/return statements missing from the excerpt; the behavior
  # is fully specified by the docstring above.
  try:
    return fn(val)
  except (ValueError, TypeError):
    return val
def IsValidShellParam(word):
  """Verifies if the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @param word: the word to check
  @return: True if the word is 'safe'

  """
  # The docstring was unterminated in the excerpt, swallowing this return.
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @param template: the string holding the template for the
      string formatting
  @return: the expanded command line

  """
  # Restored loop header: in the excerpt "for word in args:" was missing,
  # leaving `word` unbound and the validation dead.
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args
def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @param value: integer representing the value in MiB (1048576)
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  # Suffix is only shown in automatic ('h') mode; as excerpted it was used
  # but never assigned.
  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)
  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  # Restored guard: as excerpted the failure check around the raise was
  # missing.
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    # No unit given: default to MiB.
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass
  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024
  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024
  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
# NOTE(review): this excerpt is missing the cpu_list initialisation, the
# "n_elements > 2" guard, the try: headers, the lower/higher comparison
# guard and the final return; remaining code kept verbatim.
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @param cpu_mask: CPU mask definition
  @return: list of CPU IDs

  """
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    # At most one hyphen (two boundaries) per range definition.
    raise errors.ParseError("Invalid CPU ID range definition"
                            " (only one hyphen allowed): %s" % range_def)
      lower = int(boundaries[0])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
      higher = int(boundaries[-1])
    except (ValueError, TypeError), err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    # Ranges are inclusive on both ends.
    cpu_list.extend(range(lower, higher + 1))
1497 def AddAuthorizedKey(file_obj, key):
1498 """Adds an SSH public key to an authorized_keys file.
1500 @type file_obj: str or file handle
1501 @param file_obj: path to authorized_keys file
1503 @param key: string containing key
1506 key_fields = key.split()
1508 if isinstance(file_obj, basestring):
1509 f = open(file_obj, 'a+')
1516 # Ignore whitespace changes
1517 if line.split() == key_fields:
1519 nl = line.endswith('\n')
1523 f.write(key.rstrip('\r\n'))
1530 def RemoveAuthorizedKey(file_name, key):
1531 """Removes an SSH public key from an authorized_keys file.
1533 @type file_name: str
1534 @param file_name: path to authorized_keys file
1536 @param key: string containing key
1539 key_fields = key.split()
1541 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1543 out = os.fdopen(fd, 'w')
1545 f = open(file_name, 'r')
1548 # Ignore whitespace changes while comparing lines
1549 if line.split() != key_fields:
1553 os.rename(tmpname, file_name)
1563 def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1564 """Sets the name of an IP address and hostname in /etc/hosts.
1566 @type file_name: str
1567 @param file_name: path to the file to modify (usually C{/etc/hosts})
1569 @param ip: the IP address
1571 @param hostname: the hostname to be added
1573 @param aliases: the list of aliases to add for the hostname
1576 # Ensure aliases are unique
1577 aliases = UniqueSequence([hostname] + aliases)[1:]
1579 def _WriteEtcHosts(fd):
1580 # Duplicating file descriptor because os.fdopen's result will automatically
1581 # close the descriptor, but we would still like to have its functionality.
1582 out = os.fdopen(os.dup(fd), "w")
1584 for line in ReadFile(file_name).splitlines(True):
1585 fields = line.split()
1586 if fields and not fields[0].startswith("#") and ip == fields[0]:
1590 out.write("%s\t%s" % (ip, hostname))
1592 out.write(" %s" % " ".join(aliases))
1598 WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
def AddHostToEtcHosts(hostname, ip):
  """Adds a host (with its short name as alias) to L{constants.ETC_HOSTS}.

  Thin convenience wrapper around L{SetEtcHostsEntry}.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
1614 def RemoveEtcHostsEntry(file_name, hostname):
1615 """Removes a hostname from /etc/hosts.
1617 IP addresses without names are removed from the file.
1619 @type file_name: str
1620 @param file_name: path to the file to modify (usually C{/etc/hosts})
1622 @param hostname: the hostname to be removed
1625 def _WriteEtcHosts(fd):
1626 # Duplicating file descriptor because os.fdopen's result will automatically
1627 # close the descriptor, but we would still like to have its functionality.
1628 out = os.fdopen(os.dup(fd), "w")
1630 for line in ReadFile(file_name).splitlines(True):
1631 fields = line.split()
1632 if len(fields) > 1 and not fields[0].startswith("#"):
1634 if hostname in names:
1635 while hostname in names:
1636 names.remove(hostname)
1638 out.write("%s %s\n" % (fields[0], " ".join(names)))
1647 WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
def RemoveHostFromEtcHosts(hostname):
  """Removes a host (full and short name) from L{constants.ETC_HOSTS}.

  Thin convenience wrapper around L{RemoveEtcHostsEntry}.

  @type hostname: str
  @param hostname: hostname whose full and short name will be removed
      from L{constants.ETC_HOSTS}

  """
  for name in (hostname, hostname.split(".")[0]):
    RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
def TimestampForFilename():
  """Returns the current local time formatted for use in filenames.

  The format deliberately contains no colons, as some shells and
  applications treat them as separators.

  @rtype: str
  @return: timestamp in C{YYYY-MM-DD_HH_MM_SS} form

  """
  fmt = "%Y-%m-%d_%H_%M_%S"
  return time.strftime(fmt)
1673 def CreateBackup(file_name):
1674 """Creates a backup of a file.
1676 @type file_name: str
1677 @param file_name: file to be backed up
1679 @return: the path to the newly created backup
1680 @raise errors.ProgrammerError: for invalid file names
1683 if not os.path.isfile(file_name):
1684 raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1687 prefix = ("%s.backup-%s." %
1688 (os.path.basename(file_name), TimestampForFilename()))
1689 dir_name = os.path.dirname(file_name)
1691 fsrc = open(file_name, 'rb')
1693 (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1694 fdst = os.fdopen(fd, 'wb')
1696 logging.debug("Backing up %s at %s", file_name, backup_name)
1697 shutil.copyfileobj(fsrc, fdst)
1706 def ShellQuote(value):
1707 """Quotes shell argument according to POSIX.
1710 @param value: the argument to be quoted
1712 @return: the quoted value
1715 if _re_shell_unquoted.match(value):
1718 return "'%s'" % value.replace("'", "'\\''")
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  quoted = [ShellQuote(arg) for arg in args]
  return " ".join(quoted)
1734 """Helper class to write scripts with indentation.
1739 def __init__(self, fh):
1740 """Initializes this class.
1746 def IncIndent(self):
1747 """Increase indentation level by 1.
1752 def DecIndent(self):
1753 """Decrease indentation level by 1.
1756 assert self._indent > 0
1759 def Write(self, txt, *args):
1760 """Write line to output file.
1763 assert self._indent >= 0
1765 self._fh.write(self._indent * self.INDENT_STR)
1768 self._fh.write(txt % args)
1772 self._fh.write("\n")
1775 def ListVisibleFiles(path):
1776 """Returns a list of visible files in a directory.
1779 @param path: the directory to enumerate
1781 @return: the list of all files not starting with a dot
1782 @raise ProgrammerError: if L{path} is not an absolue and normalized path
1785 if not IsNormAbsPath(path):
1786 raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1787 " absolute/normalized: '%s'" % path)
1788 files = [i for i in os.listdir(path) if not i.startswith(".")]
1792 def GetHomeDir(user, default=None):
1793 """Try to get the homedir of the given user.
1795 The user can be passed either as a string (denoting the name) or as
1796 an integer (denoting the user id). If the user is not found, the
1797 'default' argument is returned, which defaults to None.
1801 if isinstance(user, basestring):
1802 result = pwd.getpwnam(user)
1803 elif isinstance(user, (int, long)):
1804 result = pwd.getpwuid(user)
1806 raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1810 return result.pw_dir
1814 """Returns a random UUID.
1816 @note: This is a Linux-specific method as it uses the /proc
1821 return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1824 def GenerateSecret(numbytes=20):
1825 """Generates a random secret.
1827 This will generate a pseudo-random secret returning an hex string
1828 (so that it can be used where an ASCII string is needed).
1830 @param numbytes: the number of bytes which will be represented by the returned
1831 string (defaulting to 20, the length of a SHA1 hash)
1833 @return: an hex representation of the pseudo-random sequence
1836 return os.urandom(numbytes).encode('hex')
1839 def EnsureDirs(dirs):
1840 """Make required directories, if they don't exist.
1842 @param dirs: list of tuples (dir_name, dir_mode)
1843 @type dirs: list of (string, integer)
1846 for dir_name, dir_mode in dirs:
1848 os.mkdir(dir_name, dir_mode)
1849 except EnvironmentError, err:
1850 if err.errno != errno.EEXIST:
1851 raise errors.GenericError("Cannot create needed directory"
1852 " '%s': %s" % (dir_name, err))
1854 os.chmod(dir_name, dir_mode)
1855 except EnvironmentError, err:
1856 raise errors.GenericError("Cannot change directory permissions on"
1857 " '%s': %s" % (dir_name, err))
1858 if not os.path.isdir(dir_name):
1859 raise errors.GenericError("%s is not a directory" % dir_name)
1862 def ReadFile(file_name, size=-1):
1866 @param size: Read at most size bytes (if negative, entire file)
1868 @return: the (possibly partial) content of the file
1871 f = open(file_name, "r")
1878 def WriteFile(file_name, fn=None, data=None,
1879 mode=None, uid=-1, gid=-1,
1880 atime=None, mtime=None, close=True,
1881 dry_run=False, backup=False,
1882 prewrite=None, postwrite=None):
1883 """(Over)write a file atomically.
1885 The file_name and either fn (a function taking one argument, the
1886 file descriptor, and which should write the data to it) or data (the
1887 contents of the file) must be passed. The other arguments are
1888 optional and allow setting the file mode, owner and group, and the
1889 mtime/atime of the file.
1891 If the function doesn't raise an exception, it has succeeded and the
1892 target file has the new contents. If the function has raised an
1893 exception, an existing target file should be unmodified and the
1894 temporary file should be removed.
1896 @type file_name: str
1897 @param file_name: the target filename
1899 @param fn: content writing function, called with
1900 file descriptor as parameter
1902 @param data: contents of the file
1904 @param mode: file mode
1906 @param uid: the owner of the file
1908 @param gid: the group of the file
1910 @param atime: a custom access time to be set on the file
1912 @param mtime: a custom modification time to be set on the file
1913 @type close: boolean
1914 @param close: whether to close file after writing it
1915 @type prewrite: callable
1916 @param prewrite: function to be called before writing content
1917 @type postwrite: callable
1918 @param postwrite: function to be called after writing content
1921 @return: None if the 'close' parameter evaluates to True,
1922 otherwise the file descriptor
1924 @raise errors.ProgrammerError: if any of the arguments are not valid
1927 if not os.path.isabs(file_name):
1928 raise errors.ProgrammerError("Path passed to WriteFile is not"
1929 " absolute: '%s'" % file_name)
1931 if [fn, data].count(None) != 1:
1932 raise errors.ProgrammerError("fn or data required")
1934 if [atime, mtime].count(None) == 1:
1935 raise errors.ProgrammerError("Both atime and mtime must be either"
1938 if backup and not dry_run and os.path.isfile(file_name):
1939 CreateBackup(file_name)
1941 dir_name, base_name = os.path.split(file_name)
1942 fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1944 # here we need to make sure we remove the temp file, if any error
1945 # leaves it in place
1947 if uid != -1 or gid != -1:
1948 os.chown(new_name, uid, gid)
1950 os.chmod(new_name, mode)
1951 if callable(prewrite):
1953 if data is not None:
1957 if callable(postwrite):
1960 if atime is not None and mtime is not None:
1961 os.utime(new_name, (atime, mtime))
1963 os.rename(new_name, file_name)
1972 RemoveFile(new_name)
1977 def GetFileID(path=None, fd=None):
1978 """Returns the file 'id', i.e. the dev/inode and mtime information.
1980 Either the path to the file or the fd must be given.
1982 @param path: the file path
1983 @param fd: a file descriptor
1984 @return: a tuple of (device number, inode number, mtime)
1987 if [path, fd].count(None) != 1:
1988 raise errors.ProgrammerError("One and only one of fd/path must be given")
1995 return (st.st_dev, st.st_ino, st.st_mtime)
def VerifyFileID(fi_disk, fi_ours):
  """Checks whether two file IDs denote the same, unmodified file.

  The device and inode numbers must match exactly; the on-disk
  timestamp may be older than (or equal to) the one we recorded.

  @param fi_disk: tuple (dev, inode, mtime) as currently found on disk
  @param fi_ours: tuple (dev, inode, mtime) as last recorded by us
  @rtype: boolean
  @return: True if dev/inode match and the disk mtime is not newer

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (our_dev, our_ino, our_mtime) = fi_ours

  if (disk_dev, disk_ino) != (our_dev, our_ino):
    return False
  return disk_mtime <= our_mtime
2017 def SafeWriteFile(file_name, file_id, **kwargs):
2018 """Wrapper over L{WriteFile} that locks the target file.
2020 By keeping the target file locked during WriteFile, we ensure that
2021 cooperating writers will safely serialise access to the file.
2023 @type file_name: str
2024 @param file_name: the target filename
2025 @type file_id: tuple
2026 @param file_id: a result from L{GetFileID}
2029 fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
2032 if file_id is not None:
2033 disk_id = GetFileID(fd=fd)
2034 if not VerifyFileID(disk_id, file_id):
2035 raise errors.LockError("Cannot overwrite file %s, it has been modified"
2036 " since last written" % file_name)
2037 return WriteFile(file_name, **kwargs)
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type file_name: str
  @param file_name: the file to read from
  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line
  @rtype: str
  @return: the first non-empty line of the file
  @raise errors.GenericError: if the file has no non-empty lines, or
      (in strict mode) more than one of them

  """
  # An empty file trivially has no non-empty lines, so checking the
  # filtered list alone covers both the "empty file" and the
  # "whitespace-only file" cases (the original additionally tested the
  # unfiltered list, which was redundant).
  full_lines = [line for line in ReadFile(file_name).splitlines() if line]
  if not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]
2060 def FirstFree(seq, base=0):
2061 """Returns the first non-existing integer from seq.
2063 The seq argument should be a sorted list of positive integers. The
2064 first time the index of an element is smaller than the element
2065 value, the index will be returned.
2067 The base argument is used to start at a different offset,
2068 i.e. C{[3, 4, 6]} with I{offset=3} will return 5.
2070 Example: C{[0, 1, 3]} will return I{2}.
2073 @param seq: the sequence to be analyzed.
2075 @param base: use this value as the base index of the sequence
2077 @return: the first non-used index in the sequence
2080 for idx, elem in enumerate(seq):
2081 assert elem >= base, "Passed element is higher than base offset"
2082 if elem > idx + base:
2088 def SingleWaitForFdCondition(fdobj, event, timeout):
2089 """Waits for a condition to occur on the socket.
2091 Immediately returns at the first interruption.
2093 @type fdobj: integer or object supporting a fileno() method
2094 @param fdobj: entity to wait for events on
2095 @type event: integer
2096 @param event: ORed condition (see select module)
2097 @type timeout: float or None
2098 @param timeout: Timeout in seconds
2100 @return: None for timeout, otherwise occurred conditions
2103 check = (event | select.POLLPRI |
2104 select.POLLNVAL | select.POLLHUP | select.POLLERR)
2106 if timeout is not None:
2107 # Poller object expects milliseconds
2110 poller = select.poll()
2111 poller.register(fdobj, event)
2113 # TODO: If the main thread receives a signal and we have no timeout, we
2114 # could wait forever. This should check a global "quit" flag or something
2116 io_events = poller.poll(timeout)
2117 except select.error, err:
2118 if err[0] != errno.EINTR:
2121 if io_events and io_events[0][1] & check:
2122 return io_events[0][1]
2127 class FdConditionWaiterHelper(object):
2128 """Retry helper for WaitForFdCondition.
2130 This class contains the retried and wait functions that make sure
2131 WaitForFdCondition can continue waiting until the timeout is actually
2136 def __init__(self, timeout):
2137 self.timeout = timeout
2139 def Poll(self, fdobj, event):
2140 result = SingleWaitForFdCondition(fdobj, event, self.timeout)
2146 def UpdateTimeout(self, timeout):
2147 self.timeout = timeout
2150 def WaitForFdCondition(fdobj, event, timeout):
2151 """Waits for a condition to occur on the socket.
2153 Retries until the timeout is expired, even if interrupted.
2155 @type fdobj: integer or object supporting a fileno() method
2156 @param fdobj: entity to wait for events on
2157 @type event: integer
2158 @param event: ORed condition (see select module)
2159 @type timeout: float or None
2160 @param timeout: Timeout in seconds
2162 @return: None for timeout, otherwise occurred conditions
2165 if timeout is not None:
2166 retrywaiter = FdConditionWaiterHelper(timeout)
2168 result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
2169 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
2170 except RetryTimeout:
2174 while result is None:
2175 result = SingleWaitForFdCondition(fdobj, event, timeout)
2179 def UniqueSequence(seq):
2180 """Returns a list with unique elements.
2182 Element order is preserved.
2185 @param seq: the sequence with the source elements
2187 @return: list of unique elements from seq
2191 return [i for i in seq if i not in seen and not seen.add(i)]
2194 def NormalizeAndValidateMac(mac):
2195 """Normalizes and check if a MAC address is valid.
2197 Checks whether the supplied MAC address is formally correct, only
2198 accepts colon separated format. Normalize it to all lower.
2201 @param mac: the MAC to be validated
2203 @return: returns the normalized and validated MAC.
2205 @raise errors.OpPrereqError: If the MAC isn't valid
2208 if not _MAC_CHECK.match(mac):
2209 raise errors.OpPrereqError("Invalid MAC address specified: %s" %
2210 mac, errors.ECODE_INVAL)
2215 def TestDelay(duration):
2216 """Sleep for a fixed amount of time.
2218 @type duration: float
2219 @param duration: the sleep duration
2221 @return: False for negative value, True otherwise
2225 return False, "Invalid sleep duration"
2226 time.sleep(duration)
2230 def _CloseFDNoErr(fd, retries=5):
2231 """Close a file descriptor ignoring errors.
2234 @param fd: the file descriptor
2236 @param retries: how many retries to make, in case we get any
2237 other error than EBADF
2242 except OSError, err:
2243 if err.errno != errno.EBADF:
2245 _CloseFDNoErr(fd, retries - 1)
2246 # else either it's closed already or we're out of retries, so we
2247 # ignore this and go on
2250 def CloseFDs(noclose_fds=None):
2251 """Close file descriptors.
2253 This closes all file descriptors above 2 (i.e. except
2256 @type noclose_fds: list or None
2257 @param noclose_fds: if given, it denotes a list of file descriptor
2258 that should not be closed
2261 # Default maximum for the number of available file descriptors.
2262 if 'SC_OPEN_MAX' in os.sysconf_names:
2264 MAXFD = os.sysconf('SC_OPEN_MAX')
2271 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2272 if (maxfd == resource.RLIM_INFINITY):
2275 # Iterate through and close all file descriptors (except the standard ones)
2276 for fd in range(3, maxfd):
2277 if noclose_fds and fd in noclose_fds:
2282 def Mlockall(_ctypes=ctypes):
2283 """Lock current process' virtual address space into RAM.
2285 This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2286 see mlock(2) for more details. This function requires ctypes module.
2288 @raises errors.NoCtypesError: if ctypes module is not found
2292 raise errors.NoCtypesError()
2294 libc = _ctypes.cdll.LoadLibrary("libc.so.6")
2296 logging.error("Cannot set memory lock, ctypes cannot load libc")
2299 # Some older version of the ctypes module don't have built-in functionality
2300 # to access the errno global variable, where function error codes are stored.
2301 # By declaring this variable as a pointer to an integer we can then access
2302 # its value correctly, should the mlockall call fail, in order to see what
2303 # the actual error code was.
2304 # pylint: disable-msg=W0212
2305 libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)
2307 if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2308 # pylint: disable-msg=W0212
2309 logging.error("Cannot set memory lock: %s",
2310 os.strerror(libc.__errno_location().contents.value))
2313 logging.debug("Memory lock set")
2316 def Daemonize(logfile):
2317 """Daemonize the current process.
2319 This detaches the current process from the controlling terminal and
2320 runs it in the background as a daemon.
2323 @param logfile: the logfile to which we should redirect stdout/stderr
2325 @return: the value zero
2328 # pylint: disable-msg=W0212
2329 # yes, we really want os._exit
2331 # TODO: do another attempt to merge Daemonize and StartDaemon, or at
2332 # least abstract the pipe functionality between them
2334 # Create pipe for sending error messages
2335 (rpipe, wpipe) = os.pipe()
2339 if (pid == 0): # The first child.
2343 pid = os.fork() # Fork a second child.
2344 if (pid == 0): # The second child.
2345 _CloseFDNoErr(rpipe)
2347 # exit() or _exit()? See below.
2348 os._exit(0) # Exit parent (the first child) of the second child.
2350 _CloseFDNoErr(wpipe)
2351 # Wait for daemon to be started (or an error message to
2352 # arrive) and read up to 100 KB as an error message
2353 errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
2355 sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
2359 os._exit(rcode) # Exit parent of the first child.
2361 SetupDaemonFDs(logfile, None)
def DaemonPidFileName(name):
  """Returns the absolute path of a ganeti daemon's pid file.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile_base = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile_base)
2378 def EnsureDaemon(name):
2379 """Check for and start daemon if not alive.
2382 result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2384 logging.error("Can't start daemon '%s', failure %s, output: %s",
2385 name, result.fail_reason, result.output)
2391 def StopDaemon(name):
2395 result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2397 logging.error("Can't stop daemon '%s', failure %s, output: %s",
2398 name, result.fail_reason, result.output)
2404 def WritePidFile(pidfile):
2405 """Write the current process pidfile.
2407 @type pidfile: string
2408 @param pidfile: the path to the file to be written
2409 @raise errors.LockError: if the pid file already exists and
2410 points to a live process
2412 @return: the file descriptor of the lock file; do not close this unless
2413 you want to unlock the pid file
2416 # We don't rename nor truncate the file to not drop locks under
2417 # existing processes
2418 fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
2420 # Lock the PID file (and fail if not possible to do so). Any code
2421 # wanting to send a signal to the daemon should try to lock the PID
2422 # file before reading it. If acquiring the lock succeeds, the daemon is
2423 # no longer running and the signal should not be sent.
2424 LockFile(fd_pidfile)
2426 os.write(fd_pidfile, "%d\n" % os.getpid())
2431 def RemovePidFile(name):
2432 """Remove the current process pidfile.
2434 Any errors are ignored.
2437 @param name: the daemon name used to derive the pidfile name
2440 pidfilename = DaemonPidFileName(name)
2441 # TODO: we could check here that the file contains our pid
2443 RemoveFile(pidfilename)
2444 except: # pylint: disable-msg=W0702
2448 def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2450 """Kill a process given by its pid.
2453 @param pid: The PID to terminate.
2455 @param signal_: The signal to send, by default SIGTERM
2457 @param timeout: The timeout after which, if the process is still alive,
2458 a SIGKILL will be sent. If not positive, no such checking
2460 @type waitpid: boolean
2461 @param waitpid: If true, we should waitpid on this process after
2462 sending signals, since it's our own child and otherwise it
2463 would remain as zombie
2466 def _helper(pid, signal_, wait):
2467 """Simple helper to encapsulate the kill/waitpid sequence"""
2468 if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2470 os.waitpid(pid, os.WNOHANG)
2475 # kill with pid=0 == suicide
2476 raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2478 if not IsProcessAlive(pid):
2481 _helper(pid, signal_, waitpid)
2486 def _CheckProcess():
2487 if not IsProcessAlive(pid):
2491 (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2501 # Wait up to $timeout seconds
2502 Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2503 except RetryTimeout:
2506 if IsProcessAlive(pid):
2507 # Kill process if it's still alive
2508 _helper(pid, signal.SIGKILL, waitpid)
2511 def FindFile(name, search_path, test=os.path.exists):
2512 """Look for a filesystem object in a given path.
2514 This is an abstract method to search for filesystem object (files,
2515 dirs) under a given search path.
2518 @param name: the name to look for
2519 @type search_path: str
2520 @param search_path: location to start at
2521 @type test: callable
2522 @param test: a function taking one argument that should return True
2523 if the a given object is valid; the default value is
2524 os.path.exists, causing only existing files to be returned
2526 @return: full path to the object if found, None otherwise
2529 # validate the filename mask
2530 if constants.EXT_PLUGIN_MASK.match(name) is None:
2531 logging.critical("Invalid value passed for external script name: '%s'",
2535 for dir_name in search_path:
2536 # FIXME: investigate switch to PathJoin
2537 item_name = os.path.sep.join([dir_name, name])
2538 # check the user test and that we're indeed resolving to the given
2540 if test(item_name) and os.path.basename(item_name) == name:
2545 def CheckVolumeGroupSize(vglist, vgname, minsize):
2546 """Checks if the volume group list is valid.
2548 The function will check if a given volume group is in the list of
2549 volume groups and has a minimum size.
2552 @param vglist: dictionary of volume group names and their size
2554 @param vgname: the volume group we should check
2556 @param minsize: the minimum size we accept
2558 @return: None for success, otherwise the error message
2561 vgsize = vglist.get(vgname, None)
2563 return "volume group '%s' missing" % vgname
2564 elif vgsize < minsize:
2565 return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2566 (vgname, minsize, vgsize))
def SplitTime(value):
  """Splits a time value in seconds into a (seconds, microseconds) tuple.

  @type value: int or float
  @param value: time in seconds
  @rtype: tuple
  @return: tuple of (seconds, microseconds), both non-negative ints

  """
  total_micro = int(value * 1000000)
  secs = total_micro // 1000000
  micros = total_micro - secs * 1000000

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= micros <= 999999, \
    "Microseconds must be 0-999999, but are %s" % micros

  return (int(secs), int(micros))
def MergeTime(timetuple):
  """Merges a (seconds, microseconds) tuple into a float in seconds.

  Inverse of L{SplitTime}.

  @type timetuple: tuple
  @param timetuple: time as a (seconds, microseconds) tuple
  @rtype: float
  @return: time as a floating point number expressed in seconds

  """
  (secs, micros) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= micros <= 999999, \
    "Microseconds must be 0-999999, but are %s" % micros

  # NOTE: arithmetic kept exactly as before to preserve float rounding
  return float(secs) + (float(micros) * 0.000001)
2606 class LogFileHandler(logging.FileHandler):
2607 """Log handler that doesn't fallback to stderr.
2609 When an error occurs while writing on the logfile, logging.FileHandler tries
2610 to log on stderr. This doesn't work in ganeti since stderr is redirected to
2611 the logfile. This class avoids failures reporting errors to /dev/console.
2614 def __init__(self, filename, mode="a", encoding=None):
2615 """Open the specified file and use it as the stream for logging.
2617 Also open /dev/console to report errors while logging.
2620 logging.FileHandler.__init__(self, filename, mode, encoding)
2621 self.console = open(constants.DEV_CONSOLE, "a")
2623 def handleError(self, record): # pylint: disable-msg=C0103
2624 """Handle errors which occur during an emit() call.
2626 Try to handle errors with FileHandler method, if it fails write to
2631 logging.FileHandler.handleError(self, record)
2632 except Exception: # pylint: disable-msg=W0703
2634 self.console.write("Cannot log message:\n%s\n" % self.format(record))
2635 except Exception: # pylint: disable-msg=W0703
2636 # Log handler tried everything it could, now just give up
2640 def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2641 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2642 console_logging=False):
2643 """Configures the logging module.
2646 @param logfile: the filename to which we should log
2647 @type debug: integer
2648 @param debug: if greater than zero, enable debug messages, otherwise
2649 only those at C{INFO} and above level
2650 @type stderr_logging: boolean
2651 @param stderr_logging: whether we should also log to the standard error
2653 @param program: the name under which we should log messages
2654 @type multithreaded: boolean
2655 @param multithreaded: if True, will add the thread name to the log file
2656 @type syslog: string
2657 @param syslog: one of 'no', 'yes', 'only':
2658 - if no, syslog is not used
2659 - if yes, syslog is used (in addition to file-logging)
2660 - if only, only syslog is used
2661 @type console_logging: boolean
2662 @param console_logging: if True, will use a FileHandler which falls back to
2663 the system console if logging fails
2664 @raise EnvironmentError: if we can't open the log file and
2665 syslog/stderr logging is disabled
2668 fmt = "%(asctime)s: " + program + " pid=%(process)d"
2669 sft = program + "[%(process)d]:"
2671 fmt += "/%(threadName)s"
2672 sft += " (%(threadName)s)"
2674 fmt += " %(module)s:%(lineno)s"
2675 # no debug info for syslog loggers
2676 fmt += " %(levelname)s %(message)s"
2677 # yes, we do want the textual level, as remote syslog will probably
2678 # lose the error level, and it's easier to grep for it
2679 sft += " %(levelname)s %(message)s"
2680 formatter = logging.Formatter(fmt)
2681 sys_fmt = logging.Formatter(sft)
2683 root_logger = logging.getLogger("")
2684 root_logger.setLevel(logging.NOTSET)
2686 # Remove all previously setup handlers
2687 for handler in root_logger.handlers:
2689 root_logger.removeHandler(handler)
2692 stderr_handler = logging.StreamHandler()
2693 stderr_handler.setFormatter(formatter)
2695 stderr_handler.setLevel(logging.NOTSET)
2697 stderr_handler.setLevel(logging.CRITICAL)
2698 root_logger.addHandler(stderr_handler)
2700 if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2701 facility = logging.handlers.SysLogHandler.LOG_DAEMON
2702 syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2704 syslog_handler.setFormatter(sys_fmt)
2705 # Never enable debug over syslog
2706 syslog_handler.setLevel(logging.INFO)
2707 root_logger.addHandler(syslog_handler)
2709 if syslog != constants.SYSLOG_ONLY:
2710 # this can fail, if the logging directories are not setup or we have
2711 # a permisssion problem; in this case, it's best to log but ignore
2712 # the error if stderr_logging is True, and if false we re-raise the
2713 # exception since otherwise we could run but without any logs at all
2716 logfile_handler = LogFileHandler(logfile)
2718 logfile_handler = logging.FileHandler(logfile)
2719 logfile_handler.setFormatter(formatter)
2721 logfile_handler.setLevel(logging.DEBUG)
2723 logfile_handler.setLevel(logging.INFO)
2724 root_logger.addHandler(logfile_handler)
2725 except EnvironmentError:
2726 if stderr_logging or syslog == constants.SYSLOG_YES:
2727 logging.exception("Failed to enable logging to file '%s'", logfile)
2729 # we need to re-raise the exception
def IsNormAbsPath(path):
  """Checks whether a path is both absolute and normalized.

  This rejects paths such as C{/dir/../../other/path}, which are
  absolute but not in canonical (normalized) form.

  @type path: str
  @param path: the path to check
  @rtype: boolean
  @return: True if the path is absolute and already normalized

  """
  if not os.path.isabs(path):
    return False
  return os.path.normpath(path) == path
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths
  @rtype: string
  @return: the joined, normalized path

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    # seek to the end, then back up (at most) 4KB and read the tail
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos - 4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp as a date/time string with a zone name.

  @type secs: number
  @param secs: Unix timestamp
  @rtype: string
  @return: "YYYY-MM-DD HH:MM:SS ZONE" formatted string

  @note: the broken-down time is computed with time.gmtime (i.e. UTC); the
      zone name inserted for %Z may still be the process-local one on some
      platforms -- confirm before relying on it

  """
  # %F and %T work on Linux but are not guaranteed by C89 strftime
  return time.strftime("%F %T %Z", time.gmtime(secs))
2805 def _ParseAsn1Generalizedtime(value):
2806 """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2809 @param value: ASN1 GENERALIZEDTIME timestamp
2812 m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2815 asn1time = m.group(1)
2816 hours = int(m.group(2))
2817 minutes = int(m.group(3))
2818 utcoffset = (60 * hours) + minutes
2820 if not value.endswith("Z"):
2821 raise ValueError("Missing timezone")
2822 asn1time = value[:-1]
2825 parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2827 tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2829 return calendar.timegm(tt.utctimetuple())
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  # NOTE(review): the "try:"/"else:" scaffolding and the "not_before = None"
  # assignments appear elided in this copy of the source (the dangling
  # "except AttributeError:" below implies them); confirm against upstream.
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    # older pyOpenSSL: accessor not available
    not_before_asn1 = get_notbefore_fn()

  if not_before_asn1 is None:
    not_before = _ParseAsn1Generalizedtime(not_before_asn1)

    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after_asn1 = get_notafter_fn()

  if not_after_asn1 is None:
    not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)
2868 def _VerifyCertificateInner(expired, not_before, not_after, now,
2869 warn_days, error_days):
2870 """Verifies certificate validity.
2873 @param expired: Whether pyOpenSSL considers the certificate as expired
2874 @type not_before: number or None
2875 @param not_before: Unix timestamp before which certificate is not valid
2876 @type not_after: number or None
2877 @param not_after: Unix timestamp after which certificate is invalid
2879 @param now: Current time as Unix timestamp
2880 @type warn_days: number or None
2881 @param warn_days: How many days before expiration a warning should be reported
2882 @type error_days: number or None
2883 @param error_days: How many days before expiration an error should be reported
2887 msg = "Certificate is expired"
2889 if not_before is not None and not_after is not None:
2890 msg += (" (valid from %s to %s)" %
2891 (FormatTimestampWithTZ(not_before),
2892 FormatTimestampWithTZ(not_after)))
2893 elif not_before is not None:
2894 msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2895 elif not_after is not None:
2896 msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2898 return (CERT_ERROR, msg)
2900 elif not_before is not None and not_before > now:
2901 return (CERT_WARNING,
2902 "Certificate not yet valid (valid from %s)" %
2903 FormatTimestampWithTZ(not_before))
2905 elif not_after is not None:
2906 remaining_days = int((not_after - now) / (24 * 3600))
2908 msg = "Certificate expires in about %d days" % remaining_days
2910 if error_days is not None and remaining_days <= error_days:
2911 return (CERT_ERROR, msg)
2913 if warn_days is not None and remaining_days <= warn_days:
2914 return (CERT_WARNING, msg)
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @return: result of L{_VerifyCertificateInner}

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 time.time(), warn_days, error_days)
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  # NOTE(review): the final "cert_pem))" argument/closing of this return
  # statement appears truncated in this copy of the source; confirm upstream
  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format with a signature header
  @rtype: tuple; (string, string)
  @return: Salt and signature as extracted from the header
  @raise errors.GenericError: if no signature header is found

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      # start of the PEM body; no header can follow
      break

    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")
  # NOTE(review): the final "return (cert, salt)" appears elided in this
  # copy of the source (the docstring promises it); confirm upstream
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to compute the digest over
  @type salt: None or string
  @param salt: Optional salt prepended to the text before digesting
  @rtype: string
  @return: Hexadecimal digest

  """
  if salt:
    salted_text = salt + text
  else:
    salted_text = text

  return hmac.new(key, salted_text, compat.sha1).hexdigest()
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text whose digest is verified
  @type digest: string
  @param digest: Expected digest
  @type salt: None or string
  @param salt: Salt to use (must match the salt used when signing)
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # case-insensitive compare; hexdigest case may differ between producers
  return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  # NOTE(review): the per-character escaping loop (accumulator setup, the
  # printable/newline branches and the final return) appears elided in this
  # copy of the source; confirm against upstream.
  elif c < 32 or c >= 127: # non-printable
    resu += "\\x%02x" % (c & 0xff)
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be an element of the
  elements. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \\, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list of strings
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  # stringify each element so mixed int/str sequences work
  return ", ".join([str(val) for val in names])
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)
  @return: found value and groups, or None when nothing matches

  """
  if name in data:
    return (data[name], [])

  for key, value in data.items():
    # regular expression objects are recognized by their "match" attribute
    if hasattr(key, "match"):
      m = key.match(name)
      if m:
        return (value, list(m.groups()))

  return None
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes, rounded to the nearest whole unit

  """
  return int(round(value / (1024.0 * 1024.0), 0))
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      # lstat so symlinks are counted, not followed
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  data = []
  mountlines = ReadFile(filename).splitlines()
  for line in mountlines:
    # /proc/mounts fields: device mountpoint fstype options dump pass;
    # everything after the fourth field is ignored
    device, mountpoint, fstype, options, _ = line.split(None, 4)
    data.append((device, mountpoint, fstype, options))

  return data
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  # f_frsize is the fundamental block size used by the f_blocks/f_bavail
  # counters; f_bavail is the space available to unprivileged users
  fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
  tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
  return (tsize, fsize)
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  # NOTE(review): the "pid = os.fork()"/"if pid == 0:"/"try:" scaffolding of
  # the child branch and the error result assignment appear elided in this
  # copy of the source; confirm against upstream.
    # In case the function uses temporary files
    ResetTempfileModule()

    result = int(bool(fn(*args)))
    assert result in (0, 1)
  except: # pylint: disable-msg=W0702
    logging.exception("Error while calling function in separate process")
    # 0 and 1 are reserved for the return value

  os._exit(result) # pylint: disable-msg=W0212

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    signum = os.WTERMSIG(status)
    exitcode = os.WEXITSTATUS(status)

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
  return bool(exitcode)
def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @rtype: bool
  @return: Whether process was found

  """
  # NOTE(review): the "try:"/call/"return" lines appear elided in this copy
  # of the source; confirm against upstream.
  except EnvironmentError, err:
    # ESRCH means "no such process"
    if err.errno == errno.ESRCH:
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  @return: the result of calling C{fn}

  """
  # NOTE(review): the "try:" line and the retry/return statements appear
  # elided in this copy of the source; confirm against upstream.
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
  # NOTE(review): the "def LockFile(fd):" header and the "try:" line appear
  # elided in this copy of the source; confirm against upstream.
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
    # non-blocking exclusive lock: fails immediately when already held
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"

  # these two codes works on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  parts = []

  secs = round(secs, 0)

  if secs > 0:
    # Negative values would be a bit tricky
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (complete, secs) = divmod(secs, one)
      # emit a unit once it's non-zero or a larger unit was already emitted
      if complete or parts:
        parts.append("%d%s" % (complete, unit))

  parts.append("%ds" % secs)

  return " ".join(parts)
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  # NOTE(review): the "now" default handling, the "try:" line, the integer
  # conversion and the final "return" appear elided in this copy of the
  # source; confirm against upstream.
    value = ReadFile(filename)
  except IOError, err:
    # a missing file simply means "not paused"
    if err.errno != errno.ENOENT:

  if value is not None:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)

  if value is not None:
    # Remove file if it's outdated
    if now > (value + remove_after):
      RemoveFile(filename)
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raises a wrapped exception, or this timeout itself."""
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)
class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout() method can be used to reraise it.

  """
3433 class _RetryDelayCalculator(object):
3434 """Calculator for increasing delays.
3444 def __init__(self, start, factor, limit):
3445 """Initializes this class.
3448 @param start: Initial delay
3450 @param factor: Factor for delay increase
3451 @type limit: float or None
3452 @param limit: Upper limit for delay or None for no limit
3456 assert factor >= 1.0
3457 assert limit is None or limit >= 0.0
3460 self._factor = factor
3466 """Returns current delay and calculates the next one.
3469 current = self._next
3471 # Update for next run
3472 if self._limit is None or self._next < self._limit:
3473 self._next = min(self._limit, self._next * self._factor)
#: Special delay to specify whole remaining timeout; compared by identity
#: (C{is}) in L{Retry}
RETRY_REMAINING_TIME = object()
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(wait_fn)
  assert callable(_time_fn)

  end_time = _time_fn() + timeout

  # NOTE(review): several lines appear elided in this copy of the source
  # (the "if callable(delay):" branch header, the RETRY_REMAINING_TIME
  # "calc_delay = None" assignment, the final "else:" branch, the
  # "while True:"/"try:" loop scaffolding and the success return); confirm
  # against upstream before editing.
    # External function to calculate delay
  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

    # pylint: disable-msg=W0142
    except RetryAgain, err:
      # remember arguments for a possible RetryTimeout
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      # no calculator: sleep for whatever time remains
      wait_fn(remaining_time)

      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to L{tempfile.mkstemp}; the file
  descriptor is closed before returning, only the path is handed back.

  @rtype: string
  @return: Path to the temporary file

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  os.close(fd)
  return path
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple; (string, string)
  @return: PEM-encoded private key and PEM-encoded certificate

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  # NOTE(review): an "if common_name:" guard appears elided before the CN
  # assignment in this copy of the source; confirm against upstream
  cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  # valid from now for "validity" seconds
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  # self-signed: issuer equals subject
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
  """Legacy function to generate self-signed X509 certificate.

  @type filename: string
  @param filename: path to write certificate to
  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: validity of certificate in number of days

  """
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
  # and node daemon certificates have the proper Subject/Issuer.
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
                                                   validity * 24 * 60 * 60)

  # mode 0400: readable only by owner, since the file holds the private key
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @param fd: File object
    @param filename: Path of the file opened at I{fd}

    """
    # NOTE(review): the "self.fd = fd" assignment appears elided in this
    # copy of the source (Close() below tests self.fd); confirm upstream
    self.filename = filename

  # NOTE(review): a "@classmethod" decorator appears elided here (implied
  # by the "cls" parameter and the cls(...) call below)
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
    # NOTE(review): the "def Close(self):" header appears elided before the
    # following docstring; confirm against upstream
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
        non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

      # NOTE(review): the "if timeout is None:"/"else:"/"try:" lines around
      # the two calls below appear elided; confirm against upstream
      self._Lock(self.fd, flag, timeout)
      Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
            args=(self.fd, flag, timeout))
    except RetryTimeout:
      raise errors.LockError(errmsg)

  # NOTE(review): a "@staticmethod" decorator appears elided here (implied
  # by the missing "self" parameter)
  def _Lock(fd, flag, timeout):
      fcntl.flock(fd, flag)
    except IOError, err:
      # with a timeout, EAGAIN is converted into a retry
      if timeout is not None and err.errno == errno.EAGAIN:

      logging.exception("fcntl.flock failed")

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation:

    To make a non-blocking request, include LOCK_NB with any of the above
    operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
  # NOTE(review): the enclosing "class LineSplitter(object):" header is not
  # visible in this copy of the source
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    # NOTE(review): the "if args:"/"else:" scaffolding and the
    # "self._buffer = ..." initialisation appear elided in this copy
    # Python 2.4 doesn't have functools.partial yet
      lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    self._line_fn = line_fn

    self._lines = collections.deque()

  def write(self, data):
    # split buffered plus new data; the last element is an incomplete line
    # and is kept for the next write
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

      # deliver each complete line without its trailing CR/LF
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

      # NOTE(review): this line belongs to a "def flush(self):" whose header
      # appears elided in this copy of the source
      self._line_fn(self._buffer)
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  instances as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  # NOTE(review): the outer "def wrap_sig_function(fn):" wrapper, the
  # "else:" branch, the "try:"/"for sig in signums:"/"finally:" scaffolding
  # and the return statements appear elided in this copy of the source;
  # confirm against upstream.
  def sig_function(*args, **kwargs):
    assert 'signal_handlers' not in kwargs or \
      kwargs['signal_handlers'] is None or \
      isinstance(kwargs['signal_handlers'], dict), \
      "Wrong signal_handlers parameter in original function call"
    if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
      signal_handlers = kwargs['signal_handlers']
      signal_handlers = {}
      kwargs['signal_handlers'] = signal_handlers
    sighandler = SignalHandler(signums)
      signal_handlers[sig] = sighandler
    return fn(*args, **kwargs)
class SignalWakeupFd(object):
  # NOTE(review): the class-level "try:", the bodies of the two fallback
  # "_SetWakeupFd" variants, the "def __init__(self):", "def Reset(self):",
  # "def Notify(self):" and "def __del__(self):" headers appear elided in
  # this copy of the source; confirm against upstream.
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # fallback when signal.set_wakeup_fd is unavailable: no-op
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions delegated to the read side of the pipe
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

    """Called before object deletion.

    """
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # NOTE(review): the "self.called"/"self._previous" initialisation and
    # the "try:"/"except:" scaffolding around installing the handlers appear
    # elided in this copy of the source; confirm against upstream.
    for signum in self.signum:
      prev_handler = signal.signal(signum, self._HandleSignal)
        self._previous[signum] = prev_handler
        # Restore previous handler
        signal.signal(signum, prev_handler)
      # Reset all handlers
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.

    # NOTE(review): the "def Reset(self):" header appears elided before the
    # following docstring
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

    # NOTE(review): the "def Clear(self):" header appears elided before the
    # following docstring
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    # Notify whoever is interested in signals
    self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # each field is anchored so it must match the whole candidate string
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: string
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # plain loop instead of itertools.ifilter (removed in Python 3);
    # returns the first successful match
    for item in self.items:
      m = item.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list of strings
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  The clock starts on the first call to L{Remaining}, not on construction.

  """
  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests
    @raise ValueError: if the timeout is negative

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    @rtype: float or None
    @return: remaining seconds, or None when no timeout was set

    """
    if self._timeout is None:
      return None

    # Get start time on first calculation
    if self._start_time is None:
      self._start_time = self._time_fn()

    # Calculate remaining time
    remaining_timeout = self._start_time + self._timeout - self._time_fn()

    if not self._allow_negative:
      # Ensure timeout is always >= 0
      return max(0.0, remaining_timeout)

    return remaining_timeout