4 # Copyright (C) 2006, 2007, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti utility module.
24 This module holds functions that can be used in both daemons (all) and
25 the command line scripts.
45 import logging.handlers
53 from cStringIO import StringIO
56 # pylint: disable-msg=F0401
61 from ganeti import errors
62 from ganeti import constants
63 from ganeti import compat
# Module-level constants and precompiled regexes: shell-quoting check,
# kernel UUID source, X509 signature parsing, service-name validation,
# MAC address check, shell-param/unit regexes, and NiceSort helpers.
# NOTE(review): this excerpt elides many original lines (the embedded
# numbering jumps), so several tuple-unpacking assignments below
# (e.g. "CERT_ERROR) = ..." and "_TIMEOUT_KILL) = ...") are visibly
# missing their opening halves — do not edit without the full file.
67 _re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
71 #: when set to True, L{RunCmd} is disabled
74 _RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
76 HEX_CHAR_RE = r"[a-zA-Z0-9]"
77 VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
78 X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
79 (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
80 HEX_CHAR_RE, HEX_CHAR_RE),
83 _VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
85 UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
86 '[a-f0-9]{4}-[a-f0-9]{12}$')
88 # Certificate verification results
90 CERT_ERROR) = range(1, 3)
92 # Flags for mlockall() (from bits/mman.h)
97 _MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
# _TIMEOUT_* values tag how a child process ended relative to its timeout;
# consumed by RunResult and _RunCmdPipe below.
101 _TIMEOUT_KILL) = range(3)
103 #: Shell param checker regexp
104 _SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
106 #: Unit checker regexp
107 _PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$")
110 _ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
# Up to eight alternating digit/non-digit groups, then the remainder;
# used by _NiceSortKey for human-friendly ("natural") sorting.
112 _SORTER_RE = re.compile("^%s(.*)$" % (8 * "(\D+|\d+)?"))
113 _SORTER_DIGIT = re.compile("^\d+$")
# Value object describing one external-command run: exit code, signal,
# captured stdout/stderr, and a human-readable failure reason.
# NOTE(review): interior lines are elided in this excerpt (e.g. the
# assignments of self.stdout/self.stderr/self.cmd between original
# lines 143 and 146 are not visible) — verify against the full file.
116 class RunResult(object):
117   """Holds the result of running external programs.
120   @ivar exit_code: the exit code of the program, or None (if the program
122   @type signal: int or None
123   @ivar signal: the signal that caused the program to finish, or None
124       (if the program wasn't terminated by a signal)
126   @ivar stdout: the standard output of the program
128   @ivar stderr: the standard error of the program
129   @type failed: boolean
130   @ivar failed: True in case the program was
131       terminated by a signal or exited with a non-zero exit code
132   @ivar fail_reason: a string detailing the termination reason
135   __slots__ = ["exit_code", "signal", "stdout", "stderr",
136                "failed", "fail_reason", "cmd"]
139   def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
142     self.exit_code = exit_code
143     self.signal = signal_
# failed: any signal-death or non-zero exit counts as failure.
146     self.failed = (signal_ is not None or exit_code != 0)
149     if self.signal is not None:
150       fail_msgs.append("terminated by signal %s" % self.signal)
151     elif self.exit_code is not None:
152       fail_msgs.append("exited with exit code %s" % self.exit_code)
154       fail_msgs.append("unable to determine termination reason")
# Append timeout context so the reason explains both *how* and *why*
# the child ended (plain timeout vs. forced kill after lingering).
156     if timeout_action == _TIMEOUT_TERM:
157       fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
158     elif timeout_action == _TIMEOUT_KILL:
159       fail_msgs.append(("force termination after timeout of %.2f seconds"
160                         " and linger for another %.2f seconds") %
161                        (timeout, constants.CHILD_LINGER_TIMEOUT))
163     if fail_msgs and self.failed:
164       self.fail_reason = CommaJoin(fail_msgs)
167       logging.debug("Command '%s' failed (%s); output: %s",
168                     self.cmd, self.fail_reason, self.output)
170   def _GetOutput(self):
171     """Returns the combined stdout and stderr for easier usage.
174     return self.stdout + self.stderr
176   output = property(_GetOutput, None, None, "Return full output")
# Builds the environment dict for a child process: a copy of os.environ
# (the elided branch presumably handles reset=True — TODO confirm) with
# LC_ALL forced to "C" so external tool output is parseable.
# NOTE(review): the function body continues past the visible lines
# (merging of the caller-supplied 'env' is not shown here).
179 def _BuildCmdEnvironment(env, reset):
180   """Builds the environment for an external program.
186     cmd_env = os.environ.copy()
187     cmd_env["LC_ALL"] = "C"
# Main entry point for running an external command, returning a RunResult.
# Dispatches to _RunCmdPipe (capture output) or _RunCmdFile (redirect to
# a file); string commands run via the shell, lists via execve semantics.
# NOTE(review): several lines are elided in this excerpt (the no_fork
# guard condition, the string/list branch bodies, the out/err defaults
# for the file case, and the exit-code/signal decoding near the end).
195 def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
196            interactive=False, timeout=None):
197   """Execute a (shell) command.
199   The command should not read from its standard input, as it will be
202   @type cmd: string or list
203   @param cmd: Command to run
205   @param env: Additional environment variables
207   @param output: if desired, the output of the command can be
208       saved in a file instead of the RunResult instance; this
209       parameter denotes the file name (if not None)
211   @param cwd: if specified, will be used as the working
212       directory for the command; the default will be /
213   @type reset_env: boolean
214   @param reset_env: whether to reset or keep the default os environment
215   @type interactive: boolean
216   @param interactive: weather we pipe stdin, stdout and stderr
217       (default behaviour) or run the command interactive
219   @param timeout: If not None, timeout in seconds until child process gets
222   @return: RunResult instance
223   @raise errors.ProgrammerError: if we call this when forks are disabled
227     raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
# 'output' and 'interactive' are mutually exclusive: interactive mode
# leaves stdio attached to the terminal, so there is nothing to save.
229   if output and interactive:
230     raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
231                                  " not be provided at the same time")
233   if isinstance(cmd, basestring):
# List form: coerce every element to str so Popen gets uniform argv.
237     cmd = [str(val) for val in cmd]
238     strcmd = ShellQuoteArgs(cmd)
242     logging.debug("RunCmd %s, output file '%s'", strcmd, output)
244     logging.debug("RunCmd %s", strcmd)
246   cmd_env = _BuildCmdEnvironment(env, reset_env)
250       out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
251                                                      interactive, timeout)
# File-output path: no piping, hence no timeout handling here.
253       timeout_action = _TIMEOUT_NONE
254       status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
257     if err.errno == errno.ENOENT:
258       raise errors.OpExecError("Can't execute '%s': not found (%s)" %
270   return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
# Prepares process-wide daemon state (cwd, umask; the visible excerpt
# ends before the body, which presumably also calls os.setsid between
# the forks — TODO confirm against the full file).
273 def SetupDaemonEnv(cwd="/", umask=077):
274   """Setup a daemon's environment.
276   This should be called between the first and second fork, due to
279   @param cwd: the directory to which to chdir
280   @param umask: the umask to setup
# Redirects a daemon's stdio: stdin from /dev/null, stdout/stderr either
# to a caller-supplied fd, an append-opened file, or /dev/null.
288 def SetupDaemonFDs(output_file, output_fd):
289   """Setups up a daemon's file descriptors.
291   @param output_file: if not None, the file to which to redirect
293   @param output_fd: if not None, the file descriptor for stdout/stderr
296   # check that at most one is defined
297   assert [output_file, output_fd].count(None) >= 1
299   # Open /dev/null (read-only, only for stdin)
300   devnull_fd = os.open(os.devnull, os.O_RDONLY)
302   if output_fd is not None:
304   elif output_file is not None:
307       output_fd = os.open(output_file,
308                           os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
309     except EnvironmentError, err:
310       raise Exception("Opening output file failed: %s" % err)
# Neither given: discard all output.
312     output_fd = os.open(os.devnull, os.O_WRONLY)
314   # Redirect standard I/O
315   os.dup2(devnull_fd, 0)
316   os.dup2(output_fd, 1)
317   os.dup2(output_fd, 2)
# Double-forks a daemon process, handing the real work to
# _StartDaemonChild; the parent reads the daemon PID and any error
# message back over two pipes.
# NOTE(review): the fork() call itself and several try/finally lines
# are elided in this excerpt — the visible parent/child split relies on
# code between original lines 366 and 374 that is not shown.
320 def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
322   """Start a daemon process after forking twice.
324   @type cmd: string or list
325   @param cmd: Command to run
327   @param env: Additional environment variables
329   @param cwd: Working directory for the program
331   @param output: Path to file in which to save the output
333   @param output_fd: File descriptor for output
334   @type pidfile: string
335   @param pidfile: Process ID file
337   @return: Daemon process ID
338   @raise errors.ProgrammerError: if we call this when forks are disabled
342     raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
# Exactly one of 'output' (path) and 'output_fd' may be given.
345   if output and not (bool(output) ^ (output_fd is not None)):
346     raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
349   if isinstance(cmd, basestring):
350     cmd = ["/bin/sh", "-c", cmd]
352   strcmd = ShellQuoteArgs(cmd)
355     logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
357     logging.debug("StartDaemon %s", strcmd)
359   cmd_env = _BuildCmdEnvironment(env, False)
361   # Create pipe for sending PID back
362   (pidpipe_read, pidpipe_write) = os.pipe()
365     # Create pipe for sending error messages
366     (errpipe_read, errpipe_write) = os.pipe()
373         # Child process, won't return
374         _StartDaemonChild(errpipe_read, errpipe_write,
375                           pidpipe_read, pidpipe_write,
377                           output, output_fd, pidfile)
379         # Well, maybe child process failed
380         os._exit(1) # pylint: disable-msg=W0212
# Parent side: close write ends we don't use, then read status.
382       _CloseFDNoErr(errpipe_write)
384       # Wait for daemon to be started (or an error message to
385       # arrive) and read up to 100 KB as an error message
386       errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
388       _CloseFDNoErr(errpipe_read)
390     _CloseFDNoErr(pidpipe_write)
392     # Read up to 128 bytes for PID
393     pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
395     _CloseFDNoErr(pidpipe_read)
397   # Try to avoid zombies by waiting for child process
404     raise errors.OpExecError("Error when starting daemon process: %r" %
409   except (ValueError, TypeError), err:
410     raise errors.OpExecError("Error while trying to parse PID %r: %s" %
# Runs in the first-fork child: forks again, writes the PID file,
# redirects stdio, reports its PID to the parent over pidpipe_write,
# then execs the daemon. Any failure is written to errpipe_write.
# NOTE(review): the second fork call, SetupDaemonEnv invocation and
# chdir are among the elided lines in this excerpt.
414 def _StartDaemonChild(errpipe_read, errpipe_write,
415                       pidpipe_read, pidpipe_write,
417                       output, fd_output, pidfile):
418   """Child process for starting daemon.
422     # Close parent's side
423     _CloseFDNoErr(errpipe_read)
424     _CloseFDNoErr(pidpipe_read)
426     # First child process
429     # And fork for the second time
432       # Exit first child process
433       os._exit(0) # pylint: disable-msg=W0212
435     # Make sure pipe is closed on execv* (and thereby notifies
437     SetCloseOnExecFlag(errpipe_write, True)
439     # List of file descriptors to be left open
440     noclose_fds = [errpipe_write]
444       fd_pidfile = WritePidFile(pidfile)
446       # Keeping the file open to hold the lock
447       noclose_fds.append(fd_pidfile)
449       SetCloseOnExecFlag(fd_pidfile, False)
453     SetupDaemonFDs(output, fd_output)
455     # Send daemon PID to parent
456     RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
458     # Close all file descriptors except stdio and error message pipe
459     CloseFDs(noclose_fds=noclose_fds)
461     # Change working directory
# Two exec variants: plain PATH search, or with explicit environment
# (branch condition elided in this excerpt).
465       os.execvp(args[0], args)
467       os.execvpe(args[0], args, env)
468   except: # pylint: disable-msg=W0702
470       # Report errors to original process
471       WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
472     except: # pylint: disable-msg=W0702
473       # Ignore errors in error handling
476   os._exit(1) # pylint: disable-msg=W0212
# Best-effort write of an error message to a file descriptor (used by the
# daemon-start machinery to report failures back to the parent).
479 def WriteErrorToFD(fd, err):
480   """Possibly write an error message to a fd.
482   @type fd: None or int (file descriptor)
483   @param fd: if not None, the error will be written to this fd
484   @param err: string, the error message
# Guarantee a non-empty message so the reader can distinguish
# "error occurred" from "no data".
491     err = "<unknown error>"
493   RetryOnSignal(os.write, fd, err)
# Retry helper: poll() returning None means the child has not exited yet,
# so signal the Retry machinery to try again (raise line elided here).
496 def _CheckIfAlive(child):
497   """Raises L{RetryAgain} if child is still alive.
499   @raises RetryAgain: If child is still alive
502   if child.poll() is None:
# Polls a subprocess with exponential backoff (1.0s start, factor 1.2,
# 5.0s cap) until it exits or the timeout elapses; exceptions from Retry
# are handled on lines elided from this excerpt.
506 def _WaitForProcess(child, timeout):
507   """Waits for the child to terminate or until we reach timeout.
511     Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
# Core poll()-based runner: spawns the child, multiplexes stdout/stderr
# reads, enforces the execution timeout (SIGTERM), then a linger timeout
# (SIGKILL), and returns the captured output plus the timeout action.
# NOTE(review): the out/err buffer creation, stdin close, the poll loop
# header and several conditionals are elided from this excerpt; the
# control flow below cannot be fully reconstructed from what is shown.
516 def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
517                 _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
518   """Run a command and return its output.
520   @type cmd: string or list
521   @param cmd: Command to run
523   @param env: The environment to use
524   @type via_shell: bool
525   @param via_shell: if we should run via the shell
527   @param cwd: the working directory for the program
528   @type interactive: boolean
529   @param interactive: Run command interactive (without piping)
531   @param timeout: Timeout after the programm gets terminated
533   @return: (out, err, status)
536   poller = select.poll()
# Non-interactive: capture everything; interactive: inherit our stdio.
538     stderr = subprocess.PIPE
539     stdout = subprocess.PIPE
540     stdin = subprocess.PIPE
543     stderr = stdout = stdin = None
545   child = subprocess.Popen(cmd, shell=via_shell,
549                            close_fds=True, env=env,
# linger_timeout is lazily created only after the first timeout fires.
555   linger_timeout = None
560     poll_timeout = RunningTimeout(timeout, True).Remaining
562   msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
564   msg_linger = ("Command %s (%d) run into linger timeout, killing" %
567   timeout_action = _TIMEOUT_NONE
571     poller.register(child.stdout, select.POLLIN)
572     poller.register(child.stderr, select.POLLIN)
# fdmap: fd -> (accumulator buffer, stream) for the read loop below.
574       child.stdout.fileno(): (out, child.stdout),
575       child.stderr.fileno(): (err, child.stderr),
578       SetNonblockFlag(fd, True)
582         pt = poll_timeout() * 1000
# Execution timeout expired: send SIGTERM once, then switch to the
# linger timeout for the remaining wait.
584           if linger_timeout is None:
585             logging.warning(msg_timeout)
586             if child.poll() is None:
587               timeout_action = _TIMEOUT_TERM
588               IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
589             linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
590           pt = linger_timeout() * 1000
596       pollresult = RetryOnSignal(poller.poll, pt)
598       for fd, event in pollresult:
599         if event & select.POLLIN or event & select.POLLPRI:
600           data = fdmap[fd][1].read()
601           # no data from read signifies EOF (the same as POLLHUP)
603             poller.unregister(fd)
606           fdmap[fd][0].write(data)
607         if (event & select.POLLNVAL or event & select.POLLHUP or
608             event & select.POLLERR):
609           poller.unregister(fd)
612   if timeout is not None:
613     assert callable(poll_timeout)
615     # We have no I/O left but it might still run
616     if child.poll() is None:
617       _WaitForProcess(child, poll_timeout())
619     # Terminate if still alive after timeout
620     if child.poll() is None:
621       if linger_timeout is None:
622         logging.warning(msg_timeout)
623         timeout_action = _TIMEOUT_TERM
624         IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
627         lt = linger_timeout()
628       _WaitForProcess(child, lt)
629
630     # Okay, still alive after timeout and linger timeout? Kill it!
631     if child.poll() is None:
632       timeout_action = _TIMEOUT_KILL
633       logging.warning(msg_linger)
634       IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
639   status = child.wait()
640   return out, err, status, timeout_action
# Runs a command with stdout+stderr appended to a file (stderr merged via
# STDOUT); returns the child's exit status. The try/finally closing 'fh'
# is partly elided from this excerpt.
643 def _RunCmdFile(cmd, env, via_shell, output, cwd):
644   """Run a command and save its output to a file.
646   @type cmd: string or list
647   @param cmd: Command to run
649   @param env: The environment to use
650   @type via_shell: bool
651   @param via_shell: if we should run via the shell
653   @param output: the filename in which to save the output
655   @param cwd: the working directory for the program
657   @return: the exit status
# Append mode: repeated runs accumulate output in the same file.
660   fh = open(output, "a")
662     child = subprocess.Popen(cmd, shell=via_shell,
663                              stderr=subprocess.STDOUT,
665                              stdin=subprocess.PIPE,
666                              close_fds=True, env=env,
670     status = child.wait()
# Read-modify-write of the FD_CLOEXEC flag via fcntl; the if/else
# selecting between set and clear is elided between lines 685 and 692.
676 def SetCloseOnExecFlag(fd, enable):
677   """Sets or unsets the close-on-exec flag on a file descriptor.
680   @param fd: File descriptor
682   @param enable: Whether to set or unset it.
685   flags = fcntl.fcntl(fd, fcntl.F_GETFD)
688     flags |= fcntl.FD_CLOEXEC
690     flags &= ~fcntl.FD_CLOEXEC
692   fcntl.fcntl(fd, fcntl.F_SETFD, flags)
# Same read-modify-write pattern as SetCloseOnExecFlag, but for the
# O_NONBLOCK status flag (F_GETFL/F_SETFL).
695 def SetNonblockFlag(fd, enable):
696   """Sets or unsets the O_NONBLOCK flag on on a file descriptor.
699   @param fd: File descriptor
701   @param enable: Whether to set or unset it
704   flags = fcntl.fcntl(fd, fcntl.F_GETFL)
707     flags |= os.O_NONBLOCK
709     flags &= ~os.O_NONBLOCK
711   fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# Retries fn(*args, **kwargs) while it fails with EINTR; the enclosing
# 'while True' and the re-raise statements are elided in this excerpt.
714 def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.
720       return fn(*args, **kwargs)
721     except EnvironmentError, err:
722       if err.errno != errno.EINTR:
724     except (socket.error, select.error), err:
725       # In python 2.6 and above select.error is an IOError, so it's handled
726       # above, in 2.5 and below it's not, and it's handled here.
727       if not (err.args and err.args[0] == errno.EINTR):
# run-parts equivalent: executes every visible, executable, mask-matching
# file in dir_name, collecting (name, status, result) tuples; files that
# fail the checks are recorded as RUNPARTS_SKIP rather than run.
731 def RunParts(dir_name, env=None, reset_env=False):
732   """Run Scripts or programs in a directory
734   @type dir_name: string
735   @param dir_name: absolute path to a directory
737   @param env: The environment to use
738   @type reset_env: boolean
739   @param reset_env: whether to reset or keep the default os environment
740   @rtype: list of tuples
741   @return: list of (name, (one of RUNDIR_STATUS), RunResult)
747     dir_contents = ListVisibleFiles(dir_name)
# Unlistable directory: log and return whatever 'rr' holds (its
# initialization is elided from this excerpt).
749     logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
752   for relname in sorted(dir_contents):
753     fname = PathJoin(dir_name, relname)
754     if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
755             constants.EXT_PLUGIN_MASK.match(relname) is not None):
756       rr.append((relname, constants.RUNPARTS_SKIP, None))
759         result = RunCmd([fname], env=env, reset_env=reset_env)
# Broad catch is deliberate: one bad script must not abort the run.
760       except Exception, err: # pylint: disable-msg=W0703
761         rr.append((relname, constants.RUNPARTS_ERR, str(err)))
763         rr.append((relname, constants.RUNPARTS_RUN, result))
# os.unlink wrapper that swallows ENOENT (already gone) and EISDIR
# (it's a directory); any other OSError propagates (re-raise elided).
768 def RemoveFile(filename):
769   """Remove a file ignoring some errors.
771   Remove a file, ignoring non-existing ones or directories. Other
775   @param filename: the file to be removed
781     if err.errno not in (errno.ENOENT, errno.EISDIR):
# os.rmdir wrapper that ignores only ENOENT; a non-empty directory still
# raises (the rmdir call and re-raise are elided from this excerpt).
785 def RemoveDir(dirname):
786   """Remove an empty directory.
788   Remove a directory, ignoring non-existing ones.
789   Other errors are passed. This includes the case,
790   where the directory is not empty, so it can't be removed.
793   @param dirname: the empty directory to be removed
799     if err.errno != errno.ENOENT:
# os.rename wrapper: on ENOENT with mkdir=True, creates the target's
# parent directory (mkdir_mode) and retries the rename once.
803 def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
807   @param old: Original path
811   @param mkdir: Whether to create target directory if it doesn't exist
812   @type mkdir_mode: int
813   @param mkdir_mode: Mode for newly created directories
817     return os.rename(old, new)
819     # In at least one use case of this function, the job queue, directory
820     # creation is very rare. Checking for the directory before renaming is not
822     if mkdir and err.errno == errno.ENOENT:
823       # Create directory and try again
824       Makedirs(os.path.dirname(new), mode=mkdir_mode)
826       return os.rename(old, new)
# os.makedirs wrapper that tolerates EEXIST-for-an-existing-path, for
# compatibility with pre-2.5 behaviour (re-raise line elided here).
831 def Makedirs(path, mode=0750):
832   """Super-mkdir; create a leaf directory and all intermediate ones.
834   This is a wrapper around C{os.makedirs} adding error handling not implemented
839     os.makedirs(path, mode)
841     # Ignore EEXIST. This is only handled in os.makedirs as included in
842     # Python 2.5 and above.
843     if err.errno != errno.EEXIST or not os.path.exists(path):
# Post-fork hygiene: clears tempfile's private name generator so the
# child reseeds it, avoiding identical temp names in parent and child.
# Deliberately pokes tempfile internals (hence the pylint disable).
847 def ResetTempfileModule():
848   """Resets the random name generator of the tempfile module.
850   This function should be called after C{os.fork} in the child process to
851   ensure it creates a newly seeded random generator. Otherwise it would
852   generate the same random parts as the parent process. If several processes
853   race for the creation of a temporary file, this could lead to one not getting
857   # pylint: disable-msg=W0212
858   if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
859     tempfile._once_lock.acquire()
861       # Reset random name generator
862       tempfile._name_sequence = None
864       tempfile._once_lock.release()
# Internals changed between Python versions: warn loudly instead of
# crashing if the expected attributes are missing.
866     logging.critical("The tempfile module misses at least one of the"
867                      " '_once_lock' and '_name_sequence' attributes")
# SHA1 hex digest of a regular file's contents, or None for missing /
# non-regular paths (the read loop feeding 'fp' is elided here).
870 def _FingerprintFile(filename):
871   """Compute the fingerprint of a file.
873   If the file does not exist, a None will be returned
877   @param filename: the filename to checksum
879   @return: the hex digest of the sha checksum of the contents
883   if not (os.path.exists(filename) and os.path.isfile(filename)):
888   fp = compat.sha1_hash()
896   return fp.hexdigest()
# Maps filename -> SHA1 fingerprint for each existing regular file;
# per the docstring, entries with a None checksum are excluded (the
# guard between lines 912 and 914 is elided from this excerpt).
899 def FingerprintFiles(files):
900   """Compute fingerprints for a list of files.
903   @param files: the list of filename to fingerprint
905   @return: a dictionary filename: fingerprint, holding only
911   for filename in files:
912     cksum = _FingerprintFile(filename)
914       ret[filename] = cksum
# Validates and coerces a dict's values according to a key->type map
# (VTYPE_STRING/MAYBE_STRING/BOOL/SIZE/INT), raising
# TypeEnforcementError on mismatch. Mutates 'target' in place for
# coercible types (bool strings, sizes, ints).
919 def ForceDictType(target, key_types, allowed_values=None):
920   """Force the values of a dict to have certain types.
923   @param target: the dict to update
924   @type key_types: dict
925   @param key_types: dict mapping target dict keys to types
926       in constants.ENFORCEABLE_TYPES
927   @type allowed_values: list
928   @keyword allowed_values: list of specially allowed values
931   if allowed_values is None:
934   if not isinstance(target, dict):
935     msg = "Expected dictionary, got '%s'" % target
936     raise errors.TypeEnforcementError(msg)
939     if key not in key_types:
940       msg = "Unknown key '%s'" % key
941       raise errors.TypeEnforcementError(msg)
# Whitelisted values bypass type enforcement entirely.
943     if target[key] in allowed_values:
946     ktype = key_types[key]
947     if ktype not in constants.ENFORCEABLE_TYPES:
948       msg = "'%s' has non-enforceable type %s" % (key, ktype)
949       raise errors.ProgrammerError(msg)
951     if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
952       if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
954       elif not isinstance(target[key], basestring):
# A falsy bool is tolerated here (handling elided); anything else
# non-string is rejected.
955         if isinstance(target[key], bool) and not target[key]:
958           msg = "'%s' (value %s) is not a valid string" % (key, target[key])
959           raise errors.TypeEnforcementError(msg)
960     elif ktype == constants.VTYPE_BOOL:
# Accept the canonical true/false string constants, case-insensitively.
961       if isinstance(target[key], basestring) and target[key]:
962         if target[key].lower() == constants.VALUE_FALSE:
964         elif target[key].lower() == constants.VALUE_TRUE:
967           msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
968           raise errors.TypeEnforcementError(msg)
973     elif ktype == constants.VTYPE_SIZE:
975         target[key] = ParseUnit(target[key])
976       except errors.UnitParseError, err:
977         msg = "'%s' (value %s) is not a valid size. error: %s" % \
978               (key, target[key], err)
979         raise errors.TypeEnforcementError(msg)
980     elif ktype == constants.VTYPE_INT:
982         target[key] = int(target[key])
983       except (ValueError, TypeError):
984         msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
985         raise errors.TypeEnforcementError(msg)
# Builds the /proc/<pid>/status path for a given process ID.
988 def _GetProcStatusPath(pid):
989   """Returns the path for a PID's proc status file.
992   @param pid: Process ID
996   return "/proc/%d/status" % pid
# Liveness check via os.stat on /proc/<pid>/status, retried briefly
# because /proc can behave oddly on SMP systems; EINVAL triggers a
# retry, ENOENT/ENOTDIR mean "not alive".
# NOTE(review): the inner _TryStat helper's stat call and the early
# return for pid <= 0 are among the elided lines.
999 def IsProcessAlive(pid):
1000   """Check if a given pid exists on the system.
1002   @note: zombie status is not handled, so zombie processes
1003       will be returned as alive
1005   @param pid: the process ID to check
1007   @return: True if the process exists
1014     except EnvironmentError, err:
1015       if err.errno in (errno.ENOENT, errno.ENOTDIR):
1017       elif err.errno == errno.EINVAL:
1018         raise RetryAgain(err)
1021   assert isinstance(pid, int), "pid must be an integer"
1025   # /proc in a multiprocessor environment can have strange behaviors.
1026   # Retry the os.stat a few times until we get a good result.
1028     return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
1029                  args=[_GetProcStatusPath(pid)])
1030   except RetryTimeout, err:
# Decodes a hex-rendered sigset_t (as printed by the kernel in
# /proc/<pid>/status) back into a set of signal numbers. Processes the
# hex string right-to-left; each nibble contributes up to four signals.
# NOTE(review): the per-nibble int conversion and the bit tests guarding
# each result.add() are elided; 'signum' advances by 4 per nibble on a
# line not shown here.
1034 def _ParseSigsetT(sigset):
1035   """Parse a rendered sigset_t value.
1037   This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1040   @type sigset: string
1041   @param sigset: Rendered signal set from /proc/$pid/status
1043   @return: Set of all enabled signal numbers
1049   for ch in reversed(sigset):
1052     # The following could be done in a loop, but it's easier to read and
1053     # understand in the unrolled form
1055       result.add(signum + 1)
1057       result.add(signum + 2)
1059       result.add(signum + 3)
1061       result.add(signum + 4)
# Extracts one "Field:\tvalue" entry from a /proc/<pid>/status dump,
# returning the stripped value (the loop's continue and the not-found
# return are elided from this excerpt).
1068 def _GetProcStatusField(pstatus, field):
1069   """Retrieves a field from the contents of a proc status file.
1071   @type pstatus: string
1072   @param pstatus: Contents of /proc/$pid/status
1074   @param field: Name of field whose value should be returned
1078   for line in pstatus.splitlines():
1079     parts = line.split(":", 1)
1081     if len(parts) < 2 or parts[0] != field:
1084     return parts[1].strip()
# Checks whether a process has installed a handler for 'signum' by
# parsing the SigCgt (caught-signals) mask from its proc status file.
1089 def IsProcessHandlingSignal(pid, signum, status_path=None):
1090   """Checks whether a process is handling a signal.
1093   @param pid: Process ID
1095   @param signum: Signal number
1099   if status_path is None:
1100     status_path = _GetProcStatusPath(pid)
1103     proc_status = ReadFile(status_path)
1104   except EnvironmentError, err:
# Process vanished (or never existed): treat as "not handling".
1105     # In at least one case, reading /proc/$pid/status failed with ESRCH.
1106     if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
1110   sigcgt = _GetProcStatusField(proc_status, "SigCgt")
1112     raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
1114   # Now check whether signal is handled
1115   return signum in _ParseSigsetT(sigcgt)
# Reads and int-parses a PID file; logging distinguishes a missing file
# (silent) from other read errors. The int() conversion and the
# fallback return value are elided from this excerpt.
1118 def ReadPidFile(pidfile):
1119   """Read a pid from a file.
1121   @type pidfile: string
1122   @param pidfile: path to the file containing the pid
1124   @return: The process id, if the file exists and contains a valid PID,
1129     raw_data = ReadOneLineFile(pidfile)
1130   except EnvironmentError, err:
1131     if err.errno != errno.ENOENT:
1132       logging.exception("Can't read pid file")
1137   except (TypeError, ValueError), err:
1138     logging.info("Can't parse pid file contents", exc_info=True)
# Companion to StartDaemon's PID-file locking: if the file is locked
# (LockError), the daemon is running and its PID is read and returned;
# an unlocked or missing file yields None (those returns are partly
# elided, as is the fd cleanup).
1144 def ReadLockedPidFile(path):
1145   """Reads a locked PID file.
1147   This can be used together with L{StartDaemon}.
1150   @param path: Path to PID file
1151   @return: PID as integer or, if file was unlocked or couldn't be opened, None
1155     fd = os.open(path, os.O_RDONLY)
1156   except EnvironmentError, err:
1157     if err.errno == errno.ENOENT:
1158       # PID file doesn't exist
1164     # Try to acquire lock
1166     except errors.LockError:
1167       # Couldn't lock, daemon is running
1168       return int(os.read(fd, 100))
# Prefix-component matching of a hostname against a candidate list:
# exact match wins immediately; otherwise a regex ^key(\..*)?$ collects
# candidates, and the match is returned only if unambiguous. In
# case-insensitive mode an exact-but-for-case match takes priority.
1175 def MatchNameComponent(key, name_list, case_sensitive=True):
1176   """Try to match a name against a list.
1178   This function will try to match a name like test1 against a list
1179   like C{['test1.example.com', 'test2.example.com', ...]}. Against
1180   this list, I{'test1'} as well as I{'test1.example'} will match, but
1181   not I{'test1.ex'}. A multiple match will be considered as no match
1182   at all (e.g. I{'test1'} against C{['test1.example.com',
1183   'test1.example.org']}), except when the key fully matches an entry
1184   (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
1187   @param key: the name to be searched
1188   @type name_list: list
1189   @param name_list: the list of strings against which to search the key
1190   @type case_sensitive: boolean
1191   @param case_sensitive: whether to provide a case-sensitive match
1194   @return: None if there is no match I{or} if there are multiple matches,
1195       otherwise the element from the list which matches
1198   if key in name_list:
1202   if not case_sensitive:
1203     re_flags |= re.IGNORECASE
# NOTE(review): key is upper-cased on an elided line before the loop —
# the comparison below relies on it; confirm against the full file.
1205   mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
1208   for name in name_list:
1209     if mo.match(name) is not None:
1210       names_filtered.append(name)
1211       if not case_sensitive and key == name.upper():
1212         string_matches.append(name)
1214   if len(string_matches) == 1:
1215     return string_matches[0]
1216   if len(names_filtered) == 1:
1217     return names_filtered[0]
# Accepts either a numeric port (0..65535) or a symbolic service name
# matching _VALID_SERVICE_NAME_RE; raises OpPrereqError otherwise.
# The int() conversion attempt is on an elided line before 1230.
1221 def ValidateServiceName(name):
1222   """Validate the given service name.
1224   @type name: number or string
1225   @param name: Service name or port specification
1230   except (ValueError, TypeError):
1231     # Non-numeric service name
1232     valid = _VALID_SERVICE_NAME_RE.match(name)
1234     # Numeric port (protocols other than TCP or UDP might need adjustments
1236     valid = (numport >= 0 and numport < (1 << 16))
1239     raise errors.OpPrereqError("Invalid service name '%s'" % name,
# Shells out to LVM's 'vgs' and parses "name size" pairs into a dict of
# VG name -> size in MiB (int); bad lines are logged and skipped.
# The early-return on RunCmd failure and the dict insert are elided.
1245 def ListVolumeGroups():
1246   """List volume groups and their size
1250     Dictionary with keys volume name and values
1251     the size of the volume
1254   command = "vgs --noheadings --units m --nosuffix -o name,size"
1255   result = RunCmd(command)
1260   for line in result.stdout.splitlines():
1262       name, size = line.split()
1263       size = int(float(size))
1264     except (IndexError, ValueError), err:
1265       logging.error("Invalid output from vgs (%s): %s", err, line)
# A Linux bridge exposes a /sys/class/net/<name>/bridge directory;
# its presence is used as the existence test.
1273 def BridgeExists(bridge):
1274   """Check whether the given bridge exists in the system
1277   @param bridge: the bridge name to check
1279   @return: True if it does
1282   return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
# NiceSort helper: all-digit strings become ints (return lines elided);
# everything else is left as-is for lexicographic comparison.
1285 def _NiceSortTryInt(val):
1286   """Attempts to convert a string to an integer.
1289   if val and _SORTER_DIGIT.match(val):
# Splits 'value' via _SORTER_RE into up-to-eight digit/non-digit groups
# plus a remainder and converts digit groups to int, producing a mixed
# list usable as a natural-sort key.
1295 def _NiceSortKey(value):
1296   """Extract key for sorting.
1299   return [_NiceSortTryInt(grp)
1300           for grp in _SORTER_RE.match(value).groups()]
# Natural sort: 'a2' before 'a10'. With a 'key' callable, the natural
# key is computed on key(value) instead of the value itself.
1303 def NiceSort(values, key=None):
1304   """Sort a list of strings based on digit and non-digit groupings.
1306   Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1307   will sort the list in the logical order C{['a1', 'a2', 'a10',
1310   The sort algorithm breaks each name in groups of either only-digits
1311   or no-digits. Only the first eight such groups are considered, and
1312   after that we just use what's left of the string.
1315   @param values: the names to be sorted
1316   @type key: callable or None
1317   @param key: function of one argument to extract a comparison key from each
1318       list element, must return string
1320   @return: a copy of the name list sorted with our algorithm
1324     keyfunc = _NiceSortKey
1326     keyfunc = lambda value: _NiceSortKey(key(value))
1328   return sorted(values, key=keyfunc)
# Applies fn(val), falling back to the original value on ValueError or
# TypeError (the fn call and both returns are on elided lines).
1331 def TryConvert(fn, val):
1332   """Try to convert a value ignoring errors.
1334   This function tries to apply function I{fn} to I{val}. If no
1335   C{ValueError} or C{TypeError} exceptions are raised, it will return
1336   the result, else it will return the original value. Any other
1337   exceptions are propagated to the caller.
1340   @param fn: function to apply to the value
1341   @param val: the value to be converted
1342   @return: The converted value if the conversion was successful,
1343       otherwise the original value.
1348   except (ValueError, TypeError):
# Whitelist check (deliberately restrictive) that a word contains no
# shell metacharacters, via _SHELLPARAM_REGEX.
1353 def IsValidShellParam(word):
1354   """Verifies is the given word is safe from the shell's p.o.v.
1356   This means that we can pass this to a command via the shell and be
1357   sure that it doesn't alter the command line and is passed as such to
1360   Note that we are overly restrictive here, in order to be on the safe
1364   @param word: the word to check
1366   @return: True if the word is 'safe'
1369   return bool(_SHELLPARAM_REGEX.match(word))
# %-interpolates 'args' into 'template' only after each arg passes
# IsValidShellParam, guarding against shell injection (the loop header
# over args is on an elided line).
1372 def BuildShellCmd(template, *args):
1373   """Build a safe shell command line from the given arguments.
1375   This function will check all arguments in the args list so that they
1376   are valid shell parameters (i.e. they don't contain shell
1377   metacharacters). If everything is ok, it will return the result of
1381   @param template: the string holding the template for the
1384   @return: the expanded command line
1388     if not IsValidShellParam(word):
1389       raise errors.ProgrammerError("Shell argument '%s' contains"
1390                                    " invalid characters" % word)
1391   return template % args
# Formats a MiB count as 'M'/'G'/'T' with optional auto-scaling ('h');
# the suffix assignments per branch are on elided lines.
1394 def FormatUnit(value, units):
1395   """Formats an incoming number of MiB with the appropriate unit.
1398   @param value: integer representing the value in MiB (1048576)
1400   @param units: the type of formatting we should do:
1401       - 'h' for automatic scaling
1406   @return: the formatted value (with suffix)
1409   if units not in ('m', 'g', 't', 'h'):
1410     raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1414   if units == 'm' or (units == 'h' and value < 1024):
# MiB: integer rendering; GiB/TiB: one decimal place.
1417     return "%d%s" % (round(value, 0), suffix)
1419   elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1422     return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1427     return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
# Parses "NUMBER[.NUMBER][unit]" into an int MiB count; unitless input
# defaults to MiB (branch elided); result is rounded up and then padded
# up to the next multiple of 4 MiB.
1430 def ParseUnit(input_string):
1431   """Tries to extract number and scale from the given string.
1433   Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1434   [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1435   is always an int in MiB.
1438   m = _PARSEUNIT_REGEX.match(str(input_string))
1440     raise errors.UnitParseError("Invalid format")
1442   value = float(m.groups()[0])
1444   unit = m.groups()[1]
1446     lcunit = unit.lower()
1450   if lcunit in ('m', 'mb', 'mib'):
1451     # Value already in MiB
1454   elif lcunit in ('g', 'gb', 'gib'):
1457   elif lcunit in ('t', 'tb', 'tib'):
1458     value *= 1024 * 1024
1461     raise errors.UnitParseError("Unknown unit: %s" % unit)
1463   # Make sure we round up
1464   if int(value) < value:
1467   # Round up to the next multiple of 4
1470     value += 4 - value % 4
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs

  """
  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    if len(boundaries) > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    # A single ID has lower == higher, yielding exactly one element
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list
def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  The key is compared field-wise against each existing line, so
  whitespace-only differences do not create duplicate entries.

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  # A string argument is a path; 'a+' keeps existing content and allows
  # both scanning and appending.
  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  # NOTE(review): this copy appears truncated -- the non-string branch
  # (reusing the passed handle), the loop over existing lines, and the
  # try/finally closing the handle are missing. Confirm against upstream.
  # Ignore whitespace changes
  if line.split() == key_fields:
  nl = line.endswith('\n')
  f.write(key.rstrip('\r\n'))
  # NOTE(review): trailing-newline handling and f.close() appear elided.
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  # Filtered content goes to a temp file in the same directory so the
  # final os.rename() below replaces the original atomically.
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  out = os.fdopen(fd, 'w')
  f = open(file_name, 'r')
  # NOTE(review): this copy appears truncated -- the loop copying every
  # non-matching line to 'out' and the try/finally closing both handles
  # are missing. Confirm against upstream.
  # Ignore whitespace changes while comparing lines
  if line.split() != key_fields:
  os.rename(tmpname, file_name)
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    # NOTE(review): a try/finally closing 'out' appears elided in this copy.
    for line in ReadFile(file_name).splitlines(True):
      fields = line.split()
      if fields and not fields[0].startswith("#") and ip == fields[0]:
      # NOTE(review): the body of this condition (skipping the old entry
      # for this IP) and the pass-through write of other lines appear
      # elided. Confirm against upstream.
    # Append the new/replacement entry: IP, canonical name, aliases
    out.write("%s\t%s" % (ip, hostname))
    # NOTE(review): an 'if aliases:' guard likely precedes this write --
    # confirm against upstream.
    out.write(" %s" % " ".join(aliases))
    # NOTE(review): trailing newline and flush appear elided.

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  Adds the host under both its full name and its short (first
  component) name.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    # NOTE(review): a try/finally closing 'out' appears elided in this copy.
    for line in ReadFile(file_name).splitlines(True):
      fields = line.split()
      if len(fields) > 1 and not fields[0].startswith("#"):
        # NOTE(review): elided: 'names = fields[1:]' (the name columns
        # after the IP) -- confirm against upstream.
        if hostname in names:
          while hostname in names:
            names.remove(hostname)
          # NOTE(review): per the docstring, the line is only rewritten
          # when names remain ('if names:'); lines without names are
          # dropped. The guard appears elided here.
          out.write("%s %s\n" % (fields[0], " ".join(names)))
          # NOTE(review): elided: 'continue' and pass-through write of
          # untouched lines, plus flushing of 'out'.

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  Removes both the full hostname and its short (first-component) form
  from L{constants.ETC_HOSTS}.

  @type hostname: str
  @param hostname: hostname whose full and short name will be removed
      from L{constants.ETC_HOSTS}

  """
  for name in (hostname, hostname.split(".")[0]):
    RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat them
  as separators. Uses the local timezone.

  @rtype: str

  """
  # Explicit localtime() is equivalent to strftime's default
  return time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
def CreateBackup(file_name):
  """Creates a backup of a file.

  The backup is created in the same directory, named from the original
  basename plus a timestamp, via a unique mkstemp file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
    # NOTE(review): the argument continuation ('file_name)') appears
    # elided in this copy.

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  # NOTE(review): the try/finally blocks closing 'fsrc'/'fdst' and the
  # 'return backup_name' appear elided in this copy -- confirm upstream.
  (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
  fdst = os.fdopen(fd, 'wb')
  logging.debug("Backing up %s at %s", file_name, backup_name)
  shutil.copyfileobj(fsrc, fdst)
def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  # Safe characters need no quoting at all
  if re.match('^[-.,=:/_+@A-Za-z0-9]+$', value):
    return value
  # Wrap in single quotes; an embedded single quote becomes '\''
  # (close quote, escaped quote, reopen quote).
  return "'%s'" % value.replace("'", "'\\''")
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  quoted = [ShellQuote(arg) for arg in args]
  return ' '.join(quoted)
1751 """Helper class to write scripts with indentation.
1756 def __init__(self, fh):
1757 """Initializes this class.
1763 def IncIndent(self):
1764 """Increase indentation level by 1.
1769 def DecIndent(self):
1770 """Decrease indentation level by 1.
1773 assert self._indent > 0
1776 def Write(self, txt, *args):
1777 """Write line to output file.
1780 assert self._indent >= 0
1782 self._fh.write(self._indent * self.INDENT_STR)
1785 self._fh.write(txt % args)
1789 self._fh.write("\n")
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  return [name for name in os.listdir(path) if not name.startswith(".")]
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  # Pick the pwd lookup matching the argument type
  if isinstance(user, basestring):
    lookup = pwd.getpwnam
  elif isinstance(user, (int, long)):
    lookup = pwd.getpwuid
  else:
    raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                 type(user))
  try:
    entry = lookup(user)
  except KeyError:
    # Unknown user/uid: fall back to the caller-supplied default
    return default
  return entry.pw_dir
def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  # The kernel generates a fresh UUID on every read of this file
  uuid_str = ReadFile(_RANDOM_UUID_FILE, size=128)
  return uuid_str.rstrip("\n")
def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  raw = os.urandom(numbytes)
  return raw.encode('hex')
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: when a directory cannot be created or
      its permissions cannot be fixed, or the path is not a directory

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError as err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
      # Directory already exists: enforce the requested permissions
      try:
        os.chmod(dir_name, dir_mode)
      except EnvironmentError as err:
        raise errors.GenericError("Cannot change directory permissions on"
                                  " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  handle = open(file_name, "r")
  try:
    data = handle.read(size)
  finally:
    handle.close()
  return data
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content
  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor
  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of fn/data must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  # atime/mtime must be given together (os.utime needs both)
  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
    # NOTE(review): the message continuation and closing parenthesis
    # appear elided in this copy -- confirm against upstream.

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write into a temp file in the same directory so the final rename
  # onto file_name is atomic (same filesystem).
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  # NOTE(review): this copy appears heavily truncated below: the
  # try/finally implementing that removal guarantee, the 'if mode:'
  # guard, the actual content writing (prewrite(fd), os.write/fn(fd),
  # postwrite(fd)), fsync, the dry_run check around the rename, and the
  # close/return handling are all missing; the remaining lines are
  # fragments. Confirm against upstream before relying on them.
  if uid != -1 or gid != -1:
    os.chown(new_name, uid, gid)
  os.chmod(new_name, mode)
  if callable(prewrite):
  if data is not None:
  if callable(postwrite):
  if atime is not None and mtime is not None:
    os.utime(new_name, (atime, mtime))
  os.rename(new_name, file_name)
  RemoveFile(new_name)
def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Either the path to the file or the fd must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)

  """
  if [path, fd].count(None) != 1:
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  # stat by path or by descriptor, whichever was supplied
  if fd is None:
    st = os.stat(path)
  else:
    st = os.fstat(fd)

  return (st.st_dev, st.st_ino, st.st_mtime)
def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  Differences in the inode/device are not accepted, but an older
  timestamp for fi_disk is accepted.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (ours_dev, ours_ino, ours_mtime) = fi_ours

  if (disk_dev, disk_ino) != (ours_dev, ours_ino):
    return False
  return disk_mtime <= ours_mtime
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}

  """
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  # NOTE(review): a try/finally that locks the descriptor (LockFile) and
  # closes it appears elided in this copy -- confirm against upstream.
  if file_id is not None:
    # Refuse to overwrite if the on-disk file changed since file_id was
    # taken (lost-update protection).
    disk_id = GetFileID(fd=fd)
    if not VerifyFileID(disk_id, file_id):
      raise errors.LockError("Cannot overwrite file %s, it has been modified"
                             " since last written" % file_name)
  return WriteFile(file_name, **kwargs)
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  """
  file_lines = ReadFile(file_name).splitlines()
  full_lines = [line for line in file_lines if line]
  if not file_lines or not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  if strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @param seq: the sequence to be analyzed.
  @param base: use this value as the base index of the sequence
  @return: the first non-used index in the sequence, or None when the
      sequence is fully packed

  """
  expected = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > expected:
      # Gap found: 'expected' is the first unused value
      return expected
    expected += 1
  return None
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  # Error-ish conditions are always of interest, in addition to 'event'
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    io_events = poller.poll(timeout)
  except select.error as err:
    if err.args[0] != errno.EINTR:
      raise
    # Interrupted: report as "nothing happened"
    io_events = []

  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  return None
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      # Tell the retry loop nothing happened yet
      raise RetryAgain()
    return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is None:
    # No timeout: keep polling until a condition actually occurs,
    # restarting after every interruption
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
    return result

  # With a timeout, let the generic retry loop absorb interruptions so
  # the total wait never exceeds the requested timeout
  retrywaiter = FdConditionWaiterHelper(timeout)
  try:
    return Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
  except RetryTimeout:
    return None
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  result = []
  for item in seq:
    if item not in seen:
      seen.add(item)
      result.append(item)
  return result
def FindDuplicates(seq):
  """Identifies duplicates in a list.

  Does not preserve element order.

  @param seq: Sequence with source elements
  @rtype: list
  @return: List of duplicate elements from seq

  """
  seen = set()
  duplicates = set()
  for item in seq:
    if item in seen:
      duplicates.add(item)
    else:
      seen.add(item)
  return list(duplicates)
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  if not re.match("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", mac, re.I):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)
  return mac.lower()
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: boolean
  @return: False for negative value, True otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None
2270 def _CloseFDNoErr(fd, retries=5):
2271 """Close a file descriptor ignoring errors.
2274 @param fd: the file descriptor
2276 @param retries: how many retries to make, in case we get any
2277 other error than EBADF
2282 except OSError, err:
2283 if err.errno != errno.EBADF:
2285 _CloseFDNoErr(fd, retries - 1)
2286 # else either it's closed already or we're out of retries, so we
2287 # ignore this and go on
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  MAXFD = 1024
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        MAXFD = 1024
    except OSError:
      MAXFD = 1024

  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @raises errors.NoCtypesError: if ctypes module is not found

  """
  # NOTE(review): the guard for a missing ctypes module (per the
  # docstring, something like 'if _ctypes is None:') appears elided
  # before this raise -- confirm against upstream.
  raise errors.NoCtypesError()

  # NOTE(review): a try/except around loading libc appears elided here;
  # the logging.error below looks like its failure branch.
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  logging.error("Cannot set memory lock, ctypes cannot load libc")

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # mlockall returned non-zero: report the C errno value
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    # NOTE(review): an early 'return' after this error log appears elided.

  logging.debug("Memory lock set")
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # NOTE(review): this copy appears truncated -- the first os.fork()
  # assigning 'pid', the setsid() call, the else-branches of both forks
  # and the final return are missing; the remaining lines are fragments.
  if (pid == 0): # The first child.
    pid = os.fork() # Fork a second child.
    if (pid == 0): # The second child.
      _CloseFDNoErr(rpipe)
    # exit() or _exit()? See below.
    os._exit(0) # Exit parent (the first child) of the second child.
  _CloseFDNoErr(wpipe)
  # Wait for daemon to be started (or an error message to
  # arrive) and read up to 100 KB as an error message
  errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
  sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
  # NOTE(review): computation of 'rcode' from errormsg appears elided.
  os._exit(rcode) # Exit parent of the first child.

  SetupDaemonFDs(logfile, None)
  # NOTE(review): the grand-child's return value handling appears elided.
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile_name = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile_name)
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the daemon is running (or was started), False on
      failure

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False
  return True
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the daemon was stopped, False on failure

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if result.failed:
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False
  return True
def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: str
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0o600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    # Best-effort only: failure to remove is deliberately ignored
    pass
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
  # NOTE(review): the signature's final line (a 'waitpid' keyword
  # argument with its default, per the docstring and the calls below)
  # appears elided in this copy -- confirm against upstream.
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
      a SIGKILL will be sent. If not positive, no such checking
      will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      # WNOHANG: reap the child without blocking if it already exited
      # NOTE(review): a try/except around this waitpid appears elided.
      os.waitpid(pid, os.WNOHANG)

  # NOTE(review): the guard for this raise (rejecting pid <= 0) appears
  # elided in this copy.
  # kill with pid=0 == suicide
  raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    # NOTE(review): early 'return' appears elided here.

  _helper(pid, signal_, waitpid)

  # NOTE(review): the early return for non-positive timeouts appears
  # elided here.

  def _CheckProcess():
    if not IsProcessAlive(pid):
      # NOTE(review): 'return' appears elided here.

    (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    # NOTE(review): the RetryAgain logic after waitpid appears elided.

  # Wait up to $timeout seconds
  # NOTE(review): the 'try:' matching the except below appears elided.
  Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name

  return None
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @param vglist: dictionary of volume group names and their size
  @param vgname: the volume group we should check
  @param minsize: the minimum size we accept
  @rtype: str or None
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_micro = int(value * 1000000)
  (seconds, microseconds) = divmod(total_micro, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  # 1 microsecond = 1e-6 seconds
  return float(seconds) + (float(microseconds) * 0.000001)
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """

  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    # Kept open for the whole handler lifetime, used only on errors
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  # NOTE(review): an 'if multithreaded:' guard appears elided before the
  # two thread-name additions below -- confirm against upstream.
  fmt += "/%(threadName)s"
  sft += " (%(threadName)s)"
  # NOTE(review): an 'if debug:' guard appears elided before the
  # module/lineno addition below.
  fmt += " %(module)s:%(lineno)s"
  # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    # NOTE(review): a handler.close() call appears elided here.
    root_logger.removeHandler(handler)

  # NOTE(review): an 'if stderr_logging:' guard plus an 'if debug:'
  # choice between the two setLevel calls appears elided below.
  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  stderr_handler.setLevel(logging.NOTSET)
  stderr_handler.setLevel(logging.CRITICAL)
  root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
    # NOTE(review): the 'facility' argument continuation appears elided.
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    # NOTE(review): the 'try:' and the 'if console_logging:'/'else:'
    # choosing between the two handler constructors appear elided below.
    logfile_handler = LogFileHandler(logfile)
    logfile_handler = logging.FileHandler(logfile)
    logfile_handler.setFormatter(formatter)
    # NOTE(review): an 'if debug:'/'else:' around the two setLevel calls
    # appears elided.
    logfile_handler.setLevel(logging.DEBUG)
    logfile_handler.setLevel(logging.INFO)
    root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      # we need to re-raise the exception
      # NOTE(review): the 'else: raise' appears elided here.
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  @type path: string
  @param path: the path to check
  @rtype: boolean
  @return: True if the path is both absolute and normalized

  """
  # a path that survives normalization unchanged has no "..", "." or
  # duplicate separators in it
  return os.path.isabs(path) and path == os.path.normpath(path)
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    # seek to the end, then back up (at most) 4KB and read the tail
    fd.seek(0, 2)
    start = max(0, fd.tell() - 4096)
    fd.seek(start, 0)
    tail_data = fd.read()
  finally:
    fd.close()

  return tail_data.splitlines()[-lines:]
def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp
  @return: Seconds since the Epoch (1970-01-01 00:00:00 UTC)

  """
  match = _ASN1_TIME_REGEX.match(value)
  if match:
    # timestamp with an explicit UTC offset
    plain_time = match.group(1)
    offset_hours = int(match.group(2))
    offset_minutes = int(match.group(3))
    utcoffset = (60 * offset_hours) + offset_minutes
  else:
    # without an offset the timestamp must be in Zulu (UTC) time
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    plain_time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(plain_time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  def _GetTimestamp(attr_name):
    # The get_notBefore and get_notAfter functions are only supported in
    # pyOpenSSL 0.7 and above; older versions simply yield None here
    try:
      accessor = getattr(cert, attr_name)
    except AttributeError:
      return None

    asn1_value = accessor()
    if asn1_value is None:
      return None

    return _ParseAsn1Generalizedtime(asn1_value)

  return (_GetTimestamp("get_notBefore"), _GetTimestamp("get_notAfter"))
2902 def _VerifyCertificateInner(expired, not_before, not_after, now,
2903 warn_days, error_days):
2904 """Verifies certificate validity.
2907 @param expired: Whether pyOpenSSL considers the certificate as expired
2908 @type not_before: number or None
2909 @param not_before: Unix timestamp before which certificate is not valid
2910 @type not_after: number or None
2911 @param not_after: Unix timestamp after which certificate is invalid
2913 @param now: Current time as Unix timestamp
2914 @type warn_days: number or None
2915 @param warn_days: How many days before expiration a warning should be reported
2916 @type error_days: number or None
2917 @param error_days: How many days before expiration an error should be reported
2921 msg = "Certificate is expired"
2923 if not_before is not None and not_after is not None:
2924 msg += (" (valid from %s to %s)" %
2925 (FormatTime(not_before), FormatTime(not_after)))
2926 elif not_before is not None:
2927 msg += " (valid from %s)" % FormatTime(not_before)
2928 elif not_after is not None:
2929 msg += " (valid until %s)" % FormatTime(not_after)
2931 return (CERT_ERROR, msg)
2933 elif not_before is not None and not_before > now:
2934 return (CERT_WARNING,
2935 "Certificate not yet valid (valid from %s)" %
2936 FormatTime(not_before))
2938 elif not_after is not None:
2939 remaining_days = int((not_after - now) / (24 * 3600))
2941 msg = "Certificate expires in about %d days" % remaining_days
2943 if error_days is not None and remaining_days <= error_days:
2944 return (CERT_ERROR, msg)
2946 if warn_days is not None and remaining_days <= warn_days:
2947 return (CERT_WARNING, msg)
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  validity = GetX509CertValidity(cert)
  now = time.time()

  return _VerifyCertificateInner(cert.has_expired(), validity[0], validity[1],
                                 now, warn_days, error_days)
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  signature = Sha1Hmac(key, cert_pem, salt=salt)
  header = "%s: %s/%s" % (constants.X509_CERT_SIGNATURE_HEADER, salt,
                          signature)

  return header + "\n\n" + cert_pem
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: PEM data with a signature header in front
  @rtype: tuple; (string, string)
  @return: salt and signature
  @raise errors.GenericError: if no signature header is found

  """
  # Only scan the header lines before the PEM payload starts
  for raw_line in cert_pem.splitlines():
    if raw_line.startswith("---"):
      break

    m = X509_SIGNATURE.match(raw_line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: if the signature does not verify

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format before checking the HMAC
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to digest
  @type salt: string or None
  @param salt: Optional salt prepended to the text
  @rtype: string
  @return: Hexadecimal digest

  """
  # the salt, when given, simply prefixes the digested text
  if salt:
    salted_text = salt + text
  else:
    salted_text = text

  return hmac.new(key, salted_text, compat.sha1).hexdigest()
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text whose digest is verified
  @type digest: string
  @param digest: Expected digest
  @type salt: string or None
  @param salt: Optional salt used when the digest was computed
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # digests are hexadecimal strings, so compare case-insensitively
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  # build the escaped result character by character, repr-style
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escape in order to be an element of the
  elements. The escaping rules are (assuming coma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \\, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param text: the separator
  @rtype: string
  @return: a list of strings

  """
  # first pass: split by sep with no escaping considered at all
  parts = text.split(sep)

  # second pass: an element ending in an odd number of backslashes had
  # its trailing separator escaped, so glue the following element back on
  merged = []
  idx = 0
  while idx < len(parts):
    piece = parts[idx]
    idx += 1
    if piece.endswith("\\"):
      trailing = len(piece) - len(piece.rstrip("\\"))
      if trailing % 2 == 1:
        # the backslashes all remain here; they are reduced below
        piece = piece + sep + parts[idx]
        idx += 1
    merged.append(piece)

  # finally, replace backslash-something with just something
  return [re.sub(r"\\(.)", r"\1", item) for item in merged]
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  # stringify each element so non-string identifiers work too
  return ", ".join(str(name) for name in names)
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  # direct hit on a plain key wins and carries no match groups
  if name in data:
    return (data[name], [])

  # otherwise try every key that looks like a compiled regex
  for (pattern, value) in data.items():
    if hasattr(pattern, "match"):
      m = pattern.match(name)
      if m:
        return (value, list(m.groups()))

  return None
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  # 1 MiB = 2**20 bytes; round to the nearest whole mebibyte
  return int(round(float(value) / (1 << 20), 0))
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  # lstat is used so symlinks count with their own size, not their target's
  total_bytes = sum(os.lstat(PathJoin(dirpath, name)).st_size
                    for (dirpath, _, filenames) in os.walk(path)
                    for name in filenames)

  return BytesToMebibyte(total_bytes)
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  mounts = []
  for line in ReadFile(filename).splitlines():
    # the trailing fields (dump/pass numbers) are ignored
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    mounts.append((device, mountpoint, fstype, options))

  return mounts
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  fstats = os.statvfs(path)

  # counters are in units of the fundamental block size (f_frsize)
  total_mb = BytesToMebibyte(fstats.f_blocks * fstats.f_frsize)
  free_mb = BytesToMebibyte(fstats.f_bavail * fstats.f_frsize)

  return (total_mb, free_mb)
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child died from a signal or exited
      with a code other than 0 or 1

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # the child communicates its result via the exit code only
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @rtype: bool
  @return: Whether process was found

  """
  try:
    fn(*args, **kwargs)
  except EnvironmentError, err:
    # ESRCH simply means the process has already gone away
    if err.errno == errno.ESRCH:
      return False
    raise

  return True
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  @return: the function's result, or None if the call was interrupted
      by a signal (EINTR)

  """
  try:
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
      return None
    else:
      raise
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
      return None
    else:
      raise
def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  try:
    # non-blocking exclusive lock; fail immediately if already held
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: Timestamp as returned by time.time() (seconds since Epoch,
      1970-01-01 00:00:00 UTC)
  @return: a string value or N/A if we don't have a valid timestamp

  """
  # None fails the isinstance check as well, so this covers both the
  # missing-value and the wrong-type cases
  if not isinstance(val, (int, float)):
    return "N/A"

  # these two format codes work on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  chunks = []

  remaining = round(secs, 0)

  if remaining > 0:
    # Negative values would be a bit tricky
    for (suffix, duration) in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (count, remaining) = divmod(remaining, duration)
      # leading zero-valued units are suppressed, inner ones are kept
      if count or chunks:
        chunks.append("%d%s" % (count, suffix))

  chunks.append("%ds" % remaining)

  return " ".join(chunks)
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time
  @return: the pause end timestamp, or None if there is no (valid) pause

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    # a missing file simply means no pause is requested
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

    if value is not None:
      # Remove file if it's outdated
      if now > (value + remove_after):
        RemoveFile(filename)
        value = None
      elif value <= now:
        # pause period already over, but file not yet old enough to remove
        value = None

  return value
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raise a wrapped exception, or this timeout itself."""
    args = self.args
    if args and isinstance(args[0], Exception):
      raise args[0]
    raise RetryTimeout(*args)
class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout() method can be used to reraise it.

  """
3467 class _RetryDelayCalculator(object):
3468 """Calculator for increasing delays.
3478 def __init__(self, start, factor, limit):
3479 """Initializes this class.
3482 @param start: Initial delay
3484 @param factor: Factor for delay increase
3485 @type limit: float or None
3486 @param limit: Upper limit for delay or None for no limit
3490 assert factor >= 1.0
3491 assert limit is None or limit >= 0.0
3494 self._factor = factor
3500 """Returns current delay and calculates the next one.
3503 current = self._next
3505 # Update for next run
3506 if self._limit is None or self._next < self._limit:
3507 self._next = min(self._limit, self._next * self._factor)
3512 #: Special delay to specify whole remaining timeout
3513 RETRY_REMAINING_TIME = object()
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function
  @param _time_fn: Time function for unittests

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      # remember the arguments for a possible RetryTimeout later
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      # RETRY_REMAINING_TIME: sleep for whatever is left of the timeout
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to C{tempfile.mkstemp}; the file
  descriptor is closed before returning, so only the path is handed back.

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  _CloseFDNoErr(fd)
  return path
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple
  @return: private key and certificate, both serialized in PEM format

  """
  # Fresh RSA key pair for the certificate
  pkey = OpenSSL.crypto.PKey()
  pkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Build a self-signed certificate around the new key
  cert = OpenSSL.crypto.X509()
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  # self-signed: the issuer equals the subject
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(pkey)
  cert.sign(pkey, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
  """Legacy function to generate self-signed X509 certificate.

  @type filename: str
  @param filename: path to write certificate to
  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: validity of certificate in number of days

  """
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
  # and node daemon certificates have the proper Subject/Issuer.
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
                                                   validity * 24 * 60 * 60)

  # key and certificate share one file, readable only by the owner
  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # "fd" may be missing if the constructor raised before setting it
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      try:
        # retry with an increasing delay until the timeout runs out
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      # EAGAIN with a timeout means the lock is busy; try again later
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation:
    "To make a non-blocking request, include LOCK_NB with any of the above
    operations"

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
class LineSplitter(object):
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    # anything after the last newline stays buffered for the next write
    parts = (self._buffer + data).split("\n")
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    # hand every complete line to the callback, stripped of line endings
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      # a final, unterminated line is delivered as-is
      self._line_fn(self._buffer)
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with iself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        # reuse the dict installed by an outer, stacked decorator
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # always restore the previous signal handlers
        sighandler.Reset()
    return sig_function
  return wrap
class SignalWakeupFd(object):
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # "_previous" may be missing if __init__ raised before setting it
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: optional L{SignalWakeupFd} to notify on signal arrival

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: string
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Note: a plain first-match loop instead of itertools.ifilter, which
    # does not exist on Python 3 and obscured the intent here
    for item in self.items:
      m = item.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    """
    if self._timeout is None:
      return None

    # the clock starts on the first call, not at construction time
    if self._start_time is None:
      self._start_time = self._time_fn()

    remaining = self._start_time + self._timeout - self._time_fn()

    if self._allow_negative:
      return remaining

    # clamp at zero for callers which can't handle negative timeouts
    return max(0.0, remaining)