4 # Copyright (C) 2006, 2007, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti utility module.
24 This module holds functions that can be used in both daemons (all) and
25 the command line scripts.
45 import logging.handlers
53 from cStringIO import StringIO
56 # pylint: disable-msg=F0401
61 from ganeti import errors
62 from ganeti import constants
63 from ganeti import compat
# Matches strings that are safe to pass to a shell without quoting.
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

#: when set to True, L{RunCmd} is disabled
# NOTE(review): the flag variable this comment documents is not visible in
# this view of the file.

# Kernel-provided source of random UUIDs.
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# Building blocks for validating X509 signature lines.
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
# NOTE(review): the closing parenthesis/flags of this re.compile() call are
# not visible in this view.
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),

# Service names: 1-128 characters from a restricted alphabet.
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Canonical 8-4-4-4-12 lowercase-hex textual UUID form.
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                     '[a-f0-9]{4}-[a-f0-9]{12}$')

# Certificate verification results
# NOTE(review): the opening "(CERT_WARNING," of this tuple assignment is not
# visible in this view.
CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)

# Case-insensitive MAC address check: six colon-separated hex octets.
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)

# Child-process timeout actions (none/term/kill).
# NOTE(review): the opening "(_TIMEOUT_NONE, _TIMEOUT_TERM," of this tuple
# assignment is not visible in this view.
_TIMEOUT_KILL) = range(3)

#: Shell param checker regexp
_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")

#: Unit checker regexp
_PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$")

#: ASN1 time representation: seconds plus an hh/mm timezone offset.
_ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
class RunResult(object):
  """Holds the result of running external programs.

  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit normally, e.g. was killed by a signal)
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @ivar stdout: the standard output of the program
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  # NOTE(review): the tail of this signature, the attribute assignments for
  # cmd/stdout/stderr and the "fail_msgs" initialisation appear to be elided
  # from this view; the statements below are kept verbatim.
  def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
    self.exit_code = exit_code
    self.signal = signal_
    # A run counts as failed if it was signalled or exited non-zero.
    self.failed = (signal_ is not None or exit_code != 0)

    # Assemble a human-readable description of the termination reason.
    if self.signal is not None:
      fail_msgs.append("terminated by signal %s" % self.signal)
    elif self.exit_code is not None:
      fail_msgs.append("exited with exit code %s" % self.exit_code)
      # NOTE(review): presumably guarded by an "else:" branch upstream.
      fail_msgs.append("unable to determine termination reason")

    # Record how a timeout (if any) was enforced on the child.
    if timeout_action == _TIMEOUT_TERM:
      fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
    elif timeout_action == _TIMEOUT_KILL:
      fail_msgs.append(("force termination after timeout of %.2f seconds"
                        " and linger for another %.2f seconds") %
                       (timeout, constants.CHILD_LINGER_TIMEOUT))

    if fail_msgs and self.failed:
      self.fail_reason = CommaJoin(fail_msgs)

      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
176 def _BuildCmdEnvironment(env, reset):
177 """Builds the environment for an external program.
183 cmd_env = os.environ.copy()
184 cmd_env["LC_ALL"] = "C"
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False, timeout=None):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether we pipe stdin, stdout and stderr
      (default behaviour) or run the command interactive
  @type timeout: int
  @param timeout: If not None, timeout in seconds until child process gets
      killed
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  # NOTE(review): several control-flow lines (the fork-disabled guard, the
  # string-vs-list command branching that sets "shell", the try/except around
  # the execution helpers and the exit-status decoding into "exitcode" and
  # "signal_") appear to be elided from this view; the statements below are
  # kept verbatim.
  raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  # "output" and "interactive" are mutually exclusive modes
  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")

  if isinstance(cmd, basestring):
    # List form: stringify each element and build a quoted debug string
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  # Piped execution (output captured in memory) ...
  out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
                                                 interactive, timeout)
  # ... or file-redirected execution (no timeout support)
  timeout_action = _TIMEOUT_NONE
  status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
  if err.errno == errno.ENOENT:
    raise errors.OpExecError("Can't execute '%s': not found (%s)" %

  return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
270 def SetupDaemonEnv(cwd="/", umask=077):
271 """Setup a daemon's environment.
273 This should be called between the first and second fork, due to
276 @param cwd: the directory to which to chdir
277 @param umask: the umask to setup
285 def SetupDaemonFDs(output_file, output_fd):
286 """Setups up a daemon's file descriptors.
288 @param output_file: if not None, the file to which to redirect
290 @param output_fd: if not None, the file descriptor for stdout/stderr
293 # check that at most one is defined
294 assert [output_file, output_fd].count(None) >= 1
296 # Open /dev/null (read-only, only for stdin)
297 devnull_fd = os.open(os.devnull, os.O_RDONLY)
299 if output_fd is not None:
301 elif output_file is not None:
304 output_fd = os.open(output_file,
305 os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
306 except EnvironmentError, err:
307 raise Exception("Opening output file failed: %s" % err)
309 output_fd = os.open(os.devnull, os.O_WRONLY)
311 # Redirect standard I/O
312 os.dup2(devnull_fd, 0)
313 os.dup2(output_fd, 1)
314 os.dup2(output_fd, 2)
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  # NOTE(review): the tail of the signature (presumably "pidfile=None):"),
  # the fork-disabled guard, the os.fork() call, the try/finally cleanup and
  # the PID-parsing code appear to be elided from this view; the statements
  # below are kept verbatim.
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"

  # "output" (a file path) and "output_fd" are mutually exclusive
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"

  # String commands are run through a shell
  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()

  # Create pipe for sending error messages
  (errpipe_read, errpipe_write) = os.pipe()

      # Child process, won't return
      _StartDaemonChild(errpipe_read, errpipe_write,
                        pidpipe_read, pidpipe_write,
                        output, output_fd, pidfile)

    # Well, maybe child process failed
    os._exit(1) # pylint: disable-msg=W0212

  _CloseFDNoErr(errpipe_write)

  # Wait for daemon to be started (or an error message to
  # arrive) and read up to 100 KB as an error message
  errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)

  _CloseFDNoErr(errpipe_read)

  _CloseFDNoErr(pidpipe_write)

  # Read up to 128 bytes for PID
  pidtext = RetryOnSignal(os.read, pidpipe_read, 128)

  _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process

    raise errors.OpExecError("Error when starting daemon process: %r" %

  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  """
  # NOTE(review): a signature line (presumably "args,"), the enclosing
  # try:, the two os.fork() calls, the pidfile/env conditions and an inner
  # "pass" appear to be elided from this view; statements kept verbatim.

    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process

    # And fork for the second time

    # Exit first child process
    os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # the parent)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Write PID file and keep the descriptor around
    fd_pidfile = WritePidFile(pidfile)

    # Keeping the file open to hold the lock
    noclose_fds.append(fd_pidfile)

    # The pidfile lock must survive the exec below
    SetCloseOnExecFlag(fd_pidfile, False)

    SetupDaemonFDs(output, fd_output)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory

    # Exec with or without a caller-supplied environment
    os.execvp(args[0], args)
    os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling

  os._exit(1) # pylint: disable-msg=W0212
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  # No descriptor means nobody is listening for errors
  if fd is None:
    return

  # Never send an empty message, the reader treats data as failure
  if not err:
    err = "<unknown error>"

  RetryOnSignal(os.write, fd, err)
def _CheckIfAlive(child):
  """Raises L{RetryAgain} if child is still alive.

  @raises RetryAgain: If child is still alive

  """
  # poll() returns None while the process has not terminated yet
  if child.poll() is None:
    raise RetryAgain()
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  Best-effort: a L{RetryTimeout} simply means the child is still
  running when the timeout expired; the caller re-checks via poll().

  """
  try:
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
  except RetryTimeout:
    pass
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after the programm gets terminated
  @return: (out, err, status)

  """
  # NOTE(review): numerous lines appear to be elided from this view (the
  # interactive branch condition, the StringIO output buffers, remaining
  # Popen keyword arguments, the "fdmap" dict header, the main poll-loop
  # header and several timeout/poll checks); statements kept verbatim.
  poller = select.poll()

  # Default: talk to the child through pipes ...
  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE
  # ... unless interactive, in which case the child inherits our stdio
  stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           close_fds=True, env=env,

  # Timeout bookkeeping: "poll_timeout" counts down the execution timeout,
  # "linger_timeout" the grace period after SIGTERM.
  linger_timeout = None

  poll_timeout = RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %

  msg_linger = ("Command %s (%d) run into linger timeout, killing" %

  timeout_action = _TIMEOUT_NONE

  # Watch both output streams for readability
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)

  # Mapping of child fd -> (output buffer, file object)
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),

    # Non-blocking reads so EOF/POLLHUP handling can't stall us
    SetNonblockFlag(fd, True)

    # poll() takes milliseconds
    pt = poll_timeout() * 1000
    if linger_timeout is None:
      logging.warning(msg_timeout)
      if child.poll() is None:
        timeout_action = _TIMEOUT_TERM
        # The process might already be gone; ignore ESRCH
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
      linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
    pt = linger_timeout() * 1000

    pollresult = RetryOnSignal(poller.poll, pt)

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        poller.unregister(fd)
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
      lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  # Reap the child and collect its exit status
  status = child.wait()
  return out, err, status, timeout_action
def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @return: the exit status

  """
  # NOTE(review): the try/finally that closes "fh", the remaining Popen
  # keyword arguments (presumably stdout=fh, cwd=cwd) and the final
  # "return status" appear to be elided from this view; kept verbatim.
  fh = open(output, "a")

  # stderr merged into the same output file via STDOUT redirection
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.STDOUT,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,

  status = child.wait()
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  # As shown in this view the flag was set and then unconditionally cleared
  # again, ignoring "enable"; restore the branch so the parameter is honoured.
  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  # As shown in this view the flag was set and then unconditionally cleared
  # again, ignoring "enable"; restore the branch so the parameter is honoured.
  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
711 def RetryOnSignal(fn, *args, **kwargs):
712 """Calls a function again if it failed due to EINTR.
717 return fn(*args, **kwargs)
718 except EnvironmentError, err:
719 if err.errno != errno.EINTR:
721 except (socket.error, select.error), err:
722 # In python 2.6 and above select.error is an IOError, so it's handled
723 # above, in 2.5 and below it's not, and it's handled here.
724 if not (err.args and err.args[0] == errno.EINTR):
728 def RunParts(dir_name, env=None, reset_env=False):
729 """Run Scripts or programs in a directory
731 @type dir_name: string
732 @param dir_name: absolute path to a directory
734 @param env: The environment to use
735 @type reset_env: boolean
736 @param reset_env: whether to reset or keep the default os environment
737 @rtype: list of tuples
738 @return: list of (name, (one of RUNDIR_STATUS), RunResult)
744 dir_contents = ListVisibleFiles(dir_name)
746 logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
749 for relname in sorted(dir_contents):
750 fname = PathJoin(dir_name, relname)
751 if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
752 constants.EXT_PLUGIN_MASK.match(relname) is not None):
753 rr.append((relname, constants.RUNPARTS_SKIP, None))
756 result = RunCmd([fname], env=env, reset_env=reset_env)
757 except Exception, err: # pylint: disable-msg=W0703
758 rr.append((relname, constants.RUNPARTS_ERR, str(err)))
760 rr.append((relname, constants.RUNPARTS_RUN, result))
765 def RemoveFile(filename):
766 """Remove a file ignoring some errors.
768 Remove a file, ignoring non-existing ones or directories. Other
772 @param filename: the file to be removed
778 if err.errno not in (errno.ENOENT, errno.EISDIR):
782 def RemoveDir(dirname):
783 """Remove an empty directory.
785 Remove a directory, ignoring non-existing ones.
786 Other errors are passed. This includes the case,
787 where the directory is not empty, so it can't be removed.
790 @param dirname: the empty directory to be removed
796 if err.errno != errno.ENOENT:
800 def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
804 @param old: Original path
808 @param mkdir: Whether to create target directory if it doesn't exist
809 @type mkdir_mode: int
810 @param mkdir_mode: Mode for newly created directories
814 return os.rename(old, new)
816 # In at least one use case of this function, the job queue, directory
817 # creation is very rare. Checking for the directory before renaming is not
819 if mkdir and err.errno == errno.ENOENT:
820 # Create directory and try again
821 Makedirs(os.path.dirname(new), mode=mkdir_mode)
823 return os.rename(old, new)
828 def Makedirs(path, mode=0750):
829 """Super-mkdir; create a leaf directory and all intermediate ones.
831 This is a wrapper around C{os.makedirs} adding error handling not implemented
836 os.makedirs(path, mode)
838 # Ignore EEXIST. This is only handled in os.makedirs as included in
839 # Python 2.5 and above.
840 if err.errno != errno.EEXIST or not os.path.exists(path):
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    # try/finally guarantees the module lock is released even if the
    # assignment fails (as shown in this view, an error would leave the
    # lock held forever)
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str
  @return: the hex digest of the sha checksum of the contents

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)

  fp = compat.sha1_hash()
  # Read in chunks to avoid loading large files into memory at once
  while True:
    data = f.read(4096)
    if not data:
      break

    fp.update(data)

  return fp.hexdigest()
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    # Missing files yield None and are left out of the result
    if cksum:
      ret[filename] = cksum

  return ret
916 def ForceDictType(target, key_types, allowed_values=None):
917 """Force the values of a dict to have certain types.
920 @param target: the dict to update
921 @type key_types: dict
922 @param key_types: dict mapping target dict keys to types
923 in constants.ENFORCEABLE_TYPES
924 @type allowed_values: list
925 @keyword allowed_values: list of specially allowed values
928 if allowed_values is None:
931 if not isinstance(target, dict):
932 msg = "Expected dictionary, got '%s'" % target
933 raise errors.TypeEnforcementError(msg)
936 if key not in key_types:
937 msg = "Unknown key '%s'" % key
938 raise errors.TypeEnforcementError(msg)
940 if target[key] in allowed_values:
943 ktype = key_types[key]
944 if ktype not in constants.ENFORCEABLE_TYPES:
945 msg = "'%s' has non-enforceable type %s" % (key, ktype)
946 raise errors.ProgrammerError(msg)
948 if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
949 if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
951 elif not isinstance(target[key], basestring):
952 if isinstance(target[key], bool) and not target[key]:
955 msg = "'%s' (value %s) is not a valid string" % (key, target[key])
956 raise errors.TypeEnforcementError(msg)
957 elif ktype == constants.VTYPE_BOOL:
958 if isinstance(target[key], basestring) and target[key]:
959 if target[key].lower() == constants.VALUE_FALSE:
961 elif target[key].lower() == constants.VALUE_TRUE:
964 msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
965 raise errors.TypeEnforcementError(msg)
970 elif ktype == constants.VTYPE_SIZE:
972 target[key] = ParseUnit(target[key])
973 except errors.UnitParseError, err:
974 msg = "'%s' (value %s) is not a valid size. error: %s" % \
975 (key, target[key], err)
976 raise errors.TypeEnforcementError(msg)
977 elif ktype == constants.VTYPE_INT:
979 target[key] = int(target[key])
980 except (ValueError, TypeError):
981 msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
982 raise errors.TypeEnforcementError(msg)
985 def _GetProcStatusPath(pid):
986 """Returns the path for a PID's proc status file.
989 @param pid: Process ID
993 return "/proc/%d/status" % pid
996 def IsProcessAlive(pid):
997 """Check if a given pid exists on the system.
999 @note: zombie status is not handled, so zombie processes
1000 will be returned as alive
1002 @param pid: the process ID to check
1004 @return: True if the process exists
1011 except EnvironmentError, err:
1012 if err.errno in (errno.ENOENT, errno.ENOTDIR):
1014 elif err.errno == errno.EINVAL:
1015 raise RetryAgain(err)
1018 assert isinstance(pid, int), "pid must be an integer"
1022 # /proc in a multiprocessor environment can have strange behaviors.
1023 # Retry the os.stat a few times until we get a good result.
1025 return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
1026 args=[_GetProcStatusPath(pid)])
1027 except RetryTimeout, err:
1031 def _ParseSigsetT(sigset):
1032 """Parse a rendered sigset_t value.
1034 This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1037 @type sigset: string
1038 @param sigset: Rendered signal set from /proc/$pid/status
1040 @return: Set of all enabled signal numbers
1046 for ch in reversed(sigset):
1049 # The following could be done in a loop, but it's easier to read and
1050 # understand in the unrolled form
1052 result.add(signum + 1)
1054 result.add(signum + 2)
1056 result.add(signum + 3)
1058 result.add(signum + 4)
1065 def _GetProcStatusField(pstatus, field):
1066 """Retrieves a field from the contents of a proc status file.
1068 @type pstatus: string
1069 @param pstatus: Contents of /proc/$pid/status
1071 @param field: Name of field whose value should be returned
1075 for line in pstatus.splitlines():
1076 parts = line.split(":", 1)
1078 if len(parts) < 2 or parts[0] != field:
1081 return parts[1].strip()
1086 def IsProcessHandlingSignal(pid, signum, status_path=None):
1087 """Checks whether a process is handling a signal.
1090 @param pid: Process ID
1092 @param signum: Signal number
1096 if status_path is None:
1097 status_path = _GetProcStatusPath(pid)
1100 proc_status = ReadFile(status_path)
1101 except EnvironmentError, err:
1102 # In at least one case, reading /proc/$pid/status failed with ESRCH.
1103 if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
1107 sigcgt = _GetProcStatusField(proc_status, "SigCgt")
1109 raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
1111 # Now check whether signal is handled
1112 return signum in _ParseSigsetT(sigcgt)
1115 def ReadPidFile(pidfile):
1116 """Read a pid from a file.
1118 @type pidfile: string
1119 @param pidfile: path to the file containing the pid
1121 @return: The process id, if the file exists and contains a valid PID,
1126 raw_data = ReadOneLineFile(pidfile)
1127 except EnvironmentError, err:
1128 if err.errno != errno.ENOENT:
1129 logging.exception("Can't read pid file")
1134 except (TypeError, ValueError), err:
1135 logging.info("Can't parse pid file contents", exc_info=True)
1141 def ReadLockedPidFile(path):
1142 """Reads a locked PID file.
1144 This can be used together with L{StartDaemon}.
1147 @param path: Path to PID file
1148 @return: PID as integer or, if file was unlocked or couldn't be opened, None
1152 fd = os.open(path, os.O_RDONLY)
1153 except EnvironmentError, err:
1154 if err.errno == errno.ENOENT:
1155 # PID file doesn't exist
1161 # Try to acquire lock
1163 except errors.LockError:
1164 # Couldn't lock, daemon is running
1165 return int(os.read(fd, 100))
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # An exact match wins outright
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()

  # Match the key itself or the key followed by dot-separated components
  mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)

  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      # Track case-insensitive full-string matches separately; they take
      # precedence over prefix matches
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]

  return None
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @return: the name, unmodified, if valid
  @raise errors.OpPrereqError: if the name is not valid

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name
    valid = _VALID_SERVICE_NAME_RE.match(name)
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = (numport >= 0 and numport < (1 << 16))

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name
1242 def ListVolumeGroups():
1243 """List volume groups and their size
1247 Dictionary with keys volume name and values
1248 the size of the volume
1251 command = "vgs --noheadings --units m --nosuffix -o name,size"
1252 result = RunCmd(command)
1257 for line in result.stdout.splitlines():
1259 name, size = line.split()
1260 size = int(float(size))
1261 except (IndexError, ValueError), err:
1262 logging.error("Invalid output from vgs (%s): %s", err, line)
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge exposes a "bridge" directory under sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = "(\D+|\d+)"
  # Up to eight digit/non-digit groups, remainder compared as plain text
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile("^\D*$")

  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    else:
      return int(val)

  # Decorate each name with its tuple of converted groups, sort, undecorate
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    # Conversion failed; fall back to the original value
    nv = val
  return nv
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  # Anything outside the whitelisted character set is rejected
  match = _SHELLPARAM_REGEX.match(word)
  return match is not None
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument contains characters
      that are unsafe for the shell

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args
1382 def FormatUnit(value, units):
1383 """Formats an incoming number of MiB with the appropriate unit.
1386 @param value: integer representing the value in MiB (1048576)
1388 @param units: the type of formatting we should do:
1389 - 'h' for automatic scaling
1394 @return: the formatted value (with suffix)
1397 if units not in ('m', 'g', 't', 'h'):
1398 raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))
1402 if units == 'm' or (units == 'h' and value < 1024):
1405 return "%d%s" % (round(value, 0), suffix)
1407 elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
1410 return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)
1415 return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  @raise errors.UnitParseError: on malformed input or an unknown unit

  """
  m = _PARSEUNIT_REGEX.match(str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    # No unit given: default to MiB
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass
  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024
  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024
  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs

  """
  if not cpu_mask:
    return []

  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      # a single ID is a degenerate range: lower == higher
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list
def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  If the key is already present (ignoring whitespace differences), the
  file is left untouched. The file handle (or the handle opened from
  the given path) is always closed.

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      # key not found; make sure we start on a fresh line first
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  The file is rewritten via a temporary file in the same directory and
  atomically renamed into place; on any error the temporary file is
  removed and the original left untouched.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  Any existing entry for the same IP address is dropped and a fresh
  line "ip<TAB>hostname aliases..." is appended.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if fields and not fields[0].startswith("#") and ip == fields[0]:
          # drop any previous entry for this IP
          continue
        out.write(line)

      out.write("%s\t%s" % (ip, hostname))
      if aliases:
        out.write(" %s" % " ".join(aliases))
      out.write("\n")
      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0o644)
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  Registers the host under both its full name and its short (first
  label) name.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will automatically
    # close the descriptor, but we would still like to have its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if len(fields) > 1 and not fields[0].startswith("#"):
          names = fields[1:]
          if hostname in names:
            while hostname in names:
              names.remove(hostname)
            if names:
              # keep the line only if other names remain
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0o644)
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  short_name = hostname.split(".")[0]
  for name in (hostname, short_name):
    RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  """
  fmt = "%Y-%m-%d_%H_%M_%S"
  return time.strftime(fmt)
def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                 file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name
def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    # only shell-safe characters, no quoting needed
    return value
  else:
    # single-quote and escape embedded single quotes POSIX-style
    return "'%s'" % value.replace("'", "'\\''")
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  quoted = [ShellQuote(arg) for arg in args]
  return ' '.join(quoted)
1739 """Helper class to write scripts with indentation.
1744 def __init__(self, fh):
1745 """Initializes this class.
1751 def IncIndent(self):
1752 """Increase indentation level by 1.
1757 def DecIndent(self):
1758 """Decrease indentation level by 1.
1761 assert self._indent > 0
1764 def Write(self, txt, *args):
1765 """Write line to output file.
1768 assert self._indent >= 0
1770 self._fh.write(self._indent * self.INDENT_STR)
1773 self._fh.write(txt % args)
1777 self._fh.write("\n")
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolue and normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  return files
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # unknown user name/uid
    return default
  return result.pw_dir
1819 """Returns a random UUID.
1821 @note: This is a Linux-specific method as it uses the /proc
1826 return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the returned
      string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  # binascii.hexlify gives the same result as the Python 2-only
  # os.urandom(...).encode('hex') without relying on the "hex" codec
  import binascii
  return binascii.hexlify(os.urandom(numbytes))
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: if a directory cannot be created or is
      not a directory

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError as err:
      # an already-existing directory is fine
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    try:
      # enforce the requested mode even if the directory pre-existed
      os.chmod(dir_name, dir_mode)
    except EnvironmentError as err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    return f.read(size)
  finally:
    f.close()
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content
  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # make sure the contents hit the disk before renaming into place
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Either the path to the file or the fd must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)

  """
  if [path, fd].count(None) != 1:
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  if fd is None:
    st = os.stat(path)
  else:
    st = os.fstat(fd)

  return (st.st_dev, st.st_ino, st.st_mtime)
def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  Differences in the inode/device are not accepted, but and older
  timestamp for fi_disk is accepted.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (dev_disk, ino_disk, mtime_disk) = fi_disk
  (dev_ours, ino_ours, mtime_ours) = fi_ours

  if dev_disk != dev_ours or ino_disk != ino_ours:
    return False
  return mtime_disk <= mtime_ours
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wraper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}

  """
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  try:
    LockFile(fd)
    if file_id is not None:
      disk_id = GetFileID(fd=fd)
      if not VerifyFileID(disk_id, file_id):
        raise errors.LockError("Cannot overwrite file %s, it has been modified"
                               " since last written" % file_name)
    return WriteFile(file_name, **kwargs)
  finally:
    # releases the lock too
    os.close(fd)
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line
  @raise errors.GenericError: on empty files or (with strict) on
      multi-line files

  """
  file_lines = ReadFile(file_name).splitlines()
  # list comprehension instead of filter() so the result is a list on
  # both Python 2 and 3
  full_lines = [line for line in file_lines if line]
  if not file_lines or not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx + base is free, as the sorted sequence skipped over it
      return idx + base

  # nothing free inside the sequence
  return None
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error as err:
    # err.args[0] works on both Python 2 (tuple-style) and 3 (OSError)
    if err.args[0] != errno.EINTR:
      raise
    io_events = []

  if io_events and io_events[0][1] & check:
    return io_events[0][1]
  else:
    return None
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """
  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      # nothing happened yet; ask Retry to try again
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    try:
      result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                     args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      result = None
  else:
    # no timeout: keep retrying across interruptions until something happens
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  return result
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  # set.add returns None (falsy), so the second clause both records the
  # element and keeps the expression True for unseen elements
  return [i for i in seq if i not in seen and not seen.add(i)]
def FindDuplicates(seq):
  """Identifies duplicates in a list.

  Does not preserve element order.

  @type seq: sequence
  @param seq: Sequence with source elements
  @rtype: list
  @return: List of duplicate elements from seq

  """
  dup = set()
  seen = set()

  for item in seq:
    if item in seen:
      dup.add(item)
    else:
      seen.add(item)

  return list(dup)
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  if not _MAC_CHECK.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  return mac.lower()
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: boolean
  @return: False for negative value, True otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None
2258 def _CloseFDNoErr(fd, retries=5):
2259 """Close a file descriptor ignoring errors.
2262 @param fd: the file descriptor
2264 @param retries: how many retries to make, in case we get any
2265 other error than EBADF
2270 except OSError, err:
2271 if err.errno != errno.EBADF:
2273 _CloseFDNoErr(fd, retries - 1)
2274 # else either it's closed already or we're out of retries, so we
2275 # ignore this and go on
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024

  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork()  # Fork a second child.
    if (pid == 0):  # The second child.
      _CloseFDNoErr(rpipe)
    else:
      # exit() or _exit()? See below.
      os._exit(0)  # Exit parent (the first child) of the second child.
  else:
    _CloseFDNoErr(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode)  # Exit parent of the first child.

  SetupDaemonFDs(logfile, None)
  return wpipe
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile)
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the daemon is running (or was started), False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if result.failed:
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: boolean
  @return: True if the daemon was stopped, False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if result.failed:
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
    return False

  return True
def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: string
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0o600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    # best-effort removal; deliberately ignore all errors
    pass
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
      a SIGKILL will be sent. If not positive, no such checking
      will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      return item_name
  return None
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_micro = int(value * 1000000)
  (secs, micros) = divmod(total_micro, 1000000)

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= micros <= 999999, \
    "Microseconds must be 0-999999, but are %s" % micros

  return (int(secs), int(micros))
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  seconds = timetuple[0]
  microseconds = timetuple[1]

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  # keep the exact original arithmetic to preserve float results
  return float(seconds) + (float(microseconds) * 0.000001)
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"

  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"

  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers

  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permisssion problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This avoids things like /dir/../../other/path to be valid.

  @type path: str
  @param path: the path to check
  @rtype: bool
  @return: True if the path is absolute and contains no backtracking

  """
  is_absolute = os.path.isabs(path)
  is_normalized = os.path.normpath(path) == path
  return is_absolute and is_normalized
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
    - the first argument must be an absolute path
    - no component in the path must have backtracking (e.g. /../),
      since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths
  @rtype: str
  @return: the joined, normalized path

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix, i.e. no component
  # escaped via "..";
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return
  @rtype: list of str
  @return: the last lines of the file, without line terminators

  """
  fd = open(fname, "r")
  try:
    # Seek to the end to learn the file size, then back up (at most) 4KB
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos - 4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    # Always release the file handle, even if read/seek fails
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]
2826 def FormatTimestampWithTZ(secs):
2827 """Formats a Unix timestamp with the local timezone.
2830 return time.strftime("%F %T %Z", time.gmtime(secs))
def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp
  @rtype: int
  @return: seconds since the Epoch (UTC)
  @raise ValueError: if the timestamp has neither an offset nor a "Z" suffix

  """
  m = _ASN1_TIME_REGEX.match(value)
  if m:
    # Timestamp carries an explicit UTC offset (e.g. "...+0130")
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  # Normalize to UTC by subtracting the offset
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @rtype: tuple; (number or None, number or None)
  @return: (not_before, not_after) as Unix timestamps; either may be None
      if the pyOpenSSL version does not support the accessor or the field
      is unset

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)
def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @rtype: tuple
  @return: (CERT_ERROR/CERT_WARNING/None, message or None)

  """
  if expired:
    msg = "Certificate is expired"

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTimestampWithTZ(not_before),
               FormatTimestampWithTZ(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)

    return (CERT_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTimestampWithTZ(not_before))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  # Certificate is valid and not close to expiration
  return (None, None)
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (valid_from, valid_until) = GetX509CertValidity(cert)

  return _VerifyCertificateInner(cert.has_expired(), valid_from, valid_until,
                                 time.time(), warn_days, error_days)
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format
  @raise errors.GenericError: if the salt contains invalid characters

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
           cert_pem))
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format, preceded by a signature header
  @rtype: tuple; (string, string)
  @return: Salt and signature
  @raise errors.GenericError: if no signature header is found

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      # Reached the certificate body; the header must appear before it
      break

    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: if the signature is missing or does not verify

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to digest
  @type salt: string or None
  @param salt: Optional salt prepended to the text before digesting
  @rtype: string
  @return: Hexadecimal HMAC-SHA1 digest

  """
  # Only prepend the salt when one was actually given; the shown code
  # unconditionally computed "salt + text", which fails for salt=None
  if salt:
    salted_text = salt + text
  else:
    salted_text = text

  return hmac.new(key, salted_text, compat.sha1).hexdigest()
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type digest: string
  @param digest: Expected digest
  @type salt: string or None
  @param salt: Optional salt forwarded to L{Sha1Hmac}
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += '\\t'
    elif char == '\n':
      resu += '\\n'
    elif char == '\r':
      # written as '\\' + 'r' to avoid an actual CR in the source literal
      resu += '\\' + 'r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escape in order to be an element of the
  elements. The escaping rules are (assuming coma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list of strings
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  rlist = []
  while slist:
    e1 = slist.pop(0)
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      if num_b % 2 == 1:
        e2 = slist.pop(0)
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
        continue
    rlist.append(e1)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  return rlist
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join(str(val) for val in names)
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list) or None
  @return: match information, or None if nothing matched

  """
  if name in data:
    # Direct hit needs no regular expression matching
    return (data[name], [])

  for key, value in data.items():
    # Regex objects
    if hasattr(key, "match"):
      m = key.match(name)
      if m:
        return (value, list(m.groups()))

  return None
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  mib = 1024.0 * 1024.0
  return int(round(value / mib, 0))
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      # lstat so symlinks are counted by their own size, not their target's
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  data = []
  mountlines = ReadFile(filename).splitlines()
  for line in mountlines:
    # /proc/mounts has five-plus whitespace-separated fields; everything
    # after the fourth is ignored
    device, mountpoint, fstype, options, _ = line.split(None, 4)
    data.append((device, mountpoint, fstype, options))

  return data
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  fsstat = os.statvfs(path)

  block_size = fsstat.f_frsize
  free_mb = BytesToMebibyte(fsstat.f_bavail * block_size)
  total_mb = BytesToMebibyte(fsstat.f_blocks * block_size)
  return (total_mb, free_mb)
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child exits abnormally

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3298 def IgnoreProcessNotFound(fn, *args, **kwargs):
3299 """Ignores ESRCH when calling a process-related function.
3301 ESRCH is raised when a process is not found.
3304 @return: Whether process was found
3309 except EnvironmentError, err:
3311 if err.errno == errno.ESRCH:
3318 def IgnoreSignals(fn, *args, **kwargs):
3319 """Tries to call a function ignoring failures due to EINTR.
3323 return fn(*args, **kwargs)
3324 except EnvironmentError, err:
3325 if err.errno == errno.EINTR:
3329 except (select.error, socket.error), err:
3330 # In python 2.6 and above select.error is an IOError, so it's handled
3331 # above, in 2.5 and below it's not, and it's handled here.
3332 if err.args and err.args[0] == errno.EINTR:
3339 """Locks a file using POSIX locks.
3342 @param fd: the file descriptor we need to lock
3346 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3347 except IOError, err:
3348 if err.errno == errno.EAGAIN:
3349 raise errors.LockError("File already locked")
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"

  # these two codes works on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  parts = []

  secs = round(secs, 0)

  if secs > 0:
    # Negative values would be a bit tricky
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (complete, secs) = divmod(secs, one)
      if complete or parts:
        parts.append("%d%s" % (complete, unit))

  parts.append("%ds" % secs)

  return " ".join(parts)
3393 def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
3394 """Reads the watcher pause file.
3396 @type filename: string
3397 @param filename: Path to watcher pause file
3398 @type now: None, float or int
3399 @param now: Current time as Unix timestamp
3400 @type remove_after: int
3401 @param remove_after: Remove watcher pause file after specified amount of
3402 seconds past the pause end time
3409 value = ReadFile(filename)
3410 except IOError, err:
3411 if err.errno != errno.ENOENT:
3415 if value is not None:
3419 logging.warning(("Watcher pause file (%s) contains invalid value,"
3420 " removing it"), filename)
3421 RemoveFile(filename)
3424 if value is not None:
3425 # Remove file if it's outdated
3426 if now > (value + remove_after):
3427 RemoveFile(filename)
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raise a wrapped exception, or this timeout itself.

    If the first argument is an exception instance (as passed to
    L{RetryAgain}), raise it; otherwise raise a fresh L{RetryTimeout}
    carrying the same arguments.

    """
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)
class RetryAgain(Exception):
  """Retry again.

  Raised by a function called through L{Retry} to request another attempt.
  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout() method can be used to reraise it.

  """
3461 class _RetryDelayCalculator(object):
3462 """Calculator for increasing delays.
3472 def __init__(self, start, factor, limit):
3473 """Initializes this class.
3476 @param start: Initial delay
3478 @param factor: Factor for delay increase
3479 @type limit: float or None
3480 @param limit: Upper limit for delay or None for no limit
3484 assert factor >= 1.0
3485 assert limit is None or limit >= 0.0
3488 self._factor = factor
3494 """Returns current delay and calculates the next one.
3497 current = self._next
3499 # Update for next run
3500 if self._limit is None or self._next < self._limit:
3501 self._next = min(self._limit, self._next * self._factor)
#: Special delay marker for L{Retry}: instead of a fixed or increasing
#: delay, sleep for the whole remaining timeout between attempts
RETRY_REMAINING_TIME = object()
3510 def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3511 _time_fn=time.time):
3512 """Call a function repeatedly until it succeeds.
3514 The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3515 anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3516 total of C{timeout} seconds, this function throws L{RetryTimeout}.
3518 C{delay} can be one of the following:
3519 - callable returning the delay length as a float
3520 - Tuple of (start, factor, limit)
3521 - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3522 useful when overriding L{wait_fn} to wait for an external event)
3523 - A static delay as a number (int or float)
3526 @param fn: Function to be called
3527 @param delay: Either a callable (returning the delay), a tuple of (start,
3528 factor, limit) (see L{_RetryDelayCalculator}),
3529 L{RETRY_REMAINING_TIME} or a number (int or float)
3530 @type timeout: float
3531 @param timeout: Total timeout
3532 @type wait_fn: callable
3533 @param wait_fn: Waiting function
3534 @return: Return value of function
3538 assert callable(wait_fn)
3539 assert callable(_time_fn)
3544 end_time = _time_fn() + timeout
3547 # External function to calculate delay
3550 elif isinstance(delay, (tuple, list)):
3551 # Increasing delay with optional upper boundary
3552 (start, factor, limit) = delay
3553 calc_delay = _RetryDelayCalculator(start, factor, limit)
3555 elif delay is RETRY_REMAINING_TIME:
3556 # Always use the remaining time
3561 calc_delay = lambda: delay
3563 assert calc_delay is None or callable(calc_delay)
3568 # pylint: disable-msg=W0142
3570 except RetryAgain, err:
3571 retry_args = err.args
3572 except RetryTimeout:
3573 raise errors.ProgrammerError("Nested retry loop detected that didn't"
3574 " handle RetryTimeout")
3576 remaining_time = end_time - _time_fn()
3578 if remaining_time < 0.0:
3579 # pylint: disable-msg=W0142
3580 raise RetryTimeout(*retry_args)
3582 assert remaining_time >= 0.0
3584 if calc_delay is None:
3585 wait_fn(remaining_time)
3587 current_delay = calc_delay()
3588 if current_delay > 0.0:
3589 wait_fn(current_delay)
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to C{tempfile.mkstemp}. The open file
  descriptor returned by mkstemp is closed so only the path remains in use.

  @rtype: string
  @return: Path to the temporary file

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  # Close the descriptor immediately; callers only want the path
  os.close(fd)
  return path
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple; (string, string)
  @return: PEM-encoded private key and certificate

  """
  # Create private and public key
  pkey = OpenSSL.crypto.PKey()
  pkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  x509 = OpenSSL.crypto.X509()
  x509.get_subject().CN = common_name
  x509.set_serial_number(1)
  x509.gmtime_adj_notBefore(0)
  x509.gmtime_adj_notAfter(validity)
  # Self-signed: issuer equals subject and it is signed with its own key
  x509.set_issuer(x509.get_subject())
  x509.set_pubkey(pkey)
  x509.sign(pkey, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, x509)

  return (key_pem, cert_pem)
3631 def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
3632 validity=constants.X509_CERT_DEFAULT_VALIDITY):
3633 """Legacy function to generate self-signed X509 certificate.
3636 @param filename: path to write certificate to
3637 @type common_name: string
3638 @param common_name: commonName value
3640 @param validity: validity of certificate in number of days
3643 # TODO: Investigate using the cluster name instead of X505_CERT_CN for
3644 # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
3645 # and node daemon certificates have the proper Subject/Issuer.
3646 (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3647 validity * 24 * 60 * 60)
3649 WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3652 class FileLock(object):
3653 """Utility class for file locks.
3656 def __init__(self, fd, filename):
3657 """Constructor for FileLock.
3660 @param fd: File object
3662 @param filename: Path of the file opened at I{fd}
3666 self.filename = filename
3669 def Open(cls, filename):
3670 """Creates and opens a file to be used as a file-based lock.
3672 @type filename: string
3673 @param filename: path to the file to be locked
3676 # Using "os.open" is necessary to allow both opening existing file
3677 # read/write and creating if not existing. Vanilla "open" will truncate an
3678 # existing file -or- allow creating if not existing.
3679 return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
3686 """Close the file and release the lock.
3689 if hasattr(self, "fd") and self.fd:
3693 def _flock(self, flag, blocking, timeout, errmsg):
3694 """Wrapper for fcntl.flock.
3697 @param flag: operation flag
3698 @type blocking: bool
3699 @param blocking: whether the operation should be done in blocking mode.
3700 @type timeout: None or float
3701 @param timeout: for how long the operation should be retried (implies
3703 @type errmsg: string
3704 @param errmsg: error message in case operation fails.
3707 assert self.fd, "Lock was closed"
3708 assert timeout is None or timeout >= 0, \
3709 "If specified, timeout must be positive"
3710 assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
3712 # When a timeout is used, LOCK_NB must always be set
3713 if not (timeout is None and blocking):
3714 flag |= fcntl.LOCK_NB
3717 self._Lock(self.fd, flag, timeout)
3720 Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
3721 args=(self.fd, flag, timeout))
3722 except RetryTimeout:
3723 raise errors.LockError(errmsg)
3726 def _Lock(fd, flag, timeout):
3728 fcntl.flock(fd, flag)
3729 except IOError, err:
3730 if timeout is not None and err.errno == errno.EAGAIN:
3733 logging.exception("fcntl.flock failed")
3736 def Exclusive(self, blocking=False, timeout=None):
3737 """Locks the file in exclusive mode.
3739 @type blocking: boolean
3740 @param blocking: whether to block and wait until we
3741 can lock the file or return immediately
3742 @type timeout: int or None
3743 @param timeout: if not None, the duration to wait for the lock
3747 self._flock(fcntl.LOCK_EX, blocking, timeout,
3748 "Failed to lock %s in exclusive mode" % self.filename)
3750 def Shared(self, blocking=False, timeout=None):
3751 """Locks the file in shared mode.
3753 @type blocking: boolean
3754 @param blocking: whether to block and wait until we
3755 can lock the file or return immediately
3756 @type timeout: int or None
3757 @param timeout: if not None, the duration to wait for the lock
3761 self._flock(fcntl.LOCK_SH, blocking, timeout,
3762 "Failed to lock %s in shared mode" % self.filename)
3764 def Unlock(self, blocking=True, timeout=None):
3765 """Unlocks the file.
3767 According to C{flock(2)}, unlocking can also be a nonblocking
3770 To make a non-blocking request, include LOCK_NB with any of the above
3773 @type blocking: boolean
3774 @param blocking: whether to block and wait until we
3775 can lock the file or return immediately
3776 @type timeout: int or None
3777 @param timeout: if not None, the duration to wait for the lock
3781 self._flock(fcntl.LOCK_UN, blocking, timeout,
3782 "Failed to unlock %s" % self.filename)
class LineSplitter(object):
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    """Buffers C{data}, splitting out any complete lines."""
    parts = (self._buffer + data).split("\n")
    # The last part is either empty or an incomplete line; keep it buffered
    self._buffer = parts.pop()
    self._lines.extend(parts)

  def flush(self):
    """Passes all buffered complete lines to the line function."""
    while self._lines:
      self._line_fn(self._lines.popleft().rstrip("\r\n"))

  def close(self):
    """Flushes all lines, including a trailing unterminated one."""
    self.flush()
    if self._buffer:
      self._line_fn(self._buffer)
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous handlers, even on exceptions
        sighandler.Reset()
    return sig_function
  return wrap
class SignalWakeupFd(object):
  """Wrapper around a pipe registered via C{signal.set_wakeup_fd}.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # hasattr() guards against partially-constructed instances
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: set
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: Object with a C{Notify} method, notified on each signal
        (e.g. L{SignalWakeupFd}) — TODO confirm expected type

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def ClearPending(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Anchor each item so it must match the whole field name
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for pattern in self.items:
      m = pattern.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    @rtype: float or None
    @return: remaining seconds, or None if no timeout was set

    """
    if self._timeout is None:
      return None

    # Get start time on first calculation
    if self._start_time is None:
      self._start_time = self._time_fn()

    # Calculate remaining time
    remaining_timeout = self._start_time + self._timeout - self._time_fn()

    if not self._allow_negative:
      # Ensure timeout is always >= 0
      return max(0.0, remaining_timeout)

    return remaining_timeout