4 # Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti utility module.
24 This module holds functions that can be used in both daemons (all) and
25 the command line scripts.
51 from cStringIO import StringIO
53 from ganeti import errors
54 from ganeti import constants
55 from ganeti import compat
57 from ganeti.utils.algo import * # pylint: disable-msg=W0401
58 from ganeti.utils.retry import * # pylint: disable-msg=W0401
59 from ganeti.utils.text import * # pylint: disable-msg=W0401
60 from ganeti.utils.mlock import * # pylint: disable-msg=W0401
61 from ganeti.utils.log import * # pylint: disable-msg=W0401
# Module-level constants and compiled regular expressions.
# NOTE(review): this dump is missing interleaved lines (e.g. the flag value
# announced by the "_no_fork"-style comment below, and the heads of the
# multi-line tuple unpackings); verify against the upstream file.
67 #: when set to True, L{RunCmd} is disabled
70 _RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
# Single salt/signature character class used by the X509 patterns below.
72 HEX_CHAR_RE = r"[a-zA-Z0-9]"
73 VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
74 X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
75 (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
76 HEX_CHAR_RE, HEX_CHAR_RE),
79 _VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
81 UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
82 '[a-f0-9]{4}-[a-f0-9]{12}$')
84 # Certificate verification results
86 CERT_ERROR) = range(1, 3)
# Timeout actions used by RunCmd/_RunCmdPipe (no timeout / SIGTERM / SIGKILL).
90 _TIMEOUT_KILL) = range(3)
92 #: Shell param checker regexp
93 _SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
# ASN.1 time with explicit timezone offset, e.g. "20120623131415-0300".
96 _ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
# NOTE(review): this excerpt is missing interleaved lines from the original
# file; the visible statements are not contiguous (e.g. the docstring close,
# the stdout/stderr/cmd assignments and the fail_msgs initializer are absent
# here).
99 class RunResult(object):
100 """Holds the result of running external programs.
103 @ivar exit_code: the exit code of the program, or None (if the program
105 @type signal: int or None
106 @ivar signal: the signal that caused the program to finish, or None
107 (if the program wasn't terminated by a signal)
109 @ivar stdout: the standard output of the program
111 @ivar stderr: the standard error of the program
112 @type failed: boolean
113 @ivar failed: True in case the program was
114 terminated by a signal or exited with a non-zero exit code
115 @ivar fail_reason: a string detailing the termination reason
# __slots__ keeps per-instance memory low; "cmd" is kept for logging.
118 __slots__ = ["exit_code", "signal", "stdout", "stderr",
119 "failed", "fail_reason", "cmd"]
122 def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
125 self.exit_code = exit_code
126 self.signal = signal_
# A run counts as failed if killed by a signal or exited with non-zero code.
129 self.failed = (signal_ is not None or exit_code != 0)
132 if self.signal is not None:
133 fail_msgs.append("terminated by signal %s" % self.signal)
134 elif self.exit_code is not None:
135 fail_msgs.append("exited with exit code %s" % self.exit_code)
137 fail_msgs.append("unable to determine termination reason")
# Add an explanatory message when the child hit the execution timeout
# (SIGTERM) or additionally the linger timeout (SIGKILL).
139 if timeout_action == _TIMEOUT_TERM:
140 fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
141 elif timeout_action == _TIMEOUT_KILL:
142 fail_msgs.append(("force termination after timeout of %.2f seconds"
143 " and linger for another %.2f seconds") %
144 (timeout, constants.CHILD_LINGER_TIMEOUT))
146 if fail_msgs and self.failed:
147 self.fail_reason = CommaJoin(fail_msgs)
150 logging.debug("Command '%s' failed (%s); output: %s",
151 self.cmd, self.fail_reason, self.output)
# Accessor backing the read-only "output" property defined below.
153 def _GetOutput(self):
154 """Returns the combined stdout and stderr for easier usage.
157 return self.stdout + self.stderr
159 output = property(_GetOutput, None, None, "Return full output")
162 def _BuildCmdEnvironment(env, reset):
163 """Builds the environment for an external program.
169 cmd_env = os.environ.copy()
170 cmd_env["LC_ALL"] = "C"
# NOTE(review): this excerpt is missing interleaved lines; in particular the
# fork-disabled guard before line 210, the shell/list handling after the
# basestring check, and the try/except structure around the pipe/file
# execution paths are not visible here.
178 def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
179 interactive=False, timeout=None):
180 """Execute a (shell) command.
182 The command should not read from its standard input, as it will be
185 @type cmd: string or list
186 @param cmd: Command to run
188 @param env: Additional environment variables
190 @param output: if desired, the output of the command can be
191 saved in a file instead of the RunResult instance; this
192 parameter denotes the file name (if not None)
194 @param cwd: if specified, will be used as the working
195 directory for the command; the default will be /
196 @type reset_env: boolean
197 @param reset_env: whether to reset or keep the default os environment
198 @type interactive: boolean
199 @param interactive: whether we pipe stdin, stdout and stderr
200 (default behaviour) or run the command interactive
202 @param timeout: If not None, timeout in seconds until child process gets
205 @return: RunResult instance
206 @raise errors.ProgrammerError: if we call this when forks are disabled
210 raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
# "output" (capture to file) and "interactive" (inherit stdio) conflict.
212 if output and interactive:
213 raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
214 " not be provided at the same time")
216 if isinstance(cmd, basestring):
220 cmd = [str(val) for val in cmd]
221 strcmd = ShellQuoteArgs(cmd)
225 logging.debug("RunCmd %s, output file '%s'", strcmd, output)
227 logging.debug("RunCmd %s", strcmd)
229 cmd_env = _BuildCmdEnvironment(env, reset_env)
# Pipe path returns captured out/err; file path only yields an exit status.
233 out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
234 interactive, timeout)
236 timeout_action = _TIMEOUT_NONE
237 status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
240 if err.errno == errno.ENOENT:
241 raise errors.OpExecError("Can't execute '%s': not found (%s)" %
253 return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
def SetupDaemonEnv(cwd="/", umask=0o77):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @param cwd: the directory to which to chdir
  @param umask: the umask to setup

  """
  os.chdir(cwd)
  os.umask(umask)
  os.setsid()
# NOTE(review): lines are missing from this excerpt; the bodies of the
# "output_fd is not None" branch and the try surrounding the os.open call
# are not visible here.
271 def SetupDaemonFDs(output_file, output_fd):
272 """Setups up a daemon's file descriptors.
274 @param output_file: if not None, the file to which to redirect
276 @param output_fd: if not None, the file descriptor for stdout/stderr
279 # check that at most one is defined
280 assert [output_file, output_fd].count(None) >= 1
282 # Open /dev/null (read-only, only for stdin)
283 devnull_fd = os.open(os.devnull, os.O_RDONLY)
285 if output_fd is not None:
287 elif output_file is not None:
290 output_fd = os.open(output_file,
291 os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
292 except EnvironmentError, err:
293 raise Exception("Opening output file failed: %s" % err)
# Fallback: discard stdout/stderr when neither target was given.
295 output_fd = os.open(os.devnull, os.O_WRONLY)
297 # Redirect standard I/O
298 os.dup2(devnull_fd, 0)
299 os.dup2(output_fd, 1)
300 os.dup2(output_fd, 2)
# NOTE(review): this excerpt is missing interleaved lines; the fork calls,
# the pipe close-on-exec setup, several try/finally blocks and the PID
# parsing at the end are not fully visible here.
303 def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
305 """Start a daemon process after forking twice.
307 @type cmd: string or list
308 @param cmd: Command to run
310 @param env: Additional environment variables
312 @param cwd: Working directory for the program
314 @param output: Path to file in which to save the output
316 @param output_fd: File descriptor for output
317 @type pidfile: string
318 @param pidfile: Process ID file
320 @return: Daemon process ID
321 @raise errors.ProgrammerError: if we call this when forks are disabled
325 raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
# "output" (path) and "output_fd" (descriptor) are mutually exclusive.
328 if output and not (bool(output) ^ (output_fd is not None)):
329 raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
332 if isinstance(cmd, basestring):
333 cmd = ["/bin/sh", "-c", cmd]
335 strcmd = ShellQuoteArgs(cmd)
338 logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
340 logging.debug("StartDaemon %s", strcmd)
342 cmd_env = _BuildCmdEnvironment(env, False)
344 # Create pipe for sending PID back
345 (pidpipe_read, pidpipe_write) = os.pipe()
348 # Create pipe for sending error messages
349 (errpipe_read, errpipe_write) = os.pipe()
356 # Child process, won't return
357 _StartDaemonChild(errpipe_read, errpipe_write,
358 pidpipe_read, pidpipe_write,
360 output, output_fd, pidfile)
362 # Well, maybe child process failed
363 os._exit(1) # pylint: disable-msg=W0212
365 CloseFdNoError(errpipe_write)
367 # Wait for daemon to be started (or an error message to
368 # arrive) and read up to 100 KB as an error message
369 errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
371 CloseFdNoError(errpipe_read)
373 CloseFdNoError(pidpipe_write)
375 # Read up to 128 bytes for PID
376 pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
378 CloseFdNoError(pidpipe_read)
380 # Try to avoid zombies by waiting for child process
387 raise errors.OpExecError("Error when starting daemon process: %r" %
392 except (ValueError, TypeError), err:
393 raise errors.OpExecError("Error while trying to parse PID %r: %s" %
# NOTE(review): this excerpt is missing interleaved lines; the surrounding
# try blocks, the fork calls and parts of the pidfile handling are not
# visible here.
397 def _StartDaemonChild(errpipe_read, errpipe_write,
398 pidpipe_read, pidpipe_write,
400 output, fd_output, pidfile):
401 """Child process for starting daemon.
405 # Close parent's side
406 CloseFdNoError(errpipe_read)
407 CloseFdNoError(pidpipe_read)
409 # First child process
412 # And fork for the second time
415 # Exit first child process
416 os._exit(0) # pylint: disable-msg=W0212
418 # Make sure pipe is closed on execv* (and thereby notifies
420 SetCloseOnExecFlag(errpipe_write, True)
422 # List of file descriptors to be left open
423 noclose_fds = [errpipe_write]
427 fd_pidfile = WritePidFile(pidfile)
429 # Keeping the file open to hold the lock
430 noclose_fds.append(fd_pidfile)
432 SetCloseOnExecFlag(fd_pidfile, False)
436 SetupDaemonFDs(output, fd_output)
438 # Send daemon PID to parent
439 RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
441 # Close all file descriptors except stdio and error message pipe
442 CloseFDs(noclose_fds=noclose_fds)
444 # Change working directory
# Two exec variants: with inherited environment or an explicit one.
448 os.execvp(args[0], args)
450 os.execvpe(args[0], args, env)
451 except: # pylint: disable-msg=W0702
453 # Report errors to original process
454 WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
455 except: # pylint: disable-msg=W0702
456 # Ignore errors in error handling
459 os._exit(1) # pylint: disable-msg=W0212
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is None:
    return

  if not err:
    err = "<unknown error>"

  RetryOnSignal(os.write, fd, err)
479 def _CheckIfAlive(child):
480 """Raises L{RetryAgain} if child is still alive.
482 @raises RetryAgain: If child is still alive
485 if child.poll() is None:
def _WaitForProcess(child, timeout):
  """Waits for the child to terminate or until we reach timeout.

  @param child: the child process object (as returned by subprocess)
  @param timeout: maximum number of seconds to wait

  """
  try:
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout), args=[child])
  except RetryTimeout:
    # Still running after the deadline; the caller decides what to do next
    pass
# NOTE(review): this excerpt is missing interleaved lines; the Popen keyword
# arguments, the StringIO buffers for out/err, the poll loop header and the
# exit-status decoding at the end are not fully visible here.
499 def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
500 _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
501 """Run a command and return its output.
503 @type cmd: string or list
504 @param cmd: Command to run
506 @param env: The environment to use
507 @type via_shell: bool
508 @param via_shell: if we should run via the shell
510 @param cwd: the working directory for the program
511 @type interactive: boolean
512 @param interactive: Run command interactive (without piping)
514 @param timeout: Timeout after the program gets terminated
516 @return: (out, err, status)
519 poller = select.poll()
# Non-interactive mode pipes all three standard streams; interactive mode
# leaves them inherited from the parent.
521 stderr = subprocess.PIPE
522 stdout = subprocess.PIPE
523 stdin = subprocess.PIPE
526 stderr = stdout = stdin = None
528 child = subprocess.Popen(cmd, shell=via_shell,
532 close_fds=True, env=env,
538 linger_timeout = None
543 poll_timeout = RunningTimeout(timeout, True).Remaining
545 msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
547 msg_linger = ("Command %s (%d) run into linger timeout, killing" %
550 timeout_action = _TIMEOUT_NONE
554 poller.register(child.stdout, select.POLLIN)
555 poller.register(child.stderr, select.POLLIN)
557 child.stdout.fileno(): (out, child.stdout),
558 child.stderr.fileno(): (err, child.stderr),
561 SetNonblockFlag(fd, True)
565 pt = poll_timeout() * 1000
# Execution timeout expired: send SIGTERM once and switch to the linger
# timeout before escalating to SIGKILL below.
567 if linger_timeout is None:
568 logging.warning(msg_timeout)
569 if child.poll() is None:
570 timeout_action = _TIMEOUT_TERM
571 IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
572 linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
573 pt = linger_timeout() * 1000
579 pollresult = RetryOnSignal(poller.poll, pt)
581 for fd, event in pollresult:
582 if event & select.POLLIN or event & select.POLLPRI:
583 data = fdmap[fd][1].read()
584 # no data from read signifies EOF (the same as POLLHUP)
586 poller.unregister(fd)
589 fdmap[fd][0].write(data)
590 if (event & select.POLLNVAL or event & select.POLLHUP or
591 event & select.POLLERR):
592 poller.unregister(fd)
595 if timeout is not None:
596 assert callable(poll_timeout)
598 # We have no I/O left but it might still run
599 if child.poll() is None:
600 _WaitForProcess(child, poll_timeout())
602 # Terminate if still alive after timeout
603 if child.poll() is None:
604 if linger_timeout is None:
605 logging.warning(msg_timeout)
606 timeout_action = _TIMEOUT_TERM
607 IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
610 lt = linger_timeout()
611 _WaitForProcess(child, lt)
613 # Okay, still alive after timeout and linger timeout? Kill it!
614 if child.poll() is None:
615 timeout_action = _TIMEOUT_KILL
616 logging.warning(msg_linger)
617 IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
622 status = child.wait()
623 return out, err, status, timeout_action
# NOTE(review): lines are missing from this excerpt; the try/finally that
# closes the output file handle and the remaining Popen keyword arguments
# are not visible here.
626 def _RunCmdFile(cmd, env, via_shell, output, cwd):
627 """Run a command and save its output to a file.
629 @type cmd: string or list
630 @param cmd: Command to run
632 @param env: The environment to use
633 @type via_shell: bool
634 @param via_shell: if we should run via the shell
636 @param output: the filename in which to save the output
638 @param cwd: the working directory for the program
640 @return: the exit status
# Output file is opened in append mode; stderr is merged into stdout.
643 fh = open(output, "a")
645 child = subprocess.Popen(cmd, shell=via_shell,
646 stderr=subprocess.STDOUT,
648 stdin=subprocess.PIPE,
649 close_fds=True, env=env,
653 status = child.wait()
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  # Read-modify-write of the descriptor flags
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  # Read-modify-write of the file status flags
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  Any other exception is re-raised unchanged; the call is repeated only
  while it keeps failing with EINTR.

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError as err:
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error) as err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
# NOTE(review): lines are missing from this excerpt; the initialisation of
# the result list, the try around ListVisibleFiles and the "continue" after
# the skip entry are not visible here.
714 def RunParts(dir_name, env=None, reset_env=False):
715 """Run Scripts or programs in a directory
717 @type dir_name: string
718 @param dir_name: absolute path to a directory
720 @param env: The environment to use
721 @type reset_env: boolean
722 @param reset_env: whether to reset or keep the default os environment
723 @rtype: list of tuples
724 @return: list of (name, (one of RUNDIR_STATUS), RunResult)
730 dir_contents = ListVisibleFiles(dir_name)
732 logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
# Entries must be executable regular files matching EXT_PLUGIN_MASK;
# anything else is reported as skipped.
735 for relname in sorted(dir_contents):
736 fname = PathJoin(dir_name, relname)
737 if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
738 constants.EXT_PLUGIN_MASK.match(relname) is not None):
739 rr.append((relname, constants.RUNPARTS_SKIP, None))
742 result = RunCmd([fname], env=env, reset_env=reset_env)
743 except Exception, err: # pylint: disable-msg=W0703
744 rr.append((relname, constants.RUNPARTS_ERR, str(err)))
746 rr.append((relname, constants.RUNPARTS_RUN, result))
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError as err:
    # Missing files and directories are tolerated; anything else bubbles up
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError as err:
    # Only a missing directory is tolerated
    if err.errno != errno.ENOENT:
      raise
def RenameFile(old, new, mkdir=False, mkdir_mode=0o750):
  """Renames a file, optionally creating the target directory.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError as err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)

      return os.rename(old, new)

    raise
def Makedirs(path, mode=0o750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not
  implemented before Python 2.5.

  @type path: string
  @param path: the directory to create
  @type mode: int
  @param mode: the mode for newly created directories

  """
  try:
    os.makedirs(path, mode)
  except OSError as err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not
  getting a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
# NOTE(review): lines are missing from this excerpt; the file open and the
# chunked read loop feeding the hash object are not visible here.
853 def _FingerprintFile(filename):
854 """Compute the fingerprint of a file.
856 If the file does not exist, a None will be returned
860 @param filename: the filename to checksum
862 @return: the hex digest of the sha checksum of the contents
# Only regular, existing files are fingerprinted; everything else -> None.
866 if not (os.path.exists(filename) and os.path.isfile(filename)):
871 fp = compat.sha1_hash()
879 return fp.hexdigest()
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    # Non-existing/non-regular files yield a false value and are omitted
    if cksum:
      ret[filename] = cksum

  return ret
# NOTE(review): this excerpt is missing interleaved lines; the loop header
# over the target keys, several "continue"/assignment lines in the string
# and boolean branches and the final else are not visible here.
902 def ForceDictType(target, key_types, allowed_values=None):
903 """Force the values of a dict to have certain types.
906 @param target: the dict to update
907 @type key_types: dict
908 @param key_types: dict mapping target dict keys to types
909 in constants.ENFORCEABLE_TYPES
910 @type allowed_values: list
911 @keyword allowed_values: list of specially allowed values
914 if allowed_values is None:
917 if not isinstance(target, dict):
918 msg = "Expected dictionary, got '%s'" % target
919 raise errors.TypeEnforcementError(msg)
# Every key must be declared in key_types; unknown keys are rejected.
922 if key not in key_types:
923 msg = "Unknown key '%s'" % key
924 raise errors.TypeEnforcementError(msg)
926 if target[key] in allowed_values:
929 ktype = key_types[key]
930 if ktype not in constants.ENFORCEABLE_TYPES:
931 msg = "'%s' has non-enforceable type %s" % (key, ktype)
932 raise errors.ProgrammerError(msg)
934 if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
935 if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
937 elif not isinstance(target[key], basestring):
938 if isinstance(target[key], bool) and not target[key]:
941 msg = "'%s' (value %s) is not a valid string" % (key, target[key])
942 raise errors.TypeEnforcementError(msg)
# Booleans accept the VALUE_FALSE/VALUE_TRUE string spellings as well.
943 elif ktype == constants.VTYPE_BOOL:
944 if isinstance(target[key], basestring) and target[key]:
945 if target[key].lower() == constants.VALUE_FALSE:
947 elif target[key].lower() == constants.VALUE_TRUE:
950 msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
951 raise errors.TypeEnforcementError(msg)
956 elif ktype == constants.VTYPE_SIZE:
958 target[key] = ParseUnit(target[key])
959 except errors.UnitParseError, err:
960 msg = "'%s' (value %s) is not a valid size. error: %s" % \
961 (key, target[key], err)
962 raise errors.TypeEnforcementError(msg)
963 elif ktype == constants.VTYPE_INT:
965 target[key] = int(target[key])
966 except (ValueError, TypeError):
967 msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
968 raise errors.TypeEnforcementError(msg)
971 def _GetProcStatusPath(pid):
972 """Returns the path for a PID's proc status file.
975 @param pid: Process ID
979 return "/proc/%d/status" % pid
# NOTE(review): lines are missing from this excerpt; the _TryStat helper's
# header/body around the visible except clause and the handling of the
# retry result are not fully visible here.
982 def IsProcessAlive(pid):
983 """Check if a given pid exists on the system.
985 @note: zombie status is not handled, so zombie processes
986 will be returned as alive
988 @param pid: the process ID to check
990 @return: True if the process exists
# Missing /proc entry means the process is gone; EINVAL is retried below.
997 except EnvironmentError, err:
998 if err.errno in (errno.ENOENT, errno.ENOTDIR):
1000 elif err.errno == errno.EINVAL:
1001 raise RetryAgain(err)
1004 assert isinstance(pid, int), "pid must be an integer"
1008 # /proc in a multiprocessor environment can have strange behaviors.
1009 # Retry the os.stat a few times until we get a good result.
1011 return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
1012 args=[_GetProcStatusPath(pid)])
1013 except RetryTimeout, err:
1017 def _ParseSigsetT(sigset):
1018 """Parse a rendered sigset_t value.
1020 This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
1023 @type sigset: string
1024 @param sigset: Rendered signal set from /proc/$pid/status
1026 @return: Set of all enabled signal numbers
1032 for ch in reversed(sigset):
1035 # The following could be done in a loop, but it's easier to read and
1036 # understand in the unrolled form
1038 result.add(signum + 1)
1040 result.add(signum + 2)
1042 result.add(signum + 3)
1044 result.add(signum + 4)
1051 def _GetProcStatusField(pstatus, field):
1052 """Retrieves a field from the contents of a proc status file.
1054 @type pstatus: string
1055 @param pstatus: Contents of /proc/$pid/status
1057 @param field: Name of field whose value should be returned
1061 for line in pstatus.splitlines():
1062 parts = line.split(":", 1)
1064 if len(parts) < 2 or parts[0] != field:
1067 return parts[1].strip()
# NOTE(review): lines are missing from this excerpt; the return statement
# in the error branch and the check for a missing SigCgt value are not
# fully visible here.
1072 def IsProcessHandlingSignal(pid, signum, status_path=None):
1073 """Checks whether a process is handling a signal.
1076 @param pid: Process ID
1078 @param signum: Signal number
# status_path is overridable for testing; defaults to /proc/$pid/status.
1082 if status_path is None:
1083 status_path = _GetProcStatusPath(pid)
1086 proc_status = ReadFile(status_path)
1087 except EnvironmentError, err:
1088 # In at least one case, reading /proc/$pid/status failed with ESRCH.
1089 if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
1093 sigcgt = _GetProcStatusField(proc_status, "SigCgt")
1095 raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
1097 # Now check whether signal is handled
1098 return signum in _ParseSigsetT(sigcgt)
# NOTE(review): lines are missing from this excerpt; the try around
# ReadOneLineFile, the int() conversion of the raw data and the fallback
# return value are not visible here.
1101 def ReadPidFile(pidfile):
1102 """Read a pid from a file.
1104 @type pidfile: string
1105 @param pidfile: path to the file containing the pid
1107 @return: The process id, if the file exists and contains a valid PID,
1112 raw_data = ReadOneLineFile(pidfile)
1113 except EnvironmentError, err:
# A missing pidfile is normal; other read errors are logged.
1114 if err.errno != errno.ENOENT:
1115 logging.exception("Can't read pid file")
1120 except (TypeError, ValueError), err:
1121 logging.info("Can't parse pid file contents", exc_info=True)
# NOTE(review): lines are missing from this excerpt; the lock-acquisition
# call inside the try, the success return path and the fd cleanup are not
# visible here.
1127 def ReadLockedPidFile(path):
1128 """Reads a locked PID file.
1130 This can be used together with L{StartDaemon}.
1133 @param path: Path to PID file
1134 @return: PID as integer or, if file was unlocked or couldn't be opened, None
1138 fd = os.open(path, os.O_RDONLY)
1139 except EnvironmentError, err:
1140 if err.errno == errno.ENOENT:
1141 # PID file doesn't exist
1147 # Try to acquire lock
1149 except errors.LockError:
# Lock held by the daemon itself => the PID inside is authoritative.
1150 # Couldn't lock, daemon is running
1151 return int(os.read(fd, 100))
# NOTE(review): lines are missing from this excerpt; the try/int() port
# conversion preceding the visible except clause and the final return are
# not visible here.
1158 def ValidateServiceName(name):
1159 """Validate the given service name.

1161 @type name: number or string
1162 @param name: Service name or port specification
1167 except (ValueError, TypeError):
1168 # Non-numeric service name
1169 valid = _VALID_SERVICE_NAME_RE.match(name)
1171 # Numeric port (protocols other than TCP or UDP might need adjustments
# Ports must fit in 16 bits.
1173 valid = (numport >= 0 and numport < (1 << 16))
1176 raise errors.OpPrereqError("Invalid service name '%s'" % name,
# NOTE(review): lines are missing from this excerpt; the result-dict
# initialisation, the failure check on the vgs invocation and the dict
# population/return are not visible here.
1182 def ListVolumeGroups():
1183 """List volume groups and their size
1187 Dictionary with keys volume name and values
1188 the size of the volume
# "vgs" with megabyte units and no suffix so the size parses as a number.
1191 command = "vgs --noheadings --units m --nosuffix -o name,size"
1192 result = RunCmd(command)
1197 for line in result.stdout.splitlines():
1199 name, size = line.split()
1200 size = int(float(size))
1201 except (IndexError, ValueError), err:
1202 logging.error("Invalid output from vgs (%s): %s", err, line)
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # Linux exposes bridge devices via a "bridge" directory in sysfs
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    converted = fn(val)
  except (ValueError, TypeError):
    converted = val
  return converted
def IsValidShellParam(word):
  """Verifies whether the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(_SHELLPARAM_REGEX.match(word))
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs

  """
  cpu_list = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    n_elements = len(boundaries)
    if n_elements > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    # A single ID yields lower == higher, i.e. a one-element range
    cpu_list.extend(range(lower, higher + 1))
  return cpu_list
# NOTE(review): lines are missing from this excerpt; the branch handling a
# file handle argument, the read loop over existing lines and the
# try/finally that closes the file are not visible here.
1324 def AddAuthorizedKey(file_obj, key):
1325 """Adds an SSH public key to an authorized_keys file.
1327 @type file_obj: str or file handle
1328 @param file_obj: path to authorized_keys file
1330 @param key: string containing key
1333 key_fields = key.split()
1335 if isinstance(file_obj, basestring):
1336 f = open(file_obj, 'a+')
# Keys are compared field-wise so whitespace differences don't matter.
1343 # Ignore whitespace changes
1344 if line.split() == key_fields:
1346 nl = line.endswith('\n')
1350 f.write(key.rstrip('\r\n'))
# NOTE(review): lines are missing from this excerpt; the copy loop writing
# non-matching lines, the try/finally structure and the cleanup of the
# temporary file on failure are not visible here.
1357 def RemoveAuthorizedKey(file_name, key):
1358 """Removes an SSH public key from an authorized_keys file.
1360 @type file_name: str
1361 @param file_name: path to authorized_keys file
1363 @param key: string containing key
1366 key_fields = key.split()
# Rewrite via a temporary file in the same directory, then atomic rename.
1368 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1370 out = os.fdopen(fd, 'w')
1372 f = open(file_name, 'r')
1375 # Ignore whitespace changes while comparing lines
1376 if line.split() != key_fields:
1380 os.rename(tmpname, file_name)
# NOTE(review): lines are missing from this excerpt; the loop writing
# non-matching lines, the final newline handling and the try/finally
# closing the duplicated descriptor are not visible here.
1390 def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1391 """Sets the name of an IP address and hostname in /etc/hosts.
1393 @type file_name: str
1394 @param file_name: path to the file to modify (usually C{/etc/hosts})
1396 @param ip: the IP address
1398 @param hostname: the hostname to be added
1400 @param aliases: the list of aliases to add for the hostname
1403 # Ensure aliases are unique
1404 aliases = UniqueSequence([hostname] + aliases)[1:]
1406 def _WriteEtcHosts(fd):
1407 # Duplicating file descriptor because os.fdopen's result will automatically
1408 # close the descriptor, but we would still like to have its functionality.
1409 out = os.fdopen(os.dup(fd), "w")
# Lines whose first field equals the IP are replaced by the new entry.
1411 for line in ReadFile(file_name).splitlines(True):
1412 fields = line.split()
1413 if fields and not fields[0].startswith("#") and ip == fields[0]:
1417 out.write("%s\t%s" % (ip, hostname))
1419 out.write(" %s" % " ".join(aliases))
1425 WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around L{SetEtcHostsEntry}.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  # The short name (first label) is registered as an alias
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
# NOTE(review): lines are missing from this excerpt; the branches that drop
# lines left without any names, the pass-through of other lines and the
# try/finally closing the duplicated descriptor are not visible here.
1441 def RemoveEtcHostsEntry(file_name, hostname):
1442 """Removes a hostname from /etc/hosts.
1444 IP addresses without names are removed from the file.
1446 @type file_name: str
1447 @param file_name: path to the file to modify (usually C{/etc/hosts})
1449 @param hostname: the hostname to be removed
1452 def _WriteEtcHosts(fd):
1453 # Duplicating file descriptor because os.fdopen's result will automatically
1454 # close the descriptor, but we would still like to have its functionality.
1455 out = os.fdopen(os.dup(fd), "w")
1457 for line in ReadFile(file_name).splitlines(True):
1458 fields = line.split()
1459 if len(fields) > 1 and not fields[0].startswith("#"):
# All occurrences of the hostname are removed; remaining names kept.
1461 if hostname in names:
1462 while hostname in names:
1463 names.remove(hostname)
1465 out.write("%s %s\n" % (fields[0], " ".join(names)))
1474 WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname whose full and short name will be removed
      from L{constants.ETC_HOSTS}

  """
  short_name = hostname.split(".")[0]
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, short_name)
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators. Uses the local timezone.

  @rtype: str
  @return: local time formatted as C{YYYY-MM-DD_HH_MM_SS}

  """
  fmt = "%Y-%m-%d_%H_%M_%S"
  return time.strftime(fmt)
def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                 file_name)

  # Timestamped prefix keeps multiple backups of the same file apart
  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    # mkstemp gives the backup a unique name in the same directory
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise errors.ProgrammerError: if L{path} is not an absolute and
      normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  # Dotfiles are considered hidden and filtered out
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  return files
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @param user: the user name (string) or uid (integer)
  @param default: value returned when the user cannot be found
  @return: the user's home directory, or I{default}
  @raise errors.ProgrammerError: for an argument of unsupported type

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # Unknown user name or uid
    return default
  return result.pw_dir
1572 """Returns a random UUID.
1574 @note: This is a Linux-specific method as it uses the /proc
1579 return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1582 def EnsureDirs(dirs):
1583 """Make required directories, if they don't exist.
1585 @param dirs: list of tuples (dir_name, dir_mode)
1586 @type dirs: list of (string, integer)
1589 for dir_name, dir_mode in dirs:
1591 os.mkdir(dir_name, dir_mode)
1592 except EnvironmentError, err:
1593 if err.errno != errno.EEXIST:
1594 raise errors.GenericError("Cannot create needed directory"
1595 " '%s': %s" % (dir_name, err))
1597 os.chmod(dir_name, dir_mode)
1598 except EnvironmentError, err:
1599 raise errors.GenericError("Cannot change directory permissions on"
1600 " '%s': %s" % (dir_name, err))
1601 if not os.path.isdir(dir_name):
1602 raise errors.GenericError("%s is not a directory" % dir_name)
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type file_name: str
  @param file_name: the file to be read
  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  try:
    return f.read(size)
  finally:
    # Ensure the file handle is released even if read() raises
    f.close()
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable or None
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str or None
  @param data: contents of the file
  @type mode: int or None
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @param atime: a custom access time to be set on the file
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of fn/data must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  # atime and mtime must be set together (os.utime needs both)
  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
  # NOTE(review): the rest of this message and the following statements
  # are missing from this dump -- restore before use.

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temp file in the same directory, then rename over the
  # target, so readers never observe a partially-written file
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  # NOTE(review): the try/finally wrapper and several statements in the
  # body below (prewrite/fn/data writes, dry_run/close handling) are
  # missing from this dump.
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
      os.chmod(new_name, mode)
    if callable(prewrite):
    if data is not None:
    if callable(postwrite):
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
      os.rename(new_name, file_name)
      RemoveFile(new_name)
def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Either the path to the file or the fd must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)

  """
  if [path, fd].count(None) != 1:
    raise errors.ProgrammerError("One and only one of fd/path must be given")
  # NOTE(review): the stat call (os.stat(path) / os.fstat(fd) assigned
  # to "st") is missing from this dump -- restore before use.
  return (st.st_dev, st.st_ino, st.st_mtime)
def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  Differences in the inode/device are not accepted, but an older
  timestamp for fi_disk is accepted.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_inode, disk_mtime) = fi_disk
  (ours_dev, ours_inode, ours_mtime) = fi_ours

  # Device/inode must match exactly
  if (disk_dev, disk_inode) != (ours_dev, ours_inode):
    return False

  # The on-disk copy may be older, but never newer, than ours
  return disk_mtime <= ours_mtime
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}

  """
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  # NOTE(review): upstream wraps the rest in try/finally closing fd and
  # calls LockFile(fd) before the check; those lines are missing here.
  if file_id is not None:
    # Refuse to overwrite a file somebody else changed since we read it
    disk_id = GetFileID(fd=fd)
    if not VerifyFileID(disk_id, file_id):
      raise errors.LockError("Cannot overwrite file %s, it has been modified"
                             " since last written" % file_name)
  return WriteFile(file_name, **kwargs)
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type file_name: str
  @param file_name: the file to read from
  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line

  @rtype: str
  @return: the first non-empty line
  @raise errors.GenericError: if the file contains no data, or it has
      multiple non-empty lines and I{strict} is set

  """
  file_lines = ReadFile(file_name).splitlines()
  # List comprehension instead of filter() keeps len() working and the
  # truth test meaningful on Python 3, where filter() is lazy
  full_lines = [line for line in file_lines if line]
  # An empty file and a file of only empty lines are both "no data"
  if not full_lines:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  elif strict and len(full_lines) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return full_lines[0]
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence, or None if the
      sequence has no holes

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # idx is not used, so it's the first free slot
      return idx + base

  return None
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @return: None for timeout, otherwise occured conditions

  """
  # Error-ish conditions are always reported, regardless of "event"
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    # NOTE(review): the "timeout *= 1000" conversion is missing from
    # this dump.

  poller = select.poll()
  poller.register(fdobj, event)
  # TODO: If the main thread receives a signal and we have no timeout, we
  # could wait forever. This should check a global "quit" flag or something
  # NOTE(review): the "try:" wrapping poll() and the re-raise for
  # non-EINTR errors are missing from this dump.
  io_events = poller.poll(timeout)
  except select.error, err:
    if err[0] != errno.EINTR:
  if io_events and io_events[0][1] & check:
    return io_events[0][1]
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """
  def __init__(self, timeout):
    # Remaining timeout in seconds; updated by the Retry machinery
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    # NOTE(review): upstream raises RetryAgain when result is None and
    # returns result otherwise; those lines are missing from this dump.

  def UpdateTimeout(self, timeout):
    # Called between retries with the remaining time
    self.timeout = timeout
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @return: None for timeout, otherwise occured conditions

  """
  if timeout is not None:
    retrywaiter = FdConditionWaiterHelper(timeout)
    # NOTE(review): the "try:" line is missing from this dump
    result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                   args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
    except RetryTimeout:
      # NOTE(review): "result = None" assignment missing here
  # NOTE(review): the "else:" branch header and "result = None" init for
  # the no-timeout case are missing from this dump
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
  # NOTE(review): "return result" missing from this dump
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration, in seconds
  @rtype: tuple
  @return: (False, error message) for negative values,
      (True, None) otherwise

  """
  if duration < 0:
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None
1937 def CloseFdNoError(fd, retries=5):
1938 """Close a file descriptor ignoring errors.
1941 @param fd: the file descriptor
1943 @param retries: how many retries to make, in case we get any
1944 other error than EBADF
1949 except OSError, err:
1950 if err.errno != errno.EBADF:
1952 CloseFdNoError(fd, retries - 1)
1953 # else either it's closed already or we're out of retries, so we
1954 # ignore this and go on
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/stdout/stderr).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    # NOTE(review): upstream wraps this in try/except ValueError with a
    # hard-coded fallback; those lines are missing from this dump.
      MAXFD = os.sysconf('SC_OPEN_MAX')

  # Use the hard limit of RLIMIT_NOFILE as the upper bound of fds to close
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    # NOTE(review): the "maxfd = MAXFD" fallback is missing from this dump

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      # NOTE(review): "continue" and the closing call (_CloseFDNoErr/
      # CloseFdNoError) are missing from this dump.
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # NOTE(review): the first "pid = os.fork()" call is missing from this
  # dump; the classic double-fork sequence below depends on it.
  if (pid == 0): # The first child.
    # NOTE(review): SetupDaemonEnv()/setsid() calls appear to be missing
    pid = os.fork() # Fork a second child.
    if (pid == 0): # The second child.
      CloseFdNoError(rpipe)
    # NOTE(review): the "else:" branch for the first child is missing
      # exit() or _exit()? See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  # NOTE(review): the "else:" branch for the original parent is missing
    CloseFdNoError(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    # NOTE(review): the "if errormsg:" guard and rcode computation are
    # missing from this dump
    sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
    os._exit(rcode) # Exit parent of the first child.

  # The grandchild continues here as the daemon, with fds redirected
  SetupDaemonFDs(logfile, None)
  # NOTE(review): the final "return wpipe"/"return 0" is missing here
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile)
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name

  """
  # Delegate the check-and-start logic to the daemon-util helper script
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  # NOTE(review): the "if result.failed:" guard and the True/False
  # returns are missing from this dump.
    logging.error("Can't start daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name

  """
  # Delegate the stop logic to the daemon-util helper script
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  # NOTE(review): the "if result.failed:" guard and the True/False
  # returns are missing from this dump.
    logging.error("Can't stop daemon '%s', failure %s, output: %s",
                  name, result.fail_reason, result.output)
def WritePidFile(pidfile):
  """Write the current process pidfile.

  @type pidfile: string
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
      points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
      you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  os.write(fd_pidfile, "%d\n" % os.getpid())
  # NOTE(review): the final "return fd_pidfile" is missing from this dump.
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  # NOTE(review): the "try:" line and the "pass" in the except clause
  # are missing from this dump.
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
      a SIGKILL will be sent. If not positive, no such checking
      will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  # NOTE(review): the def line above is truncated -- the closing
  # "waitpid=False):" is missing from this dump.
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      # NOTE(review): the try/except around waitpid is missing here
        os.waitpid(pid, os.WNOHANG)

  # NOTE(review): the "if pid <= 0:" guard is missing before the raise
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    # NOTE(review): the early "return" is missing from this dump

  _helper(pid, signal_, waitpid)

  # NOTE(review): the "if timeout <= 0: return" guard is missing here

  def _CheckProcess():
    if not IsProcessAlive(pid):
      # NOTE(review): "return" and the waitpid-guard branch are elided
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
      # NOTE(review): result handling and RetryAgain raise are elided

  # Wait up to $timeout seconds
  # NOTE(review): the "try:" line is missing from this dump
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    # NOTE(review): "pass" is missing here

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
    # NOTE(review): the argument continuation and "return None" are
    # missing from this dump.

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      # NOTE(review): "return item_name" and the final "return None"
      # are missing from this dump.
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # Work in integer microseconds to avoid float rounding surprises
  total_micros = int(value * 1000000)
  (seconds, microseconds) = divmod(total_micros, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert seconds >= 0, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  # Same float expression as SplitTime's inverse
  return float(seconds) + (float(microseconds) * 0.000001)
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This avoids things like /dir/../../other/path to be valid.

  @type path: str
  @param path: the path to check
  @rtype: boolean

  """
  if not os.path.isabs(path):
    return False
  # A normalized path is its own normpath()
  return os.path.normpath(path) == path
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @return: the joined path
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  # ensure the first component is an absolute and normalized path name
  root = args[0]
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  if prefix != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (prefix, root))
  return result
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    # Seek to the end to find the file size, then back up at most 4KB
    fd.seek(0, 2)
    pos = fd.tell()
    pos = max(0, pos - 4096)
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]
def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp
  @return: Seconds since the Epoch (1970-01-01 00:00:00 UTC)

  """
  m = _ASN1_TIME_REGEX.match(value)
  # NOTE(review): the "if m:" / "else:" branching is missing from this
  # dump; the next four lines belong to the explicit-offset branch, the
  # three after that to the plain-UTC ("Z") branch.
  asn1time = m.group(1)
  hours = int(m.group(2))
  minutes = int(m.group(3))
  # Offset from UTC in minutes
  utcoffset = (60 * hours) + minutes
  if not value.endswith("Z"):
    raise ValueError("Missing timezone")
  asn1time = value[:-1]
  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
  # Shift by the UTC offset to obtain an UTC datetime
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
  return calendar.timegm(tt.utctimetuple())
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @return: tuple of (not_before, not_after), each a Unix timestamp or
      None when not available

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  # NOTE(review): the "try:" line pairs below have their fallback
  # assignments ("get_notbefore_fn = None" etc.) and the None-handling
  # branches elided from this dump.
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
  not_before_asn1 = get_notbefore_fn()

  if not_before_asn1 is None:
    # NOTE(review): "not_before = None" and "else:" elided
  not_before = _ParseAsn1Generalizedtime(not_before_asn1)

    get_notafter_fn = cert.get_notAfter
  except AttributeError:
  not_after_asn1 = get_notafter_fn()

  if not_after_asn1 is None:
    # NOTE(review): "not_after = None" and "else:" elided
  not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)
def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported

  """
  # NOTE(review): the "if expired:" guard before this message block is
  # missing from this dump.
    msg = "Certificate is expired"

    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTime(not_before), FormatTime(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTime(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTime(not_after)

    return (CERT_ERROR, msg)

  elif not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTime(not_before))

  elif not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  # NOTE(review): the final "return (None, None)" is missing from this dump.
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be
      reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be
      reported

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  now = time.time()
  return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
                                 now, warn_days, error_days)
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  # NOTE(review): the final "cert_pem))" argument/closing of this return
  # statement is missing from this dump.
  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format with signature header
  @return: tuple of (salt, signature)
  @raise errors.GenericError: when no signature header is found

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    if line.startswith("---"):
      # NOTE(review): "break" (stop at the PEM body) is missing here

    m = X509_SIGNATURE.match(line.strip())
    # NOTE(review): the "if m:" guard is missing before this return
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  # NOTE(review): the final "return (cert, salt)" is missing from this dump.
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: the text to hash
  @rtype: string
  @return: hexadecimal digest

  """
  # NOTE(review): upstream guards this with "if salt is None:
  # salted_text = text / else:"; that branch is missing from this dump.
  salted_text = salt + text

  return hmac.new(key, salted_text, compat.sha1).hexdigest()
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: the signed text
  @type digest: string
  @param digest: Expected digest
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  expected = Sha1Hmac(key, text, salt=salt)
  # Hex digests are compared case-insensitively
  return digest.lower() == expected.lower()
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)

  """
  # NOTE(review): the "if name in data:" (or try/except KeyError) guard
  # around the direct lookup is missing from this dump.
    return (data[name], [])

  for key, value in data.items():
    # Regex objects
    if hasattr(key, "match"):
      # NOTE(review): "m = key.match(name)" and the "if m:" guard are
      # missing before this return; the final "return None" is also
      # missing from this dump.
        return (value, list(m.groups()))
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  mebibytes = value / (1024.0 * 1024.0)
  return int(round(mebibytes, 0))
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      # lstat: count symlinks themselves, don't follow them
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  data = []
  mountlines = ReadFile(filename).splitlines()
  for line in mountlines:
    # /proc/mounts fields are whitespace-separated; the trailing dump
    # flags are ignored
    device, mountpoint, fstype, options, _ = line.split(None, 4)
    data.append((device, mountpoint, fstype, options))

  return data
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  st = os.statvfs(path)

  frsize = st.f_frsize
  # f_bavail: blocks available to unprivileged users
  free_mib = BytesToMebibyte(st.f_bavail * frsize)
  total_mib = BytesToMebibyte(st.f_blocks * frsize)
  return (total_mib, free_mib)
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  # NOTE(review): the "pid = os.fork()" call and the "if pid == 0: try:"
  # lines opening the child branch are missing from this dump.
      # In case the function uses temporary files
      ResetTempfileModule()

      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      # NOTE(review): the sentinel assignment (e.g. "result = 33") is
      # missing here.

    os._exit(result) # pylint: disable-msg=W0212

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    signum = os.WTERMSIG(status)
    # NOTE(review): "exitcode = None" and the "else:" branch header are
    # missing around here.
    exitcode = os.WEXITSTATUS(status)

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
    # NOTE(review): "(exitcode, signum))" continuation missing here.

  return bool(exitcode)
2720 def IgnoreProcessNotFound(fn, *args, **kwargs):
2721 """Ignores ESRCH when calling a process-related function.
2723 ESRCH is raised when a process is not found.
2726 @return: Whether process was found
2731 except EnvironmentError, err:
2733 if err.errno == errno.ESRCH:
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  @return: the function's result, or None when interrupted by EINTR

  """
  # NOTE(review): the "try:" line, the re-raise for non-EINTR errors and
  # the final "return None" are missing from this dump.
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
2761 """Locks a file using POSIX locks.
2764 @param fd: the file descriptor we need to lock
2768 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
2769 except IOError, err:
2770 if err.errno == errno.EAGAIN:
2771 raise errors.LockError("File already locked")
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
      seconds past the pause end time

  """
  # NOTE(review): the "if now is None: now = time.time()" default and
  # the "try:" opening the read are missing from this dump.
    value = ReadFile(filename)
  except IOError, err:
    # A missing pause file simply means "not paused"
    if err.errno != errno.ENOENT:
      # NOTE(review): "raise" and "value = None" are elided here

  if value is not None:
    # NOTE(review): the try/except ValueError around int(value) is
    # missing from this dump.
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)

  if value is not None:
    # Remove file if it's outdated
    if now > (value + remove_after):
      RemoveFile(filename)
      # NOTE(review): "value = None" and the trailing
      # "elif now > value: value = None" / "return value" are elided.
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed to C{tempfile.mkstemp}. The caller is
  responsible for removing the file when it's no longer needed.

  @rtype: string
  @return: path to the newly created, closed temporary file

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)

  # Close the descriptor right away; only the path is returned, so keeping
  # the descriptor open would leak it
  os.close(fd)

  return path
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple
  @return: PEM-encoded private key and certificate

  """
  # Generate a fresh RSA key pair for this certificate
  privkey = OpenSSL.crypto.PKey()
  privkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Build the certificate; setting the issuer to the certificate's own
  # subject and signing with its own key makes it self-signed
  cert = OpenSSL.crypto.X509()
  cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(privkey)
  cert.sign(privkey, constants.X509_CERT_SIGN_DIGEST)

  # Serialize both halves to PEM
  pem_key = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                           privkey)
  pem_cert = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert)

  return (pem_key, pem_cert)
2857 def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
2858 validity=constants.X509_CERT_DEFAULT_VALIDITY):
2859 """Legacy function to generate self-signed X509 certificate.
2862 @param filename: path to write certificate to
2863 @type common_name: string
2864 @param common_name: commonName value
2866 @param validity: validity of certificate in number of days
2869 # TODO: Investigate using the cluster name instead of X505_CERT_CN for
2870 # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
2871 # and node daemon certificates have the proper Subject/Issuer.
2872 (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
2873 validity * 24 * 60 * 60)
2875 WriteFile(filename, mode=0400, data=key_pem + cert_pem)
2878 class FileLock(object):
2879 """Utility class for file locks.
2882 def __init__(self, fd, filename):
2883 """Constructor for FileLock.
2886 @param fd: File object
2888 @param filename: Path of the file opened at I{fd}
2892 self.filename = filename
2895 def Open(cls, filename):
2896 """Creates and opens a file to be used as a file-based lock.
2898 @type filename: string
2899 @param filename: path to the file to be locked
2902 # Using "os.open" is necessary to allow both opening existing file
2903 # read/write and creating if not existing. Vanilla "open" will truncate an
2904 # existing file -or- allow creating if not existing.
2905 return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
2912 """Close the file and release the lock.
2915 if hasattr(self, "fd") and self.fd:
2919 def _flock(self, flag, blocking, timeout, errmsg):
2920 """Wrapper for fcntl.flock.
2923 @param flag: operation flag
2924 @type blocking: bool
2925 @param blocking: whether the operation should be done in blocking mode.
2926 @type timeout: None or float
2927 @param timeout: for how long the operation should be retried (implies
2929 @type errmsg: string
2930 @param errmsg: error message in case operation fails.
2933 assert self.fd, "Lock was closed"
2934 assert timeout is None or timeout >= 0, \
2935 "If specified, timeout must be positive"
2936 assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
2938 # When a timeout is used, LOCK_NB must always be set
2939 if not (timeout is None and blocking):
2940 flag |= fcntl.LOCK_NB
2943 self._Lock(self.fd, flag, timeout)
2946 Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
2947 args=(self.fd, flag, timeout))
2948 except RetryTimeout:
2949 raise errors.LockError(errmsg)
2952 def _Lock(fd, flag, timeout):
2954 fcntl.flock(fd, flag)
2955 except IOError, err:
2956 if timeout is not None and err.errno == errno.EAGAIN:
2959 logging.exception("fcntl.flock failed")
2962 def Exclusive(self, blocking=False, timeout=None):
2963 """Locks the file in exclusive mode.
2965 @type blocking: boolean
2966 @param blocking: whether to block and wait until we
2967 can lock the file or return immediately
2968 @type timeout: int or None
2969 @param timeout: if not None, the duration to wait for the lock
2973 self._flock(fcntl.LOCK_EX, blocking, timeout,
2974 "Failed to lock %s in exclusive mode" % self.filename)
2976 def Shared(self, blocking=False, timeout=None):
2977 """Locks the file in shared mode.
2979 @type blocking: boolean
2980 @param blocking: whether to block and wait until we
2981 can lock the file or return immediately
2982 @type timeout: int or None
2983 @param timeout: if not None, the duration to wait for the lock
2987 self._flock(fcntl.LOCK_SH, blocking, timeout,
2988 "Failed to lock %s in shared mode" % self.filename)
2990 def Unlock(self, blocking=True, timeout=None):
2991 """Unlocks the file.
2993 According to C{flock(2)}, unlocking can also be a nonblocking
2996 To make a non-blocking request, include LOCK_NB with any of the above
2999 @type blocking: boolean
3000 @param blocking: whether to block and wait until we
3001 can lock the file or return immediately
3002 @type timeout: int or None
3003 @param timeout: if not None, the duration to wait for the lock
3007 self._flock(fcntl.LOCK_UN, blocking, timeout,
3008 "Failed to unlock %s" % self.filename)
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        # Stacked usage: reuse the dict installed by an outer decorator
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous handlers, even if fn raised
        sighandler.Reset()
    return sig_function
  return wrap
class SignalWakeupFd(object):
  """Wrapper around a pipe registered via C{signal.set_wakeup_fd}.

  Reading from the object (L{read}/L{fileno}) allows a select/poll loop to
  be woken up when a signal arrives.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported
    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      return -1
  else:
    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # "_previous" may be missing if __init__ raised before assigning it
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    self.Reset()
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: Object with a C{Notify} method (e.g. L{SignalWakeupFd}),
        notified on signal delivery -- TODO confirm against callers

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # Iterate over an explicit copy: the body deletes from the dict, which
    # would otherwise rely on Python 2's items() returning a snapshot
    for (signum, prev_handler) in list(self._previous.items()):
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes the set, anchoring each item as a full-string regex.

    @param items: the field names or patterns making up the set

    """
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Plain loop instead of itertools.ifilter: identical behavior, without
    # depending on the Python 2-only ifilter function
    for val in self.items:
      m = val.match(field)
      if m:
        return m

    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  The start time is taken lazily on the first call to L{Remaining}, so the
  timeout only starts running once the first operation begins.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests
    @raise ValueError: if the timeout is negative

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn

    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    @return: remaining time in seconds, or C{None} if no timeout was set

    """
    if self._timeout is None:
      return None

    # Get start time on first calculation
    if self._start_time is None:
      self._start_time = self._time_fn()

    # Calculate remaining time
    remaining_timeout = self._start_time + self._timeout - self._time_fn()

    if not self._allow_negative:
      # Ensure timeout is always >= 0
      return max(0.0, remaining_timeout)

    return remaining_timeout