4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti utility module.
24 This module holds functions that can be used in both daemons (all) and
25 the command line scripts.
45 import logging.handlers
55 from cStringIO import StringIO
62 from ganeti import errors
63 from ganeti import constants
64 from ganeti import compat
68 _re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
72 #: when set to True, L{RunCmd} is disabled
75 _RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
77 HEX_CHAR_RE = r"[a-zA-Z0-9]"
78 VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
79 X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
80 (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
81 HEX_CHAR_RE, HEX_CHAR_RE),
84 _VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
86 # Structure definition for getsockopt(SOL_SOCKET, SO_PEERCRED, ...):
87 # struct ucred { pid_t pid; uid_t uid; gid_t gid; };
89 # The GNU C Library defines gid_t and uid_t to be "unsigned int" and
92 # IEEE Std 1003.1-2008:
93 # "nlink_t, uid_t, gid_t, and id_t shall be integer types"
94 # "blksize_t, pid_t, and ssize_t shall be signed integer types"
96 _STRUCT_UCRED_SIZE = struct.calcsize(_STRUCT_UCRED)
98 # Certificate verification results
100 CERT_ERROR) = range(1, 3)
102 # Flags for mlockall() (from bits/mman.h)
class RunResult(object):
  """Holds the result of running external programs.

  NOTE(review): reconstructed from a mangled extract (stray line-number
  prefixes, several missing lines) following the structure visible in the
  surviving lines; the attribute assignments for stdout/stderr/cmd and the
  else/failed branches were missing and have been restored.

  @type exit_code: int or None
  @ivar exit_code: the exit code of the program, or None (if the program
      was terminated by a signal)
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @ivar stdout: the standard output of the program
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    # Failure means either signal termination or a non-zero exit code
    self.failed = (signal_ is not None or exit_code != 0)

    # Build a human-readable reason; signal termination takes precedence
    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
158 def _BuildCmdEnvironment(env, reset):
159 """Builds the environment for an external program.
165 cmd_env = os.environ.copy()
166 cmd_env["LC_ALL"] = "C"
# NOTE(review): mangled extract -- every line carries a stray original
# line-number prefix and several lines are missing (the fork-disabled
# guard's condition, shell-vs-list detection, the try/except around
# _RunCmdPipe/_RunCmdFile, and the exit-code/signal decoding).
# Kept byte-identical; comments only.
# Purpose: run an external command, capturing output into a RunResult or
# redirecting it to the file named by `output`.
174 def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
175 """Execute a (shell) command.
177 The command should not read from its standard input, as it will be
180 @type cmd: string or list
181 @param cmd: Command to run
183 @param env: Additional environment variables
185 @param output: if desired, the output of the command can be
186 saved in a file instead of the RunResult instance; this
187 parameter denotes the file name (if not None)
189 @param cwd: if specified, will be used as the working
190 directory for the command; the default will be /
191 @type reset_env: boolean
192 @param reset_env: whether to reset or keep the default os environment
194 @return: RunResult instance
195 @raise errors.ProgrammerError: if we call this when forks are disabled
# Guard: refuse to fork when fork() has been globally disabled (the
# controlling flag's check is among the missing lines).
199 raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
201 if isinstance(cmd, basestring):
205 cmd = [str(val) for val in cmd]
206 strcmd = ShellQuoteArgs(cmd)
210 logging.debug("RunCmd %s, output file '%s'", strcmd, output)
212 logging.debug("RunCmd %s", strcmd)
214 cmd_env = _BuildCmdEnvironment(env, reset_env)
# Two execution modes: pipe-captured output vs. file-redirected output.
218 out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
220 status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
223 if err.errno == errno.ENOENT:
224 raise errors.OpExecError("Can't execute '%s': not found (%s)" %
236 return RunResult(exitcode, signal_, out, err, strcmd)
# NOTE(review): mangled extract -- stray line-number prefixes, missing
# lines (signature continuation, fork() call, pipe cleanup try/finally,
# waitpid, PID parsing). Kept byte-identical; comments only.
# Purpose: double-fork a daemon, passing its PID and any startup error
# back to the caller through two pipes.
239 def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
241 """Start a daemon process after forking twice.
243 @type cmd: string or list
244 @param cmd: Command to run
246 @param env: Additional environment variables
248 @param cwd: Working directory for the program
250 @param output: Path to file in which to save the output
252 @param output_fd: File descriptor for output
253 @type pidfile: string
254 @param pidfile: Process ID file
256 @return: Daemon process ID
257 @raise errors.ProgrammerError: if we call this when forks are disabled
261 raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
# 'output' and 'output_fd' are mutually exclusive destinations
264 if output and not (bool(output) ^ (output_fd is not None)):
265 raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
268 if isinstance(cmd, basestring):
269 cmd = ["/bin/sh", "-c", cmd]
271 strcmd = ShellQuoteArgs(cmd)
274 logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
276 logging.debug("StartDaemon %s", strcmd)
278 cmd_env = _BuildCmdEnvironment(env, False)
280 # Create pipe for sending PID back
281 (pidpipe_read, pidpipe_write) = os.pipe()
284 # Create pipe for sending error messages
285 (errpipe_read, errpipe_write) = os.pipe()
292 # Child process, won't return
293 _StartDaemonChild(errpipe_read, errpipe_write,
294 pidpipe_read, pidpipe_write,
296 output, output_fd, pidfile)
298 # Well, maybe child process failed
299 os._exit(1) # pylint: disable-msg=W0212
301 _CloseFDNoErr(errpipe_write)
303 # Wait for daemon to be started (or an error message to arrive) and read
304 # up to 100 KB as an error message
305 errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
307 _CloseFDNoErr(errpipe_read)
309 _CloseFDNoErr(pidpipe_write)
311 # Read up to 128 bytes for PID
312 pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
314 _CloseFDNoErr(pidpipe_read)
316 # Try to avoid zombies by waiting for child process
# Any bytes on the error pipe mean the daemon failed to start
323 raise errors.OpExecError("Error when starting daemon process: %r" %
328 except (ValueError, TypeError), err:
329 raise errors.OpExecError("Error while trying to parse PID %r: %s" %
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include setsid/fork calls, the pidfile locking call, the
# try/except wrappers and the chdir. Kept byte-identical; comments only.
# Purpose: first-child half of the double fork; sets up fds, pidfile and
# stdio redirection, then execs the daemon, reporting failures via pipe.
333 def _StartDaemonChild(errpipe_read, errpipe_write,
334 pidpipe_read, pidpipe_write,
336 output, fd_output, pidfile):
337 """Child process for starting daemon.
341 # Close parent's side
342 _CloseFDNoErr(errpipe_read)
343 _CloseFDNoErr(pidpipe_read)
345 # First child process
350 # And fork for the second time
353 # Exit first child process
354 os._exit(0) # pylint: disable-msg=W0212
356 # Make sure pipe is closed on execv* (and thereby notifies original process)
357 SetCloseOnExecFlag(errpipe_write, True)
359 # List of file descriptors to be left open
360 noclose_fds = [errpipe_write]
365 # TODO: Atomic replace with another locked file instead of writing into
367 fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
369 # Lock the PID file (and fail if not possible to do so). Any code
370 # wanting to send a signal to the daemon should try to lock the PID
371 # file before reading it. If acquiring the lock succeeds, the daemon is
372 # no longer running and the signal should not be sent.
375 os.write(fd_pidfile, "%d\n" % os.getpid())
376 except Exception, err:
377 raise Exception("Creating and locking PID file failed: %s" % err)
379 # Keeping the file open to hold the lock
380 noclose_fds.append(fd_pidfile)
# The pidfile fd must survive the exec so the lock is kept
382 SetCloseOnExecFlag(fd_pidfile, False)
387 fd_devnull = os.open(os.devnull, os.O_RDWR)
389 assert not output or (bool(output) ^ (fd_output is not None))
391 if fd_output is not None:
396 # TODO: Implement flag to set append=yes/no
397 fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
398 except EnvironmentError, err:
399 raise Exception("Opening output file failed: %s" % err)
401 fd_output = fd_devnull
403 # Redirect standard I/O
404 os.dup2(fd_devnull, 0)
405 os.dup2(fd_output, 1)
406 os.dup2(fd_output, 2)
408 # Send daemon PID to parent
409 RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
411 # Close all file descriptors except stdio and error message pipe
412 CloseFDs(noclose_fds=noclose_fds)
414 # Change working directory
# execvp when no custom env was given, execvpe otherwise (the selecting
# condition is among the missing lines)
418 os.execvp(args[0], args)
420 os.execvpe(args[0], args, env)
421 except: # pylint: disable-msg=W0702
423 # Report errors to original process
424 buf = str(sys.exc_info()[1])
426 RetryOnSignal(os.write, errpipe_write, buf)
427 except: # pylint: disable-msg=W0702
428 # Ignore errors in error handling
431 os._exit(1) # pylint: disable-msg=W0212
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the StringIO buffers, the fdmap dict literal opening, the
# poll loop condition and stdin close. Kept byte-identical; comments only.
# Purpose: run a child via subprocess, multiplexing stdout/stderr with
# poll() into in-memory buffers, and return (out, err, status).
434 def _RunCmdPipe(cmd, env, via_shell, cwd):
435 """Run a command and return its output.
437 @type cmd: string or list
438 @param cmd: Command to run
440 @param env: The environment to use
441 @type via_shell: bool
442 @param via_shell: if we should run via the shell
444 @param cwd: the working directory for the program
446 @return: (out, err, status)
449 poller = select.poll()
450 child = subprocess.Popen(cmd, shell=via_shell,
451 stderr=subprocess.PIPE,
452 stdout=subprocess.PIPE,
453 stdin=subprocess.PIPE,
454 close_fds=True, env=env,
458 poller.register(child.stdout, select.POLLIN)
459 poller.register(child.stderr, select.POLLIN)
# Maps fd -> (accumulating buffer, file object); dict header missing
463 child.stdout.fileno(): (out, child.stdout),
464 child.stderr.fileno(): (err, child.stderr),
# Non-blocking reads so a single poll wakeup drains everything available
467 SetNonblockFlag(fd, True)
470 pollresult = RetryOnSignal(poller.poll)
472 for fd, event in pollresult:
473 if event & select.POLLIN or event & select.POLLPRI:
474 data = fdmap[fd][1].read()
475 # no data from read signifies EOF (the same as POLLHUP)
477 poller.unregister(fd)
480 fdmap[fd][0].write(data)
481 if (event & select.POLLNVAL or event & select.POLLHUP or
482 event & select.POLLERR):
483 poller.unregister(fd)
489 status = child.wait()
490 return out, err, status
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the stdout=fh argument, the try/finally closing fh and
# the return. Kept byte-identical; comments only.
# Purpose: run a child with stdout+stderr appended to the file `output`
# and return its exit status.
493 def _RunCmdFile(cmd, env, via_shell, output, cwd):
494 """Run a command and save its output to a file.
496 @type cmd: string or list
497 @param cmd: Command to run
499 @param env: The environment to use
500 @type via_shell: bool
501 @param via_shell: if we should run via the shell
503 @param output: the filename in which to save the output
505 @param cwd: the working directory for the program
507 @return: the exit status
510 fh = open(output, "a")
512 child = subprocess.Popen(cmd, shell=via_shell,
# stderr is folded into the output file alongside stdout
513 stderr=subprocess.STDOUT,
515 stdin=subprocess.PIPE,
516 close_fds=True, env=env,
520 status = child.wait()
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  NOTE(review): reconstructed from a mangled extract; the if/else around
  the two flag operations was missing and has been restored.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.

  NOTE(review): reconstructed from a mangled extract; the if/else around
  the two flag operations was missing and has been restored.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  NOTE(review): reconstructed from a mangled extract; the retry loop and
  the re-raise statements were missing and have been restored.

  @param fn: the callable to invoke
  @return: whatever C{fn} returns on its first non-EINTR completion
  @raise Exception: re-raises any error of C{fn} not caused by EINTR

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError as err:
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error) as err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the rr list initialisation, the try/except around
# ListVisibleFiles, a continue, and the final return. Kept
# byte-identical; comments only.
# Purpose: execute every runnable, mask-matching file in a directory and
# collect (name, status, result) tuples.
581 def RunParts(dir_name, env=None, reset_env=False):
582 """Run Scripts or programs in a directory
584 @type dir_name: string
585 @param dir_name: absolute path to a directory
587 @param env: The environment to use
588 @type reset_env: boolean
589 @param reset_env: whether to reset or keep the default os environment
590 @rtype: list of tuples
591 @return: list of (name, (one of RUNDIR_STATUS), RunResult)
597 dir_contents = ListVisibleFiles(dir_name)
599 logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
602 for relname in sorted(dir_contents):
603 fname = PathJoin(dir_name, relname)
# Skip anything that is not an executable regular file matching the
# external-plugin name mask
604 if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
605 constants.EXT_PLUGIN_MASK.match(relname) is not None):
606 rr.append((relname, constants.RUNPARTS_SKIP, None))
609 result = RunCmd([fname], env=env, reset_env=reset_env)
610 except Exception, err: # pylint: disable-msg=W0703
611 rr.append((relname, constants.RUNPARTS_ERR, str(err)))
613 rr.append((relname, constants.RUNPARTS_RUN, result))
# NOTE(review): mangled extract -- stray line-number prefixes; the
# getsockopt continuation line (buffer size, presumably
# _STRUCT_UCRED_SIZE) is missing. Kept byte-identical; comments only.
618 def GetSocketCredentials(sock):
619 """Returns the credentials of the foreign process connected to a socket.
621 @param sock: Unix socket
622 @rtype: tuple; (number, number, number)
623 @return: The PID, UID and GID of the connected foreign process.
# SO_PEERCRED returns a packed `struct ucred` (see module-level comment)
626 peercred = sock.getsockopt(socket.SOL_SOCKET, IN.SO_PEERCRED,
628 return struct.unpack(_STRUCT_UCRED, peercred)
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  NOTE(review): reconstructed from a mangled extract; the try/unlink
  lines were missing and have been restored.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError as err:
    # Best effort: a missing file or a directory is not an error here
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  NOTE(review): reconstructed from a mangled extract; the try/rmdir
  lines were missing and have been restored.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError as err:
    # Only a missing directory is tolerated
    if err.errno != errno.ENOENT:
      raise
def RenameFile(old, new, mkdir=False, mkdir_mode=0o750):
  """Renames a file, optionally creating the target directory.

  NOTE(review): reconstructed from a mangled extract; the try/except
  skeleton and the final re-raise were missing and have been restored.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError as err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is
    # not a good idea, hence going via the exception (EAFP).
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
      return os.rename(old, new)
    raise
def Makedirs(path, mode=0o750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not
  implemented before Python 2.5.

  NOTE(review): reconstructed from a mangled extract; the try/makedirs
  lines were missing and have been restored.

  @type path: string
  @param path: the directory path to create
  @type mode: int
  @param mode: permissions for newly created directories

  """
  try:
    os.makedirs(path, mode)
  except OSError as err:
    # Ignore EEXIST. This is only handled in os.makedirs as included in
    # Python 2.5 and above.
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise
# NOTE(review): mangled extract -- stray line-number prefixes; the
# try/finally around the lock and the else: before logging.critical are
# missing. Kept byte-identical; comments only.
# Purpose: force tempfile to re-seed its random name generator after
# fork(), by clearing its private _name_sequence under _once_lock.
710 def ResetTempfileModule():
711 """Resets the random name generator of the tempfile module.
713 This function should be called after C{os.fork} in the child process to
714 ensure it creates a newly seeded random generator. Otherwise it would
715 generate the same random parts as the parent process. If several processes
716 race for the creation of a temporary file, this could lead to one not getting
720 # pylint: disable-msg=W0212
# Touching private tempfile attributes -- guarded by hasattr so future
# Python versions without them degrade to the critical log below
721 if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
722 tempfile._once_lock.acquire()
724 # Reset random name generator
725 tempfile._name_sequence = None
727 tempfile._once_lock.release()
729 logging.critical("The tempfile module misses at least one of the"
730 " '_once_lock' and '_name_sequence' attributes")
# NOTE(review): mangled extract -- stray line-number prefixes; the early
# "return None", the file open and the read loop feeding fp.update() are
# missing. Kept byte-identical; comments only.
# Purpose: SHA1-fingerprint the contents of an existing regular file.
733 def _FingerprintFile(filename):
734 """Compute the fingerprint of a file.
736 If the file does not exist, a None will be returned
740 @param filename: the filename to checksum
742 @return: the hex digest of the sha checksum of the contents
# Only existing regular files are fingerprinted
746 if not (os.path.exists(filename) and os.path.isfile(filename)):
751 fp = compat.sha1_hash()
759 return fp.hexdigest()
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  NOTE(review): reconstructed from a mangled extract; the result dict
  initialisation, the truthiness guard and the return were missing and
  have been restored.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    # _FingerprintFile returns a false value for missing files; skip those
    if cksum:
      ret[filename] = cksum

  return ret
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the allowed_values default, the for-loop header over
# target, several assignment/continue lines in the bool branch, and the
# try: keywords before ParseUnit/int. Kept byte-identical; comments only.
# Purpose: coerce dict values in-place to the types declared in
# key_types, raising TypeEnforcementError on violations.
782 def ForceDictType(target, key_types, allowed_values=None):
783 """Force the values of a dict to have certain types.
786 @param target: the dict to update
787 @type key_types: dict
788 @param key_types: dict mapping target dict keys to types
789 in constants.ENFORCEABLE_TYPES
790 @type allowed_values: list
791 @keyword allowed_values: list of specially allowed values
794 if allowed_values is None:
797 if not isinstance(target, dict):
798 msg = "Expected dictionary, got '%s'" % target
799 raise errors.TypeEnforcementError(msg)
# Reject keys that have no declared type
802 if key not in key_types:
803 msg = "Unknown key '%s'" % key
804 raise errors.TypeEnforcementError(msg)
# Specially allowed values bypass type enforcement entirely
806 if target[key] in allowed_values:
809 ktype = key_types[key]
810 if ktype not in constants.ENFORCEABLE_TYPES:
811 msg = "'%s' has non-enforceable type %s" % (key, ktype)
812 raise errors.ProgrammerError(msg)
814 if ktype == constants.VTYPE_STRING:
815 if not isinstance(target[key], basestring):
# A false bool is coerced (presumably to the empty string -- the
# assignment line is missing); anything else is an error
816 if isinstance(target[key], bool) and not target[key]:
819 msg = "'%s' (value %s) is not a valid string" % (key, target[key])
820 raise errors.TypeEnforcementError(msg)
821 elif ktype == constants.VTYPE_BOOL:
822 if isinstance(target[key], basestring) and target[key]:
823 if target[key].lower() == constants.VALUE_FALSE:
825 elif target[key].lower() == constants.VALUE_TRUE:
828 msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
829 raise errors.TypeEnforcementError(msg)
834 elif ktype == constants.VTYPE_SIZE:
836 target[key] = ParseUnit(target[key])
837 except errors.UnitParseError, err:
838 msg = "'%s' (value %s) is not a valid size. error: %s" % \
839 (key, target[key], err)
840 raise errors.TypeEnforcementError(msg)
841 elif ktype == constants.VTYPE_INT:
843 target[key] = int(target[key])
844 except (ValueError, TypeError):
845 msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
846 raise errors.TypeEnforcementError(msg)
849 def _GetProcStatusPath(pid):
850 """Returns the path for a PID's proc status file.
853 @param pid: Process ID
857 return "/proc/%d/status" % pid
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the nested _TryStat helper's def/os.stat lines, the
# early "pid <= 0" return (presumably), the try: keyword and the final
# return after RetryTimeout. Kept byte-identical; comments only.
# Purpose: test liveness of a PID by stat()ing /proc/$pid/status, with
# retries to paper over transient /proc weirdness.
860 def IsProcessAlive(pid):
861 """Check if a given pid exists on the system.
863 @note: zombie status is not handled, so zombie processes
864 will be returned as alive
866 @param pid: the process ID to check
868 @return: True if the process exists
875 except EnvironmentError, err:
# Missing status file means the process is gone
876 if err.errno in (errno.ENOENT, errno.ENOTDIR):
878 elif err.errno == errno.EINVAL:
# EINVAL is treated as transient and retried
879 raise RetryAgain(err)
882 assert isinstance(pid, int), "pid must be an integer"
886 # /proc in a multiprocessor environment can have strange behaviors.
887 # Retry the os.stat a few times until we get a good result.
889 return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
890 args=[_GetProcStatusPath(pid)])
891 except RetryTimeout, err:
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the result-set initialisation, the per-character int
# conversion, the bit tests (presumably chval & 1/2/4/8) before each
# result.add, the signum increment and the return. Kept byte-identical;
# comments only.
# Purpose: decode a kernel-rendered hex sigset_t string into the set of
# enabled signal numbers.
895 def _ParseSigsetT(sigset):
896 """Parse a rendered sigset_t value.
898 This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
902 @param sigset: Rendered signal set from /proc/$pid/status
904 @return: Set of all enabled signal numbers
# The string renders highest signals first, hence the reversal
910 for ch in reversed(sigset):
913 # The following could be done in a loop, but it's easier to read and
914 # understand in the unrolled form
916 result.add(signum + 1)
918 result.add(signum + 2)
920 result.add(signum + 3)
922 result.add(signum + 4)
929 def _GetProcStatusField(pstatus, field):
930 """Retrieves a field from the contents of a proc status file.
932 @type pstatus: string
933 @param pstatus: Contents of /proc/$pid/status
935 @param field: Name of field whose value should be returned
939 for line in pstatus.splitlines():
940 parts = line.split(":", 1)
942 if len(parts) < 2 or parts[0] != field:
945 return parts[1].strip()
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the try: keyword, the "return False" in the except
# branch, the re-raise, and the "if sigcgt is None:" guard before the
# RuntimeError. Kept byte-identical; comments only.
# Purpose: report whether a process has a handler installed for signum,
# by parsing the SigCgt field of its /proc status file.
950 def IsProcessHandlingSignal(pid, signum, status_path=None):
951 """Checks whether a process is handling a signal.
954 @param pid: Process ID
956 @param signum: Signal number
# status_path is overridable for testing; defaults to /proc/$pid/status
960 if status_path is None:
961 status_path = _GetProcStatusPath(pid)
964 proc_status = ReadFile(status_path)
965 except EnvironmentError, err:
966 # In at least one case, reading /proc/$pid/status failed with ESRCH.
967 if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
971 sigcgt = _GetProcStatusField(proc_status, "SigCgt")
973 raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
975 # Now check whether signal is handled
976 return signum in _ParseSigsetT(sigcgt)
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the try: keywords, the int() conversion of raw_data and
# the return statements (presumably returning 0 on error). Kept
# byte-identical; comments only.
# Purpose: read and parse a PID from a pidfile, tolerating a missing or
# malformed file.
979 def ReadPidFile(pidfile):
980 """Read a pid from a file.
982 @type pidfile: string
983 @param pidfile: path to the file containing the pid
985 @return: The process id, if the file exists and contains a valid PID,
990 raw_data = ReadOneLineFile(pidfile)
991 except EnvironmentError, err:
# A missing pidfile is normal; anything else is logged
992 if err.errno != errno.ENOENT:
993 logging.exception("Can't read pid file")
998 except (TypeError, ValueError), err:
999 logging.info("Can't parse pid file contents", exc_info=True)
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the try: keywords, the lock-acquisition call itself, the
# "return None" paths and the fd cleanup. Kept byte-identical; comments
# only.
# Purpose: return the PID from a pidfile only if the file is currently
# locked (i.e. the daemon written by StartDaemon is still running).
1005 def ReadLockedPidFile(path):
1006 """Reads a locked PID file.
1008 This can be used together with L{StartDaemon}.
1011 @param path: Path to PID file
1012 @return: PID as integer or, if file was unlocked or couldn't be opened, None
1016 fd = os.open(path, os.O_RDONLY)
1017 except EnvironmentError, err:
1018 if err.errno == errno.ENOENT:
1019 # PID file doesn't exist
1025 # Try to acquire lock
1027 except errors.LockError:
# Lock held by the daemon -> the recorded PID is live; return it
1028 # Couldn't lock, daemon is running
1029 return int(os.read(fd, 100))
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the exact-match "return key", the re_flags and list
# initialisations, and the final "return None". Kept byte-identical;
# comments only.
# Purpose: match a (possibly partial) name against a list of FQDNs on
# label boundaries, refusing ambiguous multi-matches.
1036 def MatchNameComponent(key, name_list, case_sensitive=True):
1037 """Try to match a name against a list.
1039 This function will try to match a name like test1 against a list
1040 like C{['test1.example.com', 'test2.example.com', ...]}. Against
1041 this list, I{'test1'} as well as I{'test1.example'} will match, but
1042 not I{'test1.ex'}. A multiple match will be considered as no match
1043 at all (e.g. I{'test1'} against C{['test1.example.com',
1044 'test1.example.org']}), except when the key fully matches an entry
1045 (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
1048 @param key: the name to be searched
1049 @type name_list: list
1050 @param name_list: the list of strings against which to search the key
1051 @type case_sensitive: boolean
1052 @param case_sensitive: whether to provide a case-sensitive match
1055 @return: None if there is no match I{or} if there are multiple matches,
1056 otherwise the element from the list which matches
# Exact membership short-circuits everything
1059 if key in name_list:
1063 if not case_sensitive:
1064 re_flags |= re.IGNORECASE
# Key must match a full leading label sequence ("key" or "key.<rest>")
1066 mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
1069 for name in name_list:
1070 if mo.match(name) is not None:
1071 names_filtered.append(name)
1072 if not case_sensitive and key == name.upper():
1073 string_matches.append(name)
# Prefer unique exact (case-folded) matches over unique prefix matches
1075 if len(string_matches) == 1:
1076 return string_matches[0]
1077 if len(names_filtered) == 1:
1078 return names_filtered[0]
# NOTE(review): mangled extract -- the "class HostInfo" header line
# itself is missing from this view, along with stray line-number
# prefixes on every surviving line. Kept byte-identical; comments only.
# Purpose: resolver/hostname helper; __init__ resolves the given (or
# local) name and stores name, aliases, ipaddrs and primary ip.
1083 """Class implementing resolver and hostname functionality
# Relaxed hostname pattern (lowercase letters, digits, dot, dash,
# underscore; up to 255 chars)
1086 _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")
1088 def __init__(self, name=None):
1089 """Initialize the host name object.
1091 If the name argument is not passed, it will use this system's
1096 name = self.SysName()
1099 self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
# Primary IP is the first resolved address
1100 self.ip = self.ipaddrs[0]
# NOTE(review): mangled extract; kept byte-identical, comments only.
# Returns the first dot-separated label of self.name.
1102 def ShortName(self):
1103 """Returns the hostname without domain.
1106 return self.name.split('.')[0]
# NOTE(review): mangled extract -- the def line (and presumably a
# @staticmethod decorator) for SysName is missing from this view. Kept
# byte-identical; comments only.
1110 """Return the current system's name.
1112 This is simply a wrapper over C{socket.gethostname()}.
1115 return socket.gethostname()
# NOTE(review): mangled extract -- stray line-number prefixes; the
# decorator (presumably @staticmethod), docstring summary, try: keyword
# and the return of `result` are missing. Kept byte-identical; comments
# only.
# Purpose: resolve a hostname via gethostbyname_ex, converting socket
# errors into errors.ResolverError.
1118 def LookupHostname(hostname):
1122 @param hostname: hostname to look up
1125 @return: a tuple (name, aliases, ipaddrs) as returned by
1126 C{socket.gethostbyname_ex}
1127 @raise errors.ResolverError: in case of errors in resolving
1131 result = socket.gethostbyname_ex(hostname)
1132 except (socket.gaierror, socket.herror, socket.error), err:
1133 # hostname not found in DNS, or other socket exception in the
1134 # (code, description format)
1135 raise errors.ResolverError(hostname, err.args[0], err.args[1])
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the @classmethod decorator (implied by the `cls`
# parameter), the double-dot membership test, the error-code argument of
# OpPrereqError and the final return of `hostname`. Kept byte-identical;
# comments only.
# Purpose: lower-case, validate and strip the trailing dot from a
# hostname, raising OpPrereqError on invalid names.
1140 def NormalizeName(cls, hostname):
1141 """Validate and normalize the given hostname.
1143 @attention: the validation is a bit more relaxed than the standards
1144 require; most importantly, we allow underscores in names
1145 @raise errors.OpPrereqError: when the name is not valid
1148 hostname = hostname.lower()
1149 if (not cls._VALID_NAME_RE.match(hostname) or
1150 # double-dots, meaning empty label
1152 # empty initial label
1153 hostname.startswith(".")):
1154 raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
# A single trailing dot (FQDN root marker) is accepted and stripped
1156 if hostname.endswith("."):
1157 hostname = hostname.rstrip(".")
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the try: / int() conversion feeding `numport`, the else:
# separating the numeric branch, the "if not valid:" guard and the final
# return. Kept byte-identical; comments only.
# Purpose: accept either a symbolic service name or a numeric port in
# [0, 65536), raising OpPrereqError otherwise.
1161 def ValidateServiceName(name):
1162 """Validate the given service name.
1164 @type name: number or string
1165 @param name: Service name or port specification
1170 except (ValueError, TypeError):
1171 # Non-numeric service name
1172 valid = _VALID_SERVICE_NAME_RE.match(name)
1174 # Numeric port (protocols other than TCP or UDP might need adjustments
1176 valid = (numport >= 0 and numport < (1 << 16))
1179 raise errors.OpPrereqError("Invalid service name '%s'" % name,
# NOTE(review): mangled extract -- stray line-number prefixes; the try:
# keyword is missing. Kept byte-identical; comments only.
# Purpose: wrap HostInfo construction, converting ResolverError into
# OpPrereqError for prerequisite checking.
1185 def GetHostInfo(name=None):
1186 """Lookup host name and raise an OpPrereqError for failures"""
1189 return HostInfo(name)
1190 except errors.ResolverError, err:
1191 raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
1192 (err[0], err[2]), errors.ECODE_RESOLVER)
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the result-dict initialisation, the failed-command
# early return, the try: keyword in the parse loop, the dict assignment
# and the return. Kept byte-identical; comments only.
# Purpose: parse `vgs` output into a mapping of volume group name to
# size in MiB.
1195 def ListVolumeGroups():
1196 """List volume groups and their size
1200 Dictionary with keys volume name and values
1201 the size of the volume
# --units m --nosuffix yields sizes as plain megabyte numbers
1204 command = "vgs --noheadings --units m --nosuffix -o name,size"
1205 result = RunCmd(command)
1210 for line in result.stdout.splitlines():
1212 name, size = line.split()
1213 size = int(float(size))
1214 except (IndexError, ValueError), err:
1215 logging.error("Invalid output from vgs (%s): %s", err, line)
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge exposes a 'bridge' subdirectory under sysfs
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  NOTE(review): reconstructed from a mangled extract; the C{_TryInt}
  helper's def/return lines and the sort call were missing and have been
  restored.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = r"(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile(r"^\D*$")

  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    else:
      return int(val)

  # Pair each name with its group-wise sort key so digit groups compare
  # numerically ('a2' < 'a10')
  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  NOTE(review): reconstructed from a mangled extract; the try: keyword
  and the return were missing and have been restored.

  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    # Fall back to the unconverted value on expected conversion errors
    nv = val
  return nv
# NOTE(review): mangled extract -- the function's `def` line itself is
# missing from this view (only the docstring and body survive), plus
# stray line-number prefixes. Kept byte-identical; comments only.
1295 """Verifies the syntax of an IPv4 address.
1297 This function checks if the IPv4 address passes is valid or not based
1298 on syntax (not IP range, class calculations, etc.).
1301 @param ip: the address to be checked
1302 @rtype: a regular expression match object
1303 @return: a regular expression match object, or None if the
1304 address is not valid
# Each octet: 0, or 1-3 digits without a leading zero (so up to 999 --
# range is deliberately not checked, per the docstring)
1307 unit = "(0|[1-9]\d{0,2})"
1308 #TODO: convert and return only boolean
1309 return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  # Whitelist of characters that cannot be shell metacharacters
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  NOTE(review): reconstructed from a mangled extract; the loop header
  over C{args} was missing and has been restored.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument is not shell-safe

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args
def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  NOTE(review): reconstructed from a mangled extract; the suffix
  assignments and the 'h'-mode suffix selection were missing and have
  been restored.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  NOTE(review): reconstructed from a mangled extract; the unit default,
  the branch keywords and the round-up-to-multiple-of-4 guard were
  missing and have been restored.

  @rtype: int
  @return: the value in MiB, rounded up to the next multiple of 4
  @raise errors.UnitParseError: on unparsable input or unknown units

  """
  m = re.match(r'^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    # No unit given: default to MiB
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass
  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024
  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024
  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the try/finally around the file handle, the read loop
# header, the early return when the key is already present, and the
# newline handling around the append. Kept byte-identical; comments
# only.
# Purpose: append an SSH public key to an authorized_keys file unless an
# equivalent (whitespace-insensitive) line already exists.
1434 def AddAuthorizedKey(file_name, key):
1435 """Adds an SSH public key to an authorized_keys file.
1437 @type file_name: str
1438 @param file_name: path to authorized_keys file
1440 @param key: string containing key
1443 key_fields = key.split()
# a+ mode: readable for the duplicate scan, appendable for the write
1445 f = open(file_name, 'a+')
1449 # Ignore whitespace changes
1450 if line.split() == key_fields:
1452 nl = line.endswith('\n')
1456 f.write(key.rstrip('\r\n'))
# NOTE(review): mangled extract -- stray line-number prefixes; missing
# lines include the try/finally cleanup (closing handles, removing the
# temp file on failure), the read-loop header and the out.write of kept
# lines. Kept byte-identical; comments only.
# Purpose: rewrite an authorized_keys file without the given key, using
# a temp file in the same directory plus atomic rename.
1463 def RemoveAuthorizedKey(file_name, key):
1464 """Removes an SSH public key from an authorized_keys file.
1466 @type file_name: str
1467 @param file_name: path to authorized_keys file
1469 @param key: string containing key
1472 key_fields = key.split()
# Temp file in the same directory so os.rename stays atomic
1474 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1476 out = os.fdopen(fd, 'w')
1478 f = open(file_name, 'r')
1481 # Ignore whitespace changes while comparing lines
1482 if line.split() != key_fields:
1486 os.rename(tmpname, file_name)
# NOTE(review): elided listing -- the loop header, the branch that copies
# non-matching lines, newline writes and try/finally closes are missing.
1496 def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1497 """Sets the name of an IP address and hostname in /etc/hosts.
1499 @type file_name: str
1500 @param file_name: path to the file to modify (usually C{/etc/hosts})
1502 @param ip: the IP address
1504 @param hostname: the hostname to be added
1506 @param aliases: the list of aliases to add for the hostname
1509 # FIXME: use WriteFile + fn rather than duplicating its efforts
1510 # Ensure aliases are unique
1511 aliases = UniqueSequence([hostname] + aliases)[1:]
# atomic replace via temp file in the same directory
1513 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1515 out = os.fdopen(fd, 'w')
1517 f = open(file_name, 'r')
1520 fields = line.split()
# skip (drop) any pre-existing entry for the same IP
1521 if fields and not fields[0].startswith('#') and ip == fields[0]:
1525 out.write("%s\t%s" % (ip, hostname))
1527 out.write(" %s" % ' '.join(aliases))
1532 os.chmod(tmpname, 0644)
1533 os.rename(tmpname, file_name)
def AddHostToEtcHosts(hostname):
  """Resolve a hostname and add it to L{constants.ETC_HOSTS}.

  Thin wrapper around L{SetEtcHostsEntry}: the name is resolved via
  L{HostInfo} and added together with its short-name alias.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  host_info = HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, host_info.ip, host_info.name,
                   [host_info.ShortName()])
# NOTE(review): elided listing -- the loop header, the 'names' assignment
# (presumably fields[1:] -- TODO confirm), the copy of non-matching lines
# and the try/finally closes are missing from view.
1555 def RemoveEtcHostsEntry(file_name, hostname):
1556 """Removes a hostname from /etc/hosts.
1558 IP addresses without names are removed from the file.
1560 @type file_name: str
1561 @param file_name: path to the file to modify (usually C{/etc/hosts})
1563 @param hostname: the hostname to be removed
1566 # FIXME: use WriteFile + fn rather than duplicating its efforts
1567 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1569 out = os.fdopen(fd, 'w')
1571 f = open(file_name, 'r')
1574 fields = line.split()
1575 if len(fields) > 1 and not fields[0].startswith('#'):
1577 if hostname in names:
# remove every occurrence, not just the first
1578 while hostname in names:
1579 names.remove(hostname)
1581 out.write("%s %s\n" % (fields[0], ' '.join(names)))
1588 os.chmod(tmpname, 0644)
1589 os.rename(tmpname, file_name)
def RemoveHostFromEtcHosts(hostname):
  """Resolve a hostname and remove it from L{constants.ETC_HOSTS}.

  Both the full name and the short name of the resolved host are
  removed, via L{RemoveEtcHostsEntry}.

  @type hostname: str
  @param hostname: hostname that will be resolved, after which its full
      and short names are removed from L{constants.ETC_HOSTS}

  """
  host_info = HostInfo(name=hostname)
  for name in (host_info.name, host_info.ShortName()):
    RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format contains no colons, as some shells and applications treat
  them as separators.

  @rtype: str
  @return: the current local time as C{YYYY-MM-DD_HH_MM_SS}

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")
# NOTE(review): elided listing -- the try/finally closes of fsrc/fdst and
# the final 'return backup_name' are missing from view.
1623 def CreateBackup(file_name):
1624 """Creates a backup of a file.
1626 @type file_name: str
1627 @param file_name: file to be backed up
1629 @return: the path to the newly created backup
1630 @raise errors.ProgrammerError: for invalid file names
1633 if not os.path.isfile(file_name):
1634 raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
# backup name embeds original basename plus a filename-safe timestamp
1637 prefix = ("%s.backup-%s." %
1638 (os.path.basename(file_name), TimestampForFilename()))
1639 dir_name = os.path.dirname(file_name)
1641 fsrc = open(file_name, 'rb')
1643 (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1644 fdst = os.fdopen(fd, 'wb')
1646 logging.debug("Backing up %s at %s", file_name, backup_name)
1647 shutil.copyfileobj(fsrc, fdst)
# NOTE(review): elided listing -- the 'return value' for the unquoted
# (shell-safe) case is missing between the visible lines.
1656 def ShellQuote(value):
1657 """Quotes shell argument according to POSIX.
1660 @param value: the argument to be quoted
1662 @return: the quoted value
# values matching _re_shell_unquoted (see top of file) need no quoting
1665 if _re_shell_unquoted.match(value):
# single-quote, escaping embedded single quotes with '\''
1668 return "'%s'" % value.replace("'", "'\\''")
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  Each argument is quoted via L{ShellQuote} and the results are joined
  with single spaces.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join(ShellQuote(arg) for arg in args)
# NOTE(review): elided listing -- the 'success' assignments for the
# connect/timeout outcomes, sock.close() and the final return are missing.
# Uses Python 2 'except E, v' syntax throughout.
1683 def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
1684 """Simple ping implementation using TCP connect(2).
1686 Check if the given IP is reachable by doing attempting a TCP connect
1690 @param target: the IP or hostname to ping
1692 @param port: the port to connect to
1694 @param timeout: the timeout on the connection attempt
1695 @type live_port_needed: boolean
1696 @param live_port_needed: whether a closed port will cause the
1697 function to return failure, as if there was a timeout
1698 @type source: str or None
1699 @param source: if specified, will cause the connect to be made
1700 from this specific source address; failures to bind other
1701 than C{EADDRNOTAVAIL} will be ignored
1704 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1708 if source is not None:
1710 sock.bind((source, 0))
1711 except socket.error, (errcode, _):
# only a genuinely unavailable source address is fatal here
1712 if errcode == errno.EADDRNOTAVAIL:
1715 sock.settimeout(timeout)
1718 sock.connect((target, port))
1721 except socket.timeout:
1723 except socket.error, (errcode, _):
# connection refused still counts as "reachable" unless a live port is required
1724 success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)
def OwnIpAddress(address):
  """Check if the current host has the given IP address.

  This is done by TCP-pinging the address from the loopback address on
  the noded port.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  source_addr = constants.LOCALHOST_IP_ADDRESS
  return TcpPing(address, constants.DEFAULT_NODED_PORT, source=source_addr)
# NOTE(review): elided listing -- the final 'return files' is missing.
1745 def ListVisibleFiles(path):
1746 """Returns a list of visible files in a directory.
1749 @param path: the directory to enumerate
1751 @return: the list of all files not starting with a dot
1752 @raise ProgrammerError: if L{path} is not an absolute and normalized path
1755 if not IsNormAbsPath(path):
1756 raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1757 " absolute/normalized: '%s'" % path)
# dot-files are considered "hidden" and filtered out
1758 files = [i for i in os.listdir(path) if not i.startswith(".")]
# NOTE(review): elided listing -- the try/except around the pwd lookup
# (which presumably returns 'default' on KeyError -- TODO confirm) is missing.
1762 def GetHomeDir(user, default=None):
1763 """Try to get the homedir of the given user.
1765 The user can be passed either as a string (denoting the name) or as
1766 an integer (denoting the user id). If the user is not found, the
1767 'default' argument is returned, which defaults to None.
# Python 2 types: basestring covers str/unicode, long exists alongside int
1771 if isinstance(user, basestring):
1772 result = pwd.getpwnam(user)
1773 elif isinstance(user, (int, long)):
1774 result = pwd.getpwuid(user)
1776 raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1780 return result.pw_dir
1784 """Returns a random UUID.
1786 @note: This is a Linux-specific method as it uses the /proc
1791 return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning a hex string
  (so that it can be used where an ASCII string is needed).

  @type numbytes: int
  @param numbytes: the number of bytes which will be represented by the
      returned string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: a hex representation of the pseudo-random sequence

  """
  # binascii.hexlify is equivalent to str.encode('hex') on Python 2, but
  # unlike the 'hex' codec it also exists on Python 3; local import to
  # avoid touching the module-level import block
  import binascii
  return binascii.hexlify(os.urandom(numbytes))
# NOTE(review): elided listing -- the two 'try:' lines guarding mkdir and
# chmod are missing from view.
1809 def EnsureDirs(dirs):
1810 """Make required directories, if they don't exist.
1812 @param dirs: list of tuples (dir_name, dir_mode)
1813 @type dirs: list of (string, integer)
1816 for dir_name, dir_mode in dirs:
1818 os.mkdir(dir_name, dir_mode)
1819 except EnvironmentError, err:
# an already-existing directory is fine; anything else is fatal
1820 if err.errno != errno.EEXIST:
1821 raise errors.GenericError("Cannot create needed directory"
1822 " '%s': %s" % (dir_name, err))
# chmod even when the directory pre-existed, so the mode is enforced
1824 os.chmod(dir_name, dir_mode)
1825 except EnvironmentError, err:
1826 raise errors.GenericError("Cannot change directory permissions on"
1827 " '%s': %s" % (dir_name, err))
1828 if not os.path.isdir(dir_name):
1829 raise errors.GenericError("%s is not a directory" % dir_name)
# NOTE(review): elided listing -- most of the docstring, the try/finally
# close and the read/return are missing from view.
1832 def ReadFile(file_name, size=-1):
1836 @param size: Read at most size bytes (if negative, entire file)
1838 @return: the (possibly partial) content of the file
1841 f = open(file_name, "r")
# NOTE(review): elided listing -- the try/except around the temp-file work,
# the fn(fd)/os.write calls, fsync, dry_run handling, the close/return logic
# and the cleanup 'except' clause are missing from view.
1848 def WriteFile(file_name, fn=None, data=None,
1849 mode=None, uid=-1, gid=-1,
1850 atime=None, mtime=None, close=True,
1851 dry_run=False, backup=False,
1852 prewrite=None, postwrite=None):
1853 """(Over)write a file atomically.
1855 The file_name and either fn (a function taking one argument, the
1856 file descriptor, and which should write the data to it) or data (the
1857 contents of the file) must be passed. The other arguments are
1858 optional and allow setting the file mode, owner and group, and the
1859 mtime/atime of the file.
1861 If the function doesn't raise an exception, it has succeeded and the
1862 target file has the new contents. If the function has raised an
1863 exception, an existing target file should be unmodified and the
1864 temporary file should be removed.
1866 @type file_name: str
1867 @param file_name: the target filename
1869 @param fn: content writing function, called with
1870 file descriptor as parameter
1872 @param data: contents of the file
1874 @param mode: file mode
1876 @param uid: the owner of the file
1878 @param gid: the group of the file
1880 @param atime: a custom access time to be set on the file
1882 @param mtime: a custom modification time to be set on the file
1883 @type close: boolean
1884 @param close: whether to close file after writing it
1885 @type prewrite: callable
1886 @param prewrite: function to be called before writing content
1887 @type postwrite: callable
1888 @param postwrite: function to be called after writing content
1891 @return: None if the 'close' parameter evaluates to True,
1892 otherwise the file descriptor
1894 @raise errors.ProgrammerError: if any of the arguments are not valid
1897 if not os.path.isabs(file_name):
1898 raise errors.ProgrammerError("Path passed to WriteFile is not"
1899 " absolute: '%s'" % file_name)
# exactly one of fn/data must be given
1901 if [fn, data].count(None) != 1:
1902 raise errors.ProgrammerError("fn or data required")
# atime and mtime must be given together (os.utime needs both)
1904 if [atime, mtime].count(None) == 1:
1905 raise errors.ProgrammerError("Both atime and mtime must be either"
1908 if backup and not dry_run and os.path.isfile(file_name):
1909 CreateBackup(file_name)
# atomicity: write a temp file in the target directory, then rename over
1911 dir_name, base_name = os.path.split(file_name)
1912 fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1914 # here we need to make sure we remove the temp file, if any error
1915 # leaves it in place
1917 if uid != -1 or gid != -1:
1918 os.chown(new_name, uid, gid)
1920 os.chmod(new_name, mode)
1921 if callable(prewrite):
1923 if data is not None:
1927 if callable(postwrite):
1930 if atime is not None and mtime is not None:
1931 os.utime(new_name, (atime, mtime))
1933 os.rename(new_name, file_name)
# cleanup path: drop the temp file on failure (or dry_run)
1942 RemoveFile(new_name)
# NOTE(review): elided listing -- the continuation line of the second raise
# (presumably 'file_name)' -- TODO confirm) is missing.
1947 def ReadOneLineFile(file_name, strict=False):
1948 """Return the first non-empty line from a file.
1950 @type strict: boolean
1951 @param strict: if True, abort if the file has more than one
1955 file_lines = ReadFile(file_name).splitlines()
# filter(bool, ...) drops empty lines (Python 2: returns a list)
1956 full_lines = filter(bool, file_lines)
1957 if not file_lines or not full_lines:
1958 raise errors.GenericError("No data in one-liner file %s" % file_name)
1959 elif strict and len(full_lines) > 1:
1960 raise errors.GenericError("Too many lines in one-liner file %s" %
1962 return full_lines[0]
# NOTE(review): elided listing -- the 'return idx + base' inside the loop
# and the fall-through return after it are missing from view.
1965 def FirstFree(seq, base=0):
1966 """Returns the first non-existing integer from seq.
1968 The seq argument should be a sorted list of positive integers. The
1969 first time the index of an element is smaller than the element
1970 value, the index will be returned.
1972 The base argument is used to start at a different offset,
1973 i.e. C{[3, 4, 6]} with I{offset=3} will return 5.
1975 Example: C{[0, 1, 3]} will return I{2}.
1978 @param seq: the sequence to be analyzed.
1980 @param base: use this value as the base index of the sequence
1982 @return: the first non-used index in the sequence
1985 for idx, elem in enumerate(seq):
1986 assert elem >= base, "Passed element is higher than base offset"
# a gap in the sorted sequence: idx + base is the first free slot
1987 if elem > idx + base:
# NOTE(review): elided listing -- the milliseconds conversion, the 'try:'
# around poll(), the re-raise for non-EINTR errors and the final
# 'return None' are missing from view.
1993 def SingleWaitForFdCondition(fdobj, event, timeout):
1994 """Waits for a condition to occur on the socket.
1996 Immediately returns at the first interruption.
1998 @type fdobj: integer or object supporting a fileno() method
1999 @param fdobj: entity to wait for events on
2000 @type event: integer
2001 @param event: ORed condition (see select module)
2002 @type timeout: float or None
2003 @param timeout: Timeout in seconds
2005 @return: None for timeout, otherwise occurred conditions
# error/hangup conditions are always checked, in addition to 'event'
2008 check = (event | select.POLLPRI |
2009 select.POLLNVAL | select.POLLHUP | select.POLLERR)
2011 if timeout is not None:
2012 # Poller object expects milliseconds
2015 poller = select.poll()
2016 poller.register(fdobj, event)
2018 # TODO: If the main thread receives a signal and we have no timeout, we
2019 # could wait forever. This should check a global "quit" flag or something
2021 io_events = poller.poll(timeout)
2022 except select.error, err:
# EINTR: interrupted by a signal, treated as "no event" here
2023 if err[0] != errno.EINTR:
2026 if io_events and io_events[0][1] & check:
2027 return io_events[0][1]
# NOTE(review): elided listing -- Poll()'s handling of a None result
# (presumably raising RetryAgain -- TODO confirm) is missing from view.
2032 class FdConditionWaiterHelper(object):
2033 """Retry helper for WaitForFdCondition.
2035 This class contains the retried and wait functions that make sure
2036 WaitForFdCondition can continue waiting until the timeout is actually
# remaining timeout in seconds, updated between retries via UpdateTimeout
2041 def __init__(self, timeout):
2042 self.timeout = timeout
2044 def Poll(self, fdobj, event):
2045 result = SingleWaitForFdCondition(fdobj, event, self.timeout)
# used as wait_fn by Retry(): records how much time is left
2051 def UpdateTimeout(self, timeout):
2052 self.timeout = timeout
# NOTE(review): elided listing -- the 'try:' line, the RetryTimeout result
# handling, the 'else:' branch initialization and the final return are
# missing from view.
2055 def WaitForFdCondition(fdobj, event, timeout):
2056 """Waits for a condition to occur on the socket.
2058 Retries until the timeout is expired, even if interrupted.
2060 @type fdobj: integer or object supporting a fileno() method
2061 @param fdobj: entity to wait for events on
2062 @type event: integer
2063 @param event: ORed condition (see select module)
2064 @type timeout: float or None
2065 @param timeout: Timeout in seconds
2067 @return: None for timeout, otherwise occurred conditions
2070 if timeout is not None:
2071 retrywaiter = FdConditionWaiterHelper(timeout)
2073 result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
2074 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
2075 except RetryTimeout:
# no timeout: keep polling until a condition actually occurs
2079 while result is None:
2080 result = SingleWaitForFdCondition(fdobj, event, timeout)
# NOTE(review): elided listing -- the initialization of 'seen' (presumably
# 'seen = set()' -- TODO confirm) is missing before the comprehension.
2084 def UniqueSequence(seq):
2085 """Returns a list with unique elements.
2087 Element order is preserved.
2090 @param seq: the sequence with the source elements
2092 @return: list of unique elements from seq
# set.add returns None, so 'not seen.add(i)' is always true and only
# performs the side effect of recording i as seen
2096 return [i for i in seq if i not in seen and not seen.add(i)]
# NOTE(review): elided listing -- the final normalization/return
# (presumably 'return mac.lower()' -- TODO confirm) is missing from view.
2099 def NormalizeAndValidateMac(mac):
2100 """Normalizes and check if a MAC address is valid.
2102 Checks whether the supplied MAC address is formally correct, only
2103 accepts colon separated format. Normalize it to all lower.
2106 @param mac: the MAC to be validated
2108 @return: returns the normalized and validated MAC.
2110 @raise errors.OpPrereqError: If the MAC isn't valid
# six colon-separated hex pairs, case-insensitive
2113 mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
2114 if not mac_check.match(mac):
2115 raise errors.OpPrereqError("Invalid MAC address specified: %s" %
2116 mac, errors.ECODE_INVAL)
# NOTE(review): elided listing -- the negative-duration check before the
# early return and the final 'return True' are missing from view.
2121 def TestDelay(duration):
2122 """Sleep for a fixed amount of time.
2124 @type duration: float
2125 @param duration: the sleep duration
2127 @return: False for negative value, True otherwise
2131 return False, "Invalid sleep duration"
2132 time.sleep(duration)
# NOTE(review): elided listing -- the 'try:' line and the os.close(fd)
# call are missing from view.
2136 def _CloseFDNoErr(fd, retries=5):
2137 """Close a file descriptor ignoring errors.
2140 @param fd: the file descriptor
2142 @param retries: how many retries to make, in case we get any
2143 other error than EBADF
2148 except OSError, err:
# EBADF means already closed; any other error is retried recursively
2149 if err.errno != errno.EBADF:
2151 _CloseFDNoErr(fd, retries - 1)
2152 # else either it's closed already or we're out of retries, so we
2153 # ignore this and go on
# NOTE(review): elided listing -- the 'else' default for MAXFD, the maxfd
# fallback when getrlimit returns infinity, and the _CloseFDNoErr(fd) call
# in the loop body are missing from view.
2156 def CloseFDs(noclose_fds=None):
2157 """Close file descriptors.
2159 This closes all file descriptors above 2 (i.e. except
2162 @type noclose_fds: list or None
2163 @param noclose_fds: if given, it denotes a list of file descriptor
2164 that should not be closed
2167 # Default maximum for the number of available file descriptors.
2168 if 'SC_OPEN_MAX' in os.sysconf_names:
2170 MAXFD = os.sysconf('SC_OPEN_MAX')
# the hard RLIMIT_NOFILE limit bounds the highest possible fd
2177 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2178 if (maxfd == resource.RLIM_INFINITY):
2181 # Iterate through and close all file descriptors (except the standard ones)
2182 for fd in range(3, maxfd):
2183 if noclose_fds and fd in noclose_fds:
2189 """Lock current process' virtual address space into RAM.
2191 This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2192 see mlock(2) for more details. This function requires ctypes module.
2196 logging.warning("Cannot set memory lock, ctypes module not found")
2199 libc = ctypes.cdll.LoadLibrary("libc.so.6")
2201 logging.error("Cannot set memory lock, ctypes cannot load libc")
2204 # Some older version of the ctypes module don't have built-in functionality
2205 # to access the errno global variable, where function error codes are stored.
2206 # By declaring this variable as a pointer to an integer we can then access
2207 # its value correctly, should the mlockall call fail, in order to see what
2208 # the actual error code was.
2209 # pylint: disable-msg=W0212
2210 libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int)
2212 if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2213 # pylint: disable-msg=W0212
2214 logging.error("Cannot set memory lock: %s",
2215 os.strerror(libc.__errno_location().contents.value))
2218 logging.debug("Memory lock set")
# NOTE(review): elided listing -- the first os.fork(), setsid(), the
# uid/gid drop, the fd close/dup sequence and the final 'return 0' are
# missing from view. Classic double-fork daemonization.
2221 def Daemonize(logfile, run_uid, run_gid):
2222 """Daemonize the current process.
2224 This detaches the current process from the controlling terminal and
2225 runs it in the background as a daemon.
2228 @param logfile: the logfile to which we should redirect stdout/stderr
2230 @param run_uid: Run the child under this uid
2232 @param run_gid: Run the child under this gid
2234 @return: the value zero
2237 # pylint: disable-msg=W0212
2238 # yes, we really want os._exit
2244 if (pid == 0): # The first child.
2246 # FIXME: When removing again and moving to start-stop-daemon privilege drop
2247 # make sure to check for config permission and bail out when invoked
2252 pid = os.fork() # Fork a second child.
2253 if (pid == 0): # The second child.
2257 # exit() or _exit()? See below.
2258 os._exit(0) # Exit parent (the first child) of the second child.
2260 os._exit(0) # Exit parent of the first child.
# reopen stdin/stdout on /dev/null and the logfile; asserts rely on the
# lowest free descriptor being reused by os.open
2264 i = os.open("/dev/null", os.O_RDONLY) # stdin
2265 assert i == 0, "Can't close/reopen stdin"
2266 i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
2267 assert i == 1, "Can't close/reopen stdout"
2268 # Duplicate standard output to standard error.
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path.

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile_basename = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile_basename)
# NOTE(review): elided listing -- the 'result.failed' check and the
# True/False returns are missing from view.
2286 def EnsureDaemon(name):
2287 """Check for and start daemon if not alive.
# delegates the actual check/start to the daemon-util helper script
2290 result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2292 logging.error("Can't start daemon '%s', failure %s, output: %s",
2293 name, result.fail_reason, result.output)
# NOTE(review): elided listing -- the docstring body, the failure check and
# the returns are missing from view. Mirrors EnsureDaemon via daemon-util.
2299 def StopDaemon(name):
2303 result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2305 logging.error("Can't stop daemon '%s', failure %s, output: %s",
2306 name, result.fail_reason, result.output)
# NOTE(review): elided listing -- the assignment of 'pid' (presumably
# os.getpid() -- TODO confirm) is missing before the WriteFile call.
2312 def WritePidFile(name):
2313 """Write the current process pidfile.
2315 The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2318 @param name: the daemon name to use
2319 @raise errors.GenericError: if the pid file already exists and
2320 points to a live process
2324 pidfilename = DaemonPidFileName(name)
# refuse to clobber a pidfile belonging to a running process
2325 if IsProcessAlive(ReadPidFile(pidfilename)):
2326 raise errors.GenericError("%s contains a live process" % pidfilename)
2328 WriteFile(pidfilename, data="%d\n" % pid)
# NOTE(review): elided listing -- the 'try:' line and the body of the bare
# except (presumably 'pass' -- TODO confirm) are missing from view.
2331 def RemovePidFile(name):
2332 """Remove the current process pidfile.
2334 Any errors are ignored.
2337 @param name: the daemon name used to derive the pidfile name
2340 pidfilename = DaemonPidFileName(name)
2341 # TODO: we could check here that the file contains our pid
2343 RemoveFile(pidfilename)
# deliberately best-effort: any failure to remove is swallowed
2344 except: # pylint: disable-msg=W0702
# NOTE(review): elided listing -- the waitpid keyword in the signature, the
# pid<=0 check, early returns, the timeout<=0 short-circuit and parts of
# _CheckProcess (RetryAgain/raise) are missing from view.
2348 def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
2350 """Kill a process given by its pid.
2353 @param pid: The PID to terminate.
2355 @param signal_: The signal to send, by default SIGTERM
2357 @param timeout: The timeout after which, if the process is still alive,
2358 a SIGKILL will be sent. If not positive, no such checking
2360 @type waitpid: boolean
2361 @param waitpid: If true, we should waitpid on this process after
2362 sending signals, since it's our own child and otherwise it
2363 would remain as zombie
2366 def _helper(pid, signal_, wait):
2367 """Simple helper to encapsulate the kill/waitpid sequence"""
2368 if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
# non-blocking reap; the child may not have exited yet
2370 os.waitpid(pid, os.WNOHANG)
2375 # kill with pid=0 == suicide
2376 raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2378 if not IsProcessAlive(pid):
2381 _helper(pid, signal_, waitpid)
2386 def _CheckProcess():
2387 if not IsProcessAlive(pid):
2391 (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2401 # Wait up to $timeout seconds
2402 Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2403 except RetryTimeout:
# escalate to SIGKILL if the process survived the grace period
2406 if IsProcessAlive(pid):
2407 # Kill process if it's still alive
2408 _helper(pid, signal.SIGKILL, waitpid)
# NOTE(review): elided listing -- the 'return None' after the mask check,
# the 'return item_name' on match and the final 'return None' are missing.
2411 def FindFile(name, search_path, test=os.path.exists):
2412 """Look for a filesystem object in a given path.
2414 This is an abstract method to search for filesystem object (files,
2415 dirs) under a given search path.
2418 @param name: the name to look for
2419 @type search_path: str
2420 @param search_path: location to start at
2421 @type test: callable
2422 @param test: a function taking one argument that should return True
2423 if the a given object is valid; the default value is
2424 os.path.exists, causing only existing files to be returned
2426 @return: full path to the object if found, None otherwise
2429 # validate the filename mask
2430 if constants.EXT_PLUGIN_MASK.match(name) is None:
2431 logging.critical("Invalid value passed for external script name: '%s'",
2435 for dir_name in search_path:
2436 # FIXME: investigate switch to PathJoin
2437 item_name = os.path.sep.join([dir_name, name])
2438 # check the user test and that we're indeed resolving to the given
# basename comparison guards against 'name' containing path separators
2440 if test(item_name) and os.path.basename(item_name) == name:
# NOTE(review): elided listing -- the 'if vgsize is None:' check before the
# "missing" return and the final 'return None' are missing from view.
2445 def CheckVolumeGroupSize(vglist, vgname, minsize):
2446 """Checks if the volume group list is valid.
2448 The function will check if a given volume group is in the list of
2449 volume groups and has a minimum size.
2452 @param vglist: dictionary of volume group names and their size
2454 @param vgname: the volume group we should check
2456 @param minsize: the minimum size we accept
2458 @return: None for success, otherwise the error message
2461 vgsize = vglist.get(vgname, None)
2463 return "volume group '%s' missing" % vgname
2464 elif vgsize < minsize:
2465 return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2466 (vgname, minsize, vgsize))
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_microseconds = int(value * 1000000)
  (seconds, microseconds) = divmod(total_microseconds, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  # keep the exact original arithmetic (multiply by the literal 0.000001)
  # so the floating-point result is bit-for-bit identical
  return float(secs) + (float(usecs) * 0.000001)
# NOTE(review): elided listing -- the 'try:' line, the fallback assignment
# to default_port and the final return are missing from view.
2506 def GetDaemonPort(daemon_name):
2507 """Get the daemon port for this cluster.
2509 Note that this routine does not read a ganeti-specific file, but
2510 instead uses C{socket.getservbyname} to allow pre-customization of
2511 this parameter outside of Ganeti.
2513 @type daemon_name: string
2514 @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
2518 if daemon_name not in constants.DAEMONS_PORTS:
2519 raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)
2521 (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
# /etc/services may override the built-in default port
2523 port = socket.getservbyname(daemon_name, proto)
2524 except socket.error:
# NOTE(review): elided listing -- the 'try:' lines in handleError and the
# trailing 'pass' of the inner except are missing from view.
2530 class LogFileHandler(logging.FileHandler):
2531 """Log handler that doesn't fallback to stderr.
2533 When an error occurs while writing on the logfile, logging.FileHandler tries
2534 to log on stderr. This doesn't work in ganeti since stderr is redirected to
2535 the logfile. This class avoids failures reporting errors to /dev/console.
2538 def __init__(self, filename, mode="a", encoding=None):
2539 """Open the specified file and use it as the stream for logging.
2541 Also open /dev/console to report errors while logging.
2544 logging.FileHandler.__init__(self, filename, mode, encoding)
# kept open for the lifetime of the handler; used only on logging failure
2545 self.console = open(constants.DEV_CONSOLE, "a")
2547 def handleError(self, record): # pylint: disable-msg=C0103
2548 """Handle errors which occur during an emit() call.
2550 Try to handle errors with FileHandler method, if it fails write to
2555 logging.FileHandler.handleError(self, record)
2556 except Exception: # pylint: disable-msg=W0703
2558 self.console.write("Cannot log message:\n%s\n" % self.format(record))
2559 except Exception: # pylint: disable-msg=W0703
2560 # Log handler tried everything it could, now just give up
# NOTE(review): elided listing -- the multithreaded/debug conditionals, the
# handler-close in the removal loop, the stderr_logging branch, the
# SysLogHandler facility argument, the try/else around file-handler setup,
# the console_logging branch selector and the final re-raise are missing.
2564 def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
2565 multithreaded=False, syslog=constants.SYSLOG_USAGE,
2566 console_logging=False):
2567 """Configures the logging module.
2570 @param logfile: the filename to which we should log
2571 @type debug: integer
2572 @param debug: if greater than zero, enable debug messages, otherwise
2573 only those at C{INFO} and above level
2574 @type stderr_logging: boolean
2575 @param stderr_logging: whether we should also log to the standard error
2577 @param program: the name under which we should log messages
2578 @type multithreaded: boolean
2579 @param multithreaded: if True, will add the thread name to the log file
2580 @type syslog: string
2581 @param syslog: one of 'no', 'yes', 'only':
2582 - if no, syslog is not used
2583 - if yes, syslog is used (in addition to file-logging)
2584 - if only, only syslog is used
2585 @type console_logging: boolean
2586 @param console_logging: if True, will use a FileHandler which falls back to
2587 the system console if logging fails
2588 @raise EnvironmentError: if we can't open the log file and
2589 syslog/stderr logging is disabled
# two formats: 'fmt' for file/stderr, 'sft' for syslog
2592 fmt = "%(asctime)s: " + program + " pid=%(process)d"
2593 sft = program + "[%(process)d]:"
2595 fmt += "/%(threadName)s"
2596 sft += " (%(threadName)s)"
2598 fmt += " %(module)s:%(lineno)s"
2599 # no debug info for syslog loggers
2600 fmt += " %(levelname)s %(message)s"
2601 # yes, we do want the textual level, as remote syslog will probably
2602 # lose the error level, and it's easier to grep for it
2603 sft += " %(levelname)s %(message)s"
2604 formatter = logging.Formatter(fmt)
2605 sys_fmt = logging.Formatter(sft)
2607 root_logger = logging.getLogger("")
2608 root_logger.setLevel(logging.NOTSET)
2610 # Remove all previously setup handlers
2611 for handler in root_logger.handlers:
2613 root_logger.removeHandler(handler)
2616 stderr_handler = logging.StreamHandler()
2617 stderr_handler.setFormatter(formatter)
2619 stderr_handler.setLevel(logging.NOTSET)
# CRITICAL-only when stderr logging was not requested
2621 stderr_handler.setLevel(logging.CRITICAL)
2622 root_logger.addHandler(stderr_handler)
2624 if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2625 facility = logging.handlers.SysLogHandler.LOG_DAEMON
2626 syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2628 syslog_handler.setFormatter(sys_fmt)
2629 # Never enable debug over syslog
2630 syslog_handler.setLevel(logging.INFO)
2631 root_logger.addHandler(syslog_handler)
2633 if syslog != constants.SYSLOG_ONLY:
2634 # this can fail, if the logging directories are not setup or we have
2635 # a permission problem; in this case, it's best to log but ignore
2636 # the error if stderr_logging is True, and if false we re-raise the
2637 # exception since otherwise we could run but without any logs at all
2640 logfile_handler = LogFileHandler(logfile)
2642 logfile_handler = logging.FileHandler(logfile)
2643 logfile_handler.setFormatter(formatter)
2645 logfile_handler.setLevel(logging.DEBUG)
2647 logfile_handler.setLevel(logging.INFO)
2648 root_logger.addHandler(logfile_handler)
2649 except EnvironmentError:
2650 if stderr_logging or syslog == constants.SYSLOG_YES:
2651 logging.exception("Failed to enable logging to file '%s'", logfile)
2653 # we need to re-raise the exception
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized.

  This avoids things like /dir/../../other/path to be valid.

  @type path: str
  @param path: the path to check
  @rtype: bool
  @return: True if the path is both absolute and normalized

  """
  if not os.path.isabs(path):
    return False
  return os.path.normpath(path) == path
# NOTE(review): elided listing -- the non-empty-args assertion, the
# 'root = args[0]' assignment, the 'if prefix != root:' guard and the final
# 'return result' are missing from view.
2666 def PathJoin(*args):
2667 """Safe-join a list of path components.
2670 - the first argument must be an absolute path
2671 - no component in the path must have backtracking (e.g. /../),
2672 since we check for normalization at the end
2674 @param args: the path components to be joined
2675 @raise ValueError: for invalid paths
2678 # ensure we're having at least one path passed in
2680 # ensure the first component is an absolute and normalized path name
2682 if not IsNormAbsPath(root):
2683 raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2684 result = os.path.join(*args)
2685 # ensure that the whole path is normalized
2686 if not IsNormAbsPath(result):
2687 raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2688 # check that we're still under the original prefix
2689 prefix = os.path.commonprefix([root, result])
2691 raise ValueError("Error: path joining resulted in different prefix"
2692 " (%s != %s)" % (prefix, root))
# NOTE(review): elided listing -- the try/finally around the file handle,
# the seek-to-end to compute 'pos' and the seek back before read are
# missing from view.
2696 def TailFile(fname, lines=20):
2697 """Return the last lines from a file.
2699 @note: this function will only read and parse the last 4KB of
2700 the file; if the lines are very long, it could be that less
2701 than the requested number of lines are returned
2703 @param fname: the file name
2705 @param lines: the (maximum) number of lines to return
2708 fd = open(fname, "r")
# only the trailing 4KB window is considered
2712 pos = max(0, pos-4096)
2714 raw_data = fd.read()
2718 rows = raw_data.splitlines()
2719 return rows[-lines:]
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp, appending the timezone name.

  NOTE(review): despite the historical "local timezone" wording, this
  uses C{time.gmtime}, so the timestamp is rendered in UTC/GMT.

  @type secs: number
  @param secs: Unix timestamp
  @rtype: str
  @return: formatted timestamp, e.g. "1970-01-01 00:00:00 GMT"

  """
  return time.strftime("%F %T %Z", time.gmtime(secs))
# NOTE(review): elided listing -- the 'if m:' branch header for the
# explicit-offset case and the 'else:' for the Z-suffixed case are missing
# from view.
2729 def _ParseAsn1Generalizedtime(value):
2730 """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2733 @param value: ASN1 GENERALIZEDTIME timestamp
# format with explicit UTC offset, e.g. 20100101000000-0500
2736 m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2739 asn1time = m.group(1)
2740 hours = int(m.group(2))
2741 minutes = int(m.group(3))
2742 utcoffset = (60 * hours) + minutes
# otherwise only the Zulu (UTC) suffix form is accepted
2744 if not value.endswith("Z"):
2745 raise ValueError("Missing timezone")
2746 asn1time = value[:-1]
2749 parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2751 tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2753 return calendar.timegm(tt.utctimetuple())
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @rtype: tuple
  @return: (not_before, not_after); each entry is a Unix timestamp or
      None if the running pyOpenSSL version cannot provide it

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  try:
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()

    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  try:
    get_notafter_fn = cert.get_notAfter
  except AttributeError:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()

    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)
2792 def _VerifyCertificateInner(expired, not_before, not_after, now,
2793 warn_days, error_days):
2794 """Verifies certificate validity.
2797 @param expired: Whether pyOpenSSL considers the certificate as expired
2798 @type not_before: number or None
2799 @param not_before: Unix timestamp before which certificate is not valid
2800 @type not_after: number or None
2801 @param not_after: Unix timestamp after which certificate is invalid
2803 @param now: Current time as Unix timestamp
2804 @type warn_days: number or None
2805 @param warn_days: How many days before expiration a warning should be reported
2806 @type error_days: number or None
2807 @param error_days: How many days before expiration an error should be reported
2811 msg = "Certificate is expired"
2813 if not_before is not None and not_after is not None:
2814 msg += (" (valid from %s to %s)" %
2815 (FormatTimestampWithTZ(not_before),
2816 FormatTimestampWithTZ(not_after)))
2817 elif not_before is not None:
2818 msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2819 elif not_after is not None:
2820 msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2822 return (CERT_ERROR, msg)
2824 elif not_before is not None and not_before > now:
2825 return (CERT_WARNING,
2826 "Certificate not yet valid (valid from %s)" %
2827 FormatTimestampWithTZ(not_before))
2829 elif not_after is not None:
2830 remaining_days = int((not_after - now) / (24 * 3600))
2832 msg = "Certificate expires in about %d days" % remaining_days
2834 if error_days is not None and remaining_days <= error_days:
2835 return (CERT_ERROR, msg)
2837 if warn_days is not None and remaining_days <= warn_days:
2838 return (CERT_WARNING, msg)
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @return: (status, message) tuple as produced by L{_VerifyCertificateInner}

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  now = time.time()
  expired = cert.has_expired()

  return _VerifyCertificateInner(expired, not_before, not_after,
                                 now, warn_days, error_days)
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format
  @raise errors.GenericError: if the salt contains invalid characters

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  # Header line ("<header>: <salt>/<hmac>"), blank line, then the PEM data
  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt,
           Sha1Hmac(key, cert_pem, salt=salt),
           cert_pem))
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format preceded by a signature header
  @rtype: tuple
  @return: (salt, signature) extracted from the header line
  @raise errors.GenericError: if no signature header is found before the
      PEM body

  """
  # Extract signature from original PEM data
  for line in cert_pem.splitlines():
    # Stop searching once the PEM body ("-----BEGIN ...") starts
    if line.startswith("---"):
      break

    m = X509_SIGNATURE.match(line.strip())
    if m:
      return (m.group("salt"), m.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)

  # Dump again to ensure it's in a sane format
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  # The HMAC is computed over the re-serialized PEM, matching what
  # SignX509Certificate signed
  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")
  # NOTE(review): the final "return (cert, salt)" implied by the docstring
  # appears to be missing from this excerpt
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text to calculate the digest for
  @type salt: string or None
  @param salt: Optional salt prepended to the text before digesting
  @rtype: string
  @return: Hexadecimal digest

  """
  if salt:
    salted_text = salt + text
  else:
    salted_text = text

  return hmac.new(key, salted_text, compat.sha1).hexdigest()
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: Text whose digest is to be verified
  @type digest: string
  @param digest: Expected digest
  @type salt: string or None
  @param salt: Optional salt, passed through to L{Sha1Hmac}
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  # Compare case-insensitively, since hex digests may differ in case
  expected = Sha1Hmac(key, text, salt=salt)
  return expected.lower() == digest.lower()
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  # NOTE(review): the accumulator initialization and the per-character
  # escaping loop (with its other branches) appear to be missing from
  # this excerpt; only the non-printable branch survives below
  elif c < 32 or c >= 127: # non-printable
    resu += "\\x%02x" % (c & 0xff)
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be an element of the
  elements. The escaping rules are (assuming comma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \\, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  # NOTE(review): the loop header and accumulator setup for this merge
  # pass appear to be missing from this excerpt; the fragment is kept
  # as-is
  if e1.endswith("\\"):
    num_b = len(e1) - len(e1.rstrip("\\"))
  # here the backslashes remain (all), and will be reduced in
  # the regexp substitution below
  rlist.append(e1 + sep + e2)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  # NOTE(review): the final "return rlist" appears to be missing from
  # this excerpt
def CommaJoin(names):
  """Nicely join a set of identifiers.

  Each item is converted with C{str} before joining, so mixed
  string/number collections are accepted.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join(str(val) for val in names)
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes, rounded to the nearest whole unit

  """
  mebibytes = value / (1024.0 * 1024.0)
  return int(round(mebibytes, 0))
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  for (curpath, _, files) in os.walk(path):
    for filename in files:
      # lstat: count symlinks themselves, do not follow them
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_frsize is the fundamental block size the counters are given in
  frsize = stats.f_frsize
  tsize = BytesToMebibyte(stats.f_blocks * frsize)
  # f_bavail: blocks available to unprivileged users
  fsize = BytesToMebibyte(stats.f_bavail * frsize)
  return (tsize, fsize)
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  # NOTE(review): the os.fork() call and the child/parent branch
  # structure appear to be missing from this excerpt; the statements
  # below are the surviving child-side and parent-side fragments
  # In case the function uses temporary files
  ResetTempfileModule()

  result = int(bool(fn(*args)))
  assert result in (0, 1)
  except: # pylint: disable-msg=W0702
  logging.exception("Error while calling function in separate process")
  # 0 and 1 are reserved for the return value
  os._exit(result) # pylint: disable-msg=W0212

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
  signum = os.WTERMSIG(status)
  exitcode = os.WEXITSTATUS(status)

  # any exit code other than 0/1 or a signal death means the child failed
  if not (exitcode in (0, 1) and signum is None):
  raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
  return bool(exitcode)
def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @rtype: bool
  @return: Whether process was found

  """
  # NOTE(review): the "try:" line, the call to fn and the return
  # statements appear to be missing from this excerpt
  except EnvironmentError, err:
  # ESRCH means the target process no longer exists
  if err.errno == errno.ESRCH:
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  """
  # NOTE(review): the "try:" line and the re-raise/return branches of
  # both except clauses appear to be missing from this excerpt
  return fn(*args, **kwargs)
  except EnvironmentError, err:
  if err.errno == errno.EINTR:
  except (select.error, socket.error), err:
  # In python 2.6 and above select.error is an IOError, so it's handled
  # above, in 2.5 and below it's not, and it's handled here.
  if err.args and err.args[0] == errno.EINTR:
def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    # NOTE(review): the guard selecting between debug logging and a
    # no-op appears to be missing from this excerpt
    logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    # NOTE(review): the "lock" binding, the acquire/release calls, the
    # try/finally and the final returns appear to be missing from this
    # excerpt; only the debug trace calls survive below
    _LockDebug("Waiting for %s", lock)
    _LockDebug("Acquired %s", lock)
    result = fn(self, *args, **kwargs)
    _LockDebug("Releasing %s", lock)
    _LockDebug("Released %s", lock)
3216 """Locks a file using POSIX locks.
3219 @param fd: the file descriptor we need to lock
3223 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3224 except IOError, err:
3225 if err.errno == errno.EAGAIN:
3226 raise errors.LockError("File already locked")
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @rtype: string
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"

  # these two codes works on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  parts = []

  secs = round(secs, 0)

  if secs > 0:
    # Negative values would be a bit tricky
    for unit, one in [("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)]:
      (complete, secs) = divmod(secs, one)
      # skip leading zero units, but keep inner ones ("1h 0m 5s")
      if complete or parts:
        parts.append("%d%s" % (complete, unit))

  parts.append("%ds" % secs)

  return " ".join(parts)
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time

  """
  # NOTE(review): the defaulting of "now", the "try:" line, the value
  # parsing and the final return appear to be missing from this excerpt
  value = ReadFile(filename)
  except IOError, err:
  # a missing file simply means "not paused"; anything else is an error
  if err.errno != errno.ENOENT:

  if value is not None:
  logging.warning(("Watcher pause file (%s) contains invalid value,"
                   " removing it"), filename)
  RemoveFile(filename)

  if value is not None:
    # Remove file if it's outdated
    if now > (value + remove_after):
      RemoveFile(filename)
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which was passed by the retried function to RetryAgain will be
  preserved in RetryTimeout, if it is raised. If such argument was an exception
  the RaiseInner helper method will reraise it.

  """
  def RaiseInner(self):
    """Re-raise the wrapped exception, or this timeout itself.

    If the first stored argument is an exception it is raised directly;
    otherwise a fresh L{RetryTimeout} carrying the same arguments is
    raised.

    """
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    else:
      raise RetryTimeout(*self.args)
class RetryAgain(Exception):
  """Retry again.

  Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
  arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
  of the RetryTimeout() method can be used to reraise it.

  """
class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  Each call returns the current delay and advances the internal state
  by multiplying with the configured factor, bounded by the optional
  upper limit.

  """
  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    # NOTE(review): the assertion on "start" and the assignments of the
    # start/limit/next attributes appear to be missing from this excerpt
    assert factor >= 1.0
    assert limit is None or limit >= 0.0

    self._factor = factor

  # NOTE(review): the method definition line (the callable returning the
  # next delay) appears to be missing from this excerpt
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run
    if self._limit is None or self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)
#: Special delay to specify whole remaining timeout
RETRY_REMAINING_TIME = object()
3387 def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3388 _time_fn=time.time):
3389 """Call a function repeatedly until it succeeds.
3391 The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3392 anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3393 total of C{timeout} seconds, this function throws L{RetryTimeout}.
3395 C{delay} can be one of the following:
3396 - callable returning the delay length as a float
3397 - Tuple of (start, factor, limit)
3398 - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3399 useful when overriding L{wait_fn} to wait for an external event)
3400 - A static delay as a number (int or float)
3403 @param fn: Function to be called
3404 @param delay: Either a callable (returning the delay), a tuple of (start,
3405 factor, limit) (see L{_RetryDelayCalculator}),
3406 L{RETRY_REMAINING_TIME} or a number (int or float)
3407 @type timeout: float
3408 @param timeout: Total timeout
3409 @type wait_fn: callable
3410 @param wait_fn: Waiting function
3411 @return: Return value of function
3415 assert callable(wait_fn)
3416 assert callable(_time_fn)
3421 end_time = _time_fn() + timeout
3424 # External function to calculate delay
3427 elif isinstance(delay, (tuple, list)):
3428 # Increasing delay with optional upper boundary
3429 (start, factor, limit) = delay
3430 calc_delay = _RetryDelayCalculator(start, factor, limit)
3432 elif delay is RETRY_REMAINING_TIME:
3433 # Always use the remaining time
3438 calc_delay = lambda: delay
3440 assert calc_delay is None or callable(calc_delay)
3445 # pylint: disable-msg=W0142
3447 except RetryAgain, err:
3448 retry_args = err.args
3449 except RetryTimeout:
3450 raise errors.ProgrammerError("Nested retry loop detected that didn't"
3451 " handle RetryTimeout")
3453 remaining_time = end_time - _time_fn()
3455 if remaining_time < 0.0:
3456 # pylint: disable-msg=W0142
3457 raise RetryTimeout(*retry_args)
3459 assert remaining_time >= 0.0
3461 if calc_delay is None:
3462 wait_fn(remaining_time)
3464 current_delay = calc_delay()
3465 if current_delay > 0.0:
3466 wait_fn(current_delay)
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  All arguments are passed through to L{tempfile.mkstemp}; the file
  descriptor returned by it is closed before returning, so the caller
  gets just the path of an existing, empty file.

  @rtype: string
  @return: Path to the created file

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  # the descriptor is of no use to callers; close it so it cannot leak
  os.close(fd)
  return path
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value; may be None/empty, in which case
      no CN is set on the certificate subject
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple
  @return: (private key PEM, certificate PEM)

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  # Guard the CN assignment: pyOpenSSL rejects None/empty values, and
  # callers (e.g. the legacy wrapper) legitimately pass no common name
  if common_name:
    cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
3508 def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
3509 """Legacy function to generate self-signed X509 certificate.
3512 (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
3513 validity * 24 * 60 * 60)
3515 WriteFile(filename, mode=0400, data=key_pem + cert_pem)
class FileLock(object):
  """Utility class for file locks.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: string
    @param filename: Path of the file opened at I{fd}

    """
    # NOTE(review): the assignment of the file object to self.fd appears
    # to be missing from this excerpt; other methods rely on self.fd
    self.filename = filename

  # NOTE(review): the @classmethod decorator is not visible in this
  # excerpt, but the "cls" parameter implies it
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    # NOTE(review): the second constructor argument (the filename) appears
    # truncated in this excerpt
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),

  # NOTE(review): the enclosing method definition (presumably Close) is
  # missing from this excerpt; only its docstring and guard survive
    """Close the file and release the lock.

    """
    if hasattr(self, "fd") and self.fd:

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
        non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    # NOTE(review): the branch selecting between the one-shot attempt
    # below and the Retry-based timed attempt (with its "try:") appears
    # incomplete in this excerpt
    self._Lock(self.fd, flag, timeout)
    Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
          args=(self.fd, flag, timeout))
    except RetryTimeout:
      raise errors.LockError(errmsg)

  # NOTE(review): the @staticmethod decorator is not visible in this
  # excerpt, but the missing "self" parameter implies it
  def _Lock(fd, flag, timeout):
    # NOTE(review): the "try:" line and the retry/re-raise branches of
    # the except clause appear to be missing from this excerpt
    fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
      logging.exception("fcntl.flock failed")

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation:

    To make a non-blocking request, include LOCK_NB with any of the above
    operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
3652 """Splits data chunks into lines separated by newline.
3654 Instances provide a file-like interface.
3657 def __init__(self, line_fn, *args):
3658 """Initializes this class.
3660 @type line_fn: callable
3661 @param line_fn: Function called for each line, first parameter is line
3662 @param args: Extra arguments for L{line_fn}
3665 assert callable(line_fn)
3668 # Python 2.4 doesn't have functools.partial yet
3670 lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
3672 self._line_fn = line_fn
3674 self._lines = collections.deque()
3677 def write(self, data):
3678 parts = (self._buffer + data).split("\n")
3679 self._buffer = parts.pop()
3680 self._lines.extend(parts)
3684 self._line_fn(self._lines.popleft().rstrip("\r\n"))
3689 self._line_fn(self._buffer)
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with iself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  # NOTE(review): the outer decorator function wrapping sig_function and
  # the try/finally around the wrapped call appear to be missing from
  # this excerpt
  def sig_function(*args, **kwargs):
    assert 'signal_handlers' not in kwargs or \
      kwargs['signal_handlers'] is None or \
      isinstance(kwargs['signal_handlers'], dict), \
      "Wrong signal_handlers parameter in original function call"
    if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
      signal_handlers = kwargs['signal_handlers']
    # NOTE(review): the "else:" introducing the branch below is missing
    # from this excerpt
    signal_handlers = {}
    kwargs['signal_handlers'] = signal_handlers
    sighandler = SignalHandler(signums)
    # NOTE(review): the loop header registering each signal is missing
    # from this excerpt
    signal_handlers[sig] = sighandler
    return fn(*args, **kwargs)
class SignalWakeupFd(object):
  # Wrapper around signal.set_wakeup_fd with a pipe, degrading to a no-op
  # where the interpreter lacks the function.
  # NOTE(review): the "try:" line for this feature probe is missing from
  # this excerpt
  # This is only supported in Python 2.5 and above (some distributions
  # backported it to Python 2.4)
  _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
  # Not supported: fall back to a no-op implementation
  def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
  def _SetWakeupFd(self, fd):
    return self._set_wakeup_fd_fn(fd)

  # NOTE(review): the "def __init__(self):" line is missing from this
  # excerpt
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  # NOTE(review): the "def Reset(self):" line is missing from this
  # excerpt
    """Restores the previous wakeup file descriptor.

    """
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  # NOTE(review): the "def Notify(self):" line is missing from this
  # excerpt
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  # NOTE(review): the "def __del__(self):" line and its body are missing
  # from this excerpt
    """Called before object deletion.

    """
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    # NOTE(review): the initialization of self.called and of the
    # self._previous dict appears to be missing from this excerpt
    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # NOTE(review): the try/except around the registration loop and its
    # error-path cleanup appear to be missing from this excerpt
    for signum in self.signum:
      # Register handler and remember the previous one for later restore
      prev_handler = signal.signal(signum, self._HandleSignal)
      self._previous[signum] = prev_handler
      # Restore previous handler
      signal.signal(signum, prev_handler)
      # Reset all handlers
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.

  # NOTE(review): the "def Reset(self):" line is missing from this
  # excerpt
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  # NOTE(review): the "def Clear(self):" line and its body are missing
  # from this excerpt
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    # NOTE(review): the "self.called = True" assignment and the guard on
    # self._wakeup appear to be missing from this excerpt
    # Notify whoever is interested in signals
    self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes the set; each item is anchored into a full-match regex."""
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: string
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # iterate over the first truthy match object, if any
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      # NOTE(review): the loop body (returning the first match) and the
      # fallback return appear to be missing from this excerpt

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]