4 # Copyright (C) 2006, 2007 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Ganeti utility module.
24 This module holds functions that can be used in both daemons (all) and
25 the command line scripts.
45 import logging.handlers
51 from cStringIO import StringIO
54 from hashlib import sha1
59 from ganeti import errors
60 from ganeti import constants
64 _re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
68 #: when set to True, L{RunCmd} is disabled
71 _RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int or None
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit normally, e.g. was killed by a signal)
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @ivar stdout: the standard output of the program
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    # Either a signal or a non-zero exit code counts as failure
    self.failed = (signal_ is not None or exit_code != 0)

    if self.signal is not None:
      self.fail_reason = "terminated by signal %s" % self.signal
    elif self.exit_code is not None:
      self.fail_reason = "exited with exit code %s" % self.exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_GetOutput, None, None, "Return full output")
125 def _BuildCmdEnvironment(env, reset):
126 """Builds the environment for an external program.
132 cmd_env = os.environ.copy()
133 cmd_env["LC_ALL"] = "C"
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  if isinstance(cmd, basestring):
    # A plain string is passed through the shell unchanged
    strcmd = cmd
    shell = True
  else:
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError as err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  # A negative status from wait() means termination by a signal
  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to arrive) and
        # read up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError) as err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Never returns: either exec()s the daemon or reports the failure via
  the error pipe and exits.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process: detach from controlling terminal
    os.setsid()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies original
    # process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    if pidfile:
      try:
        # TODO: Atomic replace with another locked file instead of writing into
        # it after creating
        fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0o600)

        # Lock the PID file (and fail if not possible to do so). Any code
        # wanting to send a signal to the daemon should try to lock the PID
        # file before reading it. If acquiring the lock succeeds, the daemon is
        # no longer running and the signal should not be sent.
        LockFile(fd_pidfile)

        os.write(fd_pidfile, "%d\n" % os.getpid())
      except Exception as err:
        raise Exception("Creating and locking PID file failed: %s" % err)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      SetCloseOnExecFlag(fd_pidfile, False)

    # Open /dev/null for the standard streams
    fd_devnull = os.open(os.devnull, os.O_RDWR)

    assert not output or (bool(output) ^ (fd_output is not None))

    if fd_output is not None:
      # Caller supplied the output descriptor directly
      pass
    elif output:
      try:
        # TODO: Implement flag to set append=yes/no
        fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0o600)
      except EnvironmentError as err:
        raise Exception("Opening output file failed: %s" % err)
    else:
      fd_output = fd_devnull

    # Redirect standard I/O
    os.dup2(fd_devnull, 0)
    os.dup2(fd_output, 1)
    os.dup2(fd_output, 2)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      buf = str(sys.exc_info()[1])

      RetryOnSignal(os.write, errpipe_write, buf)
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

  os._exit(1) # pylint: disable-msg=W0212
def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()
  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           close_fds=True, env=env,
                           cwd=cwd)

  # The command gets no standard input
  child.stdin.close()
  poller.register(child.stdout, select.POLLIN)
  poller.register(child.stderr, select.POLLIN)
  out = StringIO()
  err = StringIO()
  fdmap = {
    child.stdout.fileno(): (out, child.stdout),
    child.stderr.fileno(): (err, child.stderr),
    }
  for fd in fdmap:
    SetNonblockFlag(fd, True)

  while fdmap:
    pollresult = RetryOnSignal(poller.poll)

    for fd, event in pollresult:
      if event & select.POLLIN or event & select.POLLPRI:
        data = fdmap[fd][1].read()
        # no data from read signifies EOF (the same as POLLHUP)
        if not data:
          poller.unregister(fd)
          del fdmap[fd]
          continue
        fdmap[fd][0].write(data)
      if (event & select.POLLNVAL or event & select.POLLHUP or
          event & select.POLLERR):
        poller.unregister(fd)
        del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status
460 def _RunCmdFile(cmd, env, via_shell, output, cwd):
461 """Run a command and save its output to a file.
463 @type cmd: string or list
464 @param cmd: Command to run
466 @param env: The environment to use
467 @type via_shell: bool
468 @param via_shell: if we should run via the shell
470 @param output: the filename in which to save the output
472 @param cwd: the working directory for the program
474 @return: the exit status
477 fh = open(output, "a")
479 child = subprocess.Popen(cmd, shell=via_shell,
480 stderr=subprocess.STDOUT,
482 stdin=subprocess.PIPE,
483 close_fds=True, env=env,
487 status = child.wait()
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    flags |= fcntl.FD_CLOEXEC
  else:
    flags &= ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  flags = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    flags |= os.O_NONBLOCK
  else:
    flags &= ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  @param fn: the callable to invoke
  @return: whatever C{fn} returns; any exception other than an
      EINTR-caused one is re-raised

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError as err:
      if err.errno != errno.EINTR:
        raise
    except select.error as err:
      # select.error carries (errno, message) in err.args
      if not (err.args and err.args[0] == errno.EINTR):
        raise
def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  rr = []

  try:
    dir_contents = ListVisibleFiles(dir_name)
  except OSError as err:
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return rr

  for relname in sorted(dir_contents):
    fname = PathJoin(dir_name, relname)
    # Only executable regular files matching the plugin mask are run
    if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
            constants.EXT_PLUGIN_MASK.match(relname) is not None):
      rr.append((relname, constants.RUNPARTS_SKIP, None))
    else:
      try:
        result = RunCmd([fname], env=env, reset_env=reset_env)
      except Exception as err: # pylint: disable-msg=W0703
        rr.append((relname, constants.RUNPARTS_ERR, str(err)))
      else:
        rr.append((relname, constants.RUNPARTS_RUN, result))

  return rr
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones or directories. Other
  errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError as err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
def RenameFile(old, new, mkdir=False, mkdir_mode=0o750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError as err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Create directory and try again
      dirname = os.path.dirname(new)
      try:
        os.makedirs(dirname, mode=mkdir_mode)
      except OSError as err:
        # Ignore EEXIST. This is only handled in os.makedirs as included in
        # Python 2.5 and above.
        if err.errno != errno.EEXIST or not os.path.exists(dirname):
          raise

      return os.rename(old, new)

    raise
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not
  getting a temporary name.

  """
  # pylint: disable-msg=W0212
  if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
    tempfile._once_lock.acquire()
    try:
      # Reset random name generator; it is lazily re-created on next use
      tempfile._name_sequence = None
    finally:
      tempfile._once_lock.release()
  else:
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
658 def _FingerprintFile(filename):
659 """Compute the fingerprint of a file.
661 If the file does not exist, a None will be returned
665 @param filename: the filename to checksum
667 @return: the hex digest of the sha checksum of the contents
671 if not (os.path.exists(filename) and os.path.isfile(filename)):
684 return fp.hexdigest()
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  ret = {}

  for filename in files:
    cksum = _FingerprintFile(filename)
    # Missing files return None and are left out of the result
    if cksum:
      ret[filename] = cksum

  return ret
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
      in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass type enforcement
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype == constants.VTYPE_STRING:
      if not isinstance(target[key], basestring):
        # A False boolean is accepted and converted to the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError as err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  # PIDs <= 0 can never name a real process
  if pid <= 0:
    return False

  try:
    os.stat("/proc/%d/status" % pid)
    return True
  except EnvironmentError as err:
    if err.errno in (errno.ENOENT, errno.ENOTDIR):
      return False
    raise
def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
           otherwise 0

  """
  try:
    raw_data = ReadFile(pidfile)
  except EnvironmentError as err:
    # A missing file simply means "no PID"; anything else is logged
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    pid = int(raw_data)
  except (TypeError, ValueError) as err:
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0

  return pid
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match
  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # An exact match always wins, even over multiple prefix matches
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()
  mo = re.compile("^%s(\\..*)?$" % re.escape(key), re_flags)
  names_filtered = []
  string_matches = []
  for name in name_list:
    if mo.match(name) is not None:
      names_filtered.append(name)
      if not case_sensitive and key == name.upper():
        string_matches.append(name)

  if len(string_matches) == 1:
    return string_matches[0]
  if len(names_filtered) == 1:
    return names_filtered[0]
  return None
class HostInfo:
  """Class implementing resolver and hostname functionality

  """
  _VALID_NAME_RE = re.compile("^[a-z0-9._-]{1,255}$")

  def __init__(self, name=None):
    """Initialize the host name object.

    If the name argument is not passed, it will use this system's
    name.

    """
    if name is None:
      name = self.SysName()

    self.name, self.aliases, self.ipaddrs = self.LookupHostname(name)
    self.ip = self.ipaddrs[0]

  def ShortName(self):
    """Returns the hostname without domain.

    """
    return self.name.split('.')[0]

  @staticmethod
  def SysName():
    """Return the current system's name.

    This is simply a wrapper over C{socket.gethostname()}.

    """
    return socket.gethostname()

  @staticmethod
  def LookupHostname(hostname):
    """Look up a hostname in DNS.

    @type hostname: str
    @param hostname: hostname to look up
    @rtype: tuple
    @return: a tuple (name, aliases, ipaddrs) as returned by
        C{socket.gethostbyname_ex}
    @raise errors.ResolverError: in case of errors in resolving

    """
    try:
      result = socket.gethostbyname_ex(hostname)
    except socket.gaierror as err:
      # hostname not found in DNS
      raise errors.ResolverError(hostname, err.args[0], err.args[1])

    return result

  @classmethod
  def NormalizeName(cls, hostname):
    """Validate and normalize the given hostname.

    @attention: the validation is a bit more relaxed than the standards
        require; most importantly, we allow underscores in names
    @raise errors.OpPrereqError: when the name is not valid

    """
    hostname = hostname.lower()
    if (not cls._VALID_NAME_RE.match(hostname) or
        # double-dots, meaning empty label
        ".." in hostname or
        # empty initial label
        hostname.startswith(".")):
      raise errors.OpPrereqError("Invalid hostname '%s'" % hostname,
                                 errors.ECODE_INVAL)
    if hostname.endswith("."):
      hostname = hostname.rstrip(".")

    return hostname
def GetHostInfo(name=None):
  """Lookup host name and raise an OpPrereqError for failures"""

  try:
    return HostInfo(name)
  except errors.ResolverError as err:
    # err.args holds (hostname, errno, message) from HostInfo.LookupHostname
    raise errors.OpPrereqError("The given name (%s) does not resolve: %s" %
                               (err.args[0], err.args[2]),
                               errors.ECODE_RESOLVER)
def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
       Dictionary with keys volume name and values
       the size of the volume

  """
  command = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(command)
  retval = {}
  if result.failed:
    return retval

  for line in result.stdout.splitlines():
    try:
      name, size = line.split()
      # Sizes are reported in MiB with a fractional part
      size = int(float(size))
    except (IndexError, ValueError) as err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue

    retval[name] = size

  return retval
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # A Linux bridge always exposes a 'bridge' directory in sysfs
  return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  _SORTER_BASE = r"(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE,
                                                  _SORTER_BASE, _SORTER_BASE)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile(r"^\D*$")

  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    return int(val)

  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    nv = fn(val)
  except (ValueError, TypeError):
    nv = val
  return nv
def IsValidIP(ip):
  """Verifies the syntax of an IPv4 address.

  This function checks if the IPv4 address passed is valid or not based
  on syntax (not IP range, class calculations, etc.).

  @type ip: str
  @param ip: the address to be checked
  @rtype: a regular expression match object
  @return: a regular expression match object, or None if the
      address is not valid

  """
  # Each octet: no leading zeroes, up to three digits (so 0-999 by syntax)
  unit = r"(0|[1-9]\d{0,2})"
  #TODO: convert and return only boolean
  return re.match(r"^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip)
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line

  """
  for word in args:
    if not IsValidShellParam(word):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % word)
  return template % args
def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  suffix = ''

  if units == 'm' or (units == 'h' and value < 1024):
    if units == 'h':
      suffix = 'M'
    return "%d%s" % (round(value, 0), suffix)

  elif units == 'g' or (units == 'h' and value < (1024 * 1024)):
    if units == 'h':
      suffix = 'G'
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  else:
    if units == 'h':
      suffix = 'T'
    return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  """
  m = re.match(r'^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
def AddAuthorizedKey(file_name, key):
  """Adds an SSH public key to an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  f = open(file_name, 'a+')
  try:
    nl = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        break
      nl = line.endswith('\n')
    else:
      # Key was not found; append it, making sure it starts on its own line
      if not nl:
        f.write("\n")
      f.write(key.rstrip('\r\n'))
      f.write("\n")
      f.flush()
  finally:
    f.close()
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  # Rewrite into a temporary file and atomically replace the original
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          # Ignore whitespace changes while comparing lines
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  # Ensure aliases are unique
  aliases = UniqueSequence([hostname] + aliases)[1:]

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        # Copy all lines except an existing entry for this IP
        for line in f:
          fields = line.split()
          if fields and not fields[0].startswith('#') and ip == fields[0]:
            continue
          out.write(line)

        out.write("%s\t%s" % (ip, hostname))
        if aliases:
          out.write(" %s" % ' '.join(aliases))
        out.write('\n')

        out.flush()
        os.fsync(out.fileno())
        os.chmod(tmpname, 0o644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise
def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  # FIXME: use WriteFile + fn rather than duplicating its efforts
  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        for line in f:
          fields = line.split()
          if len(fields) > 1 and not fields[0].startswith('#'):
            names = fields[1:]
            if hostname in names:
              # Strip the hostname; drop the line entirely if no names remain
              while hostname in names:
                names.remove(hostname)
              if names:
                out.write("%s %s\n" % (fields[0], ' '.join(names)))
              continue

          out.write(line)

        out.flush()
        os.fsync(out.fileno())
        os.chmod(tmpname, 0o644)
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    RemoveFile(tmpname)
    raise
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  @type hostname: str
  @param hostname: hostname that will be resolved and its
      full and short name will be removed from
      L{constants.ETC_HOSTS}

  """
  hi = HostInfo(name=hostname)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name)
  RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName())
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat
  them as separators.

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S")
def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                 file_name)

  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  fsrc = open(file_name, 'rb')
  try:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    fdst = os.fdopen(fd, 'wb')
    try:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)
    finally:
      fdst.close()
  finally:
    fsrc.close()

  return backup_name
def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    return value
  else:
    # Single-quote and escape embedded single quotes POSIX-style
    return "'%s'" % value.replace("'", "'\\''")
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join([ShellQuote(i) for i in args])
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by doing attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP or hostname to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored
  @rtype: boolean
  @return: True if the connection attempt succeeded (or the port was
      closed but C{live_port_needed} is False)

  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error as err:
      if err.args[0] == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    success = False
  except socket.error as err:
    # A refused connection still proves the host is reachable
    success = (not live_port_needed) and (err.args[0] == errno.ECONNREFUSED)

  return success
def OwnIpAddress(address):
  """Check if the current host has the the given IP address.

  Currently this is done by TCP-pinging the address from the loopback
  address.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  # Connect to the noded port on the target address, using the loopback
  # address as the source; success (per TcpPing's semantics) means the
  # address is configured locally
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
                 source=constants.LOCALHOST_IP_ADDRESS)
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the sorted list of all files not starting with a dot
  @raise errors.ProgrammerError: if L{path} is not an absolute and
      normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  # By Unix convention, hidden entries start with a dot
  files = [i for i in os.listdir(path) if not i.startswith(".")]
  # Sort for deterministic output (os.listdir order is arbitrary)
  files.sort()
  return files
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  """
  # NOTE(review): a "try:" appears elided before the password-database
  # lookups below
  if isinstance(user, basestring):
    # Lookup by user name
    result = pwd.getpwnam(user)
  elif isinstance(user, (int, long)):
    # Lookup by numeric user id
    result = pwd.getpwuid(user)
  # NOTE(review): the "else:" branch header, the rest of the raise
  # arguments and the "except KeyError: return default" handler appear
  # elided around the following line
    raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
  return result.pw_dir
  # NOTE(review): the "def NewUUID():" header for this function is not
  # visible in this chunk
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.

  """
  # A UUID is only 36 characters plus newline, so reading at most 128
  # bytes is more than enough
  return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1557 def GenerateSecret(numbytes=20):
1558 """Generates a random secret.
1560 This will generate a pseudo-random secret returning an hex string
1561 (so that it can be used where an ASCII string is needed).
1563 @param numbytes: the number of bytes which will be represented by the returned
1564 string (defaulting to 20, the length of a SHA1 hash)
1566 @return: an hex representation of the pseudo-random sequence
1569 return os.urandom(numbytes).encode('hex')
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  """
  for dir_name, dir_mode in dirs:
    # NOTE(review): a "try:" line appears elided before the mkdir call
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      # An already-existing directory is acceptable; anything else is fatal
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    # The path may pre-exist but not be a directory (e.g. a regular file)
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  f = open(file_name, "r")
  # NOTE(review): the try/finally performing f.read(size) and f.close()
  # appears elided in this chunk
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of fn/data must be provided
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  # atime/mtime must be passed together (os.utime needs both)
  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
  # NOTE(review): the remainder of this message and the closing
  # parenthesis appear elided here

  # Keep a timestamped backup copy before overwriting an existing file
  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temporary file in the same directory, so the final
  # os.rename() is atomic and readers never see a half-written file
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  # NOTE(review): the "try:" opening that cleanup block appears elided
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    # NOTE(review): an "if mode:" guard appears elided before this chmod
      os.chmod(new_name, mode)
    if callable(prewrite):
      # NOTE(review): the prewrite(fd) call appears elided here
    if data is not None:
      # NOTE(review): the os.write(fd, data) call and the alternative
      # fn(fd) branch appear elided here
    if callable(postwrite):
      # NOTE(review): the postwrite(fd) call appears elided here
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    # NOTE(review): an "if not dry_run:" guard appears elided before the
    # rename below
      os.rename(new_name, file_name)
  # NOTE(review): the finally-clause removing the temporary file on
  # error appears partially elided around the following line
    RemoveFile(new_name)
  # NOTE(review): the close/return-fd handling is not visible in this chunk
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int or None
  @return: the first non-used index in the sequence

  """
  for idx, elem in enumerate(seq):
    assert elem >= base, "Passed element is higher than base offset"
    if elem > idx + base:
      # The sequence skipped a value here, so idx + base is free
      return idx + base

  # The sequence is dense: no free index found
  return None
def all(seq, pred=bool): # pylint: disable-msg=W0622
  "Returns True if pred(x) is True for every element in the iterable"
  for elem in seq:
    if not pred(elem):
      # One failing element is enough to give a negative answer
      return False
  return True
def any(seq, pred=bool): # pylint: disable-msg=W0622
  "Returns True if pred(x) is True for at least one element in the iterable"
  for elem in seq:
    if pred(elem):
      # One matching element is enough to give a positive answer
      return True
  return False
1747 def partition(seq, pred=bool): # # pylint: disable-msg=W0622
1748 "Partition a list in two, based on the given predicate"
1749 return (list(itertools.ifilter(pred, seq)),
1750 list(itertools.ifilterfalse(pred, seq)))
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  # set.add() returns None (falsy), so "not seen.add(i)" is always true
  # and only serves to record the element as seen while filtering
  return [i for i in seq if i not in seen and not seen.add(i)]
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  # Six hex octets separated by colons; re.I accepts both cases
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
  if not mac_check.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  # Normalize to lower case so comparisons elsewhere are consistent
  return mac.lower()
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) for negative value, (True, None)
      otherwise

  """
  if duration < 0:
    # Reject nonsensical input instead of raising from time.sleep
    return False, "Invalid sleep duration"
  time.sleep(duration)
  return True, None
def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  # NOTE(review): the "try: os.close(fd)" lines appear elided here
  except OSError, err:
    if err.errno != errno.EBADF:
      # NOTE(review): a "retries > 0" guard appears elided before the
      # recursive retry below
      _CloseFDNoErr(fd, retries - 1)
  # else either it's closed already or we're out of retries, so we
  # ignore this and go on
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/stdout/stderr).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
      MAXFD = os.sysconf('SC_OPEN_MAX')
  # NOTE(review): the try/except around os.sysconf, the sanity check on
  # its result and the fallback default appear elided here

  # Use the hard limit of RLIMIT_NOFILE as the upper bound of fds to close
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    # NOTE(review): the fallback assignment (presumably maxfd = MAXFD)
    # appears elided here

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      # NOTE(review): the "continue" skipping protected descriptors and
      # the closing call for the others appear elided here
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit
  # NOTE(review): the first os.fork() call appears elided here
  if (pid == 0): # The first child.
    # NOTE(review): os.setsid() appears elided here
    pid = os.fork() # Fork a second child.
    if (pid == 0): # The second child.
      # NOTE(review): the second child's setup appears elided here; the
      # two lines below belong to elided else-branches of the forks
      # exit() or _exit()? See below.
      os._exit(0) # Exit parent (the first child) of the second child.
    os._exit(0) # Exit parent of the first child.
  # NOTE(review): closing of inherited file descriptors appears elided

  # Reopen fd 0/1 explicitly; mkstemp-free os.open returns the lowest
  # free descriptor, which the asserts below verify
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  # NOTE(review): the dup call and "return 0" are not visible in this chunk
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, pidfile)
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name to check/start

  """
  # Delegate the check-and-start logic to the external daemon-util script
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  # NOTE(review): an "if result.failed:" guard appears elided here
  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  # NOTE(review): the boolean return statements are not visible in this chunk
def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pidfilename = DaemonPidFileName(name)
  # Refuse to clobber the pidfile of a daemon that is still running
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  # NOTE(review): the "pid = os.getpid()" assignment appears elided here
  WriteFile(pidfilename, data="%d\n" % pid)
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  # NOTE(review): a "try:" appears elided before the best-effort removal
    RemoveFile(pidfilename)
  except: # pylint: disable-msg=W0702
    # NOTE(review): the "pass" swallowing all errors appears elided here
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
  # NOTE(review): the end of this signature (the waitpid keyword
  # parameter and closing parenthesis) appears elided here
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
      a SIGKILL will be sent. If not positive, no such checking
      will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    # NOTE(review): the "if wait:" guard and try/except around the
    # non-blocking reap below appear elided
      os.waitpid(pid, os.WNOHANG)

  # NOTE(review): a "pid <= 0" guard appears elided before this raise
  # kill with pid=0 == suicide
  raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    # NOTE(review): an early return appears elided here

  _helper(pid, signal_, waitpid)

  # NOTE(review): a "timeout <= 0" shortcut appears elided here

  def _CheckProcess():
    if not IsProcessAlive(pid):
      # NOTE(review): an early return appears elided here

    # NOTE(review): the try/except and RetryAgain handling around the
    # non-blocking waitpid below appear partially elided
    (result_pid, _) = os.waitpid(pid, os.WNOHANG)

  # Wait up to $timeout seconds
  # NOTE(review): a "try:" appears elided before the Retry call
  Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    # NOTE(review): the fall-through body appears elided here

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # validate the filename mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
    # NOTE(review): the rest of this logging call and an early return
    # appear elided here

  for dir_name in search_path:
    # FIXME: investigate switch to PathJoin
    item_name = os.path.sep.join([dir_name, name])
    # check the user test and that we're indeed resolving to the given
    # basename
    if test(item_name) and os.path.basename(item_name) == name:
      # NOTE(review): "return item_name" appears elided here
  # NOTE(review): the final "return None" is not visible in this chunk
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    # The requested volume group does not exist at all
    return "volume group '%s' missing" % vgname
  elif vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # Work in integer microseconds to avoid float rounding surprises
  total_microseconds = int(value * 1000000)
  (seconds, microseconds) = divmod(total_microseconds, 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (secs, usecs) = timetuple

  assert 0 <= secs, \
    "Seconds must be larger than or equal to 0, but are %s" % secs
  assert 0 <= usecs <= 999999, \
    "Microseconds must be 0-999999, but are %s" % usecs

  # Scale microseconds back into a fractional-seconds float
  fraction = float(usecs) * 0.000001
  return float(secs) + fraction
def GetDaemonPort(daemon_name):
  """Get the daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @type daemon_name: string
  @param daemon_name: daemon name (in constants.DAEMONS_PORTS)
  @rtype: int

  """
  if daemon_name not in constants.DAEMONS_PORTS:
    raise errors.ProgrammerError("Unknown daemon: %s" % daemon_name)

  (proto, default_port) = constants.DAEMONS_PORTS[daemon_name]
  # Prefer an /etc/services entry over the compiled-in default
  # NOTE(review): a "try:" appears elided before the lookup below
    port = socket.getservbyname(daemon_name, proto)
  except socket.error:
    # NOTE(review): the fallback to default_port and the final
    # "return port" are not visible in this chunk
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  # fmt is the file/stderr format, sft the (shorter) syslog format
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  # NOTE(review): an "if multithreaded:" guard appears elided here
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  # NOTE(review): an "if debug:" guard appears elided here
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers
  for handler in root_logger.handlers:
    root_logger.removeHandler(handler)

  # NOTE(review): an "if stderr_logging:" guard appears elided here
  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  # NOTE(review): an "if debug:"/"else:" pair appears elided around the
  # two setLevel calls below
    stderr_handler.setLevel(logging.NOTSET)
    stderr_handler.setLevel(logging.CRITICAL)
  root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
    # NOTE(review): the facility argument and closing parenthesis of
    # this call appear elided here
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permission problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    # NOTE(review): a "try:" appears elided here
      logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      # NOTE(review): an "if debug:"/"else:" pair appears elided around
      # the two setLevel calls below
        logfile_handler.setLevel(logging.DEBUG)
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      # NOTE(review): the else-branch below re-raises the exception
        # we need to re-raise the exception
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  """
  # Relative paths are rejected outright; for absolute ones, the path
  # must survive normalization unchanged (no "..", ".", double slashes)
  if not os.path.isabs(path):
    return False
  return os.path.normpath(path) == path
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
      - the first argument must be an absolute path
      - no component in the path must have backtracking (e.g. /../),
        since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  # NOTE(review): the assert on args and the "root = args[0]"
  # assignment appear elided here
  # ensure the first component is an absolute and normalized path name
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # ensure that the whole path is normalized
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # check that we're still under the original prefix
  prefix = os.path.commonprefix([root, result])
  # NOTE(review): the "if prefix != root:" guard appears elided before
  # this raise
  raise ValueError("Error: path joining resulted in different prefix"
                   " (%s != %s)" % (prefix, root))
  # NOTE(review): the final "return result" is not visible in this chunk
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  # NOTE(review): the try/finally block with the seek-to-end logic that
  # initializes "pos" appears partially elided here
    # Back up at most 4KB from the end of the file before reading
    pos = max(0, pos-4096)
    raw_data = fd.read()

  # Split into complete lines, then return the last requested ones
  rows = raw_data.splitlines()
  return rows[-lines:]
def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  # Timestamps carrying an explicit UTC offset, e.g. "20100101120000+0200"
  m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
  # NOTE(review): an "if m:" guard appears elided here
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  # NOTE(review): the "else:" branch for "Z"-suffixed (UTC) timestamps
  # appears partially elided around the lines below
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    # NOTE(review): the "utcoffset = 0" assignment appears elided here

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  # Apply the offset via datetime arithmetic, then convert the resulting
  # UTC time tuple to a Unix timestamp
  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above.
  # NOTE(review): the "try:" lines and the AttributeError fallbacks
  # (presumably setting the values to None for old pyOpenSSL) appear
  # elided throughout this function
    get_notbefore_fn = cert.get_notBefore
  except AttributeError:
  not_before_asn1 = get_notbefore_fn()

  if not_before_asn1 is None:
    # NOTE(review): the None-assignment branch appears elided here
  not_before = _ParseAsn1Generalizedtime(not_before_asn1)

    get_notafter_fn = cert.get_notAfter
  except AttributeError:
  not_after_asn1 = get_notafter_fn()

  if not_after_asn1 is None:
    # NOTE(review): the None-assignment branch appears elided here
  not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  # NOTE(review): the character loop (initializing the "resu"
  # accumulator, computing "c" per character and handling printable
  # characters) appears partially elided around the lines below
  elif c < 32 or c >= 127: # non-printable
    resu += "\\x%02x" % (c & 0xff)
  # NOTE(review): the final "return resu" is not visible in this chunk
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escape in order to be an element of the
  elements. The escaping rules are (assuming coma being the
  separator):
    - a plain , separates the elements
    - a sequence \\\\, (double backslash plus comma) is handled as a
      backslash plus a separator comma
    - a sequence \, (backslash plus comma) is handled as a
      non-separator comma

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator

  @rtype: list of strings
  @return: a list of strings

  """
  # we split the list by sep (with no escaping at this stage)
  slist = text.split(sep)
  # next, we revisit the elements and if any of them ended with an odd
  # number of backslashes, then we join it with the next
  # NOTE(review): the loop consuming slist and building "rlist" appears
  # partially elided around the lines below
    if e1.endswith("\\"):
      num_b = len(e1) - len(e1.rstrip("\\"))
      # NOTE(review): the odd/even backslash-count test appears elided
        # here the backslashes remain (all), and will be reduced in
        # the next step
        rlist.append(e1 + sep + e2)
  # finally, replace backslash-something with something
  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
  # NOTE(review): the final "return rlist" is not visible in this chunk
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  # Stringify each element first, so non-string identifiers work too
  formatted = [str(val) for val in names]
  return ", ".join(formatted)
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes

  """
  # 1 MiB = 1024 * 1024 bytes; round to the nearest whole mebibyte
  mebibytes = round(value / (1024.0 * 1024.0), 0)
  return int(mebibytes)
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  size = 0

  # Walk the tree and sum the size of every regular entry; lstat is used
  # so symlinks count with their own size and are not followed
  for (curpath, _, files) in os.walk(path):
    for filename in files:
      st = os.lstat(PathJoin(curpath, filename))
      size += st.st_size

  return BytesToMebibyte(size)
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: tuple
  @return: tuple of (Total space, Free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_frsize is the fundamental block size; f_bavail counts blocks
  # available to unprivileged users, f_blocks the filesystem total
  fsize = BytesToMebibyte(stats.f_bavail * stats.f_frsize)
  tsize = BytesToMebibyte(stats.f_blocks * stats.f_frsize)
  return (tsize, fsize)
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result

  """
  # NOTE(review): the os.fork() call and the child-branch header appear
  # elided here; the following lines run in the child process
  # In case the function uses temporary files
  ResetTempfileModule()
  # NOTE(review): a "try:" appears elided before the call below
    result = int(bool(fn(*args)))
    assert result in (0, 1)
  except: # pylint: disable-msg=W0702
    logging.exception("Error while calling function in separate process")
    # 0 and 1 are reserved for the return value
    # NOTE(review): the error exit-code assignment appears elided here
  os._exit(result) # pylint: disable-msg=W0212

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    # NOTE(review): the "exitcode = None" assignment appears elided
    signum = os.WTERMSIG(status)
  # NOTE(review): the "else:" branch header appears elided here
    exitcode = os.WEXITSTATUS(status)
    # NOTE(review): the "signum = None" assignment appears elided

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
    # NOTE(review): the format arguments of this raise appear elided

  return bool(exitcode)
def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  """
  def _LockDebug(*args, **kwargs):
    # Helper emitting lock tracing when debugging is enabled
    # NOTE(review): the debug-flag guard appears elided here
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert hasattr(self, '_lock')
    # NOTE(review): the "lock = self._lock" assignment, the acquire
    # call and the try/finally releasing the lock appear elided around
    # the lines below
    _LockDebug("Waiting for %s", lock)
    _LockDebug("Acquired %s", lock)
    result = fn(self, *args, **kwargs)
    _LockDebug("Releasing %s", lock)
    _LockDebug("Released %s", lock)
  # NOTE(review): "return result" and "return wrapper" are not visible
  # in this chunk
  # NOTE(review): the "def LockFile(fd):" header for this function is
  # not visible in this chunk
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock

  """
  # NOTE(review): a "try:" appears elided before the non-blocking
  # exclusive lock attempt below
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    # EAGAIN means another process already holds the lock
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    # NOTE(review): the re-raise for other errors appears elided here
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if val is None or not isinstance(val, (int, float)):
    return "N/A"
  # these two codes works on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
      seconds past the pause end time

  """
  # NOTE(review): the defaulting of "now" and a "try:" before the read
  # below appear elided here
    value = ReadFile(filename)
  except IOError, err:
    # A missing pause file simply means the watcher is not paused
    if err.errno != errno.ENOENT:
      # NOTE(review): the re-raise and the "value = None" fallback
      # appear elided here

  if value is not None:
    # NOTE(review): the try/except converting the file contents to an
    # integer appears elided before the error handling below
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)

  if value is not None:
    # Remove file if it's outdated
    if now > (value + remove_after):
      RemoveFile(filename)
    # NOTE(review): the branch returning the still-valid pause value and
    # the final return are not visible in this chunk
class RetryTimeout(Exception):
  """Retry loop timed out.

  """
# Raised by the callback passed to Retry() to request another attempt
class RetryAgain(Exception):
class _RetryDelayCalculator(object):
  """Calculator for increasing delays.

  """
  def __init__(self, start, factor, limit):
    """Initializes this class.

    @type start: float
    @param start: Initial delay
    @type factor: float
    @param factor: Factor for delay increase
    @type limit: float or None
    @param limit: Upper limit for delay or None for no limit

    """
    # A factor below 1.0 would make delays shrink instead of grow
    assert factor >= 1.0
    assert limit is None or limit >= 0.0
    # NOTE(review): the assignments initializing the start/next delay
    # attributes appear elided here
    self._factor = factor
    # NOTE(review): the "self._limit = limit" assignment appears elided

  # NOTE(review): the "def Next(self):" method header appears elided here
    """Returns current delay and calculates the next one.

    """
    current = self._next

    # Update for next run
    if self._limit is None or self._next < self._limit:
      self._next = min(self._limit, self._next * self._factor)
    # NOTE(review): the "return current" is not visible in this chunk
#: Special delay to specify whole remaining timeout (passed as the
#: C{delay} argument to L{Retry})
RETRY_REMAINING_TIME = object()
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
      factor, limit) (see L{_RetryDelayCalculator}),
      L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @return: Return value of function

  """
  assert callable(wait_fn)
  assert callable(_time_fn)
  # NOTE(review): the defaulting of "args" appears elided here

  end_time = _time_fn() + timeout

  # NOTE(review): an "if callable(delay):" branch appears elided here
    # External function to calculate delay
  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    # NOTE(review): the "calc_delay = None" assignment appears elided

  # NOTE(review): the final "else:" handling a static numeric delay
  # appears elided before the line below
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  # NOTE(review): the main loop calling fn(*args) and catching
  # RetryAgain appears largely elided below
  # pylint: disable-msg=W0142
      remaining_time = end_time - _time_fn()

      if remaining_time < 0.0:
        raise RetryTimeout()

      assert remaining_time >= 0.0

      if calc_delay is None:
        # Sleep for whatever is left of the total timeout
        wait_fn(remaining_time)
      # NOTE(review): the "else:" branch header appears elided here
        current_delay = calc_delay()
        if current_delay > 0.0:
          wait_fn(current_delay)
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file and returns its path.

  The descriptor returned by mkstemp is closed before returning, so the
  caller gets only the path of an (empty) temporary file.

  @rtype: string
  @return: path to the newly-created temporary file

  """
  (fd, path) = tempfile.mkstemp(*args, **kwargs)
  # Close the descriptor immediately; failing to do so leaks one fd per
  # call, and the visible code also never returned the path
  _CloseFDNoErr(fd)
  return path
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple
  @return: tuple of (PEM-encoded private key, PEM-encoded certificate)

  """
  # Create private and public key
  key = OpenSSL.crypto.PKey()
  key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Create self-signed certificate
  cert = OpenSSL.crypto.X509()
  # NOTE(review): a guard on common_name may be elided before this
  # assignment — callers such as GenerateSelfSignedSslCert pass None;
  # verify against upstream
  cert.get_subject().CN = common_name
  cert.set_serial_number(1)
  # Validity window: from now until "validity" seconds in the future
  cert.gmtime_adj_notBefore(0)
  cert.gmtime_adj_notAfter(validity)
  # Self-signed: issuer equals subject, signed with its own key
  cert.set_issuer(cert.get_subject())
  cert.set_pubkey(key)
  cert.sign(key, constants.X509_CERT_SIGN_DIGEST)

  key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  return (key_pem, cert_pem)
2820 def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
2821 """Legacy function to generate self-signed X509 certificate.
2824 (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
2825 validity * 24 * 60 * 60)
2827 WriteFile(filename, mode=0400, data=key_pem + cert_pem)
2830 class FileLock(object):
2831 """Utility class for file locks.
2834 def __init__(self, fd, filename):
2835 """Constructor for FileLock.
2838 @param fd: File object
2840 @param filename: Path of the file opened at I{fd}
2844 self.filename = filename
2847 def Open(cls, filename):
2848 """Creates and opens a file to be used as a file-based lock.
2850 @type filename: string
2851 @param filename: path to the file to be locked
2854 # Using "os.open" is necessary to allow both opening existing file
2855 # read/write and creating if not existing. Vanilla "open" will truncate an
2856 # existing file -or- allow creating if not existing.
2857 return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
2864 """Close the file and release the lock.
2867 if hasattr(self, "fd") and self.fd:
2871 def _flock(self, flag, blocking, timeout, errmsg):
2872 """Wrapper for fcntl.flock.
2875 @param flag: operation flag
2876 @type blocking: bool
2877 @param blocking: whether the operation should be done in blocking mode.
2878 @type timeout: None or float
2879 @param timeout: for how long the operation should be retried (implies
2881 @type errmsg: string
2882 @param errmsg: error message in case operation fails.
2885 assert self.fd, "Lock was closed"
2886 assert timeout is None or timeout >= 0, \
2887 "If specified, timeout must be positive"
2888 assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
2890 # When a timeout is used, LOCK_NB must always be set
2891 if not (timeout is None and blocking):
2892 flag |= fcntl.LOCK_NB
2895 self._Lock(self.fd, flag, timeout)
2898 Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
2899 args=(self.fd, flag, timeout))
2900 except RetryTimeout:
2901 raise errors.LockError(errmsg)
2904 def _Lock(fd, flag, timeout):
2906 fcntl.flock(fd, flag)
2907 except IOError, err:
2908 if timeout is not None and err.errno == errno.EAGAIN:
2911 logging.exception("fcntl.flock failed")
2914 def Exclusive(self, blocking=False, timeout=None):
2915 """Locks the file in exclusive mode.
2917 @type blocking: boolean
2918 @param blocking: whether to block and wait until we
2919 can lock the file or return immediately
2920 @type timeout: int or None
2921 @param timeout: if not None, the duration to wait for the lock
2925 self._flock(fcntl.LOCK_EX, blocking, timeout,
2926 "Failed to lock %s in exclusive mode" % self.filename)
2928 def Shared(self, blocking=False, timeout=None):
2929 """Locks the file in shared mode.
2931 @type blocking: boolean
2932 @param blocking: whether to block and wait until we
2933 can lock the file or return immediately
2934 @type timeout: int or None
2935 @param timeout: if not None, the duration to wait for the lock
2939 self._flock(fcntl.LOCK_SH, blocking, timeout,
2940 "Failed to lock %s in shared mode" % self.filename)
2942 def Unlock(self, blocking=True, timeout=None):
2943 """Unlocks the file.
2945 According to C{flock(2)}, unlocking can also be a nonblocking
2948 To make a non-blocking request, include LOCK_NB with any of the above
2951 @type blocking: boolean
2952 @param blocking: whether to block and wait until we
2953 can lock the file or return immediately
2954 @type timeout: int or None
2955 @param timeout: if not None, the duration to wait for the lock
2959 self._flock(fcntl.LOCK_UN, blocking, timeout,
2960 "Failed to unlock %s" % self.filename)
def SignalHandled(signums):
  """Signal Handled decoration.

  This special decorator installs a signal handler and then calls the target
  function. The function must accept a 'signal_handlers' keyword argument,
  which will contain a dict indexed by signal number, with SignalHandler
  objects as values.

  The decorator can be safely stacked with itself, to handle multiple signals
  with different handlers.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      assert 'signal_handlers' not in kwargs or \
             kwargs['signal_handlers'] is None or \
             isinstance(kwargs['signal_handlers'], dict), \
             "Wrong signal_handlers parameter in original function call"
      if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
        signal_handlers = kwargs['signal_handlers']
      else:
        signal_handlers = {}
        kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Always restore the previous signal handlers, even when the
        # decorated function raises; without this the process would keep
        # our handler installed after the call
        sighandler.Reset()
    return sig_function
  return wrap
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function

    """
    assert handler_fn is None or callable(handler_fn)

    # NOTE(review): set(signum) requires an iterable; despite the docstring,
    # a bare int would fail here -- confirm with callers
    self.signum = set(signum)
    self.called = False
    self._handler_fn = handler_fn
    self._previous = {}

    try:
      for signum in self.signum:
        # Install our handler, remembering the old one
        old_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = old_handler
        except:
          # Recording failed; put the old handler back before propagating
          signal.signal(signum, old_handler)
          raise
    except:
      # Installing one of the handlers failed; undo whatever was already
      # installed. There is a race condition here: a handler may already
      # have been called, but there's not much we can do about it.
      self.Reset()
      raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, old_handler in list(self._previous.items()):
      signal.signal(signum, old_handler)
      # Only forget a handler once it was successfully restored
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the
    # only solution in Python -- there are no atomic types.
    self.called = True

    if self._handler_fn:
      self._handler_fn(signum, frame)
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Anchor each field so that only whole-string matches count
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    for expr in self.items:
      m = expr.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [field for field in items if not self.Matches(field)]