X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/d9f311d78e983c98343954c85e7415115fb22885..944bf54895c1d4491c6d06ad464aa6e97844c366:/lib/utils.py?ds=sidebyside diff --git a/lib/utils.py b/lib/utils.py index a9b71b5..48388ba 100644 --- a/lib/utils.py +++ b/lib/utils.py @@ -19,14 +19,16 @@ # 02110-1301, USA. -"""Ganeti small utilities +"""Ganeti utility module. + +This module holds functions that can be used in both daemons (all) and +the command line scripts. """ import sys import os -import sha import time import subprocess import re @@ -44,6 +46,12 @@ import signal from cStringIO import StringIO +try: + from hashlib import sha1 +except ImportError: + import sha + sha1 = sha.new + from ganeti import errors from ganeti import constants @@ -52,35 +60,42 @@ _locksheld = [] _re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$') debug = False +debug_locks = False + +#: when set to True, L{RunCmd} is disabled no_fork = False class RunResult(object): - """Simple class for holding the result of running external programs. - - Instance variables: - exit_code: the exit code of the program, or None (if the program - didn't exit()) - signal: numeric signal that caused the program to finish, or None - (if the program wasn't terminated by a signal) - stdout: the standard output of the program - stderr: the standard error of the program - failed: a Boolean value which is True in case the program was - terminated by a signal or exited with a non-zero exit code - fail_reason: a string detailing the termination reason + """Holds the result of running external programs. + + @type exit_code: int + @ivar exit_code: the exit code of the program, or None (if the program + didn't exit()) + @type signal: int or None + @ivar signal: the signal that caused the program to finish, or None + (if the program wasn't terminated by a signal) + @type stdout: str + @ivar stdout: the standard output of the program + @type stderr: str + @ivar stderr: the standard error of the program + @type failed: boolean + @ivar failed: True in case the program was + terminated by a signal or exited with a non-zero exit code + @ivar fail_reason: a string detailing the termination reason """ __slots__ = ["exit_code", "signal", "stdout", "stderr", "failed", "fail_reason", "cmd"] - def __init__(self, exit_code, signal, stdout, stderr, cmd): + def __init__(self, exit_code, signal_, stdout, stderr, cmd): self.cmd = cmd self.exit_code = exit_code - self.signal = signal + self.signal = signal_ self.stdout = stdout self.stderr = stderr - self.failed = (signal is not None or exit_code != 0) + self.failed = (signal_ is not None or exit_code != 0) if self.signal is not None: self.fail_reason = "terminated by signal %s" % self.signal @@ -102,16 +117,26 @@ class RunResult(object): output = property(_GetOutput, None, None, "Return full output") -def RunCmd(cmd): +def RunCmd(cmd, env=None, output=None, cwd='/'): """Execute a (shell) command. The command should not read from its standard input, as it will be closed. - Args: - cmd: command to run. 
(str) - - Returns: `RunResult` instance + @type cmd: string or list + @param cmd: Command to run + @type env: dict + @param env: Additional environment + @type output: str + @param output: if desired, the output of the command can be + saved in a file instead of the RunResult instance; this + parameter denotes the file name (if not None) + @type cwd: string + @param cwd: if specified, will be used as the working + directory for the command; the default will be / + @rtype: L{RunResult} + @return: RunResult instance + @raise erors.ProgrammerError: if we call this when forks are disabled """ if no_fork: @@ -125,14 +150,57 @@ def RunCmd(cmd): strcmd = cmd shell = True logging.debug("RunCmd '%s'", strcmd) - env = os.environ.copy() - env["LC_ALL"] = "C" + + cmd_env = os.environ.copy() + cmd_env["LC_ALL"] = "C" + if env is not None: + cmd_env.update(env) + + try: + if output is None: + out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd) + else: + status = _RunCmdFile(cmd, cmd_env, shell, output, cwd) + out = err = "" + except OSError, err: + if err.errno == errno.ENOENT: + raise errors.OpExecError("Can't execute '%s': not found (%s)" % + (strcmd, err)) + else: + raise + + if status >= 0: + exitcode = status + signal_ = None + else: + exitcode = None + signal_ = -status + + return RunResult(exitcode, signal_, out, err, strcmd) + + +def _RunCmdPipe(cmd, env, via_shell, cwd): + """Run a command and return its output. + + @type cmd: string or list + @param cmd: Command to run + @type env: dict + @param env: The environment to use + @type via_shell: bool + @param via_shell: if we should run via the shell + @type cwd: string + @param cwd: the working directory for the program + @rtype: tuple + @return: (out, err, status) + + """ poller = select.poll() - child = subprocess.Popen(cmd, shell=shell, + child = subprocess.Popen(cmd, shell=via_shell, stderr=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE, - close_fds=True, env=env) + close_fds=True, env=env, + cwd=cwd) child.stdin.close() poller.register(child.stdout, select.POLLIN) @@ -148,7 +216,18 @@ def RunCmd(cmd): fcntl.fcntl(fd, fcntl.F_SETFL, status | os.O_NONBLOCK) while fdmap: - for fd, event in poller.poll(): + try: + pollresult = poller.poll() + except EnvironmentError, eerr: + if eerr.errno == errno.EINTR: + continue + raise + except select.error, serr: + if serr[0] == errno.EINTR: + continue + raise + + for fd, event in pollresult: if event & select.POLLIN or event & select.POLLPRI: data = fdmap[fd][1].read() # no data from read signifies EOF (the same as POLLHUP) @@ -166,14 +245,40 @@ def RunCmd(cmd): err = err.getvalue() status = child.wait() - if status >= 0: - exitcode = status - signal = None - else: - exitcode = None - signal = -status + return out, err, status + + +def _RunCmdFile(cmd, env, via_shell, output, cwd): + """Run a command and save its output to a file. 
+ + @type cmd: string or list + @param cmd: Command to run + @type env: dict + @param env: The environment to use + @type via_shell: bool + @param via_shell: if we should run via the shell + @type output: str + @param output: the filename in which to save the output + @type cwd: string + @param cwd: the working directory for the program + @rtype: int + @return: the exit status - return RunResult(exitcode, signal, out, err, strcmd) + """ + fh = open(output, "a") + try: + child = subprocess.Popen(cmd, shell=via_shell, + stderr=subprocess.STDOUT, + stdout=fh, + stdin=subprocess.PIPE, + close_fds=True, env=env, + cwd=cwd) + + child.stdin.close() + status = child.wait() + finally: + fh.close() + return status def RemoveFile(filename): @@ -182,6 +287,9 @@ def RemoveFile(filename): Remove a file, ignoring non-existing ones or directories. Other errors are passed. + @type filename: str + @param filename: the file to be removed + """ try: os.unlink(filename) @@ -190,14 +298,43 @@ def RemoveFile(filename): raise +def RenameFile(old, new, mkdir=False, mkdir_mode=0750): + """Renames a file. + + @type old: string + @param old: Original path + @type new: string + @param new: New path + @type mkdir: bool + @param mkdir: Whether to create target directory if it doesn't exist + @type mkdir_mode: int + @param mkdir_mode: Mode for newly created directories + + """ + try: + return os.rename(old, new) + except OSError, err: + # In at least one use case of this function, the job queue, directory + # creation is very rare. Checking for the directory before renaming is not + # as efficient. + if mkdir and err.errno == errno.ENOENT: + # Create directory and try again + os.makedirs(os.path.dirname(new), mkdir_mode) + return os.rename(old, new) + raise + + def _FingerprintFile(filename): """Compute the fingerprint of a file. If the file does not exist, a None will be returned instead. - Args: - filename - Filename (str) + @type filename: str + @param filename: the filename to checksum + @rtype: str + @return: the hex digest of the sha checksum of the contents + of the file """ if not (os.path.exists(filename) and os.path.isfile(filename)): @@ -205,7 +342,7 @@ def _FingerprintFile(filename): f = open(filename) - fp = sha.sha() + fp = sha1() while True: data = f.read(4096) if not data: @@ -219,11 +356,11 @@ def _FingerprintFile(filename): def FingerprintFiles(files): """Compute fingerprints for a list of files. - Args: - files - array of filenames. ( [str, ...] ) - - Return value: - dictionary of filename: fingerprint for the files that exist + @type files: list + @param files: the list of filename to fingerprint + @rtype: dict + @return: a dictionary filename: fingerprint, holding only + existing files """ ret = {} @@ -239,18 +376,17 @@ def FingerprintFiles(files): def CheckDict(target, template, logname=None): """Ensure a dictionary has a required set of keys. - For the given dictionaries `target` and `template`, ensure target - has all the keys from template. Missing keys are added with values - from template. + For the given dictionaries I{target} and I{template}, ensure + I{target} has all the keys from I{template}. Missing keys are added + with values from template. 
- Args: - target - the dictionary to check - template - template dictionary - logname - a caller-chosen string to identify the debug log - entry; if None, no logging will be done - - Returns value: - None + @type target: dict + @param target: the dictionary to update + @type template: dict + @param template: the dictionary holding the default values + @type logname: str or None + @param logname: if not None, causes the missing keys to be + logged with this name """ missing = [] @@ -263,45 +399,100 @@ def CheckDict(target, template, logname=None): logging.warning('%s missing keys %s', logname, ', '.join(missing)) +def ForceDictType(target, key_types, allowed_values=None): + """Force the values of a dict to have certain types. + + @type target: dict + @param target: the dict to update + @type key_types: dict + @param key_types: dict mapping target dict keys to types + in constants.ENFORCEABLE_TYPES + @type allowed_values: list + @keyword allowed_values: list of specially allowed values + + """ + if allowed_values is None: + allowed_values = [] + + for key in target: + if key not in key_types: + msg = "Unknown key '%s'" % key + raise errors.TypeEnforcementError(msg) + + if target[key] in allowed_values: + continue + + type = key_types[key] + if type not in constants.ENFORCEABLE_TYPES: + msg = "'%s' has non-enforceable type %s" % (key, type) + raise errors.ProgrammerError(msg) + + if type == constants.VTYPE_STRING: + if not isinstance(target[key], basestring): + if isinstance(target[key], bool) and not target[key]: + target[key] = '' + else: + msg = "'%s' (value %s) is not a valid string" % (key, target[key]) + raise errors.TypeEnforcementError(msg) + elif type == constants.VTYPE_BOOL: + if isinstance(target[key], basestring) and target[key]: + if target[key].lower() == constants.VALUE_FALSE: + target[key] = False + elif target[key].lower() == constants.VALUE_TRUE: + target[key] = True + else: + msg = "'%s' (value %s) is not a valid boolean" % (key, target[key]) + raise errors.TypeEnforcementError(msg) + elif target[key]: + target[key] = True + else: + target[key] = False + elif type == constants.VTYPE_SIZE: + try: + target[key] = ParseUnit(target[key]) + except errors.UnitParseError, err: + msg = "'%s' (value %s) is not a valid size. error: %s" % \ + (key, target[key], err) + raise errors.TypeEnforcementError(msg) + elif type == constants.VTYPE_INT: + try: + target[key] = int(target[key]) + except (ValueError, TypeError): + msg = "'%s' (value %s) is not a valid integer" % (key, target[key]) + raise errors.TypeEnforcementError(msg) + + def IsProcessAlive(pid): """Check if a given pid exists on the system. - Returns: true or false, depending on if the pid exists or not - - Remarks: zombie processes treated as not alive, and giving a pid <= - 0 makes the function to return False. + @note: zombie status is not handled, so zombie processes + will be returned as alive + @type pid: int + @param pid: the process ID to check + @rtype: boolean + @return: True if the process exists """ if pid <= 0: return False try: - f = open("/proc/%d/status" % pid) - except IOError, err: + os.stat("/proc/%d/status" % pid) + return True + except EnvironmentError, err: if err.errno in (errno.ENOENT, errno.ENOTDIR): return False - - alive = True - try: - data = f.readlines() - if len(data) > 1: - state = data[1].split() - if len(state) > 1 and state[1] == "Z": - alive = False - finally: - f.close() - - return alive + raise def ReadPidFile(pidfile): - """Read the pid from a file. + """Read a pid from a file. 
- @param pidfile: Path to a file containing the pid to be checked - @type pidfile: string (filename) - @return: The process id, if the file exista and contains a valid PID, - otherwise 0 + @type pidfile: string + @param pidfile: path to the file containing the pid @rtype: int + @return: The process id, if the file exists and contains a valid PID, + otherwise 0 """ try: @@ -314,7 +505,7 @@ def ReadPidFile(pidfile): try: pid = int(pf.read()) except ValueError, err: - logging.info("Can't parse pid file contents", exc_info=err) + logging.info("Can't parse pid file contents", exc_info=True) return 0 return pid @@ -324,18 +515,20 @@ def MatchNameComponent(key, name_list): """Try to match a name against a list. This function will try to match a name like test1 against a list - like ['test1.example.com', 'test2.example.com', ...]. Against this - list, 'test1' as well as 'test1.example' will match, but not - 'test1.ex'. A multiple match will be considered as no match at all - (e.g. 'test1' against ['test1.example.com', 'test1.example.org']). + like C{['test1.example.com', 'test2.example.com', ...]}. Against + this list, I{'test1'} as well as I{'test1.example'} will match, but + not I{'test1.ex'}. A multiple match will be considered as no match + at all (e.g. I{'test1'} against C{['test1.example.com', + 'test1.example.org']}). - Args: - key: the name to be searched - name_list: the list of strings against which to search the key + @type key: str + @param key: the name to be searched + @type name_list: list + @param name_list: the list of strings against which to search the key - Returns: - None if there is no match *or* if there are multiple matches - otherwise the element from the list which matches + @rtype: None or str + @return: None if there is no match I{or} if there are multiple matches, + otherwise the element from the list which matches """ mo = re.compile("^%s(\..*)?$" % re.escape(key)) @@ -373,7 +566,7 @@ class HostInfo: def SysName(): """Return the current system's name. - This is simply a wrapper over socket.gethostname() + This is simply a wrapper over C{socket.gethostname()}. """ return socket.gethostname() @@ -382,12 +575,13 @@ class HostInfo: def LookupHostname(hostname): """Look up hostname - Args: - hostname: hostname to look up + @type hostname: str + @param hostname: hostname to look up - Returns: - a tuple (name, aliases, ipaddrs) as returned by socket.gethostbyname_ex - in case of errors in resolving, we raise a ResolverError + @rtype: tuple + @return: a tuple (name, aliases, ipaddrs) as returned by + C{socket.gethostbyname_ex} + @raise errors.ResolverError: in case of errors in resolving """ try: @@ -402,8 +596,10 @@ class HostInfo: def ListVolumeGroups(): """List volume groups and their size - Returns: - Dictionary with keys volume name and values the size of the volume + @rtype: dict + @return: + Dictionary with keys volume name and values + the size of the volume """ command = "vgs --noheadings --units m --nosuffix -o name,size" @@ -428,8 +624,10 @@ def ListVolumeGroups(): def BridgeExists(bridge): """Check whether the given bridge exists in the system - Returns: - True if it does, false otherwise. + @type bridge: str + @param bridge: the bridge name to check + @rtype: boolean + @return: True if it does """ return os.path.isdir("/sys/class/net/%s/bridge" % bridge) @@ -438,15 +636,18 @@ def BridgeExists(bridge): def NiceSort(name_list): """Sort a list of strings based on digit and non-digit groupings. 
- Given a list of names ['a1', 'a10', 'a11', 'a2'] this function will - sort the list in the logical order ['a1', 'a2', 'a10', 'a11']. + Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function + will sort the list in the logical order C{['a1', 'a2', 'a10', + 'a11']}. The sort algorithm breaks each name in groups of either only-digits or no-digits. Only the first eight such groups are considered, and after that we just use what's left of the string. - Return value - - a copy of the list sorted according to our algorithm + @type name_list: list + @param name_list: the names to be sorted + @rtype: list + @return: a copy of the name list sorted with our algorithm """ _SORTER_BASE = "(\D+|\d+)" @@ -472,10 +673,16 @@ def NiceSort(name_list): def TryConvert(fn, val): """Try to convert a value ignoring errors. - This function tries to apply function `fn` to `val`. If no - ValueError or TypeError exceptions are raised, it will return the - result, else it will return the original value. Any other exceptions - are propagated to the caller. + This function tries to apply function I{fn} to I{val}. If no + C{ValueError} or C{TypeError} exceptions are raised, it will return + the result, else it will return the original value. Any other + exceptions are propagated to the caller. + + @type fn: callable + @param fn: function to apply to the value + @param val: the value to be converted + @return: The converted value if the conversion was successful, + otherwise the original value. """ try: @@ -486,13 +693,20 @@ def TryConvert(fn, val): def IsValidIP(ip): - """Verifies the syntax of an IP address. + """Verifies the syntax of an IPv4 address. - This function checks if the ip address passes is valid or not based - on syntax (not ip range, class calculations or anything). + This function checks if the IPv4 address passes is valid or not based + on syntax (not IP range, class calculations, etc.). + + @type ip: str + @param ip: the address to be checked + @rtype: a regular expression match object + @return: a regular epression match object, or None if the + address is not valid """ unit = "(0|[1-9]\d{0,2})" + #TODO: convert and return only boolean return re.match("^%s\.%s\.%s\.%s$" % (unit, unit, unit, unit), ip) @@ -506,6 +720,11 @@ def IsValidShellParam(word): Note that we are overly restrictive here, in order to be on the safe side. + @type word: str + @param word: the word to check + @rtype: boolean + @return: True if the word is 'safe' + """ return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word)) @@ -518,6 +737,12 @@ def BuildShellCmd(template, *args): metacharaters). If everything is ok, it will return the result of template % args. + @type template: str + @param template: the string holding the template for the + string formatting + @rtype: str + @return: the expanded command line + """ for word in args: if not IsValidShellParam(word): @@ -526,30 +751,51 @@ def BuildShellCmd(template, *args): return template % args -def FormatUnit(value): +def FormatUnit(value, units): """Formats an incoming number of MiB with the appropriate unit. - Value needs to be passed as a numeric type. Return value is always a string. 
+ @type value: int + @param value: integer representing the value in MiB (1048576) + @type units: char + @param units: the type of formatting we should do: + - 'h' for automatic scaling + - 'm' for MiBs + - 'g' for GiBs + - 't' for TiBs + @rtype: str + @return: the formatted value (with suffix) """ - if value < 1024: - return "%dM" % round(value, 0) + if units not in ('m', 'g', 't', 'h'): + raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units)) + + suffix = '' + + if units == 'm' or (units == 'h' and value < 1024): + if units == 'h': + suffix = 'M' + return "%d%s" % (round(value, 0), suffix) - elif value < (1024 * 1024): - return "%0.1fG" % round(float(value) / 1024, 1) + elif units == 'g' or (units == 'h' and value < (1024 * 1024)): + if units == 'h': + suffix = 'G' + return "%0.1f%s" % (round(float(value) / 1024, 1), suffix) else: - return "%0.1fT" % round(float(value) / 1024 / 1024, 1) + if units == 'h': + suffix = 'T' + return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix) def ParseUnit(input_string): """Tries to extract number and scale from the given string. - Input must be in the format NUMBER+ [DOT NUMBER+] SPACE* [UNIT]. If no unit - is specified, it defaults to MiB. Return value is always an int in MiB. + Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE* + [UNIT]}. If no unit is specified, it defaults to MiB. Return value + is always an int in MiB. """ - m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', input_string) + m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string)) if not m: raise errors.UnitParseError("Invalid format") @@ -589,9 +835,11 @@ def ParseUnit(input_string): def AddAuthorizedKey(file_name, key): """Adds an SSH public key to an authorized_keys file. - Args: - file_name: Path to authorized_keys file - key: String containing key + @type file_name: str + @param file_name: path to authorized_keys file + @type key: str + @param key: string containing key + """ key_fields = key.split() @@ -616,9 +864,11 @@ def AddAuthorizedKey(file_name, key): def RemoveAuthorizedKey(file_name, key): """Removes an SSH public key from an authorized_keys file. - Args: - file_name: Path to authorized_keys file - key: String containing key + @type file_name: str + @param file_name: path to authorized_keys file + @type key: str + @param key: string containing key + """ key_fields = key.split() @@ -647,7 +897,17 @@ def RemoveAuthorizedKey(file_name, key): def SetEtcHostsEntry(file_name, ip, hostname, aliases): """Sets the name of an IP address and hostname in /etc/hosts. + @type file_name: str + @param file_name: path to the file to modify (usually C{/etc/hosts}) + @type ip: str + @param ip: the IP address + @type hostname: str + @param hostname: the hostname to be added + @type aliases: list + @param aliases: the list of aliases to add for the hostname + """ + # FIXME: use WriteFile + fn rather than duplicating its efforts # Ensure aliases are unique aliases = UniqueSequence([hostname] + aliases)[1:] @@ -657,7 +917,6 @@ def SetEtcHostsEntry(file_name, ip, hostname, aliases): try: f = open(file_name, 'r') try: - written = False for line in f: fields = line.split() if fields and not fields[0].startswith('#') and ip == fields[0]: @@ -671,6 +930,7 @@ def SetEtcHostsEntry(file_name, ip, hostname, aliases): out.flush() os.fsync(out) + os.chmod(tmpname, 0644) os.rename(tmpname, file_name) finally: f.close() @@ -684,6 +944,10 @@ def SetEtcHostsEntry(file_name, ip, hostname, aliases): def AddHostToEtcHosts(hostname): """Wrapper around SetEtcHostsEntry. 
+ @type hostname: str + @param hostname: a hostname that will be resolved and added to + L{constants.ETC_HOSTS} + """ hi = HostInfo(name=hostname) SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()]) @@ -693,7 +957,14 @@ def RemoveEtcHostsEntry(file_name, hostname): """Removes a hostname from /etc/hosts. IP addresses without names are removed from the file. + + @type file_name: str + @param file_name: path to the file to modify (usually C{/etc/hosts}) + @type hostname: str + @param hostname: the hostname to be removed + """ + # FIXME: use WriteFile + fn rather than duplicating its efforts fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name)) try: out = os.fdopen(fd, 'w') @@ -715,6 +986,7 @@ def RemoveEtcHostsEntry(file_name, hostname): out.flush() os.fsync(out) + os.chmod(tmpname, 0644) os.rename(tmpname, file_name) finally: f.close() @@ -728,6 +1000,11 @@ def RemoveEtcHostsEntry(file_name, hostname): def RemoveHostFromEtcHosts(hostname): """Wrapper around RemoveEtcHostsEntry. + @type hostname: str + @param hostname: hostname that will be resolved and its + full and shot name will be removed from + L{constants.ETC_HOSTS} + """ hi = HostInfo(name=hostname) RemoveEtcHostsEntry(constants.ETC_HOSTS, hi.name) @@ -737,7 +1014,11 @@ def RemoveHostFromEtcHosts(hostname): def CreateBackup(file_name): """Creates a backup of a file. - Returns: the path to the newly created backup file. + @type file_name: str + @param file_name: file to be backed up + @rtype: str + @return: the path to the newly created backup + @raise errors.ProgrammerError: for invalid file names """ if not os.path.isfile(file_name): @@ -764,6 +1045,11 @@ def CreateBackup(file_name): def ShellQuote(value): """Quotes shell argument according to POSIX. + @type value: str + @param value: the argument to be quoted + @rtype: str + @return: the quoted value + """ if _re_shell_unquoted.match(value): return value @@ -772,7 +1058,12 @@ def ShellQuote(value): def ShellQuoteArgs(args): - """Quotes all given shell arguments and concatenates using spaces. + """Quotes a list of shell arguments. + + @type args: list + @param args: list of arguments to be quoted + @rtype: str + @return: the quoted arguments concatenaned with spaces """ return ' '.join([ShellQuote(i) for i in args]) @@ -781,19 +1072,27 @@ def ShellQuoteArgs(args): def TcpPing(target, port, timeout=10, live_port_needed=False, source=None): """Simple ping implementation using TCP connect(2). - Try to do a TCP connect(2) from an optional source IP to the - specified target IP and the specified target port. If the optional - parameter live_port_needed is set to true, requires the remote end - to accept the connection. The timeout is specified in seconds and - defaults to 10 seconds. If the source optional argument is not - passed, the source address selection is left to the kernel, - otherwise we try to connect using the passed address (failures to - bind other than EADDRNOTAVAIL will be ignored). + Check if the given IP is reachable by doing attempting a TCP connect + to it. 
+ + @type target: str + @param target: the IP or hostname to ping + @type port: int + @param port: the port to connect to + @type timeout: int + @param timeout: the timeout on the connection attemp + @type live_port_needed: boolean + @param live_port_needed: whether a closed port will cause the + function to return failure, as if there was a timeout + @type source: str or None + @param source: if specified, will cause the connect to be made + from this specific source address; failures to bind other + than C{EADDRNOTAVAIL} will be ignored """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sucess = False + success = False if source is not None: try: @@ -816,8 +1115,29 @@ def TcpPing(target, port, timeout=10, live_port_needed=False, source=None): return success +def OwnIpAddress(address): + """Check if the current host has the the given IP address. + + Currently this is done by TCP-pinging the address from the loopback + address. + + @type address: string + @param address: the addres to check + @rtype: bool + @return: True if we own the address + + """ + return TcpPing(address, constants.DEFAULT_NODED_PORT, + source=constants.LOCALHOST_IP_ADDRESS) + + def ListVisibleFiles(path): - """Returns a list of all visible files in a directory. + """Returns a list of visible files in a directory. + + @type path: str + @param path: the directory to enumerate + @rtype: list + @return: the list of all files not starting with a dot """ files = [i for i in os.listdir(path) if not i.startswith(".")] @@ -849,6 +1169,10 @@ def GetHomeDir(user, default=None): def NewUUID(): """Returns a random UUID. + @note: This is a Linux-specific method as it uses the /proc + filesystem. + @rtype: str + """ f = open("/proc/sys/kernel/random/uuid", "r") try: @@ -857,6 +1181,56 @@ def NewUUID(): f.close() +def GenerateSecret(): + """Generates a random secret. + + This will generate a pseudo-random secret, and return its sha digest + (so that it can be used where an ASCII string is needed). + + @rtype: str + @return: a sha1 hexdigest of a block of 64 random bytes + + """ + return sha1(os.urandom(64)).hexdigest() + + +def EnsureDirs(dirs): + """Make required directories, if they don't exist. + + @param dirs: list of tuples (dir_name, dir_mode) + @type dirs: list of (string, integer) + + """ + for dir_name, dir_mode in dirs: + try: + os.mkdir(dir_name, dir_mode) + except EnvironmentError, err: + if err.errno != errno.EEXIST: + raise errors.GenericError("Cannot create needed directory" + " '%s': %s" % (dir_name, err)) + if not os.path.isdir(dir_name): + raise errors.GenericError("%s is not a directory" % dir_name) + + +def ReadFile(file_name, size=None): + """Reads a file. + + @type size: None or int + @param size: Read at most size bytes + @rtype: str + @return: the (possibly partial) conent of the file + + """ + f = open(file_name, "r") + try: + if size is None: + return f.read() + else: + return f.read(size) + finally: + f.close() + + def WriteFile(file_name, fn=None, data=None, mode=None, uid=-1, gid=-1, atime=None, mtime=None, close=True, @@ -871,25 +1245,39 @@ def WriteFile(file_name, fn=None, data=None, mtime/atime of the file. If the function doesn't raise an exception, it has succeeded and the - target file has the new contents. If the file has raised an + target file has the new contents. If the function has raised an exception, an existing target file should be unmodified and the temporary file should be removed. 
- Args: - file_name: New filename - fn: Content writing function, called with file descriptor as parameter - data: Content as string - mode: File mode - uid: Owner - gid: Group - atime: Access time - mtime: Modification time - close: Whether to close file after writing it - prewrite: Function object called before writing content - postwrite: Function object called after writing content - - Returns: - None if "close" parameter evaluates to True, otherwise file descriptor. + @type file_name: str + @param file_name: the target filename + @type fn: callable + @param fn: content writing function, called with + file descriptor as parameter + @type data: str + @param data: contents of the file + @type mode: int + @param mode: file mode + @type uid: int + @param uid: the owner of the file + @type gid: int + @param gid: the group of the file + @type atime: int + @param atime: a custom access time to be set on the file + @type mtime: int + @param mtime: a custom modification time to be set on the file + @type close: boolean + @param close: whether to close file after writing it + @type prewrite: callable + @param prewrite: function to be called before writing content + @type postwrite: callable + @param postwrite: function to be called after writing content + + @rtype: None or int + @return: None if the 'close' parameter evaluates to True, + otherwise the file descriptor + + @raise errors.ProgrammerError: if any of the arguments are not valid """ if not os.path.isabs(file_name): @@ -908,6 +1296,7 @@ def WriteFile(file_name, fn=None, data=None, dir_name, base_name = os.path.split(file_name) fd, new_name = tempfile.mkstemp('.new', base_name, dir_name) + do_remove = True # here we need to make sure we remove the temp file, if any error # leaves it in place try: @@ -928,13 +1317,15 @@ def WriteFile(file_name, fn=None, data=None, os.utime(new_name, (atime, mtime)) if not dry_run: os.rename(new_name, file_name) + do_remove = False finally: if close: os.close(fd) result = None else: result = fd - RemoveFile(new_name) + if do_remove: + RemoveFile(new_name) return result @@ -947,9 +1338,16 @@ def FirstFree(seq, base=0): value, the index will be returned. The base argument is used to start at a different offset, - i.e. [3, 4, 6] with offset=3 will return 5. + i.e. C{[3, 4, 6]} with I{offset=3} will return 5. + + Example: C{[0, 1, 3]} will return I{2}. - Example: [0, 1, 3] will return 2. + @type seq: sequence + @param seq: the sequence to be analyzed. + @type base: int + @param base: use this value as the base index of the sequence + @rtype: int + @return: the first non-used index in the sequence """ for idx, elem in enumerate(seq): @@ -978,6 +1376,12 @@ def UniqueSequence(seq): """Returns a list with unique elements. Element order is preserved. + + @type seq: sequence + @param seq: the sequence with the source elementes + @rtype: list + @return: list of unique elements from seq + """ seen = set() return [i for i in seq if i not in seen and not seen.add(i)] @@ -988,6 +1392,12 @@ def IsValidMac(mac): Checks wether the supplied MAC address is formally correct, only accepts colon separated format. + + @type mac: str + @param mac: the MAC to be validated + @rtype: boolean + @return: True is the MAC seems valid + """ mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$") return mac_check.match(mac) is not None @@ -996,6 +1406,11 @@ def IsValidMac(mac): def TestDelay(duration): """Sleep for a fixed amount of time. 
+ @type duration: float + @param duration: the sleep duration + @rtype: boolean + @return: False for negative value, True otherwise + """ if duration < 0: return False @@ -1003,15 +1418,37 @@ def TestDelay(duration): return True -def Daemonize(logfile, noclose_fds=None): - """Daemonize the current process. +def _CloseFDNoErr(fd, retries=5): + """Close a file descriptor ignoring errors. - This detaches the current process from the controlling terminal and - runs it in the background as a daemon. + @type fd: int + @param fd: the file descriptor + @type retries: int + @param retries: how many retries to make, in case we get any + other error than EBADF + + """ + try: + os.close(fd) + except OSError, err: + if err.errno != errno.EBADF: + if retries > 0: + _CloseFDNoErr(fd, retries - 1) + # else either it's closed already or we're out of retries, so we + # ignore this and go on + + +def CloseFDs(noclose_fds=None): + """Close file descriptors. + + This closes all file descriptors above 2 (i.e. except + stdin/out/err). + + @type noclose_fds: list or None + @param noclose_fds: if given, it denotes a list of file descriptor + that should not be closed """ - UMASK = 077 - WORKDIR = "/" # Default maximum for the number of available file descriptors. if 'SC_OPEN_MAX' in os.sysconf_names: try: @@ -1022,6 +1459,31 @@ def Daemonize(logfile, noclose_fds=None): MAXFD = 1024 else: MAXFD = 1024 + maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + if (maxfd == resource.RLIM_INFINITY): + maxfd = MAXFD + + # Iterate through and close all file descriptors (except the standard ones) + for fd in range(3, maxfd): + if noclose_fds and fd in noclose_fds: + continue + _CloseFDNoErr(fd) + + +def Daemonize(logfile): + """Daemonize the current process. + + This detaches the current process from the controlling terminal and + runs it in the background as a daemon. + + @type logfile: str + @param logfile: the logfile to which we should redirect stdout/stderr + @rtype: int + @return: the value zero + + """ + UMASK = 077 + WORKDIR = "/" # this might fail pid = os.fork() @@ -1037,27 +1499,26 @@ def Daemonize(logfile, noclose_fds=None): os._exit(0) # Exit parent (the first child) of the second child. else: os._exit(0) # Exit parent of the first child. - maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] - if (maxfd == resource.RLIM_INFINITY): - maxfd = MAXFD - # Iterate through and close all file descriptors. - for fd in range(0, maxfd): - if noclose_fds and fd in noclose_fds: - continue - try: - os.close(fd) - except OSError: # ERROR, fd wasn't open to begin with (ignored) - pass - os.open(logfile, os.O_RDWR|os.O_CREAT|os.O_APPEND, 0600) - # Duplicate standard input to standard output and standard error. - os.dup2(0, 1) # standard output (1) - os.dup2(0, 2) # standard error (2) + for fd in range(3): + _CloseFDNoErr(fd) + i = os.open("/dev/null", os.O_RDONLY) # stdin + assert i == 0, "Can't close/reopen stdin" + i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout + assert i == 1, "Can't close/reopen stdout" + # Duplicate standard output to standard error. + os.dup2(1, 2) return 0 -def _DaemonPidFileName(name): - """Compute a ganeti pid file absolute path, given the daemon name. 
+def DaemonPidFileName(name): + """Compute a ganeti pid file absolute path + + @type name: str + @param name: the daemon name + @rtype: str + @return: the full path to the pidfile corresponding to the given + daemon name """ return os.path.join(constants.RUN_GANETI_DIR, "%s.pid" % name) @@ -1066,11 +1527,16 @@ def _DaemonPidFileName(name): def WritePidFile(name): """Write the current process pidfile. - The file will be written to constants.RUN_GANETI_DIR/name.pid + The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid} + + @type name: str + @param name: the daemon name to use + @raise errors.GenericError: if the pid file already exists and + points to a live process """ pid = os.getpid() - pidfilename = _DaemonPidFileName(name) + pidfilename = DaemonPidFileName(name) if IsProcessAlive(ReadPidFile(pidfilename)): raise errors.GenericError("%s contains a live process" % pidfilename) @@ -1082,9 +1548,12 @@ def RemovePidFile(name): Any errors are ignored. + @type name: str + @param name: the daemon name used to derive the pidfile name + """ pid = os.getpid() - pidfilename = _DaemonPidFileName(name) + pidfilename = DaemonPidFileName(name) # TODO: we could check here that the file contains our pid try: RemoveFile(pidfilename) @@ -1092,21 +1561,79 @@ def RemovePidFile(name): pass +def KillProcess(pid, signal_=signal.SIGTERM, timeout=30, + waitpid=False): + """Kill a process given by its pid. + + @type pid: int + @param pid: The PID to terminate. + @type signal_: int + @param signal_: The signal to send, by default SIGTERM + @type timeout: int + @param timeout: The timeout after which, if the process is still alive, + a SIGKILL will be sent. If not positive, no such checking + will be done + @type waitpid: boolean + @param waitpid: If true, we should waitpid on this process after + sending signals, since it's our own child and otherwise it + would remain as zombie + + """ + def _helper(pid, signal_, wait): + """Simple helper to encapsulate the kill/waitpid sequence""" + os.kill(pid, signal_) + if wait: + try: + os.waitpid(pid, os.WNOHANG) + except OSError: + pass + + if pid <= 0: + # kill with pid=0 == suicide + raise errors.ProgrammerError("Invalid pid given '%s'" % pid) + + if not IsProcessAlive(pid): + return + _helper(pid, signal_, waitpid) + if timeout <= 0: + return + + # Wait up to $timeout seconds + end = time.time() + timeout + wait = 0.01 + while time.time() < end and IsProcessAlive(pid): + try: + (result_pid, _) = os.waitpid(pid, os.WNOHANG) + if result_pid > 0: + break + except OSError: + pass + time.sleep(wait) + # Make wait time longer for next try + if wait < 0.1: + wait *= 1.5 + + if IsProcessAlive(pid): + # Kill process if it's still alive + _helper(pid, signal.SIGKILL, waitpid) + + def FindFile(name, search_path, test=os.path.exists): """Look for a filesystem object in a given path. This is an abstract method to search for filesystem object (files, dirs) under a given search path. 
- Args: - - name: the name to look for - - search_path: list of directory names - - test: the test which the full path must satisfy - (defaults to os.path.exists) - - Returns: - - full path to the item if found - - None otherwise + @type name: str + @param name: the name to look for + @type search_path: str + @param search_path: location to start at + @type test: callable + @param test: a function taking one argument that should return True + if the a given object is valid; the default value is + os.path.exists, causing only existing files to be returned + @rtype: str or None + @return: full path to the object if found, None otherwise """ for dir_name in search_path: @@ -1119,8 +1646,17 @@ def FindFile(name, search_path, test=os.path.exists): def CheckVolumeGroupSize(vglist, vgname, minsize): """Checks if the volume group list is valid. - A non-None return value means there's an error, and the return value - is the error message. + The function will check if a given volume group is in the list of + volume groups and has a minimum size. + + @type vglist: dict + @param vglist: dictionary of volume group names and their size + @type vgname: str + @param vgname: the volume group we should check + @type minsize: int + @param minsize: the minimum size we accept + @rtype: None or str + @return: None for success, otherwise the error message """ vgsize = vglist.get(vgname, None) @@ -1132,6 +1668,188 @@ def CheckVolumeGroupSize(vglist, vgname, minsize): return None +def SplitTime(value): + """Splits time as floating point number into a tuple. + + @param value: Time in seconds + @type value: int or float + @return: Tuple containing (seconds, microseconds) + + """ + (seconds, microseconds) = divmod(int(value * 1000000), 1000000) + + assert 0 <= seconds, \ + "Seconds must be larger than or equal to 0, but are %s" % seconds + assert 0 <= microseconds <= 999999, \ + "Microseconds must be 0-999999, but are %s" % microseconds + + return (int(seconds), int(microseconds)) + + +def MergeTime(timetuple): + """Merges a tuple into time as a floating point number. + + @param timetuple: Time as tuple, (seconds, microseconds) + @type timetuple: tuple + @return: Time as a floating point number expressed in seconds + + """ + (seconds, microseconds) = timetuple + + assert 0 <= seconds, \ + "Seconds must be larger than or equal to 0, but are %s" % seconds + assert 0 <= microseconds <= 999999, \ + "Microseconds must be 0-999999, but are %s" % microseconds + + return float(seconds) + (float(microseconds) * 0.000001) + + +def GetNodeDaemonPort(): + """Get the node daemon port for this cluster. + + Note that this routine does not read a ganeti-specific file, but + instead uses C{socket.getservbyname} to allow pre-customization of + this parameter outside of Ganeti. + + @rtype: int + + """ + try: + port = socket.getservbyname("ganeti-noded", "tcp") + except socket.error: + port = constants.DEFAULT_NODED_PORT + + return port + + +def SetupLogging(logfile, debug=False, stderr_logging=False, program="", + multithreaded=False): + """Configures the logging module. 
+ + @type logfile: str + @param logfile: the filename to which we should log + @type debug: boolean + @param debug: whether to enable debug messages too or + only those at C{INFO} and above level + @type stderr_logging: boolean + @param stderr_logging: whether we should also log to the standard error + @type program: str + @param program: the name under which we should log messages + @type multithreaded: boolean + @param multithreaded: if True, will add the thread name to the log file + @raise EnvironmentError: if we can't open the log file and + stderr logging is disabled + + """ + fmt = "%(asctime)s: " + program + " pid=%(process)d" + if multithreaded: + fmt += "/%(threadName)s" + if debug: + fmt += " %(module)s:%(lineno)s" + fmt += " %(levelname)s %(message)s" + formatter = logging.Formatter(fmt) + + root_logger = logging.getLogger("") + root_logger.setLevel(logging.NOTSET) + + # Remove all previously setup handlers + for handler in root_logger.handlers: + handler.close() + root_logger.removeHandler(handler) + + if stderr_logging: + stderr_handler = logging.StreamHandler() + stderr_handler.setFormatter(formatter) + if debug: + stderr_handler.setLevel(logging.NOTSET) + else: + stderr_handler.setLevel(logging.CRITICAL) + root_logger.addHandler(stderr_handler) + + # this can fail, if the logging directories are not setup or we have + # a permisssion problem; in this case, it's best to log but ignore + # the error if stderr_logging is True, and if false we re-raise the + # exception since otherwise we could run but without any logs at all + try: + logfile_handler = logging.FileHandler(logfile) + logfile_handler.setFormatter(formatter) + if debug: + logfile_handler.setLevel(logging.DEBUG) + else: + logfile_handler.setLevel(logging.INFO) + root_logger.addHandler(logfile_handler) + except EnvironmentError: + if stderr_logging: + logging.exception("Failed to enable logging to file '%s'", logfile) + else: + # we need to re-raise the exception + raise + +def IsNormAbsPath(path): + """Check whether a path is absolute and also normalized + + This avoids things like /dir/../../other/path to be valid. + + """ + return os.path.normpath(path) == path and os.path.isabs(path) + +def TailFile(fname, lines=20): + """Return the last lines from a file. + + @note: this function will only read and parse the last 4KB of + the file; if the lines are very long, it could be that less + than the requested number of lines are returned + + @param fname: the file name + @type lines: int + @param lines: the (maximum) number of lines to return + + """ + fd = open(fname, "r") + try: + fd.seek(0, 2) + pos = fd.tell() + pos = max(0, pos-4096) + fd.seek(pos, 0) + raw_data = fd.read() + finally: + fd.close() + + rows = raw_data.splitlines() + return rows[-lines:] + + +def SafeEncode(text): + """Return a 'safe' version of a source string. + + This function mangles the input string and returns a version that + should be safe to disply/encode as ASCII. To this end, we first + convert it to ASCII using the 'backslashreplace' encoding which + should get rid of any non-ASCII chars, and then we again encode it + via 'string_escape' which converts '\n' into '\\n' so that log + messages remain one-line. + + @type text: str or unicode + @param text: input data + @rtype: str + @return: a safe version of text + + """ + text = text.encode('ascii', 'backslashreplace') + text = text.encode('string_escape') + return text + + +def CommaJoin(names): + """Nicely join a set of identifiers. 
+ + @param names: set, list or tuple + @return: a string with the formatted results + + """ + return ", ".join(["'%s'" % val for val in names]) + + def LockedMethod(fn): """Synchronized object access decorator. @@ -1139,14 +1857,22 @@ def LockedMethod(fn): object's own lock which is hardcoded to '_lock'. """ + def _LockDebug(*args, **kwargs): + if debug_locks: + logging.debug(*args, **kwargs) + def wrapper(self, *args, **kwargs): assert hasattr(self, '_lock') lock = self._lock + _LockDebug("Waiting for %s", lock) lock.acquire() try: + _LockDebug("Acquired %s", lock) result = fn(self, *args, **kwargs) finally: + _LockDebug("Releasing %s", lock) lock.release() + _LockDebug("Released %s", lock) return result return wrapper @@ -1154,6 +1880,9 @@ def LockedMethod(fn): def LockFile(fd): """Locks a file using POSIX locks. + @type fd: int + @param fd: the file descriptor we need to lock + """ try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) @@ -1163,17 +1892,143 @@ def LockFile(fd): raise +class FileLock(object): + """Utility class for file locks. + + """ + def __init__(self, filename): + """Constructor for FileLock. + + This will open the file denoted by the I{filename} argument. + + @type filename: str + @param filename: path to the file to be locked + + """ + self.filename = filename + self.fd = open(self.filename, "w") + + def __del__(self): + self.Close() + + def Close(self): + """Close the file and release the lock. + + """ + if self.fd: + self.fd.close() + self.fd = None + + def _flock(self, flag, blocking, timeout, errmsg): + """Wrapper for fcntl.flock. + + @type flag: int + @param flag: operation flag + @type blocking: bool + @param blocking: whether the operation should be done in blocking mode. + @type timeout: None or float + @param timeout: for how long the operation should be retried (implies + non-blocking mode). + @type errmsg: string + @param errmsg: error message in case operation fails. + + """ + assert self.fd, "Lock was closed" + assert timeout is None or timeout >= 0, \ + "If specified, timeout must be positive" + + if timeout is not None: + flag |= fcntl.LOCK_NB + timeout_end = time.time() + timeout + + # Blocking doesn't have effect with timeout + elif not blocking: + flag |= fcntl.LOCK_NB + timeout_end = None + + retry = True + while retry: + try: + fcntl.flock(self.fd, flag) + retry = False + except IOError, err: + if err.errno in (errno.EAGAIN, ): + if timeout_end is not None and time.time() < timeout_end: + # Wait before trying again + time.sleep(max(0.1, min(1.0, timeout))) + else: + raise errors.LockError(errmsg) + else: + logging.exception("fcntl.flock failed") + raise + + def Exclusive(self, blocking=False, timeout=None): + """Locks the file in exclusive mode. + + @type blocking: boolean + @param blocking: whether to block and wait until we + can lock the file or return immediately + @type timeout: int or None + @param timeout: if not None, the duration to wait for the lock + (in blocking mode) + + """ + self._flock(fcntl.LOCK_EX, blocking, timeout, + "Failed to lock %s in exclusive mode" % self.filename) + + def Shared(self, blocking=False, timeout=None): + """Locks the file in shared mode. 
+ + @type blocking: boolean + @param blocking: whether to block and wait until we + can lock the file or return immediately + @type timeout: int or None + @param timeout: if not None, the duration to wait for the lock + (in blocking mode) + + """ + self._flock(fcntl.LOCK_SH, blocking, timeout, + "Failed to lock %s in shared mode" % self.filename) + + def Unlock(self, blocking=True, timeout=None): + """Unlocks the file. + + According to C{flock(2)}, unlocking can also be a nonblocking + operation:: + + To make a non-blocking request, include LOCK_NB with any of the above + operations. + + @type blocking: boolean + @param blocking: whether to block and wait until we + can lock the file or return immediately + @type timeout: int or None + @param timeout: if not None, the duration to wait for the lock + (in blocking mode) + + """ + self._flock(fcntl.LOCK_UN, blocking, timeout, + "Failed to unlock %s" % self.filename) + + class SignalHandler(object): """Generic signal handler class. - It automatically restores the original handler when deconstructed or when - Reset() is called. You can either pass your own handler function in or query - the "called" attribute to detect whether the signal was sent. + It automatically restores the original handler when deconstructed or + when L{Reset} is called. You can either pass your own handler + function in or query the L{called} attribute to detect whether the + signal was sent. + + @type signum: list + @ivar signum: the signals we handle + @type called: boolean + @ivar called: tracks whether any of the signals have been raised """ def __init__(self, signum): """Constructs a new SignalHandler instance. + @type signum: int or list of ints @param signum: Single signal number or set of signal numbers """ @@ -1208,6 +2063,8 @@ class SignalHandler(object): def Reset(self): """Restore previous handler. + This will reset all the signals to their previous handlers. + """ for signum, prev_handler in self._previous.items(): signal.signal(signum, prev_handler) @@ -1215,7 +2072,7 @@ class SignalHandler(object): del self._previous[signum] def Clear(self): - """Unsets "called" flag. + """Unsets the L{called} flag. This function can be used in case a signal may arrive several times. @@ -1229,3 +2086,45 @@ class SignalHandler(object): # This is not nice and not absolutely atomic, but it appears to be the only # solution in Python -- there are no atomic types. self.called = True + + +class FieldSet(object): + """A simple field set. + + Among the features are: + - checking if a string is among a list of static string or regex objects + - checking if a whole list of string matches + - returning the matching groups from a regex match + + Internally, all fields are held as regular expression objects. 
+  """
+  def __init__(self, *items):
+    self.items = [re.compile("^%s$" % value) for value in items]
+
+  def Extend(self, other_set):
+    """Extend the field set with the items from another one"""
+    self.items.extend(other_set.items)
+
+  def Matches(self, field):
+    """Checks if a field matches the current set
+
+    @type field: str
+    @param field: the string to match
+    @return: either False or a regular expression match object
+
+    """
+    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
+      return m
+    return False
+
+  def NonMatching(self, items):
+    """Returns the list of fields not matching the current set
+
+    @type items: list
+    @param items: the list of fields to check
+    @rtype: list
+    @return: list of non-matching fields
+
+    """
+    return [val for val in items if not self.Matches(val)]
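
A few usage sketches for helpers added or extended by this patch follow. This first one exercises the new env, output and cwd parameters of RunCmd(); it assumes a tree where "from ganeti import utils" is importable, and the command lines and file names are illustrative only.

from ganeti import utils

# env entries are merged over a copy of os.environ (which also gets
# LC_ALL=C), and cwd selects the working directory of the child process.
result = utils.RunCmd(["ls", "-l"], env={"DEMO_VAR": "1"}, cwd="/tmp")
if result.failed:
  print "ls failed: %s" % result.fail_reason
else:
  print result.stdout

# With output=<filename> the command's stdout and stderr are appended to
# that file and result.stdout/result.stderr stay empty strings.
result = utils.RunCmd("echo hello", output="/tmp/runcmd-demo.log")
print "exit code: %s" % result.exit_code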
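
ForceDictType() converts dictionary values in place according to the VTYPE_* constants it references in lib/constants.py; the hvparams-style dictionary below is made up for the example.

from ganeti import utils
from ganeti import constants

params = {"memory": "512", "acpi": "true", "boot_order": "cd"}
key_types = {
  "memory": constants.VTYPE_SIZE,        # "512" -> 512 (MiB, via ParseUnit)
  "acpi": constants.VTYPE_BOOL,          # "true" -> True
  "boot_order": constants.VTYPE_STRING,  # left as-is
  }

utils.ForceDictType(params, key_types)
print params
# Unknown keys or values that cannot be converted raise
# errors.TypeEnforcementError.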
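
The reworked FormatUnit() now takes a mandatory units argument: 'h' keeps the old auto-scaling behaviour and appends a suffix, while 'm', 'g' and 't' force a fixed unit and omit the suffix.

from ganeti import utils

print utils.FormatUnit(512, 'h')    # '512M'
print utils.FormatUnit(2048, 'h')   # '2.0G'
print utils.FormatUnit(2048, 'm')   # '2048'
print utils.FormatUnit(2048, 'g')   # '2.0'
# Any other units value raises errors.ProgrammerError.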
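
A minimal call to the new SetupLogging() helper; the log file path and program name are illustrative.

import logging

from ganeti import utils

# With debug=True both handlers accept DEBUG messages; without it the log
# file receives INFO and above while stderr only gets CRITICAL ones.
utils.SetupLogging("/tmp/ganeti-demo.log", debug=True, stderr_logging=True,
                   program="demo")
logging.info("logging is now configured")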
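
The new FileLock class wraps fcntl.flock(); the lock file path below is illustrative and must be writable by the process. The nested try blocks keep the sketch compatible with the older Python 2 interpreters this module still supports.

from ganeti import utils
from ganeti import errors

lock = utils.FileLock("/tmp/ganeti-demo.lock")
try:
  try:
    # Retry for up to ten seconds, then give up with errors.LockError.
    lock.Exclusive(timeout=10)
    # ... critical section ...
    lock.Unlock()
  except errors.LockError, err:
    print "could not acquire the lock: %s" % err
finally:
  # Closing the underlying file also drops any lock still held.
  lock.Close()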
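
Finally, a sketch of the FieldSet helper that closes the patch; the field names and the regex pattern are invented for the example.

from ganeti import utils

fields = utils.FieldSet("name", "status", r"disk\.size/\d+")

# Both static names and regex patterns must match the whole field name.
print bool(fields.Matches("name"))         # True
print bool(fields.Matches("disk.size/0"))  # True, via the regex item
print bool(fields.Matches("disk.size"))    # False

# NonMatching() lists the requested fields the set does not know about.
print fields.NonMatching(["name", "oper_ram", "disk.size/1"])  # ['oper_ram']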