+
+
def TcpPing(target, port, timeout=10, live_port_needed=False, source=None):
  """Simple ping implementation using TCP connect(2).

  Check if the given IP is reachable by attempting a TCP connect
  to it.

  @type target: str
  @param target: the IP or hostname to ping
  @type port: int
  @param port: the port to connect to
  @type timeout: int
  @param timeout: the timeout on the connection attempt
  @type live_port_needed: boolean
  @param live_port_needed: whether a closed port will cause the
      function to return failure, as if there was a timeout
  @type source: str or None
  @param source: if specified, will cause the connect to be made
      from this specific source address; failures to bind other
      than C{EADDRNOTAVAIL} will be ignored
  @rtype: boolean
  @return: True if the connection succeeded (or the port was closed
      and live_port_needed is False), False otherwise

  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

  success = False

  if source is not None:
    try:
      sock.bind((source, 0))
    except socket.error, (errcode, errstring):
      # NOTE(review): on EADDRNOTAVAIL this only re-assigns 'success'
      # (already False) and then falls through to the connect below,
      # which can still set it to True; all other bind errors are
      # silently ignored, as the docstring describes. Confirm this is
      # the intended handling.
      if errcode == errno.EADDRNOTAVAIL:
        success = False

  sock.settimeout(timeout)

  try:
    sock.connect((target, port))
    sock.close()
    success = True
  except socket.timeout:
    # No answer within 'timeout' seconds: host unreachable or dropping
    success = False
  except socket.error, (errcode, errstring):
    # A refused connection means the host is up but the port is closed;
    # count that as success unless a live port was explicitly required
    success = (not live_port_needed) and (errcode == errno.ECONNREFUSED)

  return success
+
+
def OwnIpAddress(address):
  """Check if the current host has the given IP address.

  Currently this is done by TCP-pinging the address from the loopback
  address.

  @type address: string
  @param address: the address to check
  @rtype: bool
  @return: True if we own the address

  """
  # If we can connect from 127.0.0.1 to the node daemon port on this
  # address, the address must be configured on the local machine
  return TcpPing(address, constants.DEFAULT_NODED_PORT,
                 source=constants.LOCALHOST_IP_ADDRESS)
+
+
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the sorted list of all entries not starting with a dot

  """
  return sorted(entry for entry in os.listdir(path)
                if not entry.startswith("."))
+
+
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @type user: str or int
  @param user: the user name or user id to look up
  @param default: value to return when the user does not exist
  @rtype: str or None
  @return: the user's home directory, or 'default' if not found

  @raise errors.ProgrammerError: if 'user' is neither a string nor
      an integer

  """
  try:
    if isinstance(user, basestring):
      result = pwd.getpwnam(user)
    elif isinstance(user, (int, long)):
      result = pwd.getpwuid(user)
    else:
      raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                   type(user))
  except KeyError:
    # pwd raises KeyError for an unknown name/uid
    return default
  return result.pw_dir
+
+
def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str
  @return: a freshly generated UUID string

  """
  # The kernel generates a new UUID on every read of this file
  uuid_file = open("/proc/sys/kernel/random/uuid", "r")
  try:
    data = uuid_file.read(128)
  finally:
    uuid_file.close()
  return data.rstrip("\n")
+
+
def GenerateSecret():
  """Generates a random secret.

  This will generate a pseudo-random secret, and return its sha digest
  (so that it can be used where an ASCII string is needed).

  @rtype: str
  @return: a sha1 hexdigest of a block of 64 random bytes

  """
  random_bytes = os.urandom(64)
  return sha1(random_bytes).hexdigest()
+
+
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)

  @raise errors.GenericError: if a directory cannot be created, or an
      existing path with the same name is not a directory

  """
  for dir_name, dir_mode in dirs:
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError, err:
      # An already-existing path is acceptable; any other error is fatal
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    # The path may have existed beforehand (EEXIST above) as something
    # other than a directory; verify it really is one
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)
+
+
def ReadFile(file_name, size=None):
  """Reads a file.

  @type file_name: str
  @param file_name: the path of the file to read
  @type size: None or int
  @param size: Read at most size bytes
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  fd = open(file_name, "r")
  try:
    if size is None:
      return fd.read()
    return fd.read(size)
  finally:
    fd.close()
+
+
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type dry_run: boolean
  @param dry_run: if True, the target file is never replaced; the
      temporary file is still written and then removed
  @type backup: boolean
  @param backup: if True and the target exists, create a backup copy
      of it before overwriting
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of 'fn' and 'data' must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temporary file in the same directory and rename it over
  # the target, so readers see either the old or the new content in full
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # Flush content to disk before the rename makes it visible
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
+
+
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int or None
  @return: the first non-used index in the sequence, or None if the
      sequence is gapless

  """
  expected = base
  for elem in seq:
    assert elem >= base, "Passed element is higher than base offset"
    if elem > expected:
      # 'expected' does not occur in the sequence, so it is free
      return expected
    expected += 1
  return None
+
+
def all(seq, pred=bool):
  """Returns True if pred(x) is True for every element in the iterable.

  NOTE: this deliberately shadows the builtin of the same name
  (presumably as a compatibility helper for old Python versions --
  verify before removing).

  @param seq: the iterable to check
  @param pred: predicate applied to each element, by default C{bool}

  """
  for elem in itertools.ifilterfalse(pred, seq):
    # The first element failing the predicate decides the result
    return False
  return True
+
+
def any(seq, pred=bool):
  """Returns True if pred(x) is True for at least one element in the iterable.

  NOTE: this deliberately shadows the builtin of the same name
  (presumably as a compatibility helper for old Python versions --
  verify before removing).

  @param seq: the iterable to check
  @param pred: predicate applied to each element, by default C{bool}

  """
  for elem in itertools.ifilter(pred, seq):
    # The first element passing the predicate decides the result
    return True
  return False
+
+
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  result = []
  for item in seq:
    if item not in seen:
      seen.add(item)
      result.append(item)
  return result
+
+
def IsValidMac(mac):
  """Predicate to check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct; only
  the colon-separated format is accepted.  Hex digits are matched
  case-insensitively, since uppercase MACs are equally valid.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: boolean
  @return: True if the MAC seems valid

  """
  # Six groups of two hex digits, each followed by a colon or the end
  # of the string; the anchors reject any surrounding garbage
  mac_check = re.compile(r"^([0-9a-f]{2}(:|$)){6}$", re.I)
  return mac_check.match(mac) is not None
+
+
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: boolean
  @return: False for negative value, True otherwise

  """
  if duration >= 0:
    time.sleep(duration)
    return True
  return False
+
+
def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.

  @type fd: int
  @param fd: the file descriptor
  @type retries: int
  @param retries: how many retries to make, in case we get any
      other error than EBADF

  """
  try:
    os.close(fd)
  except OSError, err:
    if err.errno != errno.EBADF:
      # The error is not "bad file descriptor", so the close may not
      # have taken effect; retry a limited number of times
      if retries > 0:
        _CloseFDNoErr(fd, retries - 1)
    # else either it's closed already or we're out of retries, so we
    # ignore this and go on
+
+
def CloseFDs(noclose_fds=None):
  """Close file descriptors.

  This closes all file descriptors above 2 (i.e. except
  stdin/out/err).

  @type noclose_fds: list or None
  @param noclose_fds: if given, it denotes a list of file descriptor
      that should not be closed

  """
  # Default maximum for the number of available file descriptors.
  if 'SC_OPEN_MAX' in os.sysconf_names:
    try:
      MAXFD = os.sysconf('SC_OPEN_MAX')
      if MAXFD < 0:
        # sysconf() can return -1 when the limit is indeterminate
        MAXFD = 1024
    except OSError:
      MAXFD = 1024
  else:
    MAXFD = 1024
  # Prefer the hard RLIMIT_NOFILE limit; fall back to the sysconf
  # value when the limit is unbounded
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD

  # Iterate through and close all file descriptors (except the standard ones)
  for fd in range(3, maxfd):
    if noclose_fds and fd in noclose_fds:
      continue
    _CloseFDNoErr(fd)
+
+
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon, using the classic double-fork
  technique, then reopens the standard file descriptors (stdin from
  /dev/null, stdout/stderr appended to the given logfile).

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the value zero

  """
  UMASK = 077
  WORKDIR = "/"

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    # Become session leader, losing the controlling terminal
    os.setsid()
    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      # The second child can never reacquire a controlling terminal
      os.chdir(WORKDIR)
      os.umask(UMASK)
    else:
      # exit() or _exit()? See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    os._exit(0) # Exit parent of the first child.

  # Close and reopen the standard descriptors; open(2) returns the
  # lowest free fd, so the order below assigns 0 and 1 as asserted
  for fd in range(3):
    _CloseFDNoErr(fd)
  i = os.open("/dev/null", os.O_RDONLY) # stdin
  assert i == 0, "Can't close/reopen stdin"
  i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
  assert i == 1, "Can't close/reopen stdout"
  # Duplicate standard output to standard error.
  os.dup2(1, 2)
  return 0
+
+
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
      daemon name

  """
  pidfile_name = "%s.pid" % name
  return os.path.join(constants.RUN_GANETI_DIR, pidfile_name)
+
+
def WritePidFile(name):
  """Write the current process pidfile.

  The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}

  @type name: str
  @param name: the daemon name to use
  @raise errors.GenericError: if the pid file already exists and
      points to a live process

  """
  pid = os.getpid()
  pidfilename = DaemonPidFileName(name)
  # Refuse to clobber the pidfile of a daemon that is still running
  if IsProcessAlive(ReadPidFile(pidfilename)):
    raise errors.GenericError("%s contains a live process" % pidfilename)

  WriteFile(pidfilename, data="%d\n" % pid)
+
+
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  pidfilename = DaemonPidFileName(name)
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(pidfilename)
  except Exception:
    # Deliberately best-effort: a missing or unremovable pidfile must
    # not block daemon shutdown.  A bare "except:" would additionally
    # have swallowed SystemExit/KeyboardInterrupt, hence the narrower
    # clause.
    pass
+
+
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
                  a SIGKILL will be sent. If not positive, no such checking
                  will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
      sending signals, since it's our own child and otherwise it
      would remain as zombie

  @raise errors.ProgrammerError: if pid is not strictly positive

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    os.kill(pid, signal_)
    if wait:
      try:
        # Reap the child (if it already died) without blocking
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return
  _helper(pid, signal_, waitpid)
  if timeout <= 0:
    return

  # Wait up to $timeout seconds
  end = time.time() + timeout
  wait = 0.01
  while time.time() < end and IsProcessAlive(pid):
    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
      if result_pid > 0:
        # The process was reaped; no need to keep polling
        break
    except OSError:
      # Not our child; keep polling via IsProcessAlive instead
      pass
    time.sleep(wait)
    # Make wait time longer for next try
    if wait < 0.1:
      wait *= 1.5

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
+
+
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
      if the a given object is valid; the default value is
      os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  for directory in search_path:
    candidate = os.path.sep.join([directory, name])
    if test(candidate):
      return candidate
  return None
+
+
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))
  return None
+
+
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  # Work in whole microseconds to avoid float rounding surprises
  total_micro = int(value * 1000000)
  seconds = total_micro // 1000000
  microseconds = total_micro % 1000000

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
+
+
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  seconds, microseconds = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  # Keep the exact original arithmetic (multiply by 1e-6) so results
  # are bit-for-bit identical to SplitTime's inverse
  return float(seconds) + (float(microseconds) * 0.000001)
+
+
def GetNodeDaemonPort():
  """Get the node daemon port for this cluster.

  Note that this routine does not read a ganeti-specific file, but
  instead uses C{socket.getservbyname} to allow pre-customization of
  this parameter outside of Ganeti.

  @rtype: int
  @return: the port registered for the "ganeti-noded" TCP service, or
      L{constants.DEFAULT_NODED_PORT} when no such service is defined

  """
  try:
    port = socket.getservbyname("ganeti-noded", "tcp")
  except socket.error:
    # The service is not defined (e.g. in /etc/services); fall back
    # to the compiled-in default
    port = constants.DEFAULT_NODED_PORT

  return port
+
+
def SetupLogging(logfile, debug=False, stderr_logging=False, program="",
                 multithreaded=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: boolean
  @param debug: whether to enable debug messages too or
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @raise EnvironmentError: if we can't open the log file and
      stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  if multithreaded:
    fmt += "/%(threadName)s"
  if debug:
    fmt += " %(module)s:%(lineno)s"
  fmt += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers.  Iterate over a *copy* of the
  # handler list: removing entries from the live list while iterating
  # it would skip every other handler.
  for handler in root_logger.handlers[:]:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  # this can fail, if the logging directories are not setup or we have
  # a permisssion problem; in this case, it's best to log but ignore
  # the error if stderr_logging is True, and if false we re-raise the
  # exception since otherwise we could run but without any logs at all
  try:
    logfile_handler = logging.FileHandler(logfile)
    logfile_handler.setFormatter(formatter)
    if debug:
      logfile_handler.setLevel(logging.DEBUG)
    else:
      logfile_handler.setLevel(logging.INFO)
    root_logger.addHandler(logfile_handler)
  except EnvironmentError:
    if stderr_logging:
      logging.exception("Failed to enable logging to file '%s'", logfile)
    else:
      # we need to re-raise the exception
      raise
+
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  @type path: str
  @param path: the path to check
  @rtype: boolean
  @return: True if the path is absolute and unchanged by normalization

  """
  return os.path.isabs(path) and os.path.normpath(path) == path
+
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  fd = open(fname, "r")
  try:
    # Seek to at most 4KB before the end and read from there
    fd.seek(0, 2)
    file_size = fd.tell()
    fd.seek(max(0, file_size - 4096), 0)
    raw_data = fd.read()
  finally:
    fd.close()

  return raw_data.splitlines()[-lines:]
+
+
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to disply/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we again encode it
  via 'string_escape' which converts '\n' into '\\n' so that log
  messages remain one-line.

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  # Replace any non-ASCII character with a backslash escape
  text = text.encode('ascii', 'backslashreplace')
  # 'string_escape' is a Python 2-only codec; it escapes control
  # characters (e.g. newlines) so the result stays on a single line
  text = text.encode('string_escape')
  return text
+
+
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  quoted = ["'%s'" % name for name in names]
  return ", ".join(quoted)
+
+
def LockedMethod(fn):
  """Synchronized object access decorator.

  This decorator is intended to protect access to an object using the
  object's own lock which is hardcoded to '_lock'.

  @type fn: callable
  @param fn: the method to wrap
  @return: a wrapper that acquires C{self._lock} around the call

  """
  def _LockDebug(*args, **kwargs):
    # Lock tracing is controlled by the module-level 'debug_locks' flag
    if debug_locks:
      logging.debug(*args, **kwargs)

  def wrapper(self, *args, **kwargs):
    # The decorated object must carry its own lock
    assert hasattr(self, '_lock')
    lock = self._lock
    _LockDebug("Waiting for %s", lock)
    lock.acquire()
    try:
      _LockDebug("Acquired %s", lock)
      result = fn(self, *args, **kwargs)
    finally:
      _LockDebug("Releasing %s", lock)
      lock.release()
      _LockDebug("Released %s", lock)
    return result
  return wrapper
+
+
def LockFile(fd):
  """Locks a file using POSIX locks.

  @type fd: int
  @param fd: the file descriptor we need to lock
  @raise errors.LockError: if the file is already locked by someone else
  @raise IOError: for any other flock(2) failure

  """
  try:
    # LOCK_NB makes flock fail with EAGAIN instead of blocking
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise
+
+
class FileLock(object):
  """Utility class for file locks.

  Wraps C{fcntl.flock} on a dedicated lock file, offering exclusive,
  shared and unlock operations with optional timeout.

  """
  def __init__(self, filename):
    """Constructor for FileLock.

    This will open the file denoted by the I{filename} argument.

    @type filename: str
    @param filename: path to the file to be locked

    """
    self.filename = filename
    # Mode "w" creates the file if missing and truncates an existing
    # one; only the descriptor matters for flock(2)
    self.fd = open(self.filename, "w")

  def __del__(self):
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    if self.fd:
      self.fd.close()
      # Mark the lock as closed; _flock asserts on this
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"

    if timeout is not None:
      # A timeout implies polling in non-blocking mode
      flag |= fcntl.LOCK_NB
      timeout_end = time.time() + timeout

    # Blocking doesn't have effect with timeout
    elif not blocking:
      flag |= fcntl.LOCK_NB
      timeout_end = None

    # NOTE(review): when blocking is True and timeout is None,
    # 'timeout_end' is never assigned; the EAGAIN branch below would
    # then raise NameError.  In blocking mode flock should not fail
    # with EAGAIN, but confirm this is intended.
    retry = True
    while retry:
      try:
        fcntl.flock(self.fd, flag)
        retry = False
      except IOError, err:
        if err.errno in (errno.EAGAIN, ):
          if timeout_end is not None and time.time() < timeout_end:
            # Wait before trying again
            time.sleep(max(0.1, min(1.0, timeout)))
          else:
            # Non-blocking (or timed out) and still locked elsewhere
            raise errors.LockError(errmsg)
        else:
          logging.exception("fcntl.flock failed")
          raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
        can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
        (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
+
+
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers

    """
    # Normalize the argument to a set of signal numbers
    if isinstance(signum, (int, long)):
      self.signum = set([signum])
    else:
      self.signum = set(signum)

    self.called = False

    # Maps signal number -> handler that was installed before us
    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
      except:
        # Reset all handlers
        self.Reset()
        # Here we have a race condition: a handler may have already been called,
        # but there's not much we can do about it at this point.
        raise

  def __del__(self):
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True
+
+
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    # Each item is anchored so it must match the whole field name
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one"""
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either False or a regular expression match object

    """
    # ifilter(None, ...) drops non-matches; the first match object
    # (always truthy) is returned as-is so callers can use its groups
    for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
      return m
    return False

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]