return RunResult(exitcode, signal_, out, err, strcmd)
+
def _RunCmdPipe(cmd, env, via_shell, cwd):
"""Run a command and return its output.
fcntl.fcntl(fd, fcntl.F_SETFL, status | os.O_NONBLOCK)
while fdmap:
- for fd, event in poller.poll():
+ try:
+ pollresult = poller.poll()
+ except EnvironmentError, eerr:
+ if eerr.errno == errno.EINTR:
+ continue
+ raise
+ except select.error, serr:
+ if serr[0] == errno.EINTR:
+ continue
+ raise
+
+ for fd, event in pollresult:
if event & select.POLLIN or event & select.POLLPRI:
data = fdmap[fd][1].read()
# no data from read signifies EOF (the same as POLLHUP)
raise
+def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
+ """Renames a file.
+
+ @type old: string
+ @param old: Original path
+ @type new: string
+ @param new: New path
+ @type mkdir: bool
+ @param mkdir: Whether to create target directory if it doesn't exist
+ @type mkdir_mode: int
+ @param mkdir_mode: Mode for newly created directories
+
+ """
+ try:
+ return os.rename(old, new)
+ except OSError, err:
+ # In at least one use case of this function, the job queue, directory
+ # creation is very rare. Checking for the directory before renaming is not
+ # as efficient.
+ if mkdir and err.errno == errno.ENOENT:
+ # Create directory and try again
+ os.makedirs(os.path.dirname(new), mkdir_mode)
+ return os.rename(old, new)
+ raise
+
+
def _FingerprintFile(filename):
"""Compute the fingerprint of a file.
@type pidfile: string
@param pidfile: path to the file containing the pid
@rtype: int
- @return: The process id, if the file exista and contains a valid PID,
+ @return: The process id, if the file exists and contains a valid PID,
otherwise 0
"""
try:
val = int(val)
except ValueError, err:
- raise errors.OpPrereqError("Invalid %s size: %s" % (item, str(err)))
+ raise errors.OpPrereqError("Invalid %s size: %s" % (item, err))
beparams[item] = val
if item in (constants.BE_AUTO_BALANCE):
val = beparams[item]
is always an int in MiB.
"""
- m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', input_string)
+ m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
if not m:
raise errors.UnitParseError("Invalid format")
return True
-def Daemonize(logfile, noclose_fds=None):
- """Daemonize the current process.
+def _CloseFDNoErr(fd, retries=5):
+ """Close a file descriptor ignoring errors.
- This detaches the current process from the controlling terminal and
- runs it in the background as a daemon.
+ @type fd: int
+ @param fd: the file descriptor
+ @type retries: int
+ @param retries: how many retries to make, in case we get any
+ other error than EBADF
+
+ """
+ try:
+ os.close(fd)
+ except OSError, err:
+ if err.errno != errno.EBADF:
+ if retries > 0:
+ _CloseFDNoErr(fd, retries - 1)
+ # else either it's closed already or we're out of retries, so we
+ # ignore this and go on
+
+
+def CloseFDs(noclose_fds=None):
+ """Close file descriptors.
+
+ This closes all file descriptors above 2 (i.e. except
+ stdin/out/err).
- @type logfile: str
- @param logfile: the logfile to which we should redirect stdout/stderr
@type noclose_fds: list or None
@param noclose_fds: if given, it denotes a list of file descriptor
that should not be closed
- @rtype: int
- @returns: the value zero
"""
- UMASK = 077
- WORKDIR = "/"
# Default maximum for the number of available file descriptors.
if 'SC_OPEN_MAX' in os.sysconf_names:
try:
MAXFD = 1024
else:
MAXFD = 1024
+ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+ if (maxfd == resource.RLIM_INFINITY):
+ maxfd = MAXFD
+
+ # Iterate through and close all file descriptors (except the standard ones)
+ for fd in range(3, maxfd):
+ if noclose_fds and fd in noclose_fds:
+ continue
+ _CloseFDNoErr(fd)
+
+
+def Daemonize(logfile):
+ """Daemonize the current process.
+
+ This detaches the current process from the controlling terminal and
+ runs it in the background as a daemon.
+
+ @type logfile: str
+ @param logfile: the logfile to which we should redirect stdout/stderr
+ @rtype: int
+ @returns: the value zero
+
+ """
+ UMASK = 077
+ WORKDIR = "/"
# this might fail
pid = os.fork()
os._exit(0) # Exit parent (the first child) of the second child.
else:
os._exit(0) # Exit parent of the first child.
- maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
- if (maxfd == resource.RLIM_INFINITY):
- maxfd = MAXFD
- # Iterate through and close all file descriptors.
- for fd in range(0, maxfd):
- if noclose_fds and fd in noclose_fds:
- continue
- try:
- os.close(fd)
- except OSError: # ERROR, fd wasn't open to begin with (ignored)
- pass
- os.open(logfile, os.O_RDWR|os.O_CREAT|os.O_APPEND, 0600)
- # Duplicate standard input to standard output and standard error.
- os.dup2(0, 1) # standard output (1)
- os.dup2(0, 2) # standard error (2)
+ for fd in range(3):
+ _CloseFDNoErr(fd)
+ i = os.open("/dev/null", os.O_RDONLY) # stdin
+ assert i == 0, "Can't close/reopen stdin"
+ i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
+ assert i == 1, "Can't close/reopen stdout"
+ # Duplicate standard output to standard error.
+ os.dup2(1, 2)
return 0
_helper(pid, signal_, waitpid)
if timeout <= 0:
return
+
+ # Wait up to $timeout seconds
end = time.time() + timeout
+ wait = 0.01
while time.time() < end and IsProcessAlive(pid):
- time.sleep(0.1)
+ try:
+ (result_pid, _) = os.waitpid(pid, os.WNOHANG)
+ if result_pid > 0:
+ break
+ except OSError:
+ pass
+ time.sleep(wait)
+ # Make wait time longer for next try
+ if wait < 0.1:
+ wait *= 1.5
+
if IsProcessAlive(pid):
+ # Kill process if it's still alive
_helper(pid, signal.SIGKILL, waitpid)
return port
-def SetupLogging(logfile, debug=False, stderr_logging=False, program=""):
+def SetupLogging(logfile, debug=False, stderr_logging=False, program="",
+ multithreaded=False):
"""Configures the logging module.
@type logfile: str
@param stderr_logging: whether we should also log to the standard error
@type program: str
@param program: the name under which we should log messages
+ @type multithreaded: boolean
+ @param multithreaded: if True, will add the thread name to the log file
@raise EnvironmentError: if we can't open the log file and
stderr logging is disabled
"""
- fmt = "%(asctime)s: " + program + " "
+ fmt = "%(asctime)s: " + program + " pid=%(process)d"
+ if multithreaded:
+ fmt += "/%(threadName)s"
if debug:
- fmt += ("pid=%(process)d/%(threadName)s %(levelname)s"
- " %(module)s:%(lineno)s %(message)s")
- else:
- fmt += "pid=%(process)d %(levelname)s %(message)s"
+ fmt += " %(module)s:%(lineno)s"
+ fmt += " %(levelname)s %(message)s"
formatter = logging.Formatter(fmt)
root_logger = logging.getLogger("")
# Remove all previously setup handlers
for handler in root_logger.handlers:
+ handler.close()
root_logger.removeHandler(handler)
if stderr_logging:
else:
logfile_handler.setLevel(logging.INFO)
root_logger.addHandler(logfile_handler)
- except EnvironmentError, err:
+ except EnvironmentError:
if stderr_logging:
logging.exception("Failed to enable logging to file '%s'", logfile)
else:
raise
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @type fname: string
  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return
  @rtype: list
  @return: list of (at most) C{lines} last lines of the file

  """
  # Slicing with rows[-0:] would return the WHOLE list, so handle
  # non-positive requests explicitly instead of falling through
  if lines <= 0:
    return []

  fd = open(fname, "r")
  try:
    fd.seek(0, 2)            # seek to end of file
    pos = fd.tell()
    pos = max(0, pos - 4096) # read at most the trailing 4KB
    fd.seek(pos, 0)
    raw_data = fd.read()
  finally:
    fd.close()

  rows = raw_data.splitlines()
  return rows[-lines:]
+
+
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we again encode it
  via 'string_escape' which converts newline characters into the
  two-character sequence backslash-n, so that log messages remain
  one-line.

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  # NOTE(review): 'string_escape' is a Python 2-only codec (and
  # encoding a str via the 'ascii' codec implicitly decodes first);
  # this will need rework if the module is ever ported to Python 3.
  text = text.encode('ascii', 'backslashreplace')
  text = text.encode('string_escape')
  return text
+
+
def LockedMethod(fn):
"""Synchronized object access decorator.