#
#
-# Copyright (C) 2006, 2007 Google Inc.
+# Copyright (C) 2006, 2007, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import errors
from ganeti import constants
from ganeti import compat
-from ganeti import netutils
_locksheld = []
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
+UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
+ '[a-f0-9]{4}-[a-f0-9]{12}$')
+
# Certificate verification results
(CERT_WARNING,
CERT_ERROR) = range(1, 3)
_MCL_CURRENT = 1
_MCL_FUTURE = 2
+#: MAC checker regexp
+_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
+
+(_TIMEOUT_NONE,
+ _TIMEOUT_TERM,
+ _TIMEOUT_KILL) = range(3)
+
+#: Shell param checker regexp
+_SHELLPARAM_REGEX = re.compile(r"^[-a-zA-Z0-9._+/:%@]+$")
+
+#: Unit checker regexp
+_PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$")
+
+#: ASN1 time regexp
+_ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
+
+_SORTER_RE = re.compile("^%s(.*)$" % (8 * "(\D+|\d+)?"))
+_SORTER_DIGIT = re.compile("^\d+$")
+
class RunResult(object):
"""Holds the result of running external programs.
"failed", "fail_reason", "cmd"]
- def __init__(self, exit_code, signal_, stdout, stderr, cmd):
+ def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
+ timeout):
self.cmd = cmd
self.exit_code = exit_code
self.signal = signal_
self.stderr = stderr
self.failed = (signal_ is not None or exit_code != 0)
+ fail_msgs = []
if self.signal is not None:
- self.fail_reason = "terminated by signal %s" % self.signal
+ fail_msgs.append("terminated by signal %s" % self.signal)
elif self.exit_code is not None:
- self.fail_reason = "exited with exit code %s" % self.exit_code
+ fail_msgs.append("exited with exit code %s" % self.exit_code)
else:
- self.fail_reason = "unable to determine termination reason"
+ fail_msgs.append("unable to determine termination reason")
+
+ if timeout_action == _TIMEOUT_TERM:
+ fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
+ elif timeout_action == _TIMEOUT_KILL:
+ fail_msgs.append(("force termination after timeout of %.2f seconds"
+ " and linger for another %.2f seconds") %
+ (timeout, constants.CHILD_LINGER_TIMEOUT))
+
+ if fail_msgs and self.failed:
+ self.fail_reason = CommaJoin(fail_msgs)
if self.failed:
logging.debug("Command '%s' failed (%s); output: %s",
return cmd_env
-def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
+def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
+ interactive=False, timeout=None):
"""Execute a (shell) command.
The command should not read from its standard input, as it will be
directory for the command; the default will be /
@type reset_env: boolean
@param reset_env: whether to reset or keep the default os environment
+ @type interactive: boolean
+  @param interactive: whether we pipe stdin, stdout and stderr
+    (default behaviour) or run the command interactively
+ @type timeout: int
+ @param timeout: If not None, timeout in seconds until child process gets
+ killed
@rtype: L{RunResult}
@return: RunResult instance
@raise errors.ProgrammerError: if we call this when forks are disabled
if no_fork:
raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
+ if output and interactive:
+ raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
+ " not be provided at the same time")
+
if isinstance(cmd, basestring):
strcmd = cmd
shell = True
try:
if output is None:
- out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
+ out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
+ interactive, timeout)
else:
+ timeout_action = _TIMEOUT_NONE
status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
out = err = ""
except OSError, err:
exitcode = None
signal_ = -status
- return RunResult(exitcode, signal_, out, err, strcmd)
+ return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
+
+
+def SetupDaemonEnv(cwd="/", umask=077):
+ """Setup a daemon's environment.
+
+ This should be called between the first and second fork, due to
+ setsid usage.
+
+ @param cwd: the directory to which to chdir
+ @param umask: the umask to setup
+
+ """
+ os.chdir(cwd)
+ os.umask(umask)
+ os.setsid()
+
+
+def SetupDaemonFDs(output_file, output_fd):
+ """Setups up a daemon's file descriptors.
+
+ @param output_file: if not None, the file to which to redirect
+ stdout/stderr
+ @param output_fd: if not None, the file descriptor for stdout/stderr
+
+ """
+ # check that at most one is defined
+ assert [output_file, output_fd].count(None) >= 1
+
+ # Open /dev/null (read-only, only for stdin)
+ devnull_fd = os.open(os.devnull, os.O_RDONLY)
+
+ if output_fd is not None:
+ pass
+ elif output_file is not None:
+ # Open output file
+ try:
+ output_fd = os.open(output_file,
+ os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
+ except EnvironmentError, err:
+ raise Exception("Opening output file failed: %s" % err)
+ else:
+ output_fd = os.open(os.devnull, os.O_WRONLY)
+
+ # Redirect standard I/O
+ os.dup2(devnull_fd, 0)
+ os.dup2(output_fd, 1)
+ os.dup2(output_fd, 2)
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
finally:
_CloseFDNoErr(errpipe_write)
- # Wait for daemon to be started (or an error message to arrive) and read
- # up to 100 KB as an error message
+ # Wait for daemon to be started (or an error message to
+ # arrive) and read up to 100 KB as an error message
errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
finally:
_CloseFDNoErr(errpipe_read)
_CloseFDNoErr(pidpipe_read)
# First child process
- os.chdir("/")
- os.umask(077)
- os.setsid()
+ SetupDaemonEnv()
# And fork for the second time
pid = os.fork()
# Exit first child process
os._exit(0) # pylint: disable-msg=W0212
- # Make sure pipe is closed on execv* (and thereby notifies original process)
+ # Make sure pipe is closed on execv* (and thereby notifies
+ # original process)
SetCloseOnExecFlag(errpipe_write, True)
# List of file descriptors to be left open
# Open PID file
if pidfile:
- try:
- # TODO: Atomic replace with another locked file instead of writing into
- # it after creating
- fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
-
- # Lock the PID file (and fail if not possible to do so). Any code
- # wanting to send a signal to the daemon should try to lock the PID
- # file before reading it. If acquiring the lock succeeds, the daemon is
- # no longer running and the signal should not be sent.
- LockFile(fd_pidfile)
-
- os.write(fd_pidfile, "%d\n" % os.getpid())
- except Exception, err:
- raise Exception("Creating and locking PID file failed: %s" % err)
+ fd_pidfile = WritePidFile(pidfile)
# Keeping the file open to hold the lock
noclose_fds.append(fd_pidfile)
else:
fd_pidfile = None
- # Open /dev/null
- fd_devnull = os.open(os.devnull, os.O_RDWR)
-
- assert not output or (bool(output) ^ (fd_output is not None))
-
- if fd_output is not None:
- pass
- elif output:
- # Open output file
- try:
- # TODO: Implement flag to set append=yes/no
- fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
- except EnvironmentError, err:
- raise Exception("Opening output file failed: %s" % err)
- else:
- fd_output = fd_devnull
-
- # Redirect standard I/O
- os.dup2(fd_devnull, 0)
- os.dup2(fd_output, 1)
- os.dup2(fd_output, 2)
+ SetupDaemonFDs(output, fd_output)
# Send daemon PID to parent
RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
except: # pylint: disable-msg=W0702
try:
# Report errors to original process
- buf = str(sys.exc_info()[1])
-
- RetryOnSignal(os.write, errpipe_write, buf)
+ WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
except: # pylint: disable-msg=W0702
# Ignore errors in error handling
pass
os._exit(1) # pylint: disable-msg=W0212
-def _RunCmdPipe(cmd, env, via_shell, cwd):
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  If no message is given, a placeholder is written instead, so the
  reader still receives something.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is not None:
    RetryOnSignal(os.write, fd, err or "<unknown error>")
+
+
def _CheckIfAlive(child):
  """Raises L{RetryAgain} if the given child process is still alive.

  @raises RetryAgain: if the child has not terminated yet

  """
  if child.poll() is not None:
    # Process has terminated; nothing to do
    return
  raise RetryAgain()
+
+
def _WaitForProcess(child, timeout):
  """Waits for a child process to terminate, up to the given timeout.

  Simply returns once the process has exited or the timeout expired,
  whichever comes first.

  """
  wait_time = max(0, timeout)
  try:
    Retry(_CheckIfAlive, (1.0, 1.2, 5.0), wait_time, args=[child])
  except RetryTimeout:
    # Process still alive after the timeout; the caller decides what to
    # do about it
    pass
+
+
+def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout,
+ _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
"""Run a command and return its output.
@type cmd: string or list
@param via_shell: if we should run via the shell
@type cwd: string
@param cwd: the working directory for the program
+ @type interactive: boolean
+ @param interactive: Run command interactive (without piping)
+ @type timeout: int
+  @param timeout: Timeout after which the program gets terminated
@rtype: tuple
@return: (out, err, status)
"""
poller = select.poll()
+
+ stderr = subprocess.PIPE
+ stdout = subprocess.PIPE
+ stdin = subprocess.PIPE
+
+ if interactive:
+ stderr = stdout = stdin = None
+
child = subprocess.Popen(cmd, shell=via_shell,
- stderr=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stdin=subprocess.PIPE,
+ stderr=stderr,
+ stdout=stdout,
+ stdin=stdin,
close_fds=True, env=env,
cwd=cwd)
- child.stdin.close()
- poller.register(child.stdout, select.POLLIN)
- poller.register(child.stderr, select.POLLIN)
out = StringIO()
err = StringIO()
- fdmap = {
- child.stdout.fileno(): (out, child.stdout),
- child.stderr.fileno(): (err, child.stderr),
- }
- for fd in fdmap:
- SetNonblockFlag(fd, True)
-
- while fdmap:
- pollresult = RetryOnSignal(poller.poll)
-
- for fd, event in pollresult:
- if event & select.POLLIN or event & select.POLLPRI:
- data = fdmap[fd][1].read()
- # no data from read signifies EOF (the same as POLLHUP)
- if not data:
+
+ linger_timeout = None
+
+ if timeout is None:
+ poll_timeout = None
+ else:
+ poll_timeout = RunningTimeout(timeout, True).Remaining
+
+ msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
+ (cmd, child.pid))
+ msg_linger = ("Command %s (%d) run into linger timeout, killing" %
+ (cmd, child.pid))
+
+ timeout_action = _TIMEOUT_NONE
+
+ if not interactive:
+ child.stdin.close()
+ poller.register(child.stdout, select.POLLIN)
+ poller.register(child.stderr, select.POLLIN)
+ fdmap = {
+ child.stdout.fileno(): (out, child.stdout),
+ child.stderr.fileno(): (err, child.stderr),
+ }
+ for fd in fdmap:
+ SetNonblockFlag(fd, True)
+
+ while fdmap:
+ if poll_timeout:
+ pt = poll_timeout() * 1000
+ if pt < 0:
+ if linger_timeout is None:
+ logging.warning(msg_timeout)
+ if child.poll() is None:
+ timeout_action = _TIMEOUT_TERM
+ IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
+ linger_timeout = RunningTimeout(_linger_timeout, True).Remaining
+ pt = linger_timeout() * 1000
+ if pt < 0:
+ break
+ else:
+ pt = None
+
+ pollresult = RetryOnSignal(poller.poll, pt)
+
+ for fd, event in pollresult:
+ if event & select.POLLIN or event & select.POLLPRI:
+ data = fdmap[fd][1].read()
+ # no data from read signifies EOF (the same as POLLHUP)
+ if not data:
+ poller.unregister(fd)
+ del fdmap[fd]
+ continue
+ fdmap[fd][0].write(data)
+ if (event & select.POLLNVAL or event & select.POLLHUP or
+ event & select.POLLERR):
poller.unregister(fd)
del fdmap[fd]
- continue
- fdmap[fd][0].write(data)
- if (event & select.POLLNVAL or event & select.POLLHUP or
- event & select.POLLERR):
- poller.unregister(fd)
- del fdmap[fd]
+
+ if timeout is not None:
+ assert callable(poll_timeout)
+
+ # We have no I/O left but it might still run
+ if child.poll() is None:
+ _WaitForProcess(child, poll_timeout())
+
+ # Terminate if still alive after timeout
+ if child.poll() is None:
+ if linger_timeout is None:
+ logging.warning(msg_timeout)
+ timeout_action = _TIMEOUT_TERM
+ IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
+ lt = _linger_timeout
+ else:
+ lt = linger_timeout()
+ _WaitForProcess(child, lt)
+
+ # Okay, still alive after timeout and linger timeout? Kill it!
+ if child.poll() is None:
+ timeout_action = _TIMEOUT_KILL
+ logging.warning(msg_linger)
+ IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
out = out.getvalue()
err = err.getvalue()
status = child.wait()
- return out, err, status
+ return out, err, status, timeout_action
def _RunCmdFile(cmd, env, via_shell, output, cwd):
msg = "'%s' has non-enforceable type %s" % (key, ktype)
raise errors.ProgrammerError(msg)
- if ktype == constants.VTYPE_STRING:
- if not isinstance(target[key], basestring):
+ if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
+ if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
+ pass
+ elif not isinstance(target[key], basestring):
if isinstance(target[key], bool) and not target[key]:
target[key] = ''
else:
return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
-def NiceSort(name_list):
def _NiceSortTryInt(val):
  """Converts a value to an integer if it consists only of digits.

  Empty/None and non-numeric values are returned unchanged.

  """
  if not val or not _SORTER_DIGIT.match(val):
    return val
  return int(val)
+
+
def _NiceSortKey(value):
  """Builds the sort key for a string.

  Splits the value into digit/non-digit groups, converting the numeric
  groups to integers so they compare numerically.

  """
  match = _SORTER_RE.match(value)
  return [_NiceSortTryInt(part) for part in match.groups()]
+
+
def NiceSort(values, key=None):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}. Each name is split into alternating groups of digits or
  non-digits; only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type values: list
  @param values: the names to be sorted
  @type key: callable or None
  @param key: function of one argument to extract a comparison key from each
    list element, must return string
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  if key is None:
    return sorted(values, key=_NiceSortKey)

  # Apply the caller-supplied key first, then our own
  return sorted(values, key=lambda item: _NiceSortKey(key(item)))
def TryConvert(fn, val):
@return: True if the word is 'safe'
"""
- return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
+ return bool(_SHELLPARAM_REGEX.match(word))
def BuildShellCmd(template, *args):
is always an int in MiB.
"""
- m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
+ m = _PARSEUNIT_REGEX.match(str(input_string))
if not m:
raise errors.UnitParseError("Invalid format")
return value
-def AddAuthorizedKey(file_name, key):
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: on an invalid range or CPU ID

  """
  if not cpu_mask:
    return []

  cpu_ids = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    if len(boundaries) > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    # A single ID parses as a degenerate range (lower == higher)
    cpu_ids.extend(range(lower, higher + 1))

  return cpu_ids
+
+
+def AddAuthorizedKey(file_obj, key):
"""Adds an SSH public key to an authorized_keys file.
- @type file_name: str
- @param file_name: path to authorized_keys file
+ @type file_obj: str or file handle
+ @param file_obj: path to authorized_keys file
@type key: str
@param key: string containing key
"""
key_fields = key.split()
- f = open(file_name, 'a+')
+ if isinstance(file_obj, basestring):
+ f = open(file_obj, 'a+')
+ else:
+ f = file_obj
+
try:
nl = True
for line in f:
@param aliases: the list of aliases to add for the hostname
"""
- # FIXME: use WriteFile + fn rather than duplicating its efforts
# Ensure aliases are unique
aliases = UniqueSequence([hostname] + aliases)[1:]
- fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
- try:
- out = os.fdopen(fd, 'w')
+ def _WriteEtcHosts(fd):
+ # Duplicating file descriptor because os.fdopen's result will automatically
+ # close the descriptor, but we would still like to have its functionality.
+ out = os.fdopen(os.dup(fd), "w")
try:
- f = open(file_name, 'r')
- try:
- for line in f:
- fields = line.split()
- if fields and not fields[0].startswith('#') and ip == fields[0]:
- continue
- out.write(line)
-
- out.write("%s\t%s" % (ip, hostname))
- if aliases:
- out.write(" %s" % ' '.join(aliases))
- out.write('\n')
+ for line in ReadFile(file_name).splitlines(True):
+ fields = line.split()
+ if fields and not fields[0].startswith("#") and ip == fields[0]:
+ continue
+ out.write(line)
- out.flush()
- os.fsync(out)
- os.chmod(tmpname, 0644)
- os.rename(tmpname, file_name)
- finally:
- f.close()
+ out.write("%s\t%s" % (ip, hostname))
+ if aliases:
+ out.write(" %s" % " ".join(aliases))
+ out.write("\n")
+ out.flush()
finally:
out.close()
- except:
- RemoveFile(tmpname)
- raise
+ WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
-def AddHostToEtcHosts(hostname):
+
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  Adds the given host to L{constants.ETC_HOSTS} under its full name,
  with the short (first-component) name as an alias.

  @type hostname: str
  @param hostname: a hostname that will be added to
    L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
def RemoveEtcHostsEntry(file_name, hostname):
@param hostname: the hostname to be removed
"""
- # FIXME: use WriteFile + fn rather than duplicating its efforts
- fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
- try:
- out = os.fdopen(fd, 'w')
+ def _WriteEtcHosts(fd):
+ # Duplicating file descriptor because os.fdopen's result will automatically
+ # close the descriptor, but we would still like to have its functionality.
+ out = os.fdopen(os.dup(fd), "w")
try:
- f = open(file_name, 'r')
- try:
- for line in f:
- fields = line.split()
- if len(fields) > 1 and not fields[0].startswith('#'):
- names = fields[1:]
- if hostname in names:
- while hostname in names:
- names.remove(hostname)
- if names:
- out.write("%s %s\n" % (fields[0], ' '.join(names)))
- continue
-
- out.write(line)
+ for line in ReadFile(file_name).splitlines(True):
+ fields = line.split()
+ if len(fields) > 1 and not fields[0].startswith("#"):
+ names = fields[1:]
+ if hostname in names:
+ while hostname in names:
+ names.remove(hostname)
+ if names:
+ out.write("%s %s\n" % (fields[0], " ".join(names)))
+ continue
- out.flush()
- os.fsync(out)
- os.chmod(tmpname, 0644)
- os.rename(tmpname, file_name)
- finally:
- f.close()
+ out.write(line)
+
+ out.flush()
finally:
out.close()
- except:
- RemoveFile(tmpname)
- raise
+
+ WriteFile(file_name, fn=_WriteEtcHosts, mode=0644)
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  Removes both the full hostname and its short version from
  L{constants.ETC_HOSTS}.

  @type hostname: str
  @param hostname: hostname to be removed from
    L{constants.ETC_HOSTS}

  """
  for name in (hostname, hostname.split(".")[0]):
    RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications treat them
  as separators. Uses the local timezone.

  @rtype: str
  @return: local time as C{YYYY-MM-DD_HH_MM_SS}

  """
  return time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
return ' '.join([ShellQuote(i) for i in args])
class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # String emitted once per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: file handle the script is written to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent += 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    assert self._indent > 0
    self._indent -= 1

  def Write(self, txt, *args):
    """Write line to output file.

    Interpolates C{txt} with C{args} only when arguments are given, so
    literal percent signs in plain text are left untouched.

    """
    assert self._indent >= 0

    if args:
      line = txt % args
    else:
      line = txt

    self._fh.write(self.INDENT_STR * self._indent)
    self._fh.write(line)
    self._fh.write("\n")
+
+
def ListVisibleFiles(path):
"""Returns a list of visible files in a directory.
return result
def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Either the path to the file or the fd must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)

  """
  if [path, fd].count(None) != 1:
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  if fd is not None:
    st = os.fstat(fd)
  else:
    st = os.stat(path)

  return (st.st_dev, st.st_ino, st.st_mtime)
+
+
def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  The device and inode must be identical; an older (or equal) timestamp
  for fi_disk is accepted.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (ours_dev, ours_ino, ours_mtime) = fi_ours

  return (disk_dev == ours_dev and
          disk_ino == ours_ino and
          disk_mtime <= ours_mtime)
+
+
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}; if None, no freshness
      check is done

  """
  # Open read-only so the data isn't touched; O_CREAT so locking works
  # even before the file exists
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  try:
    LockFile(fd)
    if file_id is not None:
      # Refuse to overwrite a file changed since file_id was taken
      if not VerifyFileID(GetFileID(fd=fd), file_id):
        raise errors.LockError("Cannot overwrite file %s, it has been modified"
                               " since last written" % file_name)
    return WriteFile(file_name, **kwargs)
  finally:
    os.close(fd)
+
+
def ReadOneLineFile(file_name, strict=False):
"""Return the first non-empty line from a file.
return [i for i in seq if i not in seen and not seen.add(i)]
def FindDuplicates(seq):
  """Identifies duplicates in a list.

  Does not preserve element order.

  @type seq: sequence
  @param seq: Sequence with source elements
  @rtype: list
  @return: List of duplicate elements from seq

  """
  seen = set()
  duplicates = set()

  for item in seq:
    # First sighting goes to "seen", any further one to "duplicates"
    (duplicates if item in seen else seen).add(item)

  return list(duplicates)
+
+
def NormalizeAndValidateMac(mac):
"""Normalizes and check if a MAC address is valid.
@raise errors.OpPrereqError: If the MAC isn't valid
"""
- mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
- if not mac_check.match(mac):
+ if not _MAC_CHECK.match(mac):
raise errors.OpPrereqError("Invalid MAC address specified: %s" %
mac, errors.ECODE_INVAL)
logging.debug("Memory lock set")
-def Daemonize(logfile, run_uid, run_gid):
+def Daemonize(logfile):
"""Daemonize the current process.
This detaches the current process from the controlling terminal and
@type logfile: str
@param logfile: the logfile to which we should redirect stdout/stderr
- @type run_uid: int
- @param run_uid: Run the child under this uid
- @type run_gid: int
- @param run_gid: Run the child under this gid
@rtype: int
@return: the value zero
"""
# pylint: disable-msg=W0212
# yes, we really want os._exit
- UMASK = 077
- WORKDIR = "/"
+
+ # TODO: do another attempt to merge Daemonize and StartDaemon, or at
+ # least abstract the pipe functionality between them
+
+ # Create pipe for sending error messages
+ (rpipe, wpipe) = os.pipe()
# this might fail
pid = os.fork()
if (pid == 0): # The first child.
- os.setsid()
- # FIXME: When removing again and moving to start-stop-daemon privilege drop
- # make sure to check for config permission and bail out when invoked
- # with wrong user.
- os.setgid(run_gid)
- os.setuid(run_uid)
+ SetupDaemonEnv()
+
# this might fail
pid = os.fork() # Fork a second child.
if (pid == 0): # The second child.
- os.chdir(WORKDIR)
- os.umask(UMASK)
+ _CloseFDNoErr(rpipe)
else:
# exit() or _exit()? See below.
os._exit(0) # Exit parent (the first child) of the second child.
else:
- os._exit(0) # Exit parent of the first child.
+ _CloseFDNoErr(wpipe)
+ # Wait for daemon to be started (or an error message to
+ # arrive) and read up to 100 KB as an error message
+ errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
+ if errormsg:
+ sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
+ rcode = 1
+ else:
+ rcode = 0
+ os._exit(rcode) # Exit parent of the first child.
- for fd in range(3):
- _CloseFDNoErr(fd)
- i = os.open("/dev/null", os.O_RDONLY) # stdin
- assert i == 0, "Can't close/reopen stdin"
- i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600) # stdout
- assert i == 1, "Can't close/reopen stdout"
- # Duplicate standard output to standard error.
- os.dup2(1, 2)
- return 0
+ SetupDaemonFDs(logfile, None)
+ return wpipe
def DaemonPidFileName(name):
return True
-def WritePidFile(name):
+def WritePidFile(pidfile):
"""Write the current process pidfile.
- The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
-
- @type name: str
- @param name: the daemon name to use
- @raise errors.GenericError: if the pid file already exists and
+ @type pidfile: sting
+ @param pidfile: the path to the file to be written
+ @raise errors.LockError: if the pid file already exists and
points to a live process
+ @rtype: int
+ @return: the file descriptor of the lock file; do not close this unless
+ you want to unlock the pid file
"""
- pid = os.getpid()
- pidfilename = DaemonPidFileName(name)
- if IsProcessAlive(ReadPidFile(pidfilename)):
- raise errors.GenericError("%s contains a live process" % pidfilename)
+ # We don't rename nor truncate the file to not drop locks under
+ # existing processes
+ fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
- WriteFile(pidfilename, data="%d\n" % pid)
+ # Lock the PID file (and fail if not possible to do so). Any code
+ # wanting to send a signal to the daemon should try to lock the PID
+ # file before reading it. If acquiring the lock succeeds, the daemon is
+ # no longer running and the signal should not be sent.
+ LockFile(fd_pidfile)
+
+ os.write(fd_pidfile, "%d\n" % os.getpid())
+
+ return fd_pidfile
def RemovePidFile(name):
return rows[-lines:]
-def FormatTimestampWithTZ(secs):
- """Formats a Unix timestamp with the local timezone.
-
- """
- return time.strftime("%F %T %Z", time.gmtime(secs))
-
-
def _ParseAsn1Generalizedtime(value):
"""Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
@type value: string
@param value: ASN1 GENERALIZEDTIME timestamp
+ @return: Seconds since the Epoch (1970-01-01 00:00:00 UTC)
"""
- m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
+ m = _ASN1_TIME_REGEX.match(value)
if m:
# We have an offset
asn1time = m.group(1)
if not_before is not None and not_after is not None:
msg += (" (valid from %s to %s)" %
- (FormatTimestampWithTZ(not_before),
- FormatTimestampWithTZ(not_after)))
+ (FormatTime(not_before), FormatTime(not_after)))
elif not_before is not None:
- msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
+ msg += " (valid from %s)" % FormatTime(not_before)
elif not_after is not None:
- msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
+ msg += " (valid until %s)" % FormatTime(not_after)
return (CERT_ERROR, msg)
elif not_before is not None and not_before > now:
return (CERT_WARNING,
"Certificate not yet valid (valid from %s)" %
- FormatTimestampWithTZ(not_before))
+ FormatTime(not_before))
elif not_after is not None:
remaining_days = int((not_after - now) / (24 * 3600))
return ", ".join([str(val) for val in names])
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)
  @return: the tuple as described, or C{None} when nothing matches

  """
  # A direct hit takes precedence over any regular expression
  try:
    return (data[name], [])
  except KeyError:
    pass

  for key, value in data.items():
    # Anything providing a "match" method is treated as a regex object
    if hasattr(key, "match"):
      m = key.match(name)
      if m:
        return (value, list(m.groups()))

  return None
+
+
def BytesToMebibyte(value):
"""Converts bytes to mebibytes.
"""Formats a time value.
@type val: float or None
- @param val: the timestamp as returned by time.time()
+ @param val: Timestamp as returned by time.time() (seconds since Epoch,
+ 1970-01-01 00:00:00 UTC)
@return: a string value or N/A if we don't have a valid timestamp
"""
return (key_pem, cert_pem)
-def GenerateSelfSignedSslCert(filename, validity=(5 * 365)):
+def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
+ validity=constants.X509_CERT_DEFAULT_VALIDITY):
"""Legacy function to generate self-signed X509 certificate.
+ @type filename: str
+ @param filename: path to write certificate to
+ @type common_name: string
+ @param common_name: commonName value
+ @type validity: int
+ @param validity: validity of certificate in number of days
+
"""
- (key_pem, cert_pem) = GenerateSelfSignedX509Cert(None,
+ # TODO: Investigate using the cluster name instead of X505_CERT_CN for
+ # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
+ # and node daemon certificates have the proper Subject/Issuer.
+ (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
validity * 24 * 60 * 60)
WriteFile(filename, mode=0400, data=key_pem + cert_pem)
"""
return [val for val in items if not self.Matches(val)]
+
+
class RunningTimeout(object):
  """Class to calculate remaining timeout when doing several operations.

  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]

  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.

    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests

    """
    object.__init__(self)

    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")

    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn
    self._start_time = None

  def Remaining(self):
    """Returns the remaining timeout.

    """
    timeout = self._timeout
    if timeout is None:
      return None

    # The clock starts ticking on the first call
    if self._start_time is None:
      self._start_time = self._time_fn()

    remaining = (self._start_time + timeout) - self._time_fn()

    if self._allow_negative:
      return remaining

    # Never report less than zero
    return max(0.0, remaining)