import os
import sys
-import re
import time
-import fcntl
-import errno
import logging
from optparse import OptionParser
from ganeti import serializer
from ganeti import ssconf
from ganeti import errors
+from ganeti import opcodes
+from ganeti import logger
+from ganeti import cli
MAXTRIES = 5
-BAD_STATES = ['stopped']
-HELPLESS_STATES = ['(node down)']
+BAD_STATES = ['ERROR_down']
+HELPLESS_STATES = ['ERROR_nodedown']
NOTICE = 'NOTICE'
ERROR = 'ERROR'
KEY_RESTART_COUNT = "restart_count"
KEY_BOOT_ID = "bootid"
-class Error(Exception):
- """Generic custom error class."""
+# Global client object
+client = None
-class NotMasterError(Error):
+class NotMasterError(errors.GenericError):
"""Exception raised when this host is not the master."""
return "%s%s\n" % (prefix, ('\n' + prefix).join(s.splitlines()))
-def DoCmd(cmd):
- """Run a shell command.
-
- Args:
- cmd: the command to run.
-
- Raises CommandError with verbose commentary on error.
-
- """
- res = utils.RunCmd(cmd)
-
- if res.failed:
- raise Error("Command %s failed:\n%s\nstdout:\n%sstderr:\n%s" %
- (repr(cmd),
- Indent(res.fail_reason),
- Indent(res.stdout),
- Indent(res.stderr)))
-
- return res
-
-
-def LockFile(fd):
- """Locks a file using POSIX locks.
-
- """
- try:
- fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError, err:
- if err.errno == errno.EAGAIN:
- raise StandardError("File already locked")
- raise
-
-
class WatcherState(object):
"""Interface to a state file recording restart attempts.
def __init__(self):
"""Open, lock, read and parse the file.
- Raises StandardError on lock contention.
+ Raises exception on lock contention.
"""
# The two-step dance below is necessary to allow both opening existing
fd = os.open(constants.WATCHER_STATEFILE, os.O_RDWR | os.O_CREAT)
self.statefile = os.fdopen(fd, 'w+')
- LockFile(self.statefile.fileno())
+ utils.LockFile(self.statefile.fileno())
try:
- self.data = serializer.Load(self.statefile.read())
+ self._data = serializer.Load(self.statefile.read())
except Exception, msg:
# Ignore errors while loading the file and treat it as empty
- self.data = {}
+ self._data = {}
logging.warning(("Empty or invalid state file. Using defaults."
" Error message: %s"), msg)
- if "instance" not in self.data:
- self.data["instance"] = {}
- if "node" not in self.data:
- self.data["node"] = {}
+ if "instance" not in self._data:
+ self._data["instance"] = {}
+ if "node" not in self._data:
+ self._data["node"] = {}
+
+ self._orig_data = serializer.Dump(self._data)
def Save(self):
"""Save state to file, then unlock and close it.
"""
assert self.statefile
+ serialized_form = serializer.Dump(self._data)
+ if self._orig_data == serialized_form:
+ logging.debug("Data didn't change, just touching status file")
+ os.utime(constants.WATCHER_STATEFILE, None)
+ return
+
# We need to make sure the file is locked before renaming it, otherwise
# starting ganeti-watcher again at the same time will create a conflict.
fd = utils.WriteFile(constants.WATCHER_STATEFILE,
- data=serializer.Dump(self.data),
- prewrite=LockFile, close=False)
+ data=serialized_form,
+ prewrite=utils.LockFile, close=False)
self.statefile = os.fdopen(fd, 'w+')
def Close(self):
"""Returns the last boot ID of a node or None.
"""
- ndata = self.data["node"]
+ ndata = self._data["node"]
if name in ndata and KEY_BOOT_ID in ndata[name]:
return ndata[name][KEY_BOOT_ID]
"""
assert bootid
- ndata = self.data["node"]
+ ndata = self._data["node"]
if name not in ndata:
ndata[name] = {}
instance - the instance to look up.
"""
- idata = self.data["instance"]
+ idata = self._data["instance"]
if instance.name in idata:
return idata[instance.name][KEY_RESTART_COUNT]
instance - the instance being restarted
"""
- idata = self.data["instance"]
+ idata = self._data["instance"]
if instance.name not in idata:
inst = idata[instance.name] = {}
This method removes the record for a named instance.
"""
- idata = self.data["instance"]
+ idata = self._data["instance"]
if instance.name in idata:
del idata[instance.name]
"""Encapsulates the start of an instance.
"""
- DoCmd(['gnt-instance', 'startup', '--lock-retries=15', self.name])
+ op = opcodes.OpStartupInstance(instance_name=self.name,
+ force=False,
+ extra_args=None)
+ cli.SubmitOpCode(op, cl=client)
def ActivateDisks(self):
"""Encapsulates the activation of all disks of an instance.
"""
- DoCmd(['gnt-instance', 'activate-disks', '--lock-retries=15', self.name])
-
-
-def _RunListCmd(cmd):
- """Runs a command and parses its output into lists.
-
- """
- for line in DoCmd(cmd).stdout.splitlines():
- yield line.split(':')
+ op = opcodes.OpActivateInstanceDisks(instance_name=self.name)
+ cli.SubmitOpCode(op, cl=client)
def GetInstanceList(with_secondaries=None):
"""Get a list of instances on this cluster.
"""
- cmd = ['gnt-instance', 'list', '--lock-retries=15', '--no-headers',
- '--separator=:']
-
- fields = 'name,oper_state,admin_state'
+ fields = ["name", "status", "admin_state"]
if with_secondaries is not None:
- fields += ',snodes'
+ fields.append("snodes")
- cmd.append('-o')
- cmd.append(fields)
+ result = client.QueryInstances([], fields)
instances = []
- for fields in _RunListCmd(cmd):
+ for fields in result:
if with_secondaries is not None:
(name, status, autostart, snodes) = fields
- if snodes == "-":
+ if not snodes:
continue
for node in with_secondaries:
- if node in snodes.split(','):
+ if node in snodes:
break
else:
continue
else:
(name, status, autostart) = fields
- instances.append(Instance(name, status, autostart != "no"))
+ instances.append(Instance(name, status, autostart))
return instances
"""Get a dict mapping nodes to boot IDs.
"""
- cmd = ['gnt-node', 'list', '--lock-retries=15', '--no-headers',
- '--separator=:', '-o', 'name,bootid']
-
- ids = {}
- for fields in _RunListCmd(cmd):
- (name, bootid) = fields
- ids[name] = bootid
-
- return ids
+ result = client.QueryNodes([], ["name", "bootid"])
+ return dict([(name, bootid) for name, bootid in result])
class Watcher(object):
"""
check_nodes = []
- for name, id in self.bootids.iteritems():
+ for name, new_id in self.bootids.iteritems():
old = notepad.GetNodeBootID(name)
- if old != id:
+ if old != new_id:
      # Node's boot ID has changed, probably through a reboot.
check_nodes.append(name)
try:
logging.info("Activating disks for instance %s", instance.name)
instance.ActivateDisks()
- except Error, err:
- logging.error(str(err), exc_info=True)
+ except Exception:
+ logging.exception("Error while activating disks for instance %s",
+ instance.name)
# Keep changed boot IDs
for name in check_nodes:
"""
for instance in self.instances:
- # Don't care about manually stopped instances
- if not instance.autostart:
- continue
-
if instance.state in BAD_STATES:
n = notepad.NumberOfRestartAttempts(instance)
instance.name, last)
instance.Restart()
self.started_instances.add(instance.name)
- except Error, err:
- logging.error(str(err), exc_info=True)
+      except Exception:
+        logging.exception("Error while restarting instance %s", instance.name)
notepad.RecordRestartAttempt(instance)
elif instance.state in HELPLESS_STATES:
notepad.RemoveInstance(instance)
logging.info("Restart of %s succeeded", instance.name)
- def VerifyDisks(self):
+ @staticmethod
+ def VerifyDisks():
"""Run gnt-cluster verify-disks.
"""
- result = DoCmd(['gnt-cluster', 'verify-disks', '--lock-retries=15'])
- if result.output:
- logging.info(result.output)
+ op = opcodes.OpVerifyDisks()
+ result = cli.SubmitOpCode(op, cl=client)
+ if not isinstance(result, (tuple, list)):
+ logging.error("Can't get a valid result from verify-disks")
+ return
+ offline_disk_instances = result[2]
+ if not offline_disk_instances:
+ # nothing to do
+ return
+ logging.debug("Will activate disks for instances %s",
+ ", ".join(offline_disk_instances))
+ # we submit only one job, and wait for it. not optimal, but spams
+ # less the job queue
+ job = [opcodes.OpActivateInstanceDisks(instance_name=name)
+ for name in offline_disk_instances]
+ job_id = cli.SendJob(job, cl=client)
+
+ cli.PollJob(job_id, cl=client, feedback_fn=logging.debug)
def ParseOptions():
return options, args
-def SetupLogging(debug):
- """Configures the logging module.
-
- """
- formatter = logging.Formatter("%(asctime)s: %(message)s")
-
- logfile_handler = logging.FileHandler(constants.LOG_WATCHER)
- logfile_handler.setFormatter(formatter)
- logfile_handler.setLevel(logging.INFO)
-
- stderr_handler = logging.StreamHandler()
- stderr_handler.setFormatter(formatter)
- if debug:
- stderr_handler.setLevel(logging.NOTSET)
- else:
- stderr_handler.setLevel(logging.CRITICAL)
-
- root_logger = logging.getLogger("")
- root_logger.setLevel(logging.NOTSET)
- root_logger.addHandler(logfile_handler)
- root_logger.addHandler(stderr_handler)
-
-
def main():
"""Main function.
"""
+ global client
+
options, args = ParseOptions()
- SetupLogging(options.debug)
+ logger.SetupLogging(constants.LOG_WATCHER, debug=options.debug,
+ stderr_logging=options.debug)
try:
+ client = cli.GetClient()
+
try:
watcher = Watcher()
except errors.ConfigurationError:
# Just exit if there's no configuration
sys.exit(constants.EXIT_SUCCESS)
+
watcher.Run()
except SystemExit:
raise