#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
This program and set of classes implement a watchdog to restart
virtual machines in a Ganeti cluster that have crashed or been killed
by a node reboot. Run from cron or similar.
-"""
-
-LOGFILE = '/var/log/ganeti/watcher.log'
-MAXTRIES = 5
-BAD_STATES = ['stopped']
-HELPLESS_STATES = ['(node down)']
-NOTICE = 'NOTICE'
-ERROR = 'ERROR'
+"""
import os
import sys
import time
import fcntl
import errno
import logging
from optparse import OptionParser

from ganeti import utils
from ganeti import constants
from ganeti import serializer
from ganeti import ssconf
from ganeti import errors
from ganeti import opcodes
from ganeti import logger
from ganeti import cli
# How many times to retry restarting an instance before giving up.
MAXTRIES = 5
# Instance states that call for a restart attempt.
BAD_STATES = ['stopped']
# Instance states we can do nothing about (e.g. the node is down).
HELPLESS_STATES = ['(node down)']
NOTICE = 'NOTICE'
ERROR = 'ERROR'
# Keys used in the per-instance/per-node dicts of the state file.
KEY_RESTART_COUNT = "restart_count"
KEY_RESTART_WHEN = "restart_when"
KEY_BOOT_ID = "bootid"

# Global client object, initialized in main()
client = None


class NotMasterError(errors.GenericError):
  """Exception raised when this host is not the master."""
res = utils.RunCmd(cmd)
if res.failed:
- raise Error("Command %s failed:\n%s\nstdout:\n%sstderr:\n%s" %
- (repr(cmd),
- Indent(res.fail_reason),
- Indent(res.stdout),
- Indent(res.stderr)))
+ msg = ("Command %s failed:\n%s\nstdout:\n%sstderr:\n%s" %
+ (repr(cmd),
+ Indent(res.fail_reason),
+ Indent(res.stdout),
+ Indent(res.stderr)))
+ raise errors.CommandError(msg)
return res
-class RestarterState(object):
+class WatcherState(object):
"""Interface to a state file recording restart attempts.
- Methods:
- Open(): open, lock, read and parse the file.
- Raises StandardError on lock contention.
-
- NumberOfAttempts(name): returns the number of times in succession
- a restart has been attempted of the named instance.
-
- RecordAttempt(name, when): records one restart attempt of name at
- time in when.
-
- Remove(name): remove record given by name, if exists.
-
- Save(name): saves all records to file, releases lock and closes file.
-
"""
def __init__(self):
+ """Open, lock, read and parse the file.
+
+ Raises exception on lock contention.
+
+ """
# The two-step dance below is necessary to allow both opening existing
# file read/write and creating if not existing. Vanilla open will truncate
# an existing file -or- allow creating if not existing.
- f = os.open(constants.WATCHER_STATEFILE, os.O_RDWR | os.O_CREAT)
- f = os.fdopen(f, 'w+')
+ fd = os.open(constants.WATCHER_STATEFILE, os.O_RDWR | os.O_CREAT)
+ self.statefile = os.fdopen(fd, 'w+')
+
+ utils.LockFile(self.statefile.fileno())
try:
- fcntl.flock(f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
- except IOError, x:
- if x.errno == errno.EAGAIN:
- raise StandardError('State file already locked')
- raise
+ self._data = serializer.Load(self.statefile.read())
+ except Exception, msg:
+ # Ignore errors while loading the file and treat it as empty
+ self._data = {}
+ logging.warning(("Empty or invalid state file. Using defaults."
+ " Error message: %s"), msg)
+
+ if "instance" not in self._data:
+ self._data["instance"] = {}
+ if "node" not in self._data:
+ self._data["node"] = {}
+
+ self._orig_data = serializer.Dump(self._data)
+
+ def Save(self):
+ """Save state to file, then unlock and close it.
+
+ """
+ assert self.statefile
+
+ serialized_form = serializer.Dump(self._data)
+ if self._orig_data == serialized_form:
+ logging.debug("Data didn't change, just touching status file")
+ os.utime(constants.WATCHER_STATEFILE, None)
+ return
+
+ # We need to make sure the file is locked before renaming it, otherwise
+ # starting ganeti-watcher again at the same time will create a conflict.
+ fd = utils.WriteFile(constants.WATCHER_STATEFILE,
+ data=serialized_form,
+ prewrite=utils.LockFile, close=False)
+ self.statefile = os.fdopen(fd, 'w+')
+
+ def Close(self):
+ """Unlock configuration file and close it.
+
+ """
+ assert self.statefile
+
+ # Files are automatically unlocked when closing them
+ self.statefile.close()
+ self.statefile = None
+
+ def GetNodeBootID(self, name):
+ """Returns the last boot ID of a node or None.
+
+ """
+ ndata = self._data["node"]
+
+ if name in ndata and KEY_BOOT_ID in ndata[name]:
+ return ndata[name][KEY_BOOT_ID]
+ return None
- self.statefile = f
- self.inst_map = {}
+ def SetNodeBootID(self, name, bootid):
+ """Sets the boot ID of a node.
- for line in f:
- name, when, count = line.rstrip().split(':')
+ """
+ assert bootid
+
+ ndata = self._data["node"]
- when = int(when)
- count = int(count)
+ if name not in ndata:
+ ndata[name] = {}
- self.inst_map[name] = (when, count)
+ ndata[name][KEY_BOOT_ID] = bootid
- def NumberOfAttempts(self, instance):
+ def NumberOfRestartAttempts(self, instance):
"""Returns number of previous restart attempts.
Args:
instance - the instance to look up.
"""
- assert self.statefile
+ idata = self._data["instance"]
- if instance.name in self.inst_map:
- return self.inst_map[instance.name][1]
+ if instance.name in idata:
+ return idata[instance.name][KEY_RESTART_COUNT]
return 0
- def RecordAttempt(self, instance):
+ def RecordRestartAttempt(self, instance):
"""Record a restart attempt.
Args:
instance - the instance being restarted
"""
- assert self.statefile
+ idata = self._data["instance"]
- when = time.time()
+ if instance.name not in idata:
+ inst = idata[instance.name] = {}
+ else:
+ inst = idata[instance.name]
- self.inst_map[instance.name] = (when, 1 + self.NumberOfAttempts(instance))
+ inst[KEY_RESTART_WHEN] = time.time()
+ inst[KEY_RESTART_COUNT] = inst.get(KEY_RESTART_COUNT, 0) + 1
- def Remove(self, instance):
+ def RemoveInstance(self, instance):
"""Update state to reflect that a machine is running, i.e. remove record.
Args:
This method removes the record for a named instance.
"""
- assert self.statefile
-
- if instance.name in self.inst_map:
- del self.inst_map[instance.name]
-
- def Save(self):
- """Save records to file, then unlock and close file.
-
- """
- assert self.statefile
-
- self.statefile.seek(0)
- self.statefile.truncate()
+ idata = self._data["instance"]
- for name in self.inst_map:
- print >> self.statefile, "%s:%d:%d" % ((name,) + self.inst_map[name])
-
- fcntl.flock(self.statefile.fileno(), fcntl.LOCK_UN)
-
- self.statefile.close()
- self.statefile = None
+ if instance.name in idata:
+ del idata[instance.name]
class Instance(object):
  """Abstraction for a Virtual Machine instance.

  Methods:
    Restart(): issue a command to restart the represented machine.

  """
  def __init__(self, name, state, autostart):
    # name: instance name; state: operational state string;
    # autostart: whether the cluster should auto-start this instance
    self.name = name
    self.state = state
    self.autostart = autostart

  def Restart(self):
    """Encapsulates the start of an instance.

    """
    op = opcodes.OpStartupInstance(instance_name=self.name,
                                   force=False,
                                   extra_args=None)
    cli.SubmitOpCode(op, cl=client)

  def ActivateDisks(self):
    """Encapsulates the activation of all disks of an instance.

    """
    op = opcodes.OpActivateInstanceDisks(instance_name=self.name)
    cli.SubmitOpCode(op, cl=client)
+
+
def GetInstanceList(with_secondaries=None):
  """Get a list of instances on this cluster.

  Args:
    with_secondaries: if not None, an iterable of node names; only
      instances having at least one of these nodes as a secondary are
      returned.

  Returns:
    list of Instance objects.

  """
  fields = ["name", "oper_state", "admin_state"]

  if with_secondaries is not None:
    fields.append("snodes")

  result = client.QueryInstances([], fields)

  instances = []
  # NOTE: loop variable renamed from "fields" to avoid shadowing the
  # query field list above.
  for row in result:
    if with_secondaries is not None:
      (name, status, autostart, snodes) = row

      # Instances without secondaries cannot be affected by a node reboot
      if not snodes:
        continue

      # Keep only instances with a secondary among the requested nodes
      for node in with_secondaries:
        if node in snodes:
          break
      else:
        continue

    else:
      (name, status, autostart) = row

    instances.append(Instance(name, status, autostart))

  return instances
def GetNodeBootIDs():
  """Get a dict mapping nodes to boot IDs.

  """
  result = client.QueryNodes([], ["name", "bootid"])
  return dict([(name, bootid) for name, bootid in result])
-class Restarter(object):
+class Watcher(object):
"""Encapsulate the logic for restarting erronously halted virtual machines.
The calling program should periodically instantiate me and call Run().
def __init__(self):
sstore = ssconf.SimpleStore()
master = sstore.GetMasterNode()
- if master != socket.gethostname():
- raise NotMasterError, ("This is not the master node")
- self.instances = InstanceList()
- self.messages = []
+ if master != utils.HostInfo().name:
+ raise NotMasterError("This is not the master node")
+ self.instances = GetInstanceList()
+ self.bootids = GetNodeBootIDs()
+ self.started_instances = set()
def Run(self):
- """Make a pass over the list of instances, restarting downed ones.
+ notepad = WatcherState()
+ try:
+ self.CheckInstances(notepad)
+ self.CheckDisks(notepad)
+ self.VerifyDisks()
+ finally:
+ notepad.Save()
+
+ def CheckDisks(self, notepad):
+ """Check all nodes for restarted ones.
"""
- notepad = RestarterState()
+ check_nodes = []
+ for name, new_id in self.bootids.iteritems():
+ old = notepad.GetNodeBootID(name)
+ if old != new_id:
+ # Node's boot ID has changed, proably through a reboot.
+ check_nodes.append(name)
+
+ if check_nodes:
+ # Activate disks for all instances with any of the checked nodes as a
+ # secondary node.
+ for instance in GetInstanceList(with_secondaries=check_nodes):
+ if not instance.autostart:
+ logging.info(("Skipping disk activation for non-autostart"
+ " instance %s"), instance.name)
+ continue
+ if instance.name in self.started_instances:
+ # we already tried to start the instance, which should have
+ # activated its drives (if they can be at all)
+ continue
+ try:
+ logging.info("Activating disks for instance %s", instance.name)
+ instance.ActivateDisks()
+ except Exception, err:
+ logging.error(str(err), exc_info=True)
+
+ # Keep changed boot IDs
+ for name in check_nodes:
+ notepad.SetNodeBootID(name, self.bootids[name])
+ def CheckInstances(self, notepad):
+ """Make a pass over the list of instances, restarting downed ones.
+
+ """
for instance in self.instances:
+ # Don't care about manually stopped instances
+ if not instance.autostart:
+ continue
+
if instance.state in BAD_STATES:
- n = notepad.NumberOfAttempts(instance)
+ n = notepad.NumberOfRestartAttempts(instance)
if n > MAXTRIES:
# stay quiet.
elif n < MAXTRIES:
last = " (Attempt #%d)" % (n + 1)
else:
- notepad.RecordAttempt(instance)
- self.messages.append(Message(ERROR, "Could not restart %s for %d"
- " times, giving up..." %
- (instance.name, MAXTRIES)))
+ notepad.RecordRestartAttempt(instance)
+ logging.error("Could not restart %s after %d attempts, giving up",
+ instance.name, MAXTRIES)
continue
try:
- self.messages.append(Message(NOTICE,
- "Restarting %s%s." %
- (instance.name, last)))
+ logging.info("Restarting %s%s",
+ instance.name, last)
instance.Restart()
- except Error, x:
- self.messages.append(Message(ERROR, str(x)))
+ self.started_instances.add(instance.name)
+ except Exception, err:
+ logging.error(str(err), exc_info=True)
- notepad.RecordAttempt(instance)
+ notepad.RecordRestartAttempt(instance)
elif instance.state in HELPLESS_STATES:
- if notepad.NumberOfAttempts(instance):
- notepad.Remove(instance)
+ if notepad.NumberOfRestartAttempts(instance):
+ notepad.RemoveInstance(instance)
else:
- if notepad.NumberOfAttempts(instance):
- notepad.Remove(instance)
- msg = Message(NOTICE,
- "Restart of %s succeeded." % instance.name)
- self.messages.append(msg)
-
- notepad.Save()
+ if notepad.NumberOfRestartAttempts(instance):
+ notepad.RemoveInstance(instance)
+ logging.info("Restart of %s succeeded", instance.name)
- def WriteReport(self, logfile):
- """Log all messages to file.
-
- Args:
- logfile: file object open for writing (the log file)
+ def VerifyDisks(self):
+ """Run gnt-cluster verify-disks.
"""
- for msg in self.messages:
- print >> logfile, str(msg)
+ # TODO: What should we do here?
+ result = DoCmd(['gnt-cluster', 'verify-disks'])
+ if result.output:
+ logging.info(result.output)
def ParseOptions():
  """Parse the command line options.

  Returns:
    (options, args) as from OptionParser.parse_args()

  """
  # NOTE(review): the OptionParser construction lines were elided from
  # this hunk; reconstructed from the visible "constants.RELEASE_VERSION)"
  # continuation — confirm against upstream.
  parser = OptionParser(description="Ganeti cluster watcher",
                        usage="%prog [-d]",
                        version="%%prog (ganeti) %s" %
                        constants.RELEASE_VERSION)

  parser.add_option("-d", "--debug", dest="debug",
                    help="Write all messages to stderr",
                    default=False, action="store_true")
  options, args = parser.parse_args()
  return options, args
"""Main function.
"""
+ global client
+
options, args = ParseOptions()
- if not options.debug:
- sys.stderr = sys.stdout = open(LOGFILE, 'a')
+ logger.SetupLogging(constants.LOG_WATCHER, debug=options.debug)
try:
- restarter = Restarter()
- restarter.Run()
- restarter.WriteReport(sys.stdout)
+ client = cli.GetClient()
+
+ try:
+ watcher = Watcher()
+ except errors.ConfigurationError:
+ # Just exit if there's no configuration
+ sys.exit(constants.EXIT_SUCCESS)
+
+ watcher.Run()
+ except SystemExit:
+ raise
except NotMasterError:
- if options.debug:
- sys.stderr.write("Not master, exiting.\n")
+ logging.debug("Not master, exiting")
sys.exit(constants.EXIT_NOTMASTER)
- except Error, err:
- print err
+ except errors.ResolverError, err:
+ logging.error("Cannot resolve hostname '%s', exiting.", err.args[0])
+ sys.exit(constants.EXIT_NODESETUP_ERROR)
+ except Exception, err:
+ logging.error(str(err), exc_info=True)
+ sys.exit(constants.EXIT_FAILURE)
+
if __name__ == '__main__':
main()