"""
+import os
+import errno
import sys
import SocketServer
import time
import Queue
import random
import signal
-import simplejson
import logging
from cStringIO import StringIO
from ganeti import utils
from ganeti import errors
from ganeti import ssconf
-from ganeti import logger
from ganeti import workerpool
+from ganeti import rpc
+from ganeti import bootstrap
+from ganeti import serializer
CLIENT_REQUEST_WORKERS = 16
cleanup at shutdown.
"""
- def __init__(self, address, rqhandler, context):
+ def __init__(self, address, rqhandler):
"""IOServer constructor
- Args:
- address: the address to bind this IOServer to
- rqhandler: RequestHandler type object
- context: Context Object common to all worker threads
+ @param address: the address to bind this IOServer to
+ @param rqhandler: RequestHandler type object
"""
SocketServer.UnixStreamServer.__init__(self, address, rqhandler)
- self.do_quit = False
- self.context = context
# We'll only start threads once we've forked.
- self.jobqueue = None
+ self.context = None
self.request_workers = None
- signal.signal(signal.SIGINT, self.handle_quit_signals)
- signal.signal(signal.SIGTERM, self.handle_quit_signals)
-
def setup_queue(self):
- self.jobqueue = jqueue.JobQueue(self.context)
+ self.context = GanetiContext()
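+    # GanetiContext also creates and owns the job queue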
self.request_workers = workerpool.WorkerPool(CLIENT_REQUEST_WORKERS,
ClientRequestWorker)
"""
self.request_workers.AddTask(self, request, client_address)
- def handle_quit_signals(self, signum, frame):
- print "received %s in %s" % (signum, frame)
- self.do_quit = True
-
def serve_forever(self):
"""Handle one request at a time until told to quit."""
- while not self.do_quit:
- self.handle_request()
- print "served request, quit=%s" % (self.do_quit)
+ sighandler = utils.SignalHandler([signal.SIGINT, signal.SIGTERM])
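+    # serve requests until SIGINT or SIGTERM is received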
+ try:
+ while not sighandler.called:
+ self.handle_request()
+ finally:
+ sighandler.Reset()
def server_cleanup(self):
"""Cleanup the server.
"""
try:
self.server_close()
- utils.RemoveFile(constants.MASTER_SOCKET)
finally:
if self.request_workers:
self.request_workers.TerminateWorkers()
- if self.jobqueue:
- self.jobqueue.Shutdown()
+ if self.context:
+ self.context.jobqueue.Shutdown()
class ClientRqHandler(SocketServer.BaseRequestHandler):
while True:
msg = self.read_message()
if msg is None:
- logging.info("client closed connection")
+ logging.debug("client closed connection")
break
- request = simplejson.loads(msg)
+ request = serializer.LoadJson(msg)
logging.debug("request: %s", request)
if not isinstance(request, dict):
logging.error("wrong request received: %s", msg)
try:
result = self._ops.handle_request(method, args)
success = True
+ except errors.GenericError, err:
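+        # report expected Ganeti errors by class name and arguments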
+ success = False
+ result = (err.__class__.__name__, err.args)
except:
logging.error("Unexpected exception", exc_info=True)
err = sys.exc_info()
luxi.KEY_RESULT: result,
}
logging.debug("response: %s", response)
- self.send_message(simplejson.dumps(response))
+ self.send_message(serializer.DumpJson(response))
def read_message(self):
while not self._msgs:
self.server = server
def handle_request(self, method, args):
- queue = self.server.jobqueue
+ queue = self.server.context.jobqueue
# TODO: Parameter validation
if method == luxi.REQ_SUBMIT_JOB:
+ logging.info("Received new job")
ops = [opcodes.OpCode.LoadOpCode(state) for state in args]
return queue.SubmitJob(ops)
+ if method == luxi.REQ_SUBMIT_MANY_JOBS:
+ logging.info("Received multiple jobs")
+ jobs = []
+ for ops in args:
+ jobs.append([opcodes.OpCode.LoadOpCode(state) for state in ops])
+ return queue.SubmitManyJobs(jobs)
+
elif method == luxi.REQ_CANCEL_JOB:
- (job_id, ) = args
+ job_id = args
+ logging.info("Received job cancel request for %s", job_id)
return queue.CancelJob(job_id)
elif method == luxi.REQ_ARCHIVE_JOB:
- (job_id, ) = args
+ job_id = args
+ logging.info("Received job archive request for %s", job_id)
return queue.ArchiveJob(job_id)
+ elif method == luxi.REQ_AUTOARCHIVE_JOBS:
+ (age, timeout) = args
+ logging.info("Received job autoarchive request for age %s, timeout %s",
+ age, timeout)
+ return queue.AutoArchiveJobs(age, timeout)
+
+ elif method == luxi.REQ_WAIT_FOR_JOB_CHANGE:
+ (job_id, fields, prev_job_info, prev_log_serial, timeout) = args
+ logging.info("Received job poll request for %s", job_id)
+ return queue.WaitForJobChanges(job_id, fields, prev_job_info,
+ prev_log_serial, timeout)
+
elif method == luxi.REQ_QUERY_JOBS:
(job_ids, fields) = args
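+      # build a readable summary of the requested job IDs for the log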
+ if isinstance(job_ids, (tuple, list)) and job_ids:
+ msg = ", ".join(job_ids)
+ else:
+ msg = str(job_ids)
+ logging.info("Received job query request for %s", msg)
return queue.QueryJobs(job_ids, fields)
+ elif method == luxi.REQ_QUERY_INSTANCES:
+ (names, fields, use_locking) = args
+ logging.info("Received instance query request for %s", names)
+ if use_locking:
+ raise errors.OpPrereqError("Sync queries are not allowed")
+ op = opcodes.OpQueryInstances(names=names, output_fields=fields,
+ use_locking=use_locking)
+ return self._Query(op)
+
+ elif method == luxi.REQ_QUERY_NODES:
+ (names, fields, use_locking) = args
+ logging.info("Received node query request for %s", names)
+ if use_locking:
+ raise errors.OpPrereqError("Sync queries are not allowed")
+ op = opcodes.OpQueryNodes(names=names, output_fields=fields,
+ use_locking=use_locking)
+ return self._Query(op)
+
+ elif method == luxi.REQ_QUERY_EXPORTS:
+ nodes, use_locking = args
+ if use_locking:
+ raise errors.OpPrereqError("Sync queries are not allowed")
+ logging.info("Received exports query request")
+ op = opcodes.OpQueryExports(nodes=nodes, use_locking=use_locking)
+ return self._Query(op)
+
+ elif method == luxi.REQ_QUERY_CONFIG_VALUES:
+ fields = args
+ logging.info("Received config values query request for %s", fields)
+ op = opcodes.OpQueryConfigValues(output_fields=fields)
+ return self._Query(op)
+
+ elif method == luxi.REQ_QUERY_CLUSTER_INFO:
+ logging.info("Received cluster info query request")
+ op = opcodes.OpQueryClusterInfo()
+ return self._Query(op)
+
+ elif method == luxi.REQ_QUEUE_SET_DRAIN_FLAG:
+ drain_flag = args
+ logging.info("Received queue drain flag change request to %s",
+ drain_flag)
+ return queue.SetDrainFlag(drain_flag)
+
else:
- raise ValueError("Invalid operation")
+ logging.info("Received invalid request '%s'", method)
+ raise ValueError("Invalid operation '%s'" % method)
+
+ def _DummyLog(self, *args):
+ pass
+
+ def _Query(self, op):
+ """Runs the specified opcode and returns the result.
+
+ """
+ proc = mcpu.Processor(self.server.context)
+ # TODO: Where should log messages go?
+ return proc.ExecOpCode(op, self._DummyLog, None)
class GanetiContext(object):
"""
assert self.__class__._instance is None, "double GanetiContext instance"
- # Create a ConfigWriter...
+ # Create global configuration object
self.cfg = config.ConfigWriter()
- # And a GanetiLockingManager...
+
+ # Locking manager
self.glm = locking.GanetiLockManager(
self.cfg.GetNodeList(),
self.cfg.GetInstanceList())
+ # Job queue
+ self.jobqueue = jqueue.JobQueue(self)
+
# setting this also locks the class against attribute modifications
self.__class__._instance = self
assert self.__class__._instance is None, "Attempt to modify Ganeti Context"
object.__setattr__(self, name, value)
+ def AddNode(self, node):
+ """Adds a node to the configuration and lock manager.
-def CheckMaster(debug):
- """Checks the node setup.
+ """
+ # Add it to the configuration
+ self.cfg.AddNode(node)
- If this is the master, the function will return. Otherwise it will
- exit with an exit code based on the node status.
+ # If preseeding fails it'll not be added
+ self.jobqueue.AddNode(node)
- """
- try:
- ss = ssconf.SimpleStore()
- master_name = ss.GetMasterNode()
- except errors.ConfigurationError, err:
- print "Cluster configuration incomplete: '%s'" % str(err)
- sys.exit(EXIT_NODESETUP_ERROR)
+ # Add the new node to the Ganeti Lock Manager
+ self.glm.add(locking.LEVEL_NODE, node.name)
- try:
- myself = utils.HostInfo()
- except errors.ResolverError, err:
- sys.stderr.write("Cannot resolve my own name (%s)\n" % err.args[0])
- sys.exit(EXIT_NODESETUP_ERROR)
+ def ReaddNode(self, node):
+    """Updates a node that's already in the configuration.
+
+ """
+ # Synchronize the queue again
+ self.jobqueue.AddNode(node)
+
+ def RemoveNode(self, name):
+ """Removes a node from the configuration and lock manager.
+
+ """
+ # Remove node from configuration
+ self.cfg.RemoveNode(name)
- if myself.name != master_name:
- if debug:
- sys.stderr.write("Not master, exiting.\n")
- sys.exit(EXIT_NOTMASTER)
+ # Notify job queue
+ self.jobqueue.RemoveNode(name)
+
+ # Remove the node from the Ganeti Lock Manager
+ self.glm.remove(locking.LEVEL_NODE, name)
def ParseOptions():
"""Parse the command line options.
- Returns:
- (options, args) as from OptionParser.parse_args()
+ @return: (options, args) as from OptionParser.parse_args()
"""
parser = OptionParser(description="Ganeti master daemon",
parser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
+ parser.add_option("--no-voting", dest="no_voting",
+ help="Do not check that the nodes agree on this node"
+ " being the master and start the daemon unconditionally",
+ default=False, action="store_true")
options, args = parser.parse_args()
return options, args
+def CheckAgreement():
+ """Check the agreement on who is the master.
+
+  The function uses a very simple algorithm: we must get more positive
+  than negative answers. Since in most cases we are the master, we'll
+  use our own config file for getting the node list. In the future we
+  could collect the current node list from our (possibly obsolete)
+  known nodes.
+
+  In order to account for cold-start of all nodes, we retry for up to
+  a minute until we get a real answer as the top-voted one. If the
+  nodes are more out of sync, manual startup of the master should be
+  attempted for now.
+
+  Note that for a cluster with an even number of nodes, we need at
+  least half of the nodes (besides ourselves) to vote for us. This
+  creates a problem on two-node clusters, since in this case we
+  require the other node to be up too in order to confirm our status.
+
+ """
+ myself = utils.HostInfo().name
+  # temp instantiation of a config writer, used only to get the node list
+ cfg = config.ConfigWriter()
+ node_list = cfg.GetNodeList()
+ del cfg
+ retries = 6
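+  # retry for up to a minute: 6 attempts, 10 seconds apart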
+ while retries > 0:
+ votes = bootstrap.GatherMasterVotes(node_list)
+ if not votes:
+ # empty node list, this is a one node cluster
+ return True
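+    # a top vote of None means most nodes didn't answer; wait and retry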
+ if votes[0][0] is None:
+ retries -= 1
+ time.sleep(10)
+ continue
+ break
+ if retries == 0:
+ logging.critical("Cluster inconsistent, most of the nodes didn't answer"
+ " after multiple retries. Aborting startup")
+ return False
+ # here a real node is at the top of the list
+ all_votes = sum(item[1] for item in votes)
+ top_node, top_votes = votes[0]
+ result = False
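+  # we are master only if top-voted and holding at least half of the votes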
+ if top_node != myself:
+ logging.critical("It seems we are not the master (top-voted node"
+ " is %s with %d out of %d votes)", top_node, top_votes,
+ all_votes)
+ elif top_votes < all_votes - top_votes:
+ logging.critical("It seems we are not the master (%d votes for,"
+ " %d votes against)", top_votes, all_votes - top_votes)
+ else:
+ result = True
+
+ return result
+
+
def main():
"""Main function"""
utils.debug = options.debug
utils.no_fork = True
- CheckMaster(options.debug)
+ if options.fork:
+ utils.CloseFDs()
+
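+  # set up the RPC layer; the master voting below contacts the other nodes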
+ rpc.Init()
+ try:
+ ssconf.CheckMaster(options.debug)
+
+ # we believe we are the master, let's ask the other nodes...
+ if options.no_voting:
+ sys.stdout.write("The 'no voting' option has been selected.\n")
+ sys.stdout.write("This is dangerous, please confirm by"
+ " typing uppercase 'yes': ")
+ sys.stdout.flush()
+ confirmation = sys.stdin.readline().strip()
+ if confirmation != "YES":
+ print "Aborting."
+ return
+ else:
+ if not CheckAgreement():
+ return
+
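+  # make sure the runtime and socket directories exist with proper permissions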
+ dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE),
+ (constants.SOCKET_DIR, constants.SOCKET_DIR_MODE),
+ ]
+ utils.EnsureDirs(dirs)
- master = IOServer(constants.MASTER_SOCKET, ClientRqHandler, GanetiContext())
+ # This is safe to do as the pid file guarantees against
+ # concurrent execution.
+ utils.RemoveFile(constants.MASTER_SOCKET)
+
+ master = IOServer(constants.MASTER_SOCKET, ClientRqHandler)
+ finally:
+ rpc.Shutdown()
# become a daemon
if options.fork:
- utils.Daemonize(logfile=constants.LOG_MASTERDAEMON,
- noclose_fds=[master.fileno()])
+ utils.Daemonize(logfile=constants.LOG_MASTERDAEMON)
- logger.SetupDaemon(constants.LOG_MASTERDAEMON, debug=options.debug,
- stderr_logging=not options.fork)
+ utils.WritePidFile(constants.MASTERD_PID)
+ try:
+ utils.SetupLogging(constants.LOG_MASTERDAEMON, debug=options.debug,
+ stderr_logging=not options.fork, multithreaded=True)
- logging.info("ganeti master daemon startup")
+ logging.info("Ganeti master daemon startup")
- master.setup_queue()
- try:
- master.serve_forever()
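+    # set up the RPC layer again for the daemon's main loop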
+ rpc.Init()
+ try:
+ # activate ip
+ master_node = ssconf.SimpleConfigReader().GetMasterNode()
+ if not rpc.RpcRunner.call_node_start_master(master_node, False):
+ logging.error("Can't activate master IP address")
+
+ master.setup_queue()
+ try:
+ master.serve_forever()
+ finally:
+ master.server_cleanup()
+ finally:
+ rpc.Shutdown()
finally:
- master.server_cleanup()
+ utils.RemovePidFile(constants.MASTERD_PID)
+ utils.RemoveFile(constants.MASTER_SOCKET)
if __name__ == "__main__":