diff --git a/daemons/ganeti-masterd b/daemons/ganeti-masterd
index 56b00f7..2305fc6 100755
--- a/daemons/ganeti-masterd
+++ b/daemons/ganeti-masterd
@@ -27,6 +27,8 @@ inheritance from parent classes requires it.
 
 """
 
+import os
+import errno
 import sys
 import SocketServer
 import time
@@ -34,7 +36,6 @@ import collections
 import Queue
 import random
 import signal
-import simplejson
 import logging
 
 from cStringIO import StringIO
@@ -50,9 +51,10 @@ from ganeti import luxi
 from ganeti import utils
 from ganeti import errors
 from ganeti import ssconf
-from ganeti import logger
 from ganeti import workerpool
 from ganeti import rpc
+from ganeti import bootstrap
+from ganeti import serializer
 
 
 CLIENT_REQUEST_WORKERS = 16
@@ -84,24 +86,21 @@ class IOServer(SocketServer.UnixStreamServer):
   cleanup at shutdown.
 
   """
-  def __init__(self, address, rqhandler, context):
+  def __init__(self, address, rqhandler):
     """IOServer constructor
 
-    Args:
-      address: the address to bind this IOServer to
-      rqhandler: RequestHandler type object
-      context: Context Object common to all worker threads
+    @param address: the address to bind this IOServer to
+    @param rqhandler: RequestHandler type object
 
     """
     SocketServer.UnixStreamServer.__init__(self, address, rqhandler)
-    self.context = context
 
     # We'll only start threads once we've forked.
-    self.jobqueue = None
+    self.context = None
     self.request_workers = None
 
   def setup_queue(self):
-    self.jobqueue = jqueue.JobQueue(self.context)
+    self.context = GanetiContext()
     self.request_workers = workerpool.WorkerPool(CLIENT_REQUEST_WORKERS,
                                                  ClientRequestWorker)
 
@@ -129,12 +128,11 @@ class IOServer(SocketServer.UnixStreamServer):
     """
     try:
       self.server_close()
-      utils.RemoveFile(constants.MASTER_SOCKET)
     finally:
       if self.request_workers:
         self.request_workers.TerminateWorkers()
-      if self.jobqueue:
-        self.jobqueue.Shutdown()
+      if self.context:
+        self.context.jobqueue.Shutdown()
 
 
 class ClientRqHandler(SocketServer.BaseRequestHandler):
@@ -151,10 +149,10 @@ class ClientRqHandler(SocketServer.BaseRequestHandler):
     while True:
       msg = self.read_message()
       if msg is None:
-        logging.info("client closed connection")
+        logging.debug("client closed connection")
         break
 
-      request = simplejson.loads(msg)
+      request = serializer.LoadJson(msg)
       logging.debug("request: %s", request)
       if not isinstance(request, dict):
         logging.error("wrong request received: %s", msg)
@@ -170,6 +168,9 @@ class ClientRqHandler(SocketServer.BaseRequestHandler):
       try:
         result = self._ops.handle_request(method, args)
         success = True
+      except errors.GenericError, err:
+        success = False
+        result = (err.__class__.__name__, err.args)
       except:
         logging.error("Unexpected exception", exc_info=True)
         err = sys.exc_info()
@@ -180,7 +181,7 @@
         luxi.KEY_RESULT: result,
       }
       logging.debug("response: %s", response)
-      self.send_message(simplejson.dumps(response))
+      self.send_message(serializer.DumpJson(response))
 
   def read_message(self):
     while not self._msgs:
@@ -203,32 +204,110 @@ class ClientOps:
     self.server = server
 
   def handle_request(self, method, args):
-    queue = self.server.jobqueue
+    queue = self.server.context.jobqueue
 
     # TODO: Parameter validation
 
     if method == luxi.REQ_SUBMIT_JOB:
+      logging.info("Received new job")
       ops = [opcodes.OpCode.LoadOpCode(state) for state in args]
-      # we need to compute the node list here, since from now on all
-      # operations require locks on the queue or the storage, and we
-      # shouldn't get another lock
-      node_list = self.server.context.cfg.GetNodeList()
-      return queue.SubmitJob(ops, node_list)
+      return queue.SubmitJob(ops)
+
+    if method == luxi.REQ_SUBMIT_MANY_JOBS:
+      logging.info("Received multiple jobs")
+      jobs = []
+      for ops in args:
+        jobs.append([opcodes.OpCode.LoadOpCode(state) for state in ops])
+      return queue.SubmitManyJobs(jobs)
 
     elif method == luxi.REQ_CANCEL_JOB:
       job_id = args
+      logging.info("Received job cancel request for %s", job_id)
      return queue.CancelJob(job_id)
 
     elif method == luxi.REQ_ARCHIVE_JOB:
       job_id = args
+      logging.info("Received job archive request for %s", job_id)
       return queue.ArchiveJob(job_id)
 
+    elif method == luxi.REQ_AUTOARCHIVE_JOBS:
+      (age, timeout) = args
+      logging.info("Received job autoarchive request for age %s, timeout %s",
+                   age, timeout)
+      return queue.AutoArchiveJobs(age, timeout)
+
+    elif method == luxi.REQ_WAIT_FOR_JOB_CHANGE:
+      (job_id, fields, prev_job_info, prev_log_serial, timeout) = args
+      logging.info("Received job poll request for %s", job_id)
+      return queue.WaitForJobChanges(job_id, fields, prev_job_info,
+                                     prev_log_serial, timeout)
+
     elif method == luxi.REQ_QUERY_JOBS:
       (job_ids, fields) = args
+      if isinstance(job_ids, (tuple, list)) and job_ids:
+        msg = ", ".join(job_ids)
+      else:
+        msg = str(job_ids)
+      logging.info("Received job query request for %s", msg)
       return queue.QueryJobs(job_ids, fields)
 
+    elif method == luxi.REQ_QUERY_INSTANCES:
+      (names, fields, use_locking) = args
+      logging.info("Received instance query request for %s", names)
+      if use_locking:
+        raise errors.OpPrereqError("Sync queries are not allowed")
+      op = opcodes.OpQueryInstances(names=names, output_fields=fields,
+                                    use_locking=use_locking)
+      return self._Query(op)
+
+    elif method == luxi.REQ_QUERY_NODES:
+      (names, fields, use_locking) = args
+      logging.info("Received node query request for %s", names)
+      if use_locking:
+        raise errors.OpPrereqError("Sync queries are not allowed")
+      op = opcodes.OpQueryNodes(names=names, output_fields=fields,
+                                use_locking=use_locking)
+      return self._Query(op)
+
+    elif method == luxi.REQ_QUERY_EXPORTS:
+      nodes, use_locking = args
+      if use_locking:
+        raise errors.OpPrereqError("Sync queries are not allowed")
+      logging.info("Received exports query request")
+      op = opcodes.OpQueryExports(nodes=nodes, use_locking=use_locking)
+      return self._Query(op)
+
+    elif method == luxi.REQ_QUERY_CONFIG_VALUES:
+      fields = args
+      logging.info("Received config values query request for %s", fields)
+      op = opcodes.OpQueryConfigValues(output_fields=fields)
+      return self._Query(op)
+
+    elif method == luxi.REQ_QUERY_CLUSTER_INFO:
+      logging.info("Received cluster info query request")
+      op = opcodes.OpQueryClusterInfo()
+      return self._Query(op)
+
+    elif method == luxi.REQ_QUEUE_SET_DRAIN_FLAG:
+      drain_flag = args
+      logging.info("Received queue drain flag change request to %s",
+                   drain_flag)
+      return queue.SetDrainFlag(drain_flag)
+
     else:
-      raise ValueError("Invalid operation")
+      logging.info("Received invalid request '%s'", method)
+      raise ValueError("Invalid operation '%s'" % method)
+
+  def _DummyLog(self, *args):
+    pass
+
+  def _Query(self, op):
+    """Runs the specified opcode and returns the result.
+
+    """
+    proc = mcpu.Processor(self.server.context)
+    # TODO: Where should log messages go?
+    return proc.ExecOpCode(op, self._DummyLog, None)
 
 
 class GanetiContext(object):
@@ -248,13 +327,17 @@ class GanetiContext(object):
     """
     assert self.__class__._instance is None, "double GanetiContext instance"
 
-    # Create a ConfigWriter...
+    # Create global configuration object
     self.cfg = config.ConfigWriter()
-    # And a GanetiLockingManager...
+
+    # Locking manager
     self.glm = locking.GanetiLockManager(
                 self.cfg.GetNodeList(),
                 self.cfg.GetInstanceList())
 
+    # Job queue
+    self.jobqueue = jqueue.JobQueue(self)
+
     # setting this also locks the class against attribute modifications
     self.__class__._instance = self
 
@@ -265,12 +348,44 @@ class GanetiContext(object):
     assert self.__class__._instance is None, "Attempt to modify Ganeti Context"
     object.__setattr__(self, name, value)
 
+  def AddNode(self, node):
+    """Adds a node to the configuration and lock manager.
+
+    """
+    # Add it to the configuration
+    self.cfg.AddNode(node)
+
+    # If preseeding the job queue fails, the node won't be added
+    self.jobqueue.AddNode(node)
+
+    # Add the new node to the Ganeti Lock Manager
+    self.glm.add(locking.LEVEL_NODE, node.name)
+
+  def ReaddNode(self, node):
+    """Updates a node that's already in the configuration.
+
+    """
+    # Synchronize the queue again
+    self.jobqueue.AddNode(node)
+
+  def RemoveNode(self, name):
+    """Removes a node from the configuration and lock manager.
+
+    """
+    # Remove node from configuration
+    self.cfg.RemoveNode(name)
+
+    # Notify job queue
+    self.jobqueue.RemoveNode(name)
+
+    # Remove the node from the Ganeti Lock Manager
+    self.glm.remove(locking.LEVEL_NODE, name)
+
 
 
 def ParseOptions():
   """Parse the command line options.
 
-  Returns:
-    (options, args) as from OptionParser.parse_args()
+  @return: (options, args) as from OptionParser.parse_args()
 
   """
   parser = OptionParser(description="Ganeti master daemon",
@@ -284,10 +399,71 @@
   parser.add_option("-d", "--debug", dest="debug",
                     help="Enable some debug messages",
                     default=False, action="store_true")
+  parser.add_option("--no-voting", dest="no_voting",
+                    help="Do not check that the nodes agree on this node"
+                    " being the master and start the daemon unconditionally",
+                    default=False, action="store_true")
   options, args = parser.parse_args()
   return options, args
 
 
+def CheckAgreement():
+  """Check the agreement on who is the master.
+
+  The function uses a very simple algorithm: we must get more positive
+  than negative answers. Since in most cases we are the master, we'll
+  use our own config file to get the node list. In the future we could
+  collect the current node list from our (possibly obsolete) known
+  nodes.
+
+  To account for a cold start of all nodes, we retry for up to a
+  minute until the top-voted answer comes from a real node. If the
+  nodes are further out of sync, manual startup of the master should
+  be attempted for now.
+
+  Note that for a cluster with an even number of nodes, we need at
+  least half of the nodes (besides ourselves) to vote for us. This
+  creates a problem on two-node clusters, since in this case we
+  require the other node to be up too to confirm our status.
+
+  """
+  myself = utils.HostInfo().name
+  # temporary instantiation of a config writer, used only to get the node list
+  cfg = config.ConfigWriter()
+  node_list = cfg.GetNodeList()
+  del cfg
+  retries = 6
+  while retries > 0:
+    votes = bootstrap.GatherMasterVotes(node_list)
+    if not votes:
+      # empty node list, this is a one-node cluster
+      return True
+    if votes[0][0] is None:
+      retries -= 1
+      time.sleep(10)
+      continue
+    break
+  if retries == 0:
+    logging.critical("Cluster inconsistent, most of the nodes didn't answer"
+                     " after multiple retries. Aborting startup")
+    return False
+  # here a real node is at the top of the list
+  all_votes = sum(item[1] for item in votes)
+  top_node, top_votes = votes[0]
+  result = False
+  if top_node != myself:
+    logging.critical("It seems we are not the master (top-voted node"
+                     " is %s with %d out of %d votes)", top_node, top_votes,
+                     all_votes)
+  elif top_votes < all_votes - top_votes:
+    logging.critical("It seems we are not the master (%d votes for,"
+                     " %d votes against)", top_votes, all_votes - top_votes)
+  else:
+    result = True
+
+  return result
+
+
 def main():
   """Main function"""
 
@@ -295,33 +471,68 @@
   (options, args) = ParseOptions()
   utils.debug = options.debug
   utils.no_fork = True
 
-  ssconf.CheckMaster(options.debug)
+  if options.fork:
+    utils.CloseFDs()
+
+  rpc.Init()
+  try:
+    ssconf.CheckMaster(options.debug)
+
+    # we believe we are the master, let's ask the other nodes...
+    if options.no_voting:
+      sys.stdout.write("The 'no voting' option has been selected.\n")
+      sys.stdout.write("This is dangerous, please confirm by"
+                       " typing uppercase 'yes': ")
+      sys.stdout.flush()
+      confirmation = sys.stdin.readline().strip()
+      if confirmation != "YES":
+        print "Aborting."
+        return
+    else:
+      if not CheckAgreement():
+        return
+
+    dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE),
+            (constants.SOCKET_DIR, constants.SOCKET_DIR_MODE),
+           ]
+    utils.EnsureDirs(dirs)
 
-  master = IOServer(constants.MASTER_SOCKET, ClientRqHandler, GanetiContext())
+    # This is safe to do as the pid file guarantees against
+    # concurrent execution.
+    utils.RemoveFile(constants.MASTER_SOCKET)
+
+    master = IOServer(constants.MASTER_SOCKET, ClientRqHandler)
+  finally:
+    rpc.Shutdown()
 
   # become a daemon
   if options.fork:
-    utils.Daemonize(logfile=constants.LOG_MASTERDAEMON,
-                    noclose_fds=[master.fileno()])
+    utils.Daemonize(logfile=constants.LOG_MASTERDAEMON)
 
   utils.WritePidFile(constants.MASTERD_PID)
+  try:
+    utils.SetupLogging(constants.LOG_MASTERDAEMON, debug=options.debug,
+                       stderr_logging=not options.fork, multithreaded=True)
 
-  logger.SetupDaemon(constants.LOG_MASTERDAEMON, debug=options.debug,
-                     stderr_logging=not options.fork)
-
-  logging.info("ganeti master daemon startup")
+    logging.info("Ganeti master daemon startup")
 
-  # activate ip
-  master_node = ssconf.SimpleStore().GetMasterNode()
-  if not rpc.call_node_start_master(master_node, False):
-    logging.error("Can't activate master IP address")
+    rpc.Init()
+    try:
+      # activate ip
+      master_node = ssconf.SimpleConfigReader().GetMasterNode()
+      if not rpc.RpcRunner.call_node_start_master(master_node, False):
+        logging.error("Can't activate master IP address")
 
-  master.setup_queue()
-  try:
-    master.serve_forever()
+      master.setup_queue()
+      try:
+        master.serve_forever()
+      finally:
+        master.server_cleanup()
+    finally:
+      rpc.Shutdown()
   finally:
-    master.server_cleanup()
     utils.RemovePidFile(constants.MASTERD_PID)
+    utils.RemoveFile(constants.MASTER_SOCKET)
 
 
 if __name__ == "__main__":
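
A note on the ClientRqHandler changes above: every message on the master socket is a JSON-encoded dictionary, and the reply carries a success flag plus either the result or the serialized error. The standalone sketch below mimics that envelope using the stdlib json module in place of the patch's ganeti.serializer wrapper, with literal "success"/"result" keys standing in for the luxi.KEY_* constants; handle_luxi_message is a helper invented here for illustration, not part of the daemon.

    import json

    def handle_luxi_message(msg, handlers):
      """Dispatch one request and build the reply envelope.

      msg is a JSON document with "method" and "args" entries, as sent
      by the clients; handlers maps method names to callables.
      """
      request = json.loads(msg)
      try:
        result = handlers[request["method"]](request["args"])
        success = True
      except Exception, err:
        # Like the daemon, report failures as (exception class name,
        # args) so the caller can reconstruct a matching exception.
        success = False
        result = (err.__class__.__name__, err.args)
      return json.dumps({"success": success, "result": result})

    # Example:
    #   handle_luxi_message('{"method": "ping", "args": null}',
    #                       {"ping": lambda args: "pong"})
    # returns a reply like '{"success": true, "result": "pong"}'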
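The voting rule that CheckAgreement implements can be condensed to a few lines. Below is a minimal sketch of just the decision step, assuming the vote list shape used above: (node_name, vote_count) pairs sorted by descending count, with a None name marking the case where most nodes could not be reached. has_majority is a hypothetical helper, not daemon code.

    def has_majority(votes, myself):
      """Decide mastership from a sorted (node_name, vote_count) list."""
      if not votes:
        # No votes at all: single-node cluster, we are trivially the master.
        return True
      all_votes = sum(count for (_, count) in votes)
      top_node, top_votes = votes[0]
      if top_node != myself:
        # Another node is the top-voted master candidate.
        return False
      # We may start unless strictly more votes went to other answers.
      return top_votes >= all_votes - top_votes

    # In a 4-node cluster where 3 nodes (ourselves included) name node1:
    assert has_majority([("node1", 3), ("node2", 1)], "node1")
    assert not has_majority([("node1", 3), ("node2", 1)], "node2")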