X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/fad50141719e1f8e0525265e62ceb09e0ce5efb4..8ee4dc80c0669b7c64c69d8a17aa7f52781910ce:/lib/luxi.py

diff --git a/lib/luxi.py b/lib/luxi.py
index f99a435..7ade2b5 100644
--- a/lib/luxi.py
+++ b/lib/luxi.py
@@ -31,20 +31,30 @@ The module is also be used by the master daemon.
 
 import socket
 import collections
-import simplejson
 import time
 import errno
 
-from ganeti import opcodes
 from ganeti import serializer
 from ganeti import constants
-
-
-KEY_REQUEST = 'request'
-KEY_DATA = 'data'
-REQ_SUBMIT = 'submit'
-REQ_ABORT = 'abort'
-REQ_QUERY = 'query'
+from ganeti import errors
+
+
+KEY_METHOD = 'method'
+KEY_ARGS = 'args'
+KEY_SUCCESS = "success"
+KEY_RESULT = "result"
+
+REQ_SUBMIT_JOB = "SubmitJob"
+REQ_WAIT_FOR_JOB_CHANGE = "WaitForJobChange"
+REQ_CANCEL_JOB = "CancelJob"
+REQ_ARCHIVE_JOB = "ArchiveJob"
+REQ_AUTOARCHIVE_JOBS = "AutoArchiveJobs"
+REQ_QUERY_JOBS = "QueryJobs"
+REQ_QUERY_INSTANCES = "QueryInstances"
+REQ_QUERY_NODES = "QueryNodes"
+REQ_QUERY_EXPORTS = "QueryExports"
+REQ_QUERY_CONFIG_VALUES = "QueryConfigValues"
+REQ_QUEUE_SET_DRAIN_FLAG = "SetDrainFlag"
 
 DEF_CTMO = 10
 DEF_RWTO = 60
@@ -82,6 +92,7 @@ class RequestError(ProtocolError):
 
   """
 
+
 class NoMasterError(ProtocolError):
   """The master cannot be reached
 
@@ -91,24 +102,6 @@ class NoMasterError(ProtocolError):
   """
 
 
-def SerializeJob(job):
-  """Convert a job description to a string format.
-
-  """
-  return simplejson.dumps(job.__getstate__())
-
-
-def UnserializeJob(data):
-  """Load a job from a string format"""
-  try:
-    new_data = simplejson.loads(data)
-  except Exception, err:
-    raise DecodingError("Error while unserializing: %s" % str(err))
-  job = opcodes.Job()
-  job.__setstate__(new_data)
-  return job
-
-
 class Transport:
   """Low-level transport class.
 
@@ -165,7 +158,7 @@ class Transport:
     except socket.timeout, err:
       raise TimeoutError("Connect timed out: %s" % str(err))
     except socket.error, err:
-      if err.args[0] == errno.ENOENT:
+      if err.args[0] in (errno.ENOENT, errno.ECONNREFUSED):
        raise NoMasterError((address,))
       raise
     self.socket.settimeout(self._rwtimeout)
@@ -261,39 +254,84 @@ class Client(object):
       address = constants.MASTER_SOCKET
     self.transport = transport(address, timeouts=timeouts)
 
-  def SendRequest(self, request, data):
+  def CallMethod(self, method, args):
     """Send a generic request and return the response.
 
     """
-    msg = {KEY_REQUEST: request, KEY_DATA: data}
-    result = self.transport.Call(serializer.DumpJson(msg, indent=False))
+    # Build request
+    request = {
+      KEY_METHOD: method,
+      KEY_ARGS: args,
+      }
+
+    # Send request and wait for response
+    result = self.transport.Call(serializer.DumpJson(request, indent=False))
     try:
       data = serializer.LoadJson(result)
     except Exception, err:
       raise ProtocolError("Error while deserializing response: %s" % str(err))
+
+    # Validate response
     if (not isinstance(data, dict) or
-        'success' not in data or
-        'result' not in data):
+        KEY_SUCCESS not in data or
+        KEY_RESULT not in data):
       raise DecodingError("Invalid response from server: %s" % str(data))
-    return data
-
-  def SubmitJob(self, job):
-    """Submit a job"""
-    result = self.SendRequest(REQ_SUBMIT, SerializeJob(job))
-    if not result['success']:
-      raise RequestError(result['result'])
-    return result['result']
-
-  def Query(self, data):
-    """Make a query"""
-    result = self.SendRequest(REQ_QUERY, data)
-    if not result['success']:
-      raise RequestError(result[result])
-    result = result['result']
-    if data["object"] == "jobs":
-      # custom job processing of query values
-      for row in result:
-        for idx, field in enumerate(data["fields"]):
-          if field == "op_list":
-            row[idx] = [opcodes.OpCode.LoadOpCode(i) for i in row[idx]]
+
+    result = data[KEY_RESULT]
+
+    if not data[KEY_SUCCESS]:
+      # TODO: decide on a standard exception
+      if (isinstance(result, (tuple, list)) and len(result) == 2 and
+          isinstance(result[1], (tuple, list))):
+        # custom ganeti errors
+        err_class = errors.GetErrorClass(result[0])
+        if err_class is not None:
+          raise err_class, tuple(result[1])
+
+      raise RequestError(result)
+
     return result
+
+  def SetQueueDrainFlag(self, drain_flag):
+    return self.CallMethod(REQ_QUEUE_SET_DRAIN_FLAG, drain_flag)
+
+  def SubmitJob(self, ops):
+    ops_state = map(lambda op: op.__getstate__(), ops)
+    return self.CallMethod(REQ_SUBMIT_JOB, ops_state)
+
+  def CancelJob(self, job_id):
+    return self.CallMethod(REQ_CANCEL_JOB, job_id)
+
+  def ArchiveJob(self, job_id):
+    return self.CallMethod(REQ_ARCHIVE_JOB, job_id)
+
+  def AutoArchiveJobs(self, age):
+    return self.CallMethod(REQ_AUTOARCHIVE_JOBS, age)
+
+  def WaitForJobChange(self, job_id, fields, prev_job_info, prev_log_serial):
+    timeout = (DEF_RWTO - 1) / 2
+    while True:
+      result = self.CallMethod(REQ_WAIT_FOR_JOB_CHANGE,
+                               (job_id, fields, prev_job_info,
+                                prev_log_serial, timeout))
+      if result != constants.JOB_NOTCHANGED:
+        break
+    return result
+
+  def QueryJobs(self, job_ids, fields):
+    return self.CallMethod(REQ_QUERY_JOBS, (job_ids, fields))
+
+  def QueryInstances(self, names, fields):
+    return self.CallMethod(REQ_QUERY_INSTANCES, (names, fields))
+
+  def QueryNodes(self, names, fields):
+    return self.CallMethod(REQ_QUERY_NODES, (names, fields))
+
+  def QueryExports(self, nodes):
+    return self.CallMethod(REQ_QUERY_EXPORTS, nodes)
+
+  def QueryConfigValues(self, fields):
+    return self.CallMethod(REQ_QUERY_CONFIG_VALUES, fields)
+
+
+# TODO: class Server(object)