X-Git-Url: https://code.grnet.gr/git/ganeti-local/blobdiff_plain/5e26633b2fdcfabfa25222e28ec61bcf8083d0b9..7fa310f6d84e06934a7d2bc55c9a1e2b84ce613a:/lib/rapi/rlib2.py

diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py
index d22afac..66b0ece 100644
--- a/lib/rapi/rlib2.py
+++ b/lib/rapi/rlib2.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -46,6 +46,7 @@ from ganeti import http
 from ganeti import constants
 from ganeti import cli
 from ganeti import rapi
+from ganeti import ht
 
 from ganeti.rapi import baserlib
 
@@ -57,7 +58,8 @@ I_FIELDS = ["name", "admin_state", "os",
             "network_port",
             "disk.sizes", "disk_usage",
             "beparams", "hvparams",
-            "oper_state", "oper_ram", "status",
+            "oper_state", "oper_ram", "oper_vcpus", "status",
+            "custom_hvparams", "custom_beparams", "custom_nicparams",
             ] + _COMMON_FIELDS
 
 N_FIELDS = ["name", "offline", "master_candidate", "drained",
@@ -67,8 +69,16 @@ N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "ctotal", "cnodes", "csockets",
             "pip", "sip", "role",
             "pinst_list", "sinst_list",
+            "master_capable", "vm_capable",
+            "group.uuid",
             ] + _COMMON_FIELDS
 
+G_FIELDS = ["name", "uuid",
+            "alloc_policy",
+            "node_cnt", "node_list",
+            "ctime", "mtime", "serial_no",
+            ] # "tags" is missing to be able to use _COMMON_FIELDS here.
+
 _NR_DRAINED = "drained"
 _NR_MASTER_CANDIATE = "master-candidate"
 _NR_MASTER = "master"
@@ -76,13 +86,24 @@ _NR_OFFLINE = "offline"
 _NR_REGULAR = "regular"
 
 _NR_MAP = {
-  "M": _NR_MASTER,
-  "C": _NR_MASTER_CANDIATE,
-  "D": _NR_DRAINED,
-  "O": _NR_OFFLINE,
-  "R": _NR_REGULAR,
+  constants.NR_MASTER: _NR_MASTER,
+  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
+  constants.NR_DRAINED: _NR_DRAINED,
+  constants.NR_OFFLINE: _NR_OFFLINE,
+  constants.NR_REGULAR: _NR_REGULAR,
   }
 
+assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
+
+# Request data version field
+_REQ_DATA_VERSION = "__version__"
+
+# Feature string for instance creation request data version 1
+_INST_CREATE_REQV1 = "instance-create-reqv1"
+
+# Feature string for instance reinstall request version 1
+_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+
 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
 _WFJC_TIMEOUT = 10
 
@@ -103,7 +124,7 @@ class R_version(baserlib.R_Generic):
 
 
 class R_2_info(baserlib.R_Generic):
-  """Cluster info.
+  """/2/info resource.
 
   """
   @staticmethod
@@ -115,6 +136,18 @@ class R_2_info(baserlib.R_Generic):
     return client.QueryClusterInfo()
 
 
+class R_2_features(baserlib.R_Generic):
+  """/2/features resource.
+
+  """
+  @staticmethod
+  def GET():
+    """Returns list of optional RAPI features implemented.
+
+    """
+    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
+
+
 class R_2_os(baserlib.R_Generic):
   """/2/os resource.
 
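
The hunk above adds a /2/features resource together with the _INST_CREATE_REQV1 and _INST_REINSTALL_REQV1 feature strings, so clients can discover optional RAPI capabilities before choosing a request format. A minimal client-side sketch, not part of the diff; the host name, port and the use of the "requests" library are illustrative assumptions:

# Client-side sketch: probe /2/features before picking a request format.
import requests

RAPI = "https://cluster.example.com:5080"  # hypothetical RAPI endpoint

features = requests.get("%s/2/features" % RAPI, verify=False).json()

if "instance-create-reqv1" in features:
  # The server understands the version 1 instance creation body
  print("instance-create-reqv1 supported")
else:
  print("fall back to the legacy request format")
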
@@ -129,8 +162,7 @@ class R_2_os(baserlib.R_Generic):
 
     """
     cl = baserlib.GetClient()
-    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
-                              names=[])
+    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
     job_id = baserlib.SubmitJob([op], cl)
     # we use custom feedback function, instead of print we log the status
     result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
@@ -140,9 +172,8 @@ class R_2_os(baserlib.R_Generic):
       raise http.HttpBadGateway(message="Can't get OS list")
 
     os_names = []
-    for (name, valid, variants) in diagnose_data:
-      if valid:
-        os_names.extend(cli.CalculateOSNames(name, variants))
+    for (name, variants) in diagnose_data:
+      os_names.extend(cli.CalculateOSNames(name, variants))
 
     return os_names
 
@@ -156,7 +187,23 @@ class R_2_redist_config(baserlib.R_Generic):
     """Redistribute configuration to all nodes.
 
     """
-    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
+    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
+
+
+class R_2_cluster_modify(baserlib.R_Generic):
+  """/2/modify resource.
+
+  """
+  def PUT(self):
+    """Modifies cluster parameters.
+
+    @return: a job id
+
+    """
+    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
+                             None)
+
+    return baserlib.SubmitJob([op])
 
 
 class R_2_jobs(baserlib.R_Generic):
@@ -284,7 +331,7 @@ class R_2_nodes(baserlib.R_Generic):
 
 
 class R_2_nodes_name(baserlib.R_Generic):
-  """/2/nodes/[node_name] resources.
+  """/2/nodes/[node_name] resource.
 
   """
   def GET(self):
@@ -293,8 +340,10 @@ class R_2_nodes_name(baserlib.R_Generic):
 
     """
     node_name = self.items[0]
     client = baserlib.GetClient()
-    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
-                               use_locking=self.useLocking())
+
+    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
+                                            names=[node_name], fields=N_FIELDS,
+                                            use_locking=self.useLocking())
 
     return baserlib.MapFields(N_FIELDS, result[0])
 
@@ -348,7 +397,7 @@ class R_2_nodes_name_role(baserlib.R_Generic):
     else:
       raise http.HttpBadRequest("Can't set '%s' role" % role)
 
-    op = opcodes.OpSetNodeParams(node_name=node_name,
+    op = opcodes.OpNodeSetParams(node_name=node_name,
                                  master_candidate=candidate,
                                  offline=offline,
                                  drained=drained,
@@ -368,12 +417,32 @@ class R_2_nodes_name_evacuate(baserlib.R_Generic):
     node_name = self.items[0]
     remote_node = self._checkStringVariable("remote_node", default=None)
     iallocator = self._checkStringVariable("iallocator", default=None)
+    early_r = bool(self._checkIntVariable("early_release", default=0))
+    dry_run = bool(self.dryRun())
 
-    op = opcodes.OpEvacuateNode(node_name=node_name,
-                                remote_node=remote_node,
-                                iallocator=iallocator)
+    cl = baserlib.GetClient()
 
-    return baserlib.SubmitJob([op])
+    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
+                                    iallocator=iallocator,
+                                    remote_node=remote_node)
+
+    job_id = baserlib.SubmitJob([op], cl)
+    # we use custom feedback function, instead of print we log the status
+    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+
+    jobs = []
+    for iname, node in result[0]:
+      if dry_run:
+        jid = None
+      else:
+        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
+                                            remote_node=node, disks=[],
+                                            mode=constants.REPLACE_DISK_CHG,
+                                            early_release=early_r)
+        jid = baserlib.SubmitJob([op])
+      jobs.append((jid, iname, node))
+
+    return jobs
 
 
 class R_2_nodes_name_migrate(baserlib.R_Generic):
@@ -385,18 +454,28 @@ class R_2_nodes_name_migrate(baserlib.R_Generic):
 
     """
     node_name = self.items[0]
-    live = bool(self._checkIntVariable("live", default=1))
 
-    op = opcodes.OpMigrateNode(node_name=node_name, live=live)
+    if "live" in self.queryargs and "mode" in self.queryargs:
+      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
+                                " be passed")
+    elif "live" in self.queryargs:
+      if self._checkIntVariable("live", default=1):
+        mode = constants.HT_MIGRATION_LIVE
+      else:
+        mode = constants.HT_MIGRATION_NONLIVE
+    else:
+      mode = self._checkStringVariable("mode", default=None)
+
+    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)
 
     return baserlib.SubmitJob([op])
 
 
 class R_2_nodes_name_storage(baserlib.R_Generic):
-  """/2/nodes/[node_name]/storage ressource.
+  """/2/nodes/[node_name]/storage resource.
 
   """
-  # LUQueryNodeStorage acquires locks, hence restricting access to GET
+  # LUNodeQueryStorage acquires locks, hence restricting access to GET
   GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
 
   def GET(self):
@@ -412,14 +491,14 @@ class R_2_nodes_name_storage(baserlib.R_Generic):
       raise http.HttpBadRequest("Missing the required 'output_fields'"
                                 " parameter")
 
-    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
+    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                     storage_type=storage_type,
                                     output_fields=output_fields.split(","))
     return baserlib.SubmitJob([op])
 
 
 class R_2_nodes_name_storage_modify(baserlib.R_Generic):
-  """/2/nodes/[node_name]/storage/modify ressource.
+  """/2/nodes/[node_name]/storage/modify resource.
 
   """
   def PUT(self):
@@ -441,7 +520,7 @@ class R_2_nodes_name_storage_modify(baserlib.R_Generic):
       changes[constants.SF_ALLOCATABLE] = \
         bool(self._checkIntVariable("allocatable", default=1))
 
-    op = opcodes.OpModifyNodeStorage(node_name=node_name,
+    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                      storage_type=storage_type,
                                      name=name,
                                      changes=changes)
@@ -449,7 +528,7 @@
 
 
 class R_2_nodes_name_storage_repair(baserlib.R_Generic):
-  """/2/nodes/[node_name]/storage/repair ressource.
+  """/2/nodes/[node_name]/storage/repair resource.
 
   """
   def PUT(self):
@@ -471,6 +550,187 @@ class R_2_nodes_name_storage_repair(baserlib.R_Generic):
     return baserlib.SubmitJob([op])
 
 
+def _ParseCreateGroupRequest(data, dry_run):
+  """Parses a request for creating a node group.
+
+  @rtype: L{opcodes.OpGroupAdd}
+  @return: Group creation opcode
+
+  """
+  override = {
+    "dry_run": dry_run,
+    }
+
+  rename = {
+    "name": "group_name",
+    }
+
+  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
+                             rename=rename)
+
+
+class R_2_groups(baserlib.R_Generic):
+  """/2/groups resource.
+
+  """
+  def GET(self):
+    """Returns a list of all node groups.
+
+    """
+    client = baserlib.GetClient()
+
+    if self.useBulk():
+      bulkdata = client.QueryGroups([], G_FIELDS, False)
+      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
+    else:
+      data = client.QueryGroups([], ["name"], False)
+      groupnames = [row[0] for row in data]
+      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
+                                   uri_fields=("name", "uri"))
+
+  def POST(self):
+    """Create a node group.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
+    return baserlib.SubmitJob([op])
+
+
+class R_2_groups_name(baserlib.R_Generic):
+  """/2/groups/[group_name] resource.
+
+  """
+  def GET(self):
+    """Send information about a node group.
+ + """ + group_name = self.items[0] + client = baserlib.GetClient() + + result = baserlib.HandleItemQueryErrors(client.QueryGroups, + names=[group_name], fields=G_FIELDS, + use_locking=self.useLocking()) + + return baserlib.MapFields(G_FIELDS, result[0]) + + def DELETE(self): + """Delete a node group. + + """ + op = opcodes.OpGroupRemove(group_name=self.items[0], + dry_run=bool(self.dryRun())) + + return baserlib.SubmitJob([op]) + + +def _ParseModifyGroupRequest(name, data): + """Parses a request for modifying a node group. + + @rtype: L{opcodes.OpGroupSetParams} + @return: Group modify opcode + + """ + return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, { + "group_name": name, + }) + + + +class R_2_groups_name_modify(baserlib.R_Generic): + """/2/groups/[group_name]/modify resource. + + """ + def PUT(self): + """Changes some parameters of node group. + + @return: a job id + + """ + baserlib.CheckType(self.request_body, dict, "Body contents") + + op = _ParseModifyGroupRequest(self.items[0], self.request_body) + + return baserlib.SubmitJob([op]) + + +def _ParseRenameGroupRequest(name, data, dry_run): + """Parses a request for renaming a node group. + + @type name: string + @param name: name of the node group to rename + @type data: dict + @param data: the body received by the rename request + @type dry_run: bool + @param dry_run: whether to perform a dry run + + @rtype: L{opcodes.OpGroupRename} + @return: Node group rename opcode + + """ + return baserlib.FillOpcode(opcodes.OpGroupRename, data, { + "group_name": name, + "dry_run": dry_run, + }) + + +class R_2_groups_name_rename(baserlib.R_Generic): + """/2/groups/[group_name]/rename resource. + + """ + def PUT(self): + """Changes the name of a node group. + + @return: a job id + + """ + baserlib.CheckType(self.request_body, dict, "Body contents") + op = _ParseRenameGroupRequest(self.items[0], self.request_body, + self.dryRun()) + return baserlib.SubmitJob([op]) + + +class R_2_groups_name_assign_nodes(baserlib.R_Generic): + """/2/groups/[group_name]/assign-nodes resource. + + """ + def PUT(self): + """Assigns nodes to a group. + + @return: a job id + + """ + op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, { + "group_name": self.items[0], + "dry_run": self.dryRun(), + "force": self.useForce(), + }) + + return baserlib.SubmitJob([op]) + + +def _ParseInstanceCreateRequestVersion1(data, dry_run): + """Parses an instance creation request version 1. + + @rtype: L{opcodes.OpInstanceCreate} + @return: Instance creation opcode + + """ + override = { + "dry_run": dry_run, + } + + rename = { + "os": "os_type", + "name": "instance_name", + } + + return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override, + rename=rename) + + class R_2_instances(baserlib.R_Generic): """/2/instances resource. 
@@ -500,60 +760,26 @@ class R_2_instances(baserlib.R_Generic):
     if not isinstance(self.request_body, dict):
       raise http.HttpBadRequest("Invalid body contents, not a dictionary")
 
-    beparams = baserlib.MakeParamsDict(self.request_body,
-                                       constants.BES_PARAMETERS)
-    hvparams = baserlib.MakeParamsDict(self.request_body,
-                                       constants.HVS_PARAMETERS)
-    fn = self.getBodyParameter
-
-    # disk processing
-    disk_data = fn('disks')
-    if not isinstance(disk_data, list):
-      raise http.HttpBadRequest("The 'disks' parameter should be a list")
-    disks = []
-    for idx, d in enumerate(disk_data):
-      if not isinstance(d, int):
-        raise http.HttpBadRequest("Disk %d specification wrong: should"
-                                  " be an integer" % idx)
-      disks.append({"size": d})
-    # nic processing (one nic only)
-    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
-    if fn("ip", None) is not None:
-      nics[0]["ip"] = fn("ip")
-    if fn("mode", None) is not None:
-      nics[0]["mode"] = fn("mode")
-    if fn("link", None) is not None:
-      nics[0]["link"] = fn("link")
-    if fn("bridge", None) is not None:
-      nics[0]["bridge"] = fn("bridge")
-
-    op = opcodes.OpCreateInstance(
-      mode=constants.INSTANCE_CREATE,
-      instance_name=fn('name'),
-      disks=disks,
-      disk_template=fn('disk_template'),
-      os_type=fn('os'),
-      pnode=fn('pnode', None),
-      snode=fn('snode', None),
-      iallocator=fn('iallocator', None),
-      nics=nics,
-      start=fn('start', True),
-      ip_check=fn('ip_check', True),
-      name_check=fn('name_check', True),
-      wait_for_sync=True,
-      hypervisor=fn('hypervisor', None),
-      hvparams=hvparams,
-      beparams=beparams,
-      file_storage_dir=fn('file_storage_dir', None),
-      file_driver=fn('file_driver', 'loop'),
-      dry_run=bool(self.dryRun()),
-      )
+    # Default to request data version 0
+    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
+
+    if data_version == 0:
+      raise http.HttpBadRequest("Instance creation request version 0 is no"
+                                " longer supported")
+    elif data_version == 1:
+      data = self.request_body.copy()
+      # Remove "__version__"
+      data.pop(_REQ_DATA_VERSION, None)
+      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
+    else:
+      raise http.HttpBadRequest("Unsupported request data version %s" %
+                                data_version)
 
     return baserlib.SubmitJob([op])
 
 
 class R_2_instances_name(baserlib.R_Generic):
-  """/2/instances/[instance_name] resources.
+  """/2/instances/[instance_name] resource.
 
   """
   def GET(self):
@@ -562,8 +788,11 @@ class R_2_instances_name(baserlib.R_Generic):
 
     """
     client = baserlib.GetClient()
     instance_name = self.items[0]
-    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
-                                   use_locking=self.useLocking())
+
+    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
+                                            names=[instance_name],
+                                            fields=I_FIELDS,
+                                            use_locking=self.useLocking())
 
     return baserlib.MapFields(I_FIELDS, result[0])
 
@@ -571,7 +800,7 @@ class R_2_instances_name(baserlib.R_Generic):
     """Delete an instance.
 
""" - op = opcodes.OpRemoveInstance(instance_name=self.items[0], + op = opcodes.OpInstanceRemove(instance_name=self.items[0], ignore_failures=False, dry_run=bool(self.dryRun())) return baserlib.SubmitJob([op]) @@ -588,7 +817,7 @@ class R_2_instances_name_info(baserlib.R_Generic): instance_name = self.items[0] static = bool(self._checkIntVariable("static", default=0)) - op = opcodes.OpQueryInstanceData(instances=[instance_name], + op = opcodes.OpInstanceQueryData(instances=[instance_name], static=static) return baserlib.SubmitJob([op]) @@ -610,7 +839,7 @@ class R_2_instances_name_reboot(baserlib.R_Generic): reboot_type = self.queryargs.get('type', [constants.INSTANCE_REBOOT_HARD])[0] ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries')) - op = opcodes.OpRebootInstance(instance_name=instance_name, + op = opcodes.OpInstanceReboot(instance_name=instance_name, reboot_type=reboot_type, ignore_secondaries=ignore_secondaries, dry_run=bool(self.dryRun())) @@ -633,13 +862,26 @@ class R_2_instances_name_startup(baserlib.R_Generic): """ instance_name = self.items[0] force_startup = bool(self._checkIntVariable('force')) - op = opcodes.OpStartupInstance(instance_name=instance_name, + op = opcodes.OpInstanceStartup(instance_name=instance_name, force=force_startup, dry_run=bool(self.dryRun())) return baserlib.SubmitJob([op]) +def _ParseShutdownInstanceRequest(name, data, dry_run): + """Parses a request for an instance shutdown. + + @rtype: L{opcodes.OpInstanceShutdown} + @return: Instance shutdown opcode + + """ + return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, { + "instance_name": name, + "dry_run": dry_run, + }) + + class R_2_instances_name_shutdown(baserlib.R_Generic): """/2/instances/[instance_name]/shutdown resource. @@ -649,14 +891,41 @@ class R_2_instances_name_shutdown(baserlib.R_Generic): def PUT(self): """Shutdown an instance. + @return: a job id + """ - instance_name = self.items[0] - op = opcodes.OpShutdownInstance(instance_name=instance_name, - dry_run=bool(self.dryRun())) + baserlib.CheckType(self.request_body, dict, "Body contents") + + op = _ParseShutdownInstanceRequest(self.items[0], self.request_body, + bool(self.dryRun())) return baserlib.SubmitJob([op]) +def _ParseInstanceReinstallRequest(name, data): + """Parses a request for reinstalling an instance. + + """ + if not isinstance(data, dict): + raise http.HttpBadRequest("Invalid body contents, not a dictionary") + + ostype = baserlib.CheckParameter(data, "os", default=None) + start = baserlib.CheckParameter(data, "start", exptype=bool, + default=True) + osparams = baserlib.CheckParameter(data, "osparams", default=None) + + ops = [ + opcodes.OpInstanceShutdown(instance_name=name), + opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype, + osparams=osparams), + ] + + if start: + ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False)) + + return ops + + class R_2_instances_name_reinstall(baserlib.R_Generic): """/2/instances/[instance_name]/reinstall resource. @@ -671,19 +940,52 @@ class R_2_instances_name_reinstall(baserlib.R_Generic): automatically. 
""" - instance_name = self.items[0] - ostype = self._checkStringVariable('os') - nostartup = self._checkIntVariable('nostartup') - ops = [ - opcodes.OpShutdownInstance(instance_name=instance_name), - opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype), - ] - if not nostartup: - ops.append(opcodes.OpStartupInstance(instance_name=instance_name, - force=False)) + if self.request_body: + if self.queryargs: + raise http.HttpBadRequest("Can't combine query and body parameters") + + body = self.request_body + elif self.queryargs: + # Legacy interface, do not modify/extend + body = { + "os": self._checkStringVariable("os"), + "start": not self._checkIntVariable("nostartup"), + } + else: + body = {} + + ops = _ParseInstanceReinstallRequest(self.items[0], body) + return baserlib.SubmitJob(ops) +def _ParseInstanceReplaceDisksRequest(name, data): + """Parses a request for an instance export. + + @rtype: L{opcodes.OpInstanceReplaceDisks} + @return: Instance export opcode + + """ + override = { + "instance_name": name, + } + + # Parse disks + try: + raw_disks = data["disks"] + except KeyError: + pass + else: + if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102 + # Backwards compatibility for strings of the format "1, 2, 3" + try: + data["disks"] = [int(part) for part in raw_disks.split(",")] + except (TypeError, ValueError), err: + raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err)) + + return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override) + + class R_2_instances_name_replace_disks(baserlib.R_Generic): """/2/instances/[instance_name]/replace-disks resource. @@ -692,25 +994,7 @@ class R_2_instances_name_replace_disks(baserlib.R_Generic): """Replaces disks on an instance. """ - instance_name = self.items[0] - remote_node = self._checkStringVariable("remote_node", default=None) - mode = self._checkStringVariable("mode", default=None) - raw_disks = self._checkStringVariable("disks", default=None) - iallocator = self._checkStringVariable("iallocator", default=None) - - if raw_disks: - try: - disks = [int(part) for part in raw_disks.split(",")] - except ValueError, err: - raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err)) - else: - disks = [] - - op = opcodes.OpReplaceDisks(instance_name=instance_name, - remote_node=remote_node, - mode=mode, - disks=disks, - iallocator=iallocator) + op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body) return baserlib.SubmitJob([op]) @@ -728,7 +1012,7 @@ class R_2_instances_name_activate_disks(baserlib.R_Generic): instance_name = self.items[0] ignore_size = bool(self._checkIntVariable('ignore_size')) - op = opcodes.OpActivateInstanceDisks(instance_name=instance_name, + op = opcodes.OpInstanceActivateDisks(instance_name=instance_name, ignore_size=ignore_size) return baserlib.SubmitJob([op]) @@ -744,11 +1028,270 @@ class R_2_instances_name_deactivate_disks(baserlib.R_Generic): """ instance_name = self.items[0] - op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name) + op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name) + + return baserlib.SubmitJob([op]) + + +class R_2_instances_name_prepare_export(baserlib.R_Generic): + """/2/instances/[instance_name]/prepare-export resource. + + """ + def PUT(self): + """Prepares an export for an instance. 
+
+    @return: a job id
+
+    """
+    instance_name = self.items[0]
+    mode = self._checkStringVariable("mode")
+
+    op = opcodes.OpBackupPrepare(instance_name=instance_name,
+                                 mode=mode)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseExportInstanceRequest(name, data):
+  """Parses a request for an instance export.
+
+  @rtype: L{opcodes.OpBackupExport}
+  @return: Instance export opcode
+
+  """
+  # Rename "destination" to "target_node"
+  try:
+    data["target_node"] = data.pop("destination")
+  except KeyError:
+    pass
+
+  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_export(baserlib.R_Generic):
+  """/2/instances/[instance_name]/export resource.
+
+  """
+  def PUT(self):
+    """Exports an instance.
+
+    @return: a job id
+
+    """
+    if not isinstance(self.request_body, dict):
+      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+
+    op = _ParseExportInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseMigrateInstanceRequest(name, data):
+  """Parses a request for an instance migration.
+
+  @rtype: L{opcodes.OpInstanceMigrate}
+  @return: Instance migration opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_migrate(baserlib.R_Generic):
+  """/2/instances/[instance_name]/migrate resource.
+
+  """
+  def PUT(self):
+    """Migrates an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseRenameInstanceRequest(name, data):
+  """Parses a request for renaming an instance.
+
+  @rtype: L{opcodes.OpInstanceRename}
+  @return: Instance rename opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_rename(baserlib.R_Generic):
+  """/2/instances/[instance_name]/rename resource.
+
+  """
+  def PUT(self):
+    """Changes the name of an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
 
     return baserlib.SubmitJob([op])
 
 
+def _ParseModifyInstanceRequest(name, data):
+  """Parses a request for modifying an instance.
+
+  @rtype: L{opcodes.OpInstanceSetParams}
+  @return: Instance modify opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_modify(baserlib.R_Generic):
+  """/2/instances/[instance_name]/modify resource.
+
+  """
+  def PUT(self):
+    """Changes some parameters of an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_disk_grow(baserlib.R_Generic):
+  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
+
+  """
+  def POST(self):
+    """Increases the size of an instance disk.
+
+    @return: a job id
+
+    """
+    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
+      "instance_name": self.items[0],
+      "disk": int(self.items[1]),
+      })
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_console(baserlib.R_Generic):
+  """/2/instances/[instance_name]/console resource.
+ + """ + GET_ACCESS = [rapi.RAPI_ACCESS_WRITE] + + def GET(self): + """Request information for connecting to instance's console. + + @return: Serialized instance console description, see + L{objects.InstanceConsole} + + """ + client = baserlib.GetClient() + + ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False) + + if console is None: + raise http.HttpServiceUnavailable("Instance console unavailable") + + assert isinstance(console, dict) + return console + + +def _GetQueryFields(args): + """ + + """ + try: + fields = args["fields"] + except KeyError: + raise http.HttpBadRequest("Missing 'fields' query argument") + + return _SplitQueryFields(fields[0]) + + +def _SplitQueryFields(fields): + """ + + """ + return [i.strip() for i in fields.split(",")] + + +class R_2_query(baserlib.R_Generic): + """/2/query/[resource] resource. + + """ + # Results might contain sensitive information + GET_ACCESS = [rapi.RAPI_ACCESS_WRITE] + + def _Query(self, fields, filter_): + return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict() + + def GET(self): + """Returns resource information. + + @return: Query result, see L{objects.QueryResponse} + + """ + return self._Query(_GetQueryFields(self.queryargs), None) + + def PUT(self): + """Submits job querying for resources. + + @return: Query result, see L{objects.QueryResponse} + + """ + body = self.request_body + + baserlib.CheckType(body, dict, "Body contents") + + try: + fields = body["fields"] + except KeyError: + fields = _GetQueryFields(self.queryargs) + + return self._Query(fields, self.request_body.get("filter", None)) + + +class R_2_query_fields(baserlib.R_Generic): + """/2/query/[resource]/fields resource. + + """ + def GET(self): + """Retrieves list of available fields for a resource. + + @return: List of serialized L{objects.QueryFieldDefinition} + + """ + try: + raw_fields = self.queryargs["fields"] + except KeyError: + fields = None + else: + fields = _SplitQueryFields(raw_fields[0]) + + return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict() + + class _R_Tags(baserlib.R_Generic): """ Quasiclass for tagging resources @@ -766,10 +1309,10 @@ class _R_Tags(baserlib.R_Generic): """ baserlib.R_Generic.__init__(self, items, queryargs, req) - if self.TAG_LEVEL != constants.TAG_CLUSTER: - self.name = items[0] + if self.TAG_LEVEL == constants.TAG_CLUSTER: + self.name = None else: - self.name = "" + self.name = items[0] def GET(self): """Returns a list of tags. @@ -833,7 +1376,7 @@ class R_2_nodes_name_tags(_R_Tags): class R_2_tags(_R_Tags): - """ /2/instances/tags resource. + """ /2/tags resource. Manages cluster tags.