#
#
-# Copyright (C) 2006, 2007, 2008 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
from ganeti import http
from ganeti import constants
from ganeti import cli
+from ganeti import utils
from ganeti import rapi
+from ganeti import ht
from ganeti.rapi import baserlib
"network_port",
"disk.sizes", "disk_usage",
"beparams", "hvparams",
- "oper_state", "oper_ram", "status",
+ "oper_state", "oper_ram", "oper_vcpus", "status",
+ "custom_hvparams", "custom_beparams", "custom_nicparams",
] + _COMMON_FIELDS
N_FIELDS = ["name", "offline", "master_candidate", "drained",
"ctotal", "cnodes", "csockets",
"pip", "sip", "role",
"pinst_list", "sinst_list",
+ "master_capable", "vm_capable",
+ "group.uuid",
] + _COMMON_FIELDS
+G_FIELDS = ["name", "uuid",
+ "alloc_policy",
+ "node_cnt", "node_list",
+ "ctime", "mtime", "serial_no",
+ ] # "tags" is missing to be able to use _COMMON_FIELDS here.
+
_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
# Request data version field
_REQ_DATA_VERSION = "__version__"
+# Feature string for instance creation request data version 1
+_INST_CREATE_REQV1 = "instance-create-reqv1"
+
+# Feature string for instance reinstall request version 1
+_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+
# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
class R_2_info(baserlib.R_Generic):
- """Cluster info.
+ """/2/info resource.
"""
@staticmethod
return client.QueryClusterInfo()
+class R_2_features(baserlib.R_Generic):
+  """/2/features resource.
+
+  """
+  @staticmethod
+  def GET():
+    """Returns list of optional RAPI features implemented.
+
+    @rtype: list of string
+    @return: feature strings clients can probe for (e.g. request data
+        version 1 support for instance creation/reinstallation)
+
+    """
+    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
+
+
class R_2_os(baserlib.R_Generic):
"""/2/os resource.
"""
cl = baserlib.GetClient()
- op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
- names=[])
+ op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
job_id = baserlib.SubmitJob([op], cl)
# we use custom feedback function, instead of print we log the status
result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
raise http.HttpBadGateway(message="Can't get OS list")
os_names = []
- for (name, valid, variants) in diagnose_data:
- if valid:
- os_names.extend(cli.CalculateOSNames(name, variants))
+ for (name, variants) in diagnose_data:
+ os_names.extend(cli.CalculateOSNames(name, variants))
return os_names
"""Redistribute configuration to all nodes.
"""
- return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
+ return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
+
+
+class R_2_cluster_modify(baserlib.R_Generic):
+  """/2/modify resource.
+
+  """
+  def PUT(self):
+    """Modifies cluster parameters.
+
+    @return: a job id
+
+    """
+    # No static overrides: all opcode parameters are taken verbatim from
+    # the request body
+    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
+                             None)
+
+    return baserlib.SubmitJob([op])
class R_2_jobs(baserlib.R_Generic):
class R_2_nodes_name(baserlib.R_Generic):
- """/2/nodes/[node_name] resources.
+ """/2/nodes/[node_name] resource.
"""
def GET(self):
"""
node_name = self.items[0]
client = baserlib.GetClient()
- result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
- use_locking=self.useLocking())
+
+ result = baserlib.HandleItemQueryErrors(client.QueryNodes,
+ names=[node_name], fields=N_FIELDS,
+ use_locking=self.useLocking())
return baserlib.MapFields(N_FIELDS, result[0])
@return: a job id
"""
- if not isinstance(self.req.request_body, basestring):
+ if not isinstance(self.request_body, basestring):
raise http.HttpBadRequest("Invalid body contents, not a string")
node_name = self.items[0]
- role = self.req.request_body
+ role = self.request_body
if role == _NR_REGULAR:
candidate = False
else:
raise http.HttpBadRequest("Can't set '%s' role" % role)
- op = opcodes.OpSetNodeParams(node_name=node_name,
+ op = opcodes.OpNodeSetParams(node_name=node_name,
master_candidate=candidate,
offline=offline,
drained=drained,
node_name = self.items[0]
remote_node = self._checkStringVariable("remote_node", default=None)
iallocator = self._checkStringVariable("iallocator", default=None)
+ early_r = bool(self._checkIntVariable("early_release", default=0))
+ dry_run = bool(self.dryRun())
+
+ cl = baserlib.GetClient()
- op = opcodes.OpEvacuateNode(node_name=node_name,
- remote_node=remote_node,
- iallocator=iallocator)
+ op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
+ iallocator=iallocator,
+ remote_node=remote_node)
- return baserlib.SubmitJob([op])
+ job_id = baserlib.SubmitJob([op], cl)
+ # we use custom feedback function, instead of print we log the status
+ result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+
+ jobs = []
+ for iname, node in result:
+ if dry_run:
+ jid = None
+ else:
+ op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
+ remote_node=node, disks=[],
+ mode=constants.REPLACE_DISK_CHG,
+ early_release=early_r)
+ jid = baserlib.SubmitJob([op])
+ jobs.append((jid, iname, node))
+
+ return jobs
class R_2_nodes_name_migrate(baserlib.R_Generic):
"""
node_name = self.items[0]
- live = bool(self._checkIntVariable("live", default=1))
- op = opcodes.OpMigrateNode(node_name=node_name, live=live)
+ if "live" in self.queryargs and "mode" in self.queryargs:
+ raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
+ " be passed")
+ elif "live" in self.queryargs:
+ if self._checkIntVariable("live", default=1):
+ mode = constants.HT_MIGRATION_LIVE
+ else:
+ mode = constants.HT_MIGRATION_NONLIVE
+ else:
+ mode = self._checkStringVariable("mode", default=None)
+
+ op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)
return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
- """/2/nodes/[node_name]/storage ressource.
+ """/2/nodes/[node_name]/storage resource.
"""
- # LUQueryNodeStorage acquires locks, hence restricting access to GET
+ # LUNodeQueryStorage acquires locks, hence restricting access to GET
GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
def GET(self):
raise http.HttpBadRequest("Missing the required 'output_fields'"
" parameter")
- op = opcodes.OpQueryNodeStorage(nodes=[node_name],
+ op = opcodes.OpNodeQueryStorage(nodes=[node_name],
storage_type=storage_type,
output_fields=output_fields.split(","))
return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
- """/2/nodes/[node_name]/storage/modify ressource.
+ """/2/nodes/[node_name]/storage/modify resource.
"""
def PUT(self):
changes[constants.SF_ALLOCATABLE] = \
bool(self._checkIntVariable("allocatable", default=1))
- op = opcodes.OpModifyNodeStorage(node_name=node_name,
+ op = opcodes.OpNodeModifyStorage(node_name=node_name,
storage_type=storage_type,
name=name,
changes=changes)
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
- """/2/nodes/[node_name]/storage/repair ressource.
+ """/2/nodes/[node_name]/storage/repair resource.
"""
def PUT(self):
return baserlib.SubmitJob([op])
+def _ParseCreateGroupRequest(data, dry_run):
+  """Parses a request for creating a node group.
+
+  @type data: dict
+  @param data: the body received by the group creation request
+  @type dry_run: bool
+  @param dry_run: whether to perform a dry run
+
+  @rtype: L{opcodes.OpGroupAdd}
+  @return: Group creation opcode
+
+  """
+  # "name" is mandatory, "alloc_policy" is optional
+  group_name = baserlib.CheckParameter(data, "name")
+  alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
+
+  return opcodes.OpGroupAdd(group_name=group_name,
+                            alloc_policy=alloc_policy,
+                            dry_run=dry_run)
+
+
+class R_2_groups(baserlib.R_Generic):
+  """/2/groups resource.
+
+  """
+  def GET(self):
+    """Returns a list of all node groups.
+
+    """
+    client = baserlib.GetClient()
+
+    # Bulk mode returns full objects with all G_FIELDS; the default mode
+    # only lists the groups' URIs
+    if self.useBulk():
+      bulkdata = client.QueryGroups([], G_FIELDS, False)
+      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
+    else:
+      data = client.QueryGroups([], ["name"], False)
+      groupnames = [row[0] for row in data]
+      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
+                                   uri_fields=("name", "uri"))
+
+  def POST(self):
+    """Create a node group.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
+    return baserlib.SubmitJob([op])
+
+
+class R_2_groups_name(baserlib.R_Generic):
+  """/2/groups/[group_name] resource.
+
+  """
+  def GET(self):
+    """Send information about a node group.
+
+    """
+    group_name = self.items[0]
+    client = baserlib.GetClient()
+
+    # HandleItemQueryErrors translates lookup failures into proper HTTP
+    # errors instead of leaking internal exceptions
+    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
+                                            names=[group_name], fields=G_FIELDS,
+                                            use_locking=self.useLocking())
+
+    return baserlib.MapFields(G_FIELDS, result[0])
+
+  def DELETE(self):
+    """Delete a node group.
+
+    @return: a job id
+
+    """
+    op = opcodes.OpGroupRemove(group_name=self.items[0],
+                               dry_run=bool(self.dryRun()))
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseModifyGroupRequest(name, data):
+  """Parses a request for modifying a node group.
+
+  @type name: string
+  @param name: name of the node group to modify
+  @type data: dict
+  @param data: the body received by the modify request
+
+  @rtype: L{opcodes.OpGroupSetParams}
+  @return: Group modify opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
+    "group_name": name,
+    })
+
+
+
+class R_2_groups_name_modify(baserlib.R_Generic):
+  """/2/groups/[group_name]/modify resource.
+
+  """
+  def PUT(self):
+    """Changes some parameters of node group.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    # Group name comes from the URI, all other parameters from the body
+    op = _ParseModifyGroupRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseRenameGroupRequest(name, data, dry_run):
+  """Parses a request for renaming a node group.
+
+  @type name: string
+  @param name: name of the node group to rename
+  @type data: dict
+  @param data: the body received by the rename request
+  @type dry_run: bool
+  @param dry_run: whether to perform a dry run
+
+  @rtype: L{opcodes.OpGroupRename}
+  @return: Node group rename opcode
+
+  """
+  # Old name comes from the URI, the new one from the request body
+  old_name = name
+  new_name = baserlib.CheckParameter(data, "new_name")
+
+  return opcodes.OpGroupRename(old_name=old_name, new_name=new_name,
+                               dry_run=dry_run)
+
+
+class R_2_groups_name_rename(baserlib.R_Generic):
+  """/2/groups/[group_name]/rename resource.
+
+  """
+  def PUT(self):
+    """Changes the name of a node group.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
+                                  self.dryRun())
+    return baserlib.SubmitJob([op])
+
+
+class R_2_groups_name_assign_nodes(baserlib.R_Generic):
+  """/2/groups/[group_name]/assign-nodes resource.
+
+  """
+  def PUT(self):
+    """Assigns nodes to a group.
+
+    @return: a job id
+
+    """
+    # Group name, dry-run and force flags are fixed here; remaining opcode
+    # parameters (e.g. the node list) come from the request body
+    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
+      "group_name": self.items[0],
+      "dry_run": self.dryRun(),
+      "force": self.useForce(),
+      })
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseInstanceCreateRequestVersion1(data, dry_run):
+  """Parses an instance creation request version 1.
+
+  @type data: dict
+  @param data: the body received by the instance creation request
+  @type dry_run: bool
+  @param dry_run: whether to perform a dry run
+
+  @rtype: L{opcodes.OpInstanceCreate}
+  @return: Instance creation opcode
+
+  """
+  # Disks
+  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)
+
+  disks = []
+  for idx, i in enumerate(disks_input):
+    baserlib.CheckType(i, dict, "Disk %d specification" % idx)
+
+    # Size is mandatory
+    try:
+      size = i[constants.IDISK_SIZE]
+    except KeyError:
+      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
+                                " size" % idx)
+
+    disk = {
+      constants.IDISK_SIZE: size,
+      }
+
+    # Optional disk access mode
+    try:
+      disk_access = i[constants.IDISK_MODE]
+    except KeyError:
+      pass
+    else:
+      disk[constants.IDISK_MODE] = disk_access
+
+    disks.append(disk)
+
+  assert len(disks_input) == len(disks)
+
+  # Network interfaces; only keys listed in constants.INIC_PARAMS are
+  # copied, anything else in a NIC specification is silently ignored
+  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)
+
+  nics = []
+  for idx, i in enumerate(nics_input):
+    baserlib.CheckType(i, dict, "NIC %d specification" % idx)
+
+    nic = {}
+
+    for field in constants.INIC_PARAMS:
+      try:
+        value = i[field]
+      except KeyError:
+        continue
+
+      nic[field] = value
+
+    nics.append(nic)
+
+  assert len(nics_input) == len(nics)
+
+  # HV/BE parameters; ForceDictType coerces values to the declared
+  # parameter types and rejects unknown keys
+  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
+  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
+
+  beparams = baserlib.CheckParameter(data, "beparams", default={})
+  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+
+  return opcodes.OpInstanceCreate(
+    mode=baserlib.CheckParameter(data, "mode"),
+    instance_name=baserlib.CheckParameter(data, "name"),
+    os_type=baserlib.CheckParameter(data, "os"),
+    osparams=baserlib.CheckParameter(data, "osparams", default={}),
+    force_variant=baserlib.CheckParameter(data, "force_variant",
+                                          default=False),
+    no_install=baserlib.CheckParameter(data, "no_install", default=False),
+    pnode=baserlib.CheckParameter(data, "pnode", default=None),
+    snode=baserlib.CheckParameter(data, "snode", default=None),
+    disk_template=baserlib.CheckParameter(data, "disk_template"),
+    disks=disks,
+    nics=nics,
+    src_node=baserlib.CheckParameter(data, "src_node", default=None),
+    src_path=baserlib.CheckParameter(data, "src_path", default=None),
+    start=baserlib.CheckParameter(data, "start", default=True),
+    wait_for_sync=True,
+    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
+    name_check=baserlib.CheckParameter(data, "name_check", default=True),
+    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
+                                             default=None),
+    file_driver=baserlib.CheckParameter(data, "file_driver",
+                                        default=constants.FD_LOOP),
+    source_handshake=baserlib.CheckParameter(data, "source_handshake",
+                                             default=None),
+    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
+                                           default=None),
+    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
+                                                 default=None),
+    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
+    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
+    hvparams=hvparams,
+    beparams=beparams,
+    dry_run=dry_run,
+    )
+
+
class R_2_instances(baserlib.R_Generic):
"""/2/instances resource.
def _ParseVersion0CreateRequest(self):
"""Parses an instance creation request version 0.
- @rtype: L{opcodes.OpCreateInstance}
+ Request data version 0 is deprecated and should not be used anymore.
+
+ @rtype: L{opcodes.OpInstanceCreate}
@return: Instance creation opcode
"""
- beparams = baserlib.MakeParamsDict(self.req.request_body,
+ # Do not modify anymore, request data version 0 is deprecated
+ beparams = baserlib.MakeParamsDict(self.request_body,
constants.BES_PARAMETERS)
- hvparams = baserlib.MakeParamsDict(self.req.request_body,
+ hvparams = baserlib.MakeParamsDict(self.request_body,
constants.HVS_PARAMETERS)
fn = self.getBodyParameter
if fn("bridge", None) is not None:
nics[0]["bridge"] = fn("bridge")
- return opcodes.OpCreateInstance(
+ # Do not modify anymore, request data version 0 is deprecated
+ return opcodes.OpInstanceCreate(
mode=constants.INSTANCE_CREATE,
instance_name=fn('name'),
disks=disks,
hvparams=hvparams,
beparams=beparams,
file_storage_dir=fn('file_storage_dir', None),
- file_driver=fn('file_driver', 'loop'),
+ file_driver=fn('file_driver', constants.FD_LOOP),
dry_run=bool(self.dryRun()),
)
@return: a job id
"""
- if not isinstance(self.req.request_body, dict):
+ if not isinstance(self.request_body, dict):
raise http.HttpBadRequest("Invalid body contents, not a dictionary")
# Default to request data version 0
if data_version == 0:
op = self._ParseVersion0CreateRequest()
+ elif data_version == 1:
+ op = _ParseInstanceCreateRequestVersion1(self.request_body,
+ self.dryRun())
else:
raise http.HttpBadRequest("Unsupported request data version %s" %
data_version)
class R_2_instances_name(baserlib.R_Generic):
- """/2/instances/[instance_name] resources.
+ """/2/instances/[instance_name] resource.
"""
def GET(self):
"""
client = baserlib.GetClient()
instance_name = self.items[0]
- result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
- use_locking=self.useLocking())
+
+ result = baserlib.HandleItemQueryErrors(client.QueryInstances,
+ names=[instance_name],
+ fields=I_FIELDS,
+ use_locking=self.useLocking())
return baserlib.MapFields(I_FIELDS, result[0])
"""Delete an instance.
"""
- op = opcodes.OpRemoveInstance(instance_name=self.items[0],
+ op = opcodes.OpInstanceRemove(instance_name=self.items[0],
ignore_failures=False,
dry_run=bool(self.dryRun()))
return baserlib.SubmitJob([op])
instance_name = self.items[0]
static = bool(self._checkIntVariable("static", default=0))
- op = opcodes.OpQueryInstanceData(instances=[instance_name],
+ op = opcodes.OpInstanceQueryData(instances=[instance_name],
static=static)
return baserlib.SubmitJob([op])
reboot_type = self.queryargs.get('type',
[constants.INSTANCE_REBOOT_HARD])[0]
ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
- op = opcodes.OpRebootInstance(instance_name=instance_name,
+ op = opcodes.OpInstanceReboot(instance_name=instance_name,
reboot_type=reboot_type,
ignore_secondaries=ignore_secondaries,
dry_run=bool(self.dryRun()))
"""
instance_name = self.items[0]
force_startup = bool(self._checkIntVariable('force'))
- op = opcodes.OpStartupInstance(instance_name=instance_name,
+ op = opcodes.OpInstanceStartup(instance_name=instance_name,
force=force_startup,
dry_run=bool(self.dryRun()))
"""
instance_name = self.items[0]
- op = opcodes.OpShutdownInstance(instance_name=instance_name,
+ op = opcodes.OpInstanceShutdown(instance_name=instance_name,
dry_run=bool(self.dryRun()))
return baserlib.SubmitJob([op])
+def _ParseInstanceReinstallRequest(name, data):
+  """Parses a request for reinstalling an instance.
+
+  @type name: string
+  @param name: name of the instance to reinstall
+  @type data: dict
+  @param data: the body received by the reinstall request
+
+  @rtype: list of opcodes
+  @return: shutdown and reinstall opcodes, followed by a startup opcode
+      unless "start" was explicitly set to False
+
+  """
+  if not isinstance(data, dict):
+    raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+
+  ostype = baserlib.CheckParameter(data, "os")
+  start = baserlib.CheckParameter(data, "start", exptype=bool,
+                                  default=True)
+  osparams = baserlib.CheckParameter(data, "osparams", default=None)
+
+  ops = [
+    opcodes.OpInstanceShutdown(instance_name=name),
+    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
+                                osparams=osparams),
+    ]
+
+  if start:
+    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
+
+  return ops
+
+
class R_2_instances_name_reinstall(baserlib.R_Generic):
"""/2/instances/[instance_name]/reinstall resource.
automatically.
"""
- instance_name = self.items[0]
- ostype = self._checkStringVariable('os')
- nostartup = self._checkIntVariable('nostartup')
- ops = [
- opcodes.OpShutdownInstance(instance_name=instance_name),
- opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
- ]
- if not nostartup:
- ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
- force=False))
+ if self.request_body:
+ if self.queryargs:
+ raise http.HttpBadRequest("Can't combine query and body parameters")
+
+ body = self.request_body
+ else:
+ if not self.queryargs:
+ raise http.HttpBadRequest("Missing query parameters")
+ # Legacy interface, do not modify/extend
+ body = {
+ "os": self._checkStringVariable("os"),
+ "start": not self._checkIntVariable("nostartup"),
+ }
+
+ ops = _ParseInstanceReinstallRequest(self.items[0], body)
+
return baserlib.SubmitJob(ops)
+def _ParseInstanceReplaceDisksRequest(name, data):
+  """Parses a request for replacing an instance's disks.
+
+  @type name: string
+  @param name: name of the instance whose disks are to be replaced
+  @type data: dict
+  @param data: the body received by the replace-disks request
+
+  @rtype: L{opcodes.OpInstanceReplaceDisks}
+  @return: Instance replace-disks opcode
+
+  """
+  override = {
+    "instance_name": name,
+    }
+
+  # Parse disks
+  try:
+    raw_disks = data["disks"]
+  except KeyError:
+    pass
+  else:
+    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
+      # Backwards compatibility for strings of the format "1, 2, 3"
+      try:
+        data["disks"] = [int(part) for part in raw_disks.split(",")]
+      except (TypeError, ValueError), err:
+        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+
+  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
+
+
class R_2_instances_name_replace_disks(baserlib.R_Generic):
"""/2/instances/[instance_name]/replace-disks resource.
"""Replaces disks on an instance.
"""
- instance_name = self.items[0]
- remote_node = self._checkStringVariable("remote_node", default=None)
- mode = self._checkStringVariable("mode", default=None)
- raw_disks = self._checkStringVariable("disks", default=None)
- iallocator = self._checkStringVariable("iallocator", default=None)
-
- if raw_disks:
- try:
- disks = [int(part) for part in raw_disks.split(",")]
- except ValueError, err:
- raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
- else:
- disks = []
-
- op = opcodes.OpReplaceDisks(instance_name=instance_name,
- remote_node=remote_node,
- mode=mode,
- disks=disks,
- iallocator=iallocator)
+ op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
return baserlib.SubmitJob([op])
instance_name = self.items[0]
ignore_size = bool(self._checkIntVariable('ignore_size'))
- op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
+ op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
ignore_size=ignore_size)
return baserlib.SubmitJob([op])
"""
instance_name = self.items[0]
- op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
+ op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_prepare_export(baserlib.R_Generic):
+  """/2/instances/[instance_name]/prepare-export resource.
+
+  """
+  def PUT(self):
+    """Prepares an export for an instance.
+
+    @return: a job id
+
+    """
+    instance_name = self.items[0]
+    # Export mode is passed as a query argument
+    mode = self._checkStringVariable("mode")
+
+    op = opcodes.OpBackupPrepare(instance_name=instance_name,
+                                 mode=mode)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseExportInstanceRequest(name, data):
+  """Parses a request for an instance export.
+
+  @type name: string
+  @param name: name of the instance to export
+  @type data: dict
+  @param data: the body received by the export request; modified in place
+      ("destination" is renamed to "target_node")
+
+  @rtype: L{opcodes.OpBackupExport}
+  @return: Instance export opcode
+
+  """
+  # Rename "destination" to "target_node"
+  try:
+    data["target_node"] = data.pop("destination")
+  except KeyError:
+    pass
+
+  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_export(baserlib.R_Generic):
+  """/2/instances/[instance_name]/export resource.
+
+  """
+  def PUT(self):
+    """Exports an instance.
+
+    @return: a job id
+
+    """
+    if not isinstance(self.request_body, dict):
+      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+
+    op = _ParseExportInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseMigrateInstanceRequest(name, data):
+  """Parses a request for an instance migration.
+
+  @type name: string
+  @param name: name of the instance to migrate
+  @type data: dict
+  @param data: the body received by the migration request
+
+  @rtype: L{opcodes.OpInstanceMigrate}
+  @return: Instance migration opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_migrate(baserlib.R_Generic):
+  """/2/instances/[instance_name]/migrate resource.
+
+  """
+  def PUT(self):
+    """Migrates an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    # Instance name comes from the URI, remaining parameters from the body
+    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseRenameInstanceRequest(name, data):
+  """Parses a request for renaming an instance.
+
+  @type name: string
+  @param name: name of the instance to rename
+  @type data: dict
+  @param data: the body received by the rename request
+
+  @rtype: L{opcodes.OpInstanceRename}
+  @return: Instance rename opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_rename(baserlib.R_Generic):
+  """/2/instances/[instance_name]/rename resource.
+
+  """
+  def PUT(self):
+    """Changes the name of an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    # Current name comes from the URI, the new one from the body
+    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseModifyInstanceRequest(name, data):
+  """Parses a request for modifying an instance.
+
+  @type name: string
+  @param name: name of the instance to modify
+  @type data: dict
+  @param data: the body received by the modify request
+
+  @rtype: L{opcodes.OpInstanceSetParams}
+  @return: Instance modify opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
+    "instance_name": name,
+    })
+
+
+class R_2_instances_name_modify(baserlib.R_Generic):
+  """/2/instances/[instance_name]/modify resource.
+
+  """
+  def PUT(self):
+    """Changes some parameters of an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    # Instance name comes from the URI, remaining parameters from the body
+    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_disk_grow(baserlib.R_Generic):
+  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
+
+  """
+  def POST(self):
+    """Increases the size of an instance disk.
+
+    @return: a job id
+
+    """
+    # items[0] is the instance name, items[1] the disk index from the URI;
+    # the amount to grow by comes from the request body
+    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
+      "instance_name": self.items[0],
+      "disk": int(self.items[1]),
+      })
    return baserlib.SubmitJob([op])
"""
baserlib.R_Generic.__init__(self, items, queryargs, req)
- if self.TAG_LEVEL != constants.TAG_CLUSTER:
- self.name = items[0]
+ if self.TAG_LEVEL == constants.TAG_CLUSTER:
+ self.name = None
else:
- self.name = ""
+ self.name = items[0]
def GET(self):
"""Returns a list of tags.
class R_2_tags(_R_Tags):
- """ /2/instances/tags resource.
+ """ /2/tags resource.
Manages cluster tags.