"""Remote API version 2 baserlib.library.
+ PUT or POST?
+ ============
+
+ According to RFC2616 the main difference between PUT and POST is that
+ POST can create new resources but PUT can only create the resource the
+ URI was pointing to on the PUT request.
+
+In the context of this module, for example, instance creation via POST on
+/2/instances is legitimate, while PUT would not be, since it creates a
+new entity rather than just replacing /2/instances with it.
+
+ So when adding new methods, if they are operating on the URI entity itself,
+PUT should be preferred over POST.
+
"""
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
+from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib
+_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
"pnode", "snodes",
"disk_template",
- "nic.ips", "nic.macs", "nic.modes", "nic.links",
+ "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
"network_port",
"disk.sizes", "disk_usage",
"beparams", "hvparams",
- "oper_state", "oper_ram", "status",
- "tags"]
+ "oper_state", "oper_ram", "oper_vcpus", "status",
+ ] + _COMMON_FIELDS
N_FIELDS = ["name", "offline", "master_candidate", "drained",
"dtotal", "dfree",
"mtotal", "mnode", "mfree",
- "pinst_cnt", "sinst_cnt", "tags",
+ "pinst_cnt", "sinst_cnt",
"ctotal", "cnodes", "csockets",
- ]
+ "pip", "sip", "role",
+ "pinst_list", "sinst_list",
+ ] + _COMMON_FIELDS
_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
"R": _NR_REGULAR,
}
+# Name of the request body field holding the request data format version
+_REQ_DATA_VERSION = "__version__"
+
+# Feature string advertised for instance creation request data version 1
+_INST_CREATE_REQV1 = "instance-create-reqv1"
+
+# Timeout (in seconds) for /2/jobs/[job_id]/wait. Gives a job up to 10 seconds
+# to change before the call returns.
+_WFJC_TIMEOUT = 10
+
class R_version(baserlib.R_Generic):
"""/version resource.
to adapt clients accordingly.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Returns the remote API version.
"""
"""Cluster info.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Returns cluster information.
"""
return client.QueryClusterInfo()
+class R_2_features(baserlib.R_Generic):
+  """/2/features resource.
+
+  Lets clients discover which optional RAPI features this server supports.
+
+  """
+  @staticmethod
+  def GET():
+    """Returns list of optional RAPI features implemented.
+
+    @rtype: list
+    @return: List of feature strings (currently only instance-create-reqv1)
+
+    """
+    return [_INST_CREATE_REQV1]
+
+
class R_2_os(baserlib.R_Generic):
"""/2/os resource.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Return a list of all OSes.
Can return error 500 in case of a problem.
"""
cl = baserlib.GetClient()
- op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
+ op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
+ names=[])
job_id = baserlib.SubmitJob([op], cl)
# we use custom feedback function, instead of print we log the status
result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
if not isinstance(diagnose_data, list):
raise http.HttpBadGateway(message="Can't get OS list")
- return [row[0] for row in diagnose_data if row[1]]
+ os_names = []
+ for (name, valid, variants) in diagnose_data:
+ if valid:
+ os_names.extend(cli.CalculateOSNames(name, variants))
+
+ return os_names
+
+
+class R_2_redist_config(baserlib.R_Generic):
+  """/2/redistribute-config resource.
+
+  """
+  @staticmethod
+  def PUT():
+    """Redistribute configuration to all nodes.
+
+    @return: a job id
+
+    """
+    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
class R_2_jobs(baserlib.R_Generic):
"""/2/jobs resource.
"""
- def GET(self):
+ @staticmethod
+ def GET():
"""Returns a dictionary of jobs.
@return: a dictionary with jobs id and uri.
return result
+class R_2_jobs_id_wait(baserlib.R_Generic):
+  """/2/jobs/[job_id]/wait resource.
+
+  """
+  # WaitForJobChange provides access to sensitive information and blocks
+  # machine resources (it's a blocking RAPI call), hence restricting access.
+  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def GET(self):
+    """Waits for job changes.
+
+    Request body parameters:
+      - fields: list of job fields to watch (required, must be a list)
+      - previous_job_info: job information from a previous call (optional)
+      - previous_log_serial: last log serial number seen (optional)
+
+    @return: a dict with job_info and log_entries, or None if the job did
+        not change within the timeout
+
+    """
+    job_id = self.items[0]
+
+    fields = self.getBodyParameter("fields")
+    prev_job_info = self.getBodyParameter("previous_job_info", None)
+    prev_log_serial = self.getBodyParameter("previous_log_serial", None)
+
+    if not isinstance(fields, list):
+      raise http.HttpBadRequest("The 'fields' parameter should be a list")
+
+    if not (prev_job_info is None or isinstance(prev_job_info, list)):
+      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
+                                " be a list")
+
+    if not (prev_log_serial is None or
+            isinstance(prev_log_serial, (int, long))):
+      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
+                                " be a number")
+
+    client = baserlib.GetClient()
+    result = client.WaitForJobChangeOnce(job_id, fields,
+                                         prev_job_info, prev_log_serial,
+                                         timeout=_WFJC_TIMEOUT)
+    if not result:
+      # Empty result from WaitForJobChangeOnce is mapped to HTTP 404
+      raise http.HttpNotFound()
+
+    if result == constants.JOB_NOTCHANGED:
+      # No changes within the timeout window
+      return None
+
+    (job_info, log_entries) = result
+
+    return {
+      "job_info": job_info,
+      "log_entries": log_entries,
+      }
+
+
class R_2_nodes(baserlib.R_Generic):
"""/2/nodes resource.
"""
node_name = self.items[0]
client = baserlib.GetClient()
- result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
- use_locking=self.useLocking())
+
+ result = baserlib.HandleItemQueryErrors(client.QueryNodes,
+ names=[node_name], fields=N_FIELDS,
+ use_locking=self.useLocking())
return baserlib.MapFields(N_FIELDS, result[0])
@return: a job id
"""
- if not isinstance(self.req.request_body, basestring):
+ if not isinstance(self.request_body, basestring):
raise http.HttpBadRequest("Invalid body contents, not a string")
node_name = self.items[0]
- role = self.req.request_body
+ role = self.request_body
if role == _NR_REGULAR:
candidate = False
node_name = self.items[0]
remote_node = self._checkStringVariable("remote_node", default=None)
iallocator = self._checkStringVariable("iallocator", default=None)
+ early_r = bool(self._checkIntVariable("early_release", default=0))
+ dry_run = bool(self.dryRun())
- op = opcodes.OpEvacuateNode(node_name=node_name,
- remote_node=remote_node,
- iallocator=iallocator)
+ cl = baserlib.GetClient()
- return baserlib.SubmitJob([op])
+ op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
+ iallocator=iallocator,
+ remote_node=remote_node)
+
+ job_id = baserlib.SubmitJob([op], cl)
+ # we use custom feedback function, instead of print we log the status
+ result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+
+ jobs = []
+ for iname, node in result:
+ if dry_run:
+ jid = None
+ else:
+ op = opcodes.OpReplaceDisks(instance_name=iname,
+ remote_node=node, disks=[],
+ mode=constants.REPLACE_DISK_CHG,
+ early_release=early_r)
+ jid = baserlib.SubmitJob([op])
+ jobs.append((jid, iname, node))
+
+ return jobs
class R_2_nodes_name_migrate(baserlib.R_Generic):
return baserlib.SubmitJob([op])
+class R_2_nodes_name_storage_repair(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage/repair resource.
+
+  """
+  def PUT(self):
+    """Repairs a storage unit on the node.
+
+    Requires the 'storage_type' and 'name' query parameters.
+
+    @return: a job id
+
+    """
+    node_name = self.items[0]
+
+    storage_type = self._checkStringVariable("storage_type", None)
+    if not storage_type:
+      raise http.HttpBadRequest("Missing the required 'storage_type'"
+                                " parameter")
+
+    name = self._checkStringVariable("name", None)
+    if not name:
+      raise http.HttpBadRequest("Missing the required 'name'"
+                                " parameter")
+
+    op = opcodes.OpRepairNodeStorage(node_name=node_name,
+                                     storage_type=storage_type,
+                                     name=name)
+    return baserlib.SubmitJob([op])
+
+
+def _ParseInstanceCreateRequestVersion1(data, dry_run):
+  """Parses an instance creation request version 1.
+
+  @type data: dict
+  @param data: Request body contents
+  @type dry_run: bool
+  @param dry_run: Whether to mark the opcode as a dry run
+  @rtype: L{opcodes.OpCreateInstance}
+  @return: Instance creation opcode
+
+  """
+  # Disks
+  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)
+
+  disks = []
+  for idx, i in enumerate(disks_input):
+    baserlib.CheckType(i, dict, "Disk %d specification" % idx)
+
+    # Size is mandatory
+    try:
+      size = i[constants.IDISK_SIZE]
+    except KeyError:
+      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
+                                " size" % idx)
+
+    disk = {
+      constants.IDISK_SIZE: size,
+      }
+
+    # Optional disk access mode
+    try:
+      disk_access = i[constants.IDISK_MODE]
+    except KeyError:
+      pass
+    else:
+      disk[constants.IDISK_MODE] = disk_access
+
+    disks.append(disk)
+
+  assert len(disks_input) == len(disks)
+
+  # Network interfaces
+  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)
+
+  nics = []
+  for idx, i in enumerate(nics_input):
+    baserlib.CheckType(i, dict, "NIC %d specification" % idx)
+
+    nic = {}
+
+    # Copy only the recognized NIC parameters; unknown keys are ignored
+    for field in constants.INIC_PARAMS:
+      try:
+        value = i[field]
+      except KeyError:
+        continue
+
+      nic[field] = value
+
+    nics.append(nic)
+
+  assert len(nics_input) == len(nics)
+
+  # HV/BE parameters
+  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
+  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
+
+  beparams = baserlib.CheckParameter(data, "beparams", default={})
+  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+
+  return opcodes.OpCreateInstance(
+    mode=baserlib.CheckParameter(data, "mode"),
+    instance_name=baserlib.CheckParameter(data, "name"),
+    os_type=baserlib.CheckParameter(data, "os", default=None),
+    force_variant=baserlib.CheckParameter(data, "force_variant",
+                                          default=False),
+    pnode=baserlib.CheckParameter(data, "pnode", default=None),
+    snode=baserlib.CheckParameter(data, "snode", default=None),
+    disk_template=baserlib.CheckParameter(data, "disk_template"),
+    disks=disks,
+    nics=nics,
+    src_node=baserlib.CheckParameter(data, "src_node", default=None),
+    src_path=baserlib.CheckParameter(data, "src_path", default=None),
+    start=baserlib.CheckParameter(data, "start", default=True),
+    wait_for_sync=True,
+    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
+    name_check=baserlib.CheckParameter(data, "name_check", default=True),
+    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
+                                             default=None),
+    file_driver=baserlib.CheckParameter(data, "file_driver",
+                                        default=constants.FD_LOOP),
+    source_handshake=baserlib.CheckParameter(data, "source_handshake",
+                                             default=None),
+    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
+                                           default=None),
+    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
+                                                 default=None),
+    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
+    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
+    hvparams=hvparams,
+    beparams=beparams,
+    dry_run=dry_run,
+    )
+
+
class R_2_instances(baserlib.R_Generic):
"""/2/instances resource.
return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
uri_fields=("id", "uri"))
- def POST(self):
- """Create an instance.
+ def _ParseVersion0CreateRequest(self):
+ """Parses an instance creation request version 0.
- @return: a job id
+ Request data version 0 is deprecated and should not be used anymore.
- """
- if not isinstance(self.req.request_body, dict):
- raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+ @rtype: L{opcodes.OpCreateInstance}
+ @return: Instance creation opcode
- beparams = baserlib.MakeParamsDict(self.req.request_body,
+ """
+ # Do not modify anymore, request data version 0 is deprecated
+ beparams = baserlib.MakeParamsDict(self.request_body,
constants.BES_PARAMETERS)
- hvparams = baserlib.MakeParamsDict(self.req.request_body,
+ hvparams = baserlib.MakeParamsDict(self.request_body,
constants.HVS_PARAMETERS)
fn = self.getBodyParameter
for idx, d in enumerate(disk_data):
if not isinstance(d, int):
raise http.HttpBadRequest("Disk %d specification wrong: should"
- " be an integer")
+ " be an integer" % idx)
disks.append({"size": d})
+
# nic processing (one nic only)
nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
if fn("ip", None) is not None:
if fn("link", None) is not None:
nics[0]["link"] = fn("link")
if fn("bridge", None) is not None:
- nics[0]["bridge"] = fn("bridge")
+ nics[0]["bridge"] = fn("bridge")
- op = opcodes.OpCreateInstance(
+ # Do not modify anymore, request data version 0 is deprecated
+ return opcodes.OpCreateInstance(
mode=constants.INSTANCE_CREATE,
instance_name=fn('name'),
disks=disks,
nics=nics,
start=fn('start', True),
ip_check=fn('ip_check', True),
+ name_check=fn('name_check', True),
wait_for_sync=True,
hypervisor=fn('hypervisor', None),
hvparams=hvparams,
beparams=beparams,
file_storage_dir=fn('file_storage_dir', None),
- file_driver=fn('file_driver', 'loop'),
+ file_driver=fn('file_driver', constants.FD_LOOP),
dry_run=bool(self.dryRun()),
)
+ def POST(self):
+ """Create an instance.
+
+ @return: a job id
+
+ """
+ if not isinstance(self.request_body, dict):
+ raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+
+ # Default to request data version 0
+ data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
+
+ if data_version == 0:
+ op = self._ParseVersion0CreateRequest()
+ elif data_version == 1:
+ op = _ParseInstanceCreateRequestVersion1(self.request_body,
+ self.dryRun())
+ else:
+ raise http.HttpBadRequest("Unsupported request data version %s" %
+ data_version)
+
return baserlib.SubmitJob([op])
"""
client = baserlib.GetClient()
instance_name = self.items[0]
- result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
- use_locking=self.useLocking())
+
+ result = baserlib.HandleItemQueryErrors(client.QueryInstances,
+ names=[instance_name],
+ fields=I_FIELDS,
+ use_locking=self.useLocking())
return baserlib.MapFields(I_FIELDS, result[0])
return baserlib.SubmitJob([op])
+class R_2_instances_name_info(baserlib.R_Generic):
+  """/2/instances/[instance_name]/info resource.
+
+  """
+  def GET(self):
+    """Request detailed instance information.
+
+    @return: a job id
+
+    """
+    instance_name = self.items[0]
+    # "static" query parameter (0/1); presumably selects configuration-only
+    # data over live data — confirm against OpQueryInstanceData
+    static = bool(self._checkIntVariable("static", default=0))
+
+    op = opcodes.OpQueryInstanceData(instances=[instance_name],
+                                     static=static)
+    return baserlib.SubmitJob([op])
+
+
class R_2_instances_name_reboot(baserlib.R_Generic):
"""/2/instances/[instance_name]/reboot resource.
instance_name = self.items[0]
reboot_type = self.queryargs.get('type',
[constants.INSTANCE_REBOOT_HARD])[0]
- ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
- [False])[0])
+ ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
op = opcodes.OpRebootInstance(instance_name=instance_name,
reboot_type=reboot_type,
ignore_secondaries=ignore_secondaries,
"""
instance_name = self.items[0]
- force_startup = bool(self.queryargs.get('force', [False])[0])
+ force_startup = bool(self._checkIntVariable('force'))
op = opcodes.OpStartupInstance(instance_name=instance_name,
force=force_startup,
dry_run=bool(self.dryRun()))
Implements an instance reinstall.
"""
-
- DOC_URI = "/2/instances/[instance_name]/reinstall"
-
def POST(self):
"""Reinstall an instance.
return baserlib.SubmitJob(ops)
+class R_2_instances_name_replace_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/replace-disks resource.
+
+  """
+  def POST(self):
+    """Replaces disks on an instance.
+
+    Optional query parameters: remote_node, mode, disks (comma-separated
+    list of disk indices) and iallocator.
+
+    @return: a job id
+
+    """
+    instance_name = self.items[0]
+    remote_node = self._checkStringVariable("remote_node", default=None)
+    mode = self._checkStringVariable("mode", default=None)
+    raw_disks = self._checkStringVariable("disks", default=None)
+    iallocator = self._checkStringVariable("iallocator", default=None)
+
+    if raw_disks:
+      try:
+        disks = [int(part) for part in raw_disks.split(",")]
+      except ValueError, err:
+        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+    else:
+      # No "disks" parameter means all disks
+      disks = []
+
+    op = opcodes.OpReplaceDisks(instance_name=instance_name,
+                                remote_node=remote_node,
+                                mode=mode,
+                                disks=disks,
+                                iallocator=iallocator)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_activate_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/activate-disks resource.
+
+  """
+  def PUT(self):
+    """Activate disks for an instance.
+
+    The URI might contain ignore_size to ignore current recorded size.
+
+    @return: a job id
+
+    """
+    instance_name = self.items[0]
+    ignore_size = bool(self._checkIntVariable('ignore_size'))
+
+    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
+                                         ignore_size=ignore_size)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
+  """/2/instances/[instance_name]/deactivate-disks resource.
+
+  """
+  def PUT(self):
+    """Deactivate disks for an instance.
+
+    @return: a job id
+
+    """
+    instance_name = self.items[0]
+
+    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
+
+    return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_prepare_export(baserlib.R_Generic):
+  """/2/instances/[instance_name]/prepare-export resource.
+
+  """
+  def PUT(self):
+    """Prepares an export for an instance.
+
+    Requires the "mode" query parameter.
+
+    @return: a job id
+
+    """
+    instance_name = self.items[0]
+    mode = self._checkStringVariable("mode")
+
+    op = opcodes.OpPrepareExport(instance_name=instance_name,
+                                 mode=mode)
+
+    return baserlib.SubmitJob([op])
+
+
+def _ParseExportInstanceRequest(name, data):
+  """Parses a request for an instance export.
+
+  @type name: string
+  @param name: Instance name
+  @type data: dict
+  @param data: Request body contents
+  @rtype: L{opcodes.OpExportInstance}
+  @return: Instance export opcode
+
+  """
+  # Export mode defaults to a local export
+  mode = baserlib.CheckParameter(data, "mode",
+                                 default=constants.EXPORT_MODE_LOCAL)
+  target_node = baserlib.CheckParameter(data, "destination")
+  # "shutdown" must be a boolean; no default is provided
+  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
+  remove_instance = baserlib.CheckParameter(data, "remove_instance",
+                                            exptype=bool, default=False)
+  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
+  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
+                                                default=None)
+
+  return opcodes.OpExportInstance(instance_name=name,
+                                  mode=mode,
+                                  target_node=target_node,
+                                  shutdown=shutdown,
+                                  remove_instance=remove_instance,
+                                  x509_key_name=x509_key_name,
+                                  destination_x509_ca=destination_x509_ca)
+
+
+class R_2_instances_name_export(baserlib.R_Generic):
+  """/2/instances/[instance_name]/export resource.
+
+  """
+  def PUT(self):
+    """Exports an instance.
+
+    The request body must be a dictionary; see
+    _ParseExportInstanceRequest for the accepted fields.
+
+    @return: a job id
+
+    """
+    if not isinstance(self.request_body, dict):
+      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+
+    op = _ParseExportInstanceRequest(self.items[0], self.request_body)
+
+    return baserlib.SubmitJob([op])
+
+
class _R_Tags(baserlib.R_Generic):
""" Quasiclass for tagging resources
Example: ["tag1", "tag2", "tag3"]
"""
+ # pylint: disable-msg=W0212
return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
def PUT(self):
you'll have back a job id.
"""
+ # pylint: disable-msg=W0212
if 'tag' not in self.queryargs:
raise http.HttpBadRequest("Please specify tag(s) to add using the"
" the 'tag' parameter")
/tags?tag=[tag]&tag=[tag]
"""
+ # pylint: disable-msg=W0212
if 'tag' not in self.queryargs:
# no we not gonna delete all tags
raise http.HttpBadRequest("Cannot delete all tags - please specify"