"""Remote API version 2 baserlib.library.
+ PUT or POST?
+ ============
+
+According to RFC 2616 the main difference between PUT and POST is that
+POST can create new resources but PUT can only create the resource the
+URI was pointing to on the PUT request.
+
+In the context of this module, POST on /2/instances is legitimate for
+instance creation while PUT would not be, because it creates a new
+entity rather than just replacing /2/instances with it.
+
+So when adding new methods, if they operate on the URI entity itself,
+PUT should be preferred over POST.
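+
+For example (illustrative requests; the instance name is an assumption):
+
+  POST /2/instances
+    creates a new instance resource
+  PUT  /2/instances/inst1.example.com/startup
+    operates on the entity the URI points to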
+
"""
-import ganeti.opcodes
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
+from ganeti import opcodes
from ganeti import http
-from ganeti import luxi
from ganeti import constants
+from ganeti import cli
+from ganeti import utils
+from ganeti import rapi
from ganeti.rapi import baserlib
-from ganeti.rapi.rlib1 import I_FIELDS, N_FIELDS
+
+_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
+I_FIELDS = ["name", "admin_state", "os",
+ "pnode", "snodes",
+ "disk_template",
+ "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
+ "network_port",
+ "disk.sizes", "disk_usage",
+ "beparams", "hvparams",
+ "oper_state", "oper_ram", "oper_vcpus", "status",
+ ] + _COMMON_FIELDS
+
+N_FIELDS = ["name", "offline", "master_candidate", "drained",
+ "dtotal", "dfree",
+ "mtotal", "mnode", "mfree",
+ "pinst_cnt", "sinst_cnt",
+ "ctotal", "cnodes", "csockets",
+ "pip", "sip", "role",
+ "pinst_list", "sinst_list",
+ ] + _COMMON_FIELDS
+
+_NR_DRAINED = "drained"
+_NR_MASTER_CANDIDATE = "master-candidate"
+_NR_MASTER = "master"
+_NR_OFFLINE = "offline"
+_NR_REGULAR = "regular"
+
+_NR_MAP = {
+ "M": _NR_MASTER,
+  "C": _NR_MASTER_CANDIDATE,
+ "D": _NR_DRAINED,
+ "O": _NR_OFFLINE,
+ "R": _NR_REGULAR,
+ }
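+
+# For example, a node whose "role" query field is "C" is reported through
+# this API as "master-candidate".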
+
+# Request data version field
+_REQ_DATA_VERSION = "__version__"
+
+# Feature string for instance creation request data version 1
+_INST_CREATE_REQV1 = "instance-create-reqv1"
+
+# Timeout for /2/jobs/[job_id]/wait. Gives a job up to 10 seconds to change.
+_WFJC_TIMEOUT = 10
+
+
+class R_version(baserlib.R_Generic):
+ """/version resource.
+
+ This resource should be used to determine the remote API version and
+ to adapt clients accordingly.
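+
+  For example, a client might refuse to talk to an incompatible server
+  (an illustrative sketch; "client" and "SomeClientError" are assumptions,
+  not part of this module):
+
+    if client.GetVersion() != constants.RAPI_VERSION:
+      raise SomeClientError("Unsupported remote API version")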
+
+ """
+ @staticmethod
+ def GET():
+ """Returns the remote API version.
+
+ """
+ return constants.RAPI_VERSION
+
+
+class R_2_info(baserlib.R_Generic):
+ """Cluster info.
+
+ """
+ @staticmethod
+ def GET():
+ """Returns cluster information.
+
+ """
+ client = baserlib.GetClient()
+ return client.QueryClusterInfo()
+
+
+class R_2_features(baserlib.R_Generic):
+ """/2/features resource.
+
+ """
+ @staticmethod
+ def GET():
+    """Returns a list of optional RAPI features implemented.
+
+ """
+ return [_INST_CREATE_REQV1]
+
+
+class R_2_os(baserlib.R_Generic):
+ """/2/os resource.
+
+ """
+ @staticmethod
+ def GET():
+ """Return a list of all OSes.
+
+ Can return error 500 in case of a problem.
+
+ Example: ["debian-etch"]
+
+ """
+ cl = baserlib.GetClient()
+ op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
+ names=[])
+ job_id = baserlib.SubmitJob([op], cl)
+    # we use a custom feedback function, logging the status instead of
+    # printing it
+ result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+ diagnose_data = result[0]
+
+ if not isinstance(diagnose_data, list):
+ raise http.HttpBadGateway(message="Can't get OS list")
+
+ os_names = []
+ for (name, valid, variants) in diagnose_data:
+ if valid:
+ os_names.extend(cli.CalculateOSNames(name, variants))
+
+ return os_names
+
+
+class R_2_redist_config(baserlib.R_Generic):
+ """/2/redistribute-config resource.
+
+ """
+ @staticmethod
+ def PUT():
+ """Redistribute configuration to all nodes.
+
+ """
+ return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
class R_2_jobs(baserlib.R_Generic):
"""/2/jobs resource.
"""
- DOC_URI = "/2/jobs"
-
- def GET(self):
+ @staticmethod
+ def GET():
"""Returns a dictionary of jobs.
- Returns:
- A dictionary with jobs id and uri.
+    @return: a list of dictionaries with job id and uri.
"""
fields = ["id"]
+ cl = baserlib.GetClient()
# Convert the list of lists to the list of ids
- result = [job_id for [job_id] in luxi.Client().QueryJobs(None, fields)]
- return baserlib.BuildUriList(result, "/2/jobs/%s", uri_fields=("id", "uri"))
+ result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
+ return baserlib.BuildUriList(result, "/2/jobs/%s",
+ uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
"""/2/jobs/[job_id] resource.
"""
- DOC_URI = "/2/jobs/[job_id]"
-
def GET(self):
"""Returns a job status.
- Returns:
- A dictionary with job parameters.
-
- The result includes:
- id - job ID as a number
- status - current job status as a string
- ops - involved OpCodes as a list of dictionaries for each opcodes in
- the job
- opstatus - OpCodes status as a list
- opresult - OpCodes results as a list of lists
+ @return: a dictionary with job parameters.
+ The result includes:
+ - id: job ID as a number
+ - status: current job status as a string
+        - ops: involved OpCodes as a list of dictionaries, one for
+          each opcode in the job
+ - opstatus: OpCodes status as a list
+ - opresult: OpCodes results as a list of lists
"""
- fields = ["id", "ops", "status", "opstatus", "opresult"]
+ fields = ["id", "ops", "status", "summary",
+ "opstatus", "opresult", "oplog",
+ "received_ts", "start_ts", "end_ts",
+ ]
job_id = self.items[0]
- result = luxi.Client().QueryJobs([job_id, ], fields)[0]
+ result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
+ if result is None:
+ raise http.HttpNotFound()
return baserlib.MapFields(fields, result)
def DELETE(self):
"""
job_id = self.items[0]
- result = luxi.Client().CancelJob(job_id)
+ result = baserlib.GetClient().CancelJob(job_id)
return result
+class R_2_jobs_id_wait(baserlib.R_Generic):
+ """/2/jobs/[job_id]/wait resource.
+
+ """
+ # WaitForJobChange provides access to sensitive information and blocks
+ # machine resources (it's a blocking RAPI call), hence restricting access.
+ GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+ def GET(self):
+ """Waits for job changes.
+
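+    The request body (JSON) should provide the parameters checked below;
+    an illustrative example:
+
+      {"fields": ["status"],
+       "previous_job_info": null,
+       "previous_log_serial": null}
+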
+ """
+ job_id = self.items[0]
+
+ fields = self.getBodyParameter("fields")
+ prev_job_info = self.getBodyParameter("previous_job_info", None)
+ prev_log_serial = self.getBodyParameter("previous_log_serial", None)
+
+ if not isinstance(fields, list):
+ raise http.HttpBadRequest("The 'fields' parameter should be a list")
+
+ if not (prev_job_info is None or isinstance(prev_job_info, list)):
+ raise http.HttpBadRequest("The 'previous_job_info' parameter should"
+ " be a list")
+
+ if not (prev_log_serial is None or
+ isinstance(prev_log_serial, (int, long))):
+ raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
+ " be a number")
+
+ client = baserlib.GetClient()
+ result = client.WaitForJobChangeOnce(job_id, fields,
+ prev_job_info, prev_log_serial,
+ timeout=_WFJC_TIMEOUT)
+ if not result:
+ raise http.HttpNotFound()
+
+ if result == constants.JOB_NOTCHANGED:
+ # No changes
+ return None
+
+ (job_info, log_entries) = result
+
+ return {
+ "job_info": job_info,
+ "log_entries": log_entries,
+ }
+
+
class R_2_nodes(baserlib.R_Generic):
"""/2/nodes resource.
"""
- DOC_URI = "/2/nodes"
-
def GET(self):
"""Returns a list of all nodes.
- Returns:
- A dictionary with 'name' and 'uri' keys for each of them.
-
- Example: [
- {
- "id": "node1.example.com",
- "uri": "\/instances\/node1.example.com"
- },
- {
- "id": "node2.example.com",
- "uri": "\/instances\/node2.example.com"
- }]
-
- If the optional 'bulk' argument is provided and set to 'true'
- value (i.e '?bulk=1'), the output contains detailed
- information about nodes as a list.
-
- Example: [
- {
- "pinst_cnt": 1,
- "mfree": 31280,
- "mtotal": 32763,
- "name": "www.example.com",
- "tags": [],
- "mnode": 512,
- "dtotal": 5246208,
- "sinst_cnt": 2,
- "dfree": 5171712
- },
- ...
- ]
-
- """
- op = ganeti.opcodes.OpQueryNodes(output_fields=["name"], names=[])
- nodeslist = baserlib.ExtractField(ganeti.cli.SubmitOpCode(op), 0)
-
- if 'bulk' in self.queryargs:
- op = ganeti.opcodes.OpQueryNodes(output_fields=N_FIELDS,
- names=nodeslist)
- result = ganeti.cli.SubmitOpCode(op)
- return baserlib.MapBulkFields(result, N_FIELDS)
-
- return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
- uri_fields=("id", "uri"))
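+
+    If the optional bool 'bulk' argument is provided and set to a true
+    value (i.e. '?bulk=1'), the output contains detailed information
+    about nodes as a list.
+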
+ """
+ client = baserlib.GetClient()
+
+ if self.useBulk():
+ bulkdata = client.QueryNodes([], N_FIELDS, False)
+ return baserlib.MapBulkFields(bulkdata, N_FIELDS)
+ else:
+ nodesdata = client.QueryNodes([], ["name"], False)
+ nodeslist = [row[0] for row in nodesdata]
+ return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
+ uri_fields=("id", "uri"))
+
+
+class R_2_nodes_name(baserlib.R_Generic):
+ """/2/nodes/[node_name] resources.
+
+ """
+ def GET(self):
+    """Returns information about a node.
+
+ """
+ node_name = self.items[0]
+ client = baserlib.GetClient()
+
+ result = baserlib.HandleItemQueryErrors(client.QueryNodes,
+ names=[node_name], fields=N_FIELDS,
+ use_locking=self.useLocking())
+
+ return baserlib.MapFields(N_FIELDS, result[0])
+
+
+class R_2_nodes_name_role(baserlib.R_Generic):
+ """ /2/nodes/[node_name]/role resource.
+
+ """
+ def GET(self):
+ """Returns the current node role.
+
+ @return: Node role
+
+ """
+ node_name = self.items[0]
+ client = baserlib.GetClient()
+ result = client.QueryNodes(names=[node_name], fields=["role"],
+ use_locking=self.useLocking())
+
+ return _NR_MAP[result[0][0]]
+
+ def PUT(self):
+ """Sets the node role.
+
+ @return: a job id
+
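+    The request body must be a JSON-encoded string naming one of the
+    roles defined above, e.g. "drained" (illustrative).
+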
+ """
+ if not isinstance(self.request_body, basestring):
+ raise http.HttpBadRequest("Invalid body contents, not a string")
+
+ node_name = self.items[0]
+ role = self.request_body
+
+ if role == _NR_REGULAR:
+ candidate = False
+ offline = False
+ drained = False
+
+  elif role == _NR_MASTER_CANDIDATE:
+ candidate = True
+ offline = drained = None
+
+ elif role == _NR_DRAINED:
+ drained = True
+ candidate = offline = None
+
+ elif role == _NR_OFFLINE:
+ offline = True
+ candidate = drained = None
+
+ else:
+ raise http.HttpBadRequest("Can't set '%s' role" % role)
+
+ op = opcodes.OpSetNodeParams(node_name=node_name,
+ master_candidate=candidate,
+ offline=offline,
+ drained=drained,
+ force=bool(self.useForce()))
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_evacuate(baserlib.R_Generic):
+ """/2/nodes/[node_name]/evacuate resource.
+
+ """
+ def POST(self):
+ """Evacuate all secondary instances off a node.
+
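+    @return: a list of (job id, instance name, new secondary node) tuples,
+      with the job id set to None in dry-run mode
+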
+ """
+ node_name = self.items[0]
+ remote_node = self._checkStringVariable("remote_node", default=None)
+ iallocator = self._checkStringVariable("iallocator", default=None)
+ early_r = bool(self._checkIntVariable("early_release", default=0))
+ dry_run = bool(self.dryRun())
+
+ cl = baserlib.GetClient()
+
+ op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
+ iallocator=iallocator,
+ remote_node=remote_node)
+
+ job_id = baserlib.SubmitJob([op], cl)
+    # we use a custom feedback function, logging the status instead of
+    # printing it
+ result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
+
+ jobs = []
+ for iname, node in result:
+ if dry_run:
+ jid = None
+ else:
+ op = opcodes.OpReplaceDisks(instance_name=iname,
+ remote_node=node, disks=[],
+ mode=constants.REPLACE_DISK_CHG,
+ early_release=early_r)
+ jid = baserlib.SubmitJob([op])
+ jobs.append((jid, iname, node))
+
+ return jobs
+
+
+class R_2_nodes_name_migrate(baserlib.R_Generic):
+ """/2/nodes/[node_name]/migrate resource.
+
+ """
+ def POST(self):
+ """Migrate all primary instances from a node.
+
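+    The URI takes an optional live=[0|1] parameter, defaulting to 1
+    (live migration).
+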
+ """
+ node_name = self.items[0]
+ live = bool(self._checkIntVariable("live", default=1))
+
+ op = opcodes.OpMigrateNode(node_name=node_name, live=live)
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_storage(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage resource.
+
+ """
+ # LUQueryNodeStorage acquires locks, hence restricting access to GET
+ GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
+
+  def GET(self):
+    """Request a list of storage units on a node.
+
+    @return: a job id
+
+    """
+ node_name = self.items[0]
+
+ storage_type = self._checkStringVariable("storage_type", None)
+ if not storage_type:
+ raise http.HttpBadRequest("Missing the required 'storage_type'"
+ " parameter")
+
+ output_fields = self._checkStringVariable("output_fields", None)
+ if not output_fields:
+ raise http.HttpBadRequest("Missing the required 'output_fields'"
+ " parameter")
+
+ op = opcodes.OpQueryNodeStorage(nodes=[node_name],
+ storage_type=storage_type,
+ output_fields=output_fields.split(","))
+ return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_storage_modify(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage/modify resource.
+
+ """
+  def PUT(self):
+    """Modify a storage unit on a node.
+
+    @return: a job id
+
+    """
+ node_name = self.items[0]
+
+ storage_type = self._checkStringVariable("storage_type", None)
+ if not storage_type:
+ raise http.HttpBadRequest("Missing the required 'storage_type'"
+ " parameter")
+
+ name = self._checkStringVariable("name", None)
+ if not name:
+ raise http.HttpBadRequest("Missing the required 'name'"
+ " parameter")
+
+ changes = {}
+
+ if "allocatable" in self.queryargs:
+ changes[constants.SF_ALLOCATABLE] = \
+ bool(self._checkIntVariable("allocatable", default=1))
+
+ op = opcodes.OpModifyNodeStorage(node_name=node_name,
+ storage_type=storage_type,
+ name=name,
+ changes=changes)
+ return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_storage_repair(baserlib.R_Generic):
+  """/2/nodes/[node_name]/storage/repair resource.
+
+ """
+  def PUT(self):
+    """Repair a storage unit on a node.
+
+    @return: a job id
+
+    """
+ node_name = self.items[0]
+
+ storage_type = self._checkStringVariable("storage_type", None)
+ if not storage_type:
+ raise http.HttpBadRequest("Missing the required 'storage_type'"
+ " parameter")
+
+ name = self._checkStringVariable("name", None)
+ if not name:
+ raise http.HttpBadRequest("Missing the required 'name'"
+ " parameter")
+
+ op = opcodes.OpRepairNodeStorage(node_name=node_name,
+ storage_type=storage_type,
+ name=name)
+ return baserlib.SubmitJob([op])
+
+
+def _ParseInstanceCreateRequestVersion1(data, dry_run):
+ """Parses an instance creation request version 1.
+
+ @rtype: L{opcodes.OpCreateInstance}
+ @return: Instance creation opcode
+
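+  Example request data (illustrative values only; the instance and OS
+  names are assumptions):
+
+    {"__version__": 1,
+     "mode": "create",
+     "name": "inst1.example.com",
+     "disk_template": "plain",
+     "disks": [{"size": 1024}],
+     "nics": [{}],
+     "os": "debian-etch"}
+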
+ """
+ # Disks
+ disks_input = baserlib.CheckParameter(data, "disks", exptype=list)
+
+ disks = []
+ for idx, i in enumerate(disks_input):
+ baserlib.CheckType(i, dict, "Disk %d specification" % idx)
+
+ # Size is mandatory
+ try:
+ size = i[constants.IDISK_SIZE]
+ except KeyError:
+ raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
+ " size" % idx)
+
+ disk = {
+ constants.IDISK_SIZE: size,
+ }
+
+ # Optional disk access mode
+ try:
+ disk_access = i[constants.IDISK_MODE]
+ except KeyError:
+ pass
+ else:
+ disk[constants.IDISK_MODE] = disk_access
+
+ disks.append(disk)
+
+ assert len(disks_input) == len(disks)
+
+ # Network interfaces
+ nics_input = baserlib.CheckParameter(data, "nics", exptype=list)
+
+ nics = []
+ for idx, i in enumerate(nics_input):
+ baserlib.CheckType(i, dict, "NIC %d specification" % idx)
+
+ nic = {}
+
+ for field in constants.INIC_PARAMS:
+ try:
+ value = i[field]
+ except KeyError:
+ continue
+
+ nic[field] = value
+
+ nics.append(nic)
+
+ assert len(nics_input) == len(nics)
+
+ # HV/BE parameters
+ hvparams = baserlib.CheckParameter(data, "hvparams", default={})
+ utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
+
+ beparams = baserlib.CheckParameter(data, "beparams", default={})
+ utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
+
+ return opcodes.OpCreateInstance(
+ mode=baserlib.CheckParameter(data, "mode"),
+ instance_name=baserlib.CheckParameter(data, "name"),
+ os_type=baserlib.CheckParameter(data, "os", default=None),
+ force_variant=baserlib.CheckParameter(data, "force_variant",
+ default=False),
+ pnode=baserlib.CheckParameter(data, "pnode", default=None),
+ snode=baserlib.CheckParameter(data, "snode", default=None),
+ disk_template=baserlib.CheckParameter(data, "disk_template"),
+ disks=disks,
+ nics=nics,
+ src_node=baserlib.CheckParameter(data, "src_node", default=None),
+ src_path=baserlib.CheckParameter(data, "src_path", default=None),
+ start=baserlib.CheckParameter(data, "start", default=True),
+ wait_for_sync=True,
+ ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
+ name_check=baserlib.CheckParameter(data, "name_check", default=True),
+ file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
+ default=None),
+ file_driver=baserlib.CheckParameter(data, "file_driver",
+ default=constants.FD_LOOP),
+ source_handshake=baserlib.CheckParameter(data, "source_handshake",
+ default=None),
+ source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
+ default=None),
+ source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
+ default=None),
+ iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
+ hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
+ hvparams=hvparams,
+ beparams=beparams,
+ dry_run=dry_run,
+ )
class R_2_instances(baserlib.R_Generic):
"""/2/instances resource.
"""
- DOC_URI = "/2/instances"
-
def GET(self):
"""Returns a list of all available instances.
- Returns:
- A dictionary with 'name' and 'uri' keys for each of them.
-
- Example: [
- {
- "name": "web.example.com",
- "uri": "\/instances\/web.example.com"
- },
- {
- "name": "mail.example.com",
- "uri": "\/instances\/mail.example.com"
- }]
-
- If the optional 'bulk' argument is provided and set to 'true'
- value (i.e '?bulk=1'), the output contains detailed
- information about instances as a list.
-
- Example: [
- {
- "status": "running",
- "bridge": "xen-br0",
- "name": "web.example.com",
- "tags": ["tag1", "tag2"],
- "admin_ram": 512,
- "sda_size": 20480,
- "pnode": "node1.example.com",
- "mac": "01:23:45:67:89:01",
- "sdb_size": 4096,
- "snodes": ["node2.example.com"],
- "disk_template": "drbd",
- "ip": null,
- "admin_state": true,
- "os": "debian-etch",
- "vcpus": 2,
- "oper_state": true
- },
- ...
- ]
-
- """
- op = ganeti.opcodes.OpQueryInstances(output_fields=["name"], names=[])
- instanceslist = baserlib.ExtractField(ganeti.cli.SubmitOpCode(op), 0)
-
- if 'bulk' in self.queryargs:
- op = ganeti.opcodes.OpQueryInstances(output_fields=I_FIELDS,
- names=instanceslist)
- result = ganeti.cli.SubmitOpCode(op)
- return baserlib.MapBulkFields(result, I_FIELDS)
-
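+
+    If the optional bool 'bulk' argument is provided and set to a true
+    value (i.e. '?bulk=1'), the output contains detailed information
+    about instances as a list.
+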
+ """
+ client = baserlib.GetClient()
+ use_locking = self.useLocking()
+ if self.useBulk():
+ bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
+ return baserlib.MapBulkFields(bulkdata, I_FIELDS)
else:
+ instancesdata = client.QueryInstances([], ["name"], use_locking)
+ instanceslist = [row[0] for row in instancesdata]
return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
uri_fields=("id", "uri"))
- def PUT(self):
+ def _ParseVersion0CreateRequest(self):
+ """Parses an instance creation request version 0.
+
+ Request data version 0 is deprecated and should not be used anymore.
+
+ @rtype: L{opcodes.OpCreateInstance}
+ @return: Instance creation opcode
+
+ """
+ # Do not modify anymore, request data version 0 is deprecated
+ beparams = baserlib.MakeParamsDict(self.request_body,
+ constants.BES_PARAMETERS)
+ hvparams = baserlib.MakeParamsDict(self.request_body,
+ constants.HVS_PARAMETERS)
+ fn = self.getBodyParameter
+
+ # disk processing
+ disk_data = fn('disks')
+ if not isinstance(disk_data, list):
+ raise http.HttpBadRequest("The 'disks' parameter should be a list")
+ disks = []
+ for idx, d in enumerate(disk_data):
+ if not isinstance(d, int):
+ raise http.HttpBadRequest("Disk %d specification wrong: should"
+ " be an integer" % idx)
+ disks.append({"size": d})
+
+ # nic processing (one nic only)
+ nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
+ if fn("ip", None) is not None:
+ nics[0]["ip"] = fn("ip")
+ if fn("mode", None) is not None:
+ nics[0]["mode"] = fn("mode")
+ if fn("link", None) is not None:
+ nics[0]["link"] = fn("link")
+ if fn("bridge", None) is not None:
+ nics[0]["bridge"] = fn("bridge")
+
+ # Do not modify anymore, request data version 0 is deprecated
+ return opcodes.OpCreateInstance(
+ mode=constants.INSTANCE_CREATE,
+ instance_name=fn('name'),
+ disks=disks,
+ disk_template=fn('disk_template'),
+ os_type=fn('os'),
+ pnode=fn('pnode', None),
+ snode=fn('snode', None),
+ iallocator=fn('iallocator', None),
+ nics=nics,
+ start=fn('start', True),
+ ip_check=fn('ip_check', True),
+ name_check=fn('name_check', True),
+ wait_for_sync=True,
+ hypervisor=fn('hypervisor', None),
+ hvparams=hvparams,
+ beparams=beparams,
+ file_storage_dir=fn('file_storage_dir', None),
+ file_driver=fn('file_driver', constants.FD_LOOP),
+ dry_run=bool(self.dryRun()),
+ )
+
+ def POST(self):
"""Create an instance.
- Returns:
- A job id.
-
- """
- opts = self.req.request_post_data
-
- beparams = baserlib.MakeParamsDict(opts, constants.BES_PARAMETERS)
- hvparams = baserlib.MakeParamsDict(opts, constants.HVS_PARAMETERS)
-
- op = ganeti.opcodes.OpCreateInstance(
- instance_name=opts.get('name'),
- disk_size=opts.get('size', 20 * 1024),
- swap_size=opts.get('swap', 4 * 1024),
- disk_template=opts.get('disk_template', None),
- mode=constants.INSTANCE_CREATE,
- os_type=opts.get('os'),
- pnode=opts.get('pnode'),
- snode=opts.get('snode'),
- ip=opts.get('ip', 'none'),
- bridge=opts.get('bridge', None),
- start=opts.get('start', True),
- ip_check=opts.get('ip_check', True),
- wait_for_sync=opts.get('wait_for_sync', True),
- mac=opts.get('mac', 'auto'),
- hypervisor=opts.get('hypervisor', None),
- hvparams=hvparams,
- beparams=beparams,
- iallocator=opts.get('iallocator', None),
- file_storage_dir=opts.get('file_storage_dir', None),
- file_driver=opts.get('file_driver', 'loop'),
- )
-
- job_id = ganeti.cli.SendJob([op])
- return job_id
+ @return: a job id
+ """
+ if not isinstance(self.request_body, dict):
+ raise http.HttpBadRequest("Invalid body contents, not a dictionary")
-class R_2_instances_name_reboot(baserlib.R_Generic):
- """/2/instances/[instance_name]/reboot resource.
+ # Default to request data version 0
+ data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
- Implements an instance reboot.
+ if data_version == 0:
+ op = self._ParseVersion0CreateRequest()
+ elif data_version == 1:
+ op = _ParseInstanceCreateRequestVersion1(self.request_body,
+ self.dryRun())
+ else:
+ raise http.HttpBadRequest("Unsupported request data version %s" %
+ data_version)
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name(baserlib.R_Generic):
+ """/2/instances/[instance_name] resources.
"""
+ def GET(self):
+    """Returns information about an instance.
+
+ """
+ client = baserlib.GetClient()
+ instance_name = self.items[0]
- DOC_URI = "/2/instances/[instance_name]/reboot"
+ result = baserlib.HandleItemQueryErrors(client.QueryInstances,
+ names=[instance_name],
+ fields=I_FIELDS,
+ use_locking=self.useLocking())
+ return baserlib.MapFields(I_FIELDS, result[0])
+
+ def DELETE(self):
+ """Delete an instance.
+
+ """
+ op = opcodes.OpRemoveInstance(instance_name=self.items[0],
+ ignore_failures=False,
+ dry_run=bool(self.dryRun()))
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_info(baserlib.R_Generic):
+ """/2/instances/[instance_name]/info resource.
+
+ """
def GET(self):
+ """Request detailed instance information.
+
+ """
+ instance_name = self.items[0]
+ static = bool(self._checkIntVariable("static", default=0))
+
+ op = opcodes.OpQueryInstanceData(instances=[instance_name],
+ static=static)
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_reboot(baserlib.R_Generic):
+ """/2/instances/[instance_name]/reboot resource.
+
+ Implements an instance reboot.
+
+ """
+ def POST(self):
"""Reboot an instance.
The URI takes type=[hard|soft|full] and
instance_name = self.items[0]
reboot_type = self.queryargs.get('type',
[constants.INSTANCE_REBOOT_HARD])[0]
- ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
- [False])[0])
- op = ganeti.opcodes.OpRebootInstance(
- instance_name=instance_name,
- reboot_type=reboot_type,
- ignore_secondaries=ignore_secondaries)
-
- job_id = ganeti.cli.SendJob([op])
+ ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
+ op = opcodes.OpRebootInstance(instance_name=instance_name,
+ reboot_type=reboot_type,
+ ignore_secondaries=ignore_secondaries,
+ dry_run=bool(self.dryRun()))
- return job_id
+ return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
Implements an instance startup.
"""
-
- DOC_URI = "/2/instances/[instance_name]/startup"
-
- def GET(self):
+ def PUT(self):
"""Startup an instance.
- The URI takes force=[False|True] parameter to start the instance if even if
- secondary disks are failing.
+    The URI takes a force=[0|1] parameter to start the instance even if
+    secondary disks are failing.
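+
+    Example (illustrative):
+      PUT /2/instances/inst1.example.com/startup?force=1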
"""
instance_name = self.items[0]
- force_startup = bool(self.queryargs.get('force', [False])[0])
- op = ganeti.opcodes.OpStartupInstance(instance_name=instance_name,
- force=force_startup)
+ force_startup = bool(self._checkIntVariable('force'))
+ op = opcodes.OpStartupInstance(instance_name=instance_name,
+ force=force_startup,
+ dry_run=bool(self.dryRun()))
- job_id = ganeti.cli.SendJob([op])
-
- return job_id
+ return baserlib.SubmitJob([op])
class R_2_instances_name_shutdown(baserlib.R_Generic):
Implements an instance shutdown.
"""
+ def PUT(self):
+ """Shutdown an instance.
- DOC_URI = "/2/instances/[instance_name]/shutdown"
+ """
+ instance_name = self.items[0]
+ op = opcodes.OpShutdownInstance(instance_name=instance_name,
+ dry_run=bool(self.dryRun()))
- def GET(self):
- """Shutdown an instance.
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_reinstall(baserlib.R_Generic):
+ """/2/instances/[instance_name]/reinstall resource.
+
+ Implements an instance reinstall.
+
+ """
+ def POST(self):
+ """Reinstall an instance.
+
+ The URI takes os=name and nostartup=[0|1] optional
+ parameters. By default, the instance will be started
+ automatically.
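+
+    Example (illustrative):
+      POST /2/instances/inst1.example.com/reinstall?os=debian-etch&nostartup=1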
"""
instance_name = self.items[0]
- op = ganeti.opcodes.OpShutdownInstance(instance_name=instance_name)
+ ostype = self._checkStringVariable('os')
+ nostartup = self._checkIntVariable('nostartup')
+ ops = [
+ opcodes.OpShutdownInstance(instance_name=instance_name),
+      opcodes.OpReinstallInstance(instance_name=instance_name,
+                                  os_type=ostype),
+ ]
+ if not nostartup:
+ ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
+ force=False))
+ return baserlib.SubmitJob(ops)
- job_id = ganeti.cli.SendJob([op])
- return job_id
+class R_2_instances_name_replace_disks(baserlib.R_Generic):
+ """/2/instances/[instance_name]/replace-disks resource.
+ """
+ def POST(self):
+ """Replaces disks on an instance.
-class R_2_instances_name_tags(baserlib.R_Generic):
- """/2/instances/[instance_name]/tags resource.
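+
+    Query parameters (illustrative values): mode=replace_new_secondary,
+    disks=0,1 and either remote_node=node3.example.com or an iallocator.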
+ """
+ instance_name = self.items[0]
+ remote_node = self._checkStringVariable("remote_node", default=None)
+ mode = self._checkStringVariable("mode", default=None)
+ raw_disks = self._checkStringVariable("disks", default=None)
+ iallocator = self._checkStringVariable("iallocator", default=None)
+
+ if raw_disks:
+ try:
+ disks = [int(part) for part in raw_disks.split(",")]
+ except ValueError, err:
+ raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+ else:
+ disks = []
- Manages per-instance tags.
+ op = opcodes.OpReplaceDisks(instance_name=instance_name,
+ remote_node=remote_node,
+ mode=mode,
+ disks=disks,
+ iallocator=iallocator)
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_activate_disks(baserlib.R_Generic):
+ """/2/instances/[instance_name]/activate-disks resource.
+
+ """
+ def PUT(self):
+ """Activate disks for an instance.
+
+    The URI may take an ignore_size=[0|1] parameter to ignore the
+    currently recorded size.
+
+ """
+ instance_name = self.items[0]
+ ignore_size = bool(self._checkIntVariable('ignore_size'))
+
+ op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
+ ignore_size=ignore_size)
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
+ """/2/instances/[instance_name]/deactivate-disks resource.
+
+ """
+ def PUT(self):
+ """Deactivate disks for an instance.
+
+ """
+ instance_name = self.items[0]
+
+ op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_instances_name_prepare_export(baserlib.R_Generic):
+ """/2/instances/[instance_name]/prepare-export resource.
+
+ """
+ def PUT(self):
+ """Prepares an export for an instance.
+
+ @return: a job id
+
+ """
+ instance_name = self.items[0]
+ mode = self._checkStringVariable("mode")
+
+ op = opcodes.OpPrepareExport(instance_name=instance_name,
+ mode=mode)
+
+ return baserlib.SubmitJob([op])
+
+
+def _ParseExportInstanceRequest(name, data):
+ """Parses a request for an instance export.
+
+ @rtype: L{opcodes.OpExportInstance}
+ @return: Instance export opcode
+
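+  Example request data (illustrative values; the node name is an
+  assumption):
+
+    {"mode": "local",
+     "destination": "node2.example.com",
+     "shutdown": true}
+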
+ """
+ mode = baserlib.CheckParameter(data, "mode",
+ default=constants.EXPORT_MODE_LOCAL)
+ target_node = baserlib.CheckParameter(data, "destination")
+ shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
+ remove_instance = baserlib.CheckParameter(data, "remove_instance",
+ exptype=bool, default=False)
+ x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
+ destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
+ default=None)
+
+ return opcodes.OpExportInstance(instance_name=name,
+ mode=mode,
+ target_node=target_node,
+ shutdown=shutdown,
+ remove_instance=remove_instance,
+ x509_key_name=x509_key_name,
+ destination_x509_ca=destination_x509_ca)
+
+
+class R_2_instances_name_export(baserlib.R_Generic):
+ """/2/instances/[instance_name]/export resource.
"""
- DOC_URI = "/2/instances/[instance_name]/tags"
+ def PUT(self):
+ """Exports an instance.
+
+ @return: a job id
+
+ """
+ if not isinstance(self.request_body, dict):
+ raise http.HttpBadRequest("Invalid body contents, not a dictionary")
+
+ op = _ParseExportInstanceRequest(self.items[0], self.request_body)
+
+ return baserlib.SubmitJob([op])
+
+
+class _R_Tags(baserlib.R_Generic):
+  """ Quasi-class for tagging resources.
+
+ Manages tags. When inheriting this class you must define the
+ TAG_LEVEL for it.
+
+ """
+ TAG_LEVEL = None
+
+ def __init__(self, items, queryargs, req):
+ """A tag resource constructor.
+
+    We have to override the default to handle the cluster-naming special
+    case.
+
+ """
+ baserlib.R_Generic.__init__(self, items, queryargs, req)
+
+ if self.TAG_LEVEL != constants.TAG_CLUSTER:
+ self.name = items[0]
+ else:
+ self.name = ""
def GET(self):
- """Returns a list of instance tags.
+ """Returns a list of tags.
Example: ["tag1", "tag2", "tag3"]
"""
- return baserlib._Tags_GET(constants.TAG_INSTANCE, name=self.items[0])
+ # pylint: disable-msg=W0212
+ return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
- def POST(self):
- """Add a set of tags to the instance.
+ def PUT(self):
+ """Add a set of tags.
- The request as a list of strings should be POST to this URI. And you'll have
- back a job id.
+    The tags to add should be given via the 'tag' query parameter
+    (which may be repeated). You'll get back a job id.
"""
- return baserlib._Tags_POST(constants.TAG_INSTANCE,
- self.post_data, name=self.items[0])
+ # pylint: disable-msg=W0212
+ if 'tag' not in self.queryargs:
+      raise http.HttpBadRequest("Please specify tag(s) to add using the"
+                                " 'tag' parameter")
+ return baserlib._Tags_PUT(self.TAG_LEVEL,
+ self.queryargs['tag'], name=self.name,
+ dry_run=bool(self.dryRun()))
def DELETE(self):
"""Delete a tag.
- In order to delete a set of tags from a instance, DELETE request should be
- addressed to URI like: /2/instances/[instance_name]/tags?tag=[tag]&tag=[tag]
+    In order to delete a set of tags, the DELETE request should be
+    addressed to a URI like:
+    /tags?tag=[tag]&tag=[tag]
"""
+ # pylint: disable-msg=W0212
if 'tag' not in self.queryargs:
- # no we not gonna delete all tags from an instance
- raise http.HTTPNotImplemented()
- return baserlib._Tags_DELETE(constants.TAG_INSTANCE,
+      # no, we're not going to delete all tags
+ raise http.HttpBadRequest("Cannot delete all tags - please specify"
+ " tag(s) using the 'tag' parameter")
+ return baserlib._Tags_DELETE(self.TAG_LEVEL,
self.queryargs['tag'],
- name=self.items[0])
+ name=self.name,
+ dry_run=bool(self.dryRun()))
+
+
+class R_2_instances_name_tags(_R_Tags):
+ """ /2/instances/[instance_name]/tags resource.
+
+ Manages per-instance tags.
+
+ """
+ TAG_LEVEL = constants.TAG_INSTANCE
+
+
+class R_2_nodes_name_tags(_R_Tags):
+ """ /2/nodes/[node_name]/tags resource.
+
+ Manages per-node tags.
+
+ """
+ TAG_LEVEL = constants.TAG_NODE
+
+
+class R_2_tags(_R_Tags):
+  """ /2/tags resource.
+
+ Manages cluster tags.
+
+ """
+ TAG_LEVEL = constants.TAG_CLUSTER