# 02110-1301, USA.
-"""Remote API version 2 baserlib.library.
+"""Remote API resource implementations.
- PUT or POST?
- ============
+PUT or POST?
+============
- According to RFC2616 the main difference between PUT and POST is that
- POST can create new resources but PUT can only create the resource the
- URI was pointing to on the PUT request.
+According to RFC2616 the main difference between PUT and POST is that
+POST can create new resources but PUT can only create the resource the
+URI was pointing to on the PUT request.
- To be in context of this module for instance creation POST on
- /2/instances is legitim while PUT would be not, due to it does create a
- new entity and not just replace /2/instances with it.
+In the context of this module POST on ``/2/instances`` to change an existing
+entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
+new instance) with a name specified in the request.
- So when adding new methods, if they are operating on the URI entity itself,
- PUT should be prefered over POST.
+Quoting from RFC2616, section 9.6::
+
+ The fundamental difference between the POST and PUT requests is reflected in
+ the different meaning of the Request-URI. The URI in a POST request
+ identifies the resource that will handle the enclosed entity. That resource
+ might be a data-accepting process, a gateway to some other protocol, or a
+ separate entity that accepts annotations. In contrast, the URI in a PUT
+ request identifies the entity enclosed with the request -- the user agent
+ knows what URI is intended and the server MUST NOT attempt to apply the
+ request to some other resource. If the server desires that the request be
+ applied to a different URI, it MUST send a 301 (Moved Permanently) response;
+ the user agent MAY then make its own decision regarding whether or not to
+ redirect the request.
+
+So when adding new methods, if they are operating on the URI entity itself,
+PUT should be preferred over POST.
"""
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
# C0103: Invalid name, since the R_* names are not conforming
from ganeti import cli
from ganeti import rapi
from ganeti import ht
+from ganeti import compat
from ganeti.rapi import baserlib
"group.uuid",
] + _COMMON_FIELDS
-G_FIELDS = ["name", "uuid",
- "alloc_policy",
- "node_cnt", "node_list",
- "ctime", "mtime", "serial_no",
- ] # "tags" is missing to be able to use _COMMON_FIELDS here.
+G_FIELDS = [
+ "alloc_policy",
+ "name",
+ "node_cnt",
+ "node_list",
+ ] + _COMMON_FIELDS
+
+J_FIELDS_BULK = [
+ "id", "ops", "status", "summary",
+ "opstatus",
+ "received_ts", "start_ts", "end_ts",
+ ]
+
+J_FIELDS = J_FIELDS_BULK + [
+ "oplog",
+ "opresult",
+ ]
_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
+# Feature string for node migration version 1
+_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+
+# Feature string for node evacuation with LU-generated jobs
+_NODE_EVAC_RES1 = "node-evac-res1"
+
+ALL_FEATURES = frozenset([
+ _INST_CREATE_REQV1,
+ _INST_REINSTALL_REQV1,
+ _NODE_MIGRATE_REQV1,
+ _NODE_EVAC_RES1,
+ ])
+
# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
"""Returns list of optional RAPI features implemented.
"""
- return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
+ return list(ALL_FEATURES)
class R_2_os(baserlib.R_Generic):
"""/2/jobs resource.
"""
- @staticmethod
- def GET():
+ def GET(self):
"""Returns a dictionary of jobs.
@return: a dictionary with jobs id and uri.
"""
- fields = ["id"]
- cl = baserlib.GetClient()
- # Convert the list of lists to the list of ids
- result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
- return baserlib.BuildUriList(result, "/2/jobs/%s",
- uri_fields=("id", "uri"))
+ client = baserlib.GetClient()
+
+ if self.useBulk():
+ bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
+ return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
+ else:
+ jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
+ return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
+ uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
- opresult: OpCodes results as a list of lists
"""
- fields = ["id", "ops", "status", "summary",
- "opstatus", "opresult", "oplog",
- "received_ts", "start_ts", "end_ts",
- ]
job_id = self.items[0]
- result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
+ result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
if result is None:
raise http.HttpNotFound()
- return baserlib.MapFields(fields, result)
+ return baserlib.MapFields(J_FIELDS, result)
def DELETE(self):
"""Cancel not-yet-started job.
node_name = self.items[0]
role = self.request_body
+ auto_promote = bool(self._checkIntVariable("auto-promote"))
+
if role == _NR_REGULAR:
candidate = False
offline = False
master_candidate=candidate,
offline=offline,
drained=drained,
+ auto_promote=auto_promote,
force=bool(self.useForce()))
return baserlib.SubmitJob([op])
"""
def POST(self):
- """Evacuate all secondary instances off a node.
+ """Evacuate all instances off a node.
"""
- node_name = self.items[0]
- remote_node = self._checkStringVariable("remote_node", default=None)
- iallocator = self._checkStringVariable("iallocator", default=None)
- early_r = bool(self._checkIntVariable("early_release", default=0))
- dry_run = bool(self.dryRun())
-
- cl = baserlib.GetClient()
-
- op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
- iallocator=iallocator,
- remote_node=remote_node)
-
- job_id = baserlib.SubmitJob([op], cl)
- # we use custom feedback function, instead of print we log the status
- result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
-
- jobs = []
- for iname, node in result[0]:
- if dry_run:
- jid = None
- else:
- op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
- remote_node=node, disks=[],
- mode=constants.REPLACE_DISK_CHG,
- early_release=early_r)
- jid = baserlib.SubmitJob([op])
- jobs.append((jid, iname, node))
+ op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
+ "node_name": self.items[0],
+ "dry_run": self.dryRun(),
+ })
- return jobs
+ return baserlib.SubmitJob([op])
class R_2_nodes_name_migrate(baserlib.R_Generic):
"""
node_name = self.items[0]
- if "live" in self.queryargs and "mode" in self.queryargs:
- raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
- " be passed")
- elif "live" in self.queryargs:
- if self._checkIntVariable("live", default=1):
- mode = constants.HT_MIGRATION_LIVE
+ if self.queryargs:
+ # Support old-style requests
+ if "live" in self.queryargs and "mode" in self.queryargs:
+ raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
+ " be passed")
+
+ if "live" in self.queryargs:
+ if self._checkIntVariable("live", default=1):
+ mode = constants.HT_MIGRATION_LIVE
+ else:
+ mode = constants.HT_MIGRATION_NONLIVE
else:
- mode = constants.HT_MIGRATION_NONLIVE
+ mode = self._checkStringVariable("mode", default=None)
+
+ data = {
+ "mode": mode,
+ }
else:
- mode = self._checkStringVariable("mode", default=None)
+ data = self.request_body
- op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)
+ op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
+ "node_name": node_name,
+ })
+
+ return baserlib.SubmitJob([op])
+
+
+class R_2_nodes_name_modify(baserlib.R_Generic):
+ """/2/nodes/[node_name]/modify resource.
+
+ """
+ def POST(self):
+ """Changes parameters of a node.
+
+ @return: a job id
+
+ """
+ baserlib.CheckType(self.request_body, dict, "Body contents")
+
+ op = baserlib.FillOpcode(opcodes.OpNodeSetParams, self.request_body, {
+ "node_name": self.items[0],
+ })
return baserlib.SubmitJob([op])
})
-
class R_2_groups_name_modify(baserlib.R_Generic):
"""/2/groups/[group_name]/modify resource.
"""
instance_name = self.items[0]
- reboot_type = self.queryargs.get('type',
+ reboot_type = self.queryargs.get("type",
[constants.INSTANCE_REBOOT_HARD])[0]
- ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
+ ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
op = opcodes.OpInstanceReboot(instance_name=instance_name,
reboot_type=reboot_type,
ignore_secondaries=ignore_secondaries,
"""
instance_name = self.items[0]
- force_startup = bool(self._checkIntVariable('force'))
+ force_startup = bool(self._checkIntVariable("force"))
+ no_remember = bool(self._checkIntVariable("no_remember"))
op = opcodes.OpInstanceStartup(instance_name=instance_name,
force=force_startup,
- dry_run=bool(self.dryRun()))
+ dry_run=bool(self.dryRun()),
+ no_remember=no_remember)
return baserlib.SubmitJob([op])
+def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
+ """Parses a request for an instance shutdown.
+
+ @rtype: L{opcodes.OpInstanceShutdown}
+ @return: Instance shutdown opcode
+
+ """
+ return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
+ "instance_name": name,
+ "dry_run": dry_run,
+ "no_remember": no_remember,
+ })
+
+
class R_2_instances_name_shutdown(baserlib.R_Generic):
"""/2/instances/[instance_name]/shutdown resource.
def PUT(self):
"""Shutdown an instance.
+ @return: a job id
+
"""
- instance_name = self.items[0]
- op = opcodes.OpInstanceShutdown(instance_name=instance_name,
- dry_run=bool(self.dryRun()))
+ no_remember = bool(self._checkIntVariable("no_remember"))
+ op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
+ bool(self.dryRun()), no_remember)
return baserlib.SubmitJob([op])
# Parse disks
try:
- raw_disks = data["disks"]
+ raw_disks = data.pop("disks")
except KeyError:
pass
else:
- if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
- # Backwards compatibility for strings of the format "1, 2, 3"
- try:
- data["disks"] = [int(part) for part in raw_disks.split(",")]
- except (TypeError, ValueError), err:
- raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+ if raw_disks:
+ if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
+ data["disks"] = raw_disks
+ else:
+ # Backwards compatibility for strings of the format "1, 2, 3"
+ try:
+ data["disks"] = [int(part) for part in raw_disks.split(",")]
+ except (TypeError, ValueError), err:
+ raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
"""Replaces disks on an instance.
"""
- op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
+ if self.request_body:
+ body = self.request_body
+ elif self.queryargs:
+ # Legacy interface, do not modify/extend
+ body = {
+ "remote_node": self._checkStringVariable("remote_node", default=None),
+ "mode": self._checkStringVariable("mode", default=None),
+ "disks": self._checkStringVariable("disks", default=None),
+ "iallocator": self._checkStringVariable("iallocator", default=None),
+ }
+ else:
+ body = {}
+
+ op = _ParseInstanceReplaceDisksRequest(self.items[0], body)
return baserlib.SubmitJob([op])
"""
instance_name = self.items[0]
- ignore_size = bool(self._checkIntVariable('ignore_size'))
+ ignore_size = bool(self._checkIntVariable("ignore_size"))
op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
ignore_size=ignore_size)
return baserlib.SubmitJob([op])
+class R_2_instances_name_failover(baserlib.R_Generic):
+ """/2/instances/[instance_name]/failover resource.
+
+ """
+ def PUT(self):
+ """Does a failover of an instance.
+
+ @return: a job id
+
+ """
+ baserlib.CheckType(self.request_body, dict, "Body contents")
+
+ op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
+ "instance_name": self.items[0],
+ })
+
+ return baserlib.SubmitJob([op])
+
+
def _ParseRenameInstanceRequest(name, data):
"""Parses a request for renaming an instance.
Example: ["tag1", "tag2", "tag3"]
"""
- # pylint: disable-msg=W0212
+ # pylint: disable=W0212
return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
def PUT(self):
you'll have back a job id.
"""
- # pylint: disable-msg=W0212
- if 'tag' not in self.queryargs:
+ # pylint: disable=W0212
+ if "tag" not in self.queryargs:
raise http.HttpBadRequest("Please specify tag(s) to add using the"
" the 'tag' parameter")
return baserlib._Tags_PUT(self.TAG_LEVEL,
- self.queryargs['tag'], name=self.name,
+ self.queryargs["tag"], name=self.name,
dry_run=bool(self.dryRun()))
def DELETE(self):
/tags?tag=[tag]&tag=[tag]
"""
- # pylint: disable-msg=W0212
- if 'tag' not in self.queryargs:
+ # pylint: disable=W0212
+ if "tag" not in self.queryargs:
# no we not gonna delete all tags
raise http.HttpBadRequest("Cannot delete all tags - please specify"
" tag(s) using the 'tag' parameter")
return baserlib._Tags_DELETE(self.TAG_LEVEL,
- self.queryargs['tag'],
+ self.queryargs["tag"],
name=self.name,
dry_run=bool(self.dryRun()))
TAG_LEVEL = constants.TAG_NODE
+class R_2_groups_name_tags(_R_Tags):
+ """ /2/groups/[group_name]/tags resource.
+
+ Manages per-nodegroup tags.
+
+ """
+ TAG_LEVEL = constants.TAG_NODEGROUP
+
+
class R_2_tags(_R_Tags):
""" /2/tags resource.