Fix a bug in command line option parsing code
diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py
index e45057e..ab89270 100644
 # 02110-1301, USA.
 
 
-"""Remote API version 2 baserlib.library.
+"""Remote API resource implementations.
 
-  PUT or POST?
-  ============
+PUT or POST?
+============
 
-  According to RFC2616 the main difference between PUT and POST is that
-  POST can create new resources but PUT can only create the resource the
-  URI was pointing to on the PUT request.
+According to RFC2616 the main difference between PUT and POST is that
+POST can create new resources but PUT can only create the resource the
+URI was pointing to on the PUT request.
 
-  To be in context of this module for instance creation POST on
-  /2/instances is legitim while PUT would be not, due to it does create a
-  new entity and not just replace /2/instances with it.
+In the context of this module POST on ``/2/instances`` to change an existing
+entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
+new instance) with a name specified in the request.
 
-  So when adding new methods, if they are operating on the URI entity itself,
-  PUT should be prefered over POST.
+Quoting from RFC2616, section 9.6::
+
+  The fundamental difference between the POST and PUT requests is reflected in
+  the different meaning of the Request-URI. The URI in a POST request
+  identifies the resource that will handle the enclosed entity. That resource
+  might be a data-accepting process, a gateway to some other protocol, or a
+  separate entity that accepts annotations. In contrast, the URI in a PUT
+  request identifies the entity enclosed with the request -- the user agent
+  knows what URI is intended and the server MUST NOT attempt to apply the
+  request to some other resource. If the server desires that the request be
+  applied to a different URI, it MUST send a 301 (Moved Permanently) response;
+  the user agent MAY then make its own decision regarding whether or not to
+  redirect the request.
+
+So when adding new methods that operate on the URI entity itself, PUT should
+be preferred over POST.
 
 """
 
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
 
 # C0103: Invalid name, since the R_* names are not conforming
 
@@ -47,6 +61,7 @@ from ganeti import constants
 from ganeti import cli
 from ganeti import rapi
 from ganeti import ht
+from ganeti import compat
 from ganeti.rapi import baserlib
 
 
@@ -73,11 +88,23 @@ N_FIELDS = ["name", "offline", "master_candidate", "drained",
             "group.uuid",
             ] + _COMMON_FIELDS
 
-G_FIELDS = ["name", "uuid",
-            "alloc_policy",
-            "node_cnt", "node_list",
-            "ctime", "mtime", "serial_no",
-            ]  # "tags" is missing to be able to use _COMMON_FIELDS here.
+G_FIELDS = [
+  "alloc_policy",
+  "name",
+  "node_cnt",
+  "node_list",
+  ] + _COMMON_FIELDS
+
+J_FIELDS_BULK = [
+  "id", "ops", "status", "summary",
+  "opstatus",
+  "received_ts", "start_ts", "end_ts",
+  ]
+
+J_FIELDS = J_FIELDS_BULK + [
+  "oplog",
+  "opresult",
+  ]
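
The bulk job listing pairs these field names with one result row per job; a
rough sketch of that mapping (the row values are invented and the real
conversion is done by the baserlib helpers):

    # Illustrative only: zipping J_FIELDS_BULK with one QueryJobs row
    # yields the per-job dictionary exposed by the bulk job listing.
    row = [1234, [], "success", ["INSTANCE_REBOOT(inst1.example.com)"],
           ["success"], [1317235429, 0], [1317235430, 0], [1317235432, 0]]
    job = dict(zip(J_FIELDS_BULK, row))
    assert job["id"] == 1234 and job["status"] == "success"
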
 
 _NR_DRAINED = "drained"
 _NR_MASTER_CANDIATE = "master-candidate"
@@ -104,6 +131,19 @@ _INST_CREATE_REQV1 = "instance-create-reqv1"
 # Feature string for instance reinstall request version 1
 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
 
+# Feature string for node migration version 1
+_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
+
+# Feature string for node evacuation with LU-generated jobs
+_NODE_EVAC_RES1 = "node-evac-res1"
+
+ALL_FEATURES = frozenset([
+  _INST_CREATE_REQV1,
+  _INST_REINSTALL_REQV1,
+  _NODE_MIGRATE_REQV1,
+  _NODE_EVAC_RES1,
+  ])
+
 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
 _WFJC_TIMEOUT = 10
 
@@ -145,7 +185,7 @@ class R_2_features(baserlib.R_Generic):
     """Returns list of optional RAPI features implemented.
 
     """
-    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
+    return list(ALL_FEATURES)
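
Clients can probe this list before relying on newer request formats; a minimal
sketch (host and port are illustrative, authentication omitted):

    import json
    import httplib

    conn = httplib.HTTPSConnection("master.example.com", 5080)
    conn.request("GET", "/2/features")
    features = json.loads(conn.getresponse().read())

    # "node-migrate-reqv1" is the literal value of _NODE_MIGRATE_REQV1 above.
    use_body_for_migrate = "node-migrate-reqv1" in features
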
 
 
 class R_2_os(baserlib.R_Generic):
@@ -210,19 +250,21 @@ class R_2_jobs(baserlib.R_Generic):
   """/2/jobs resource.
 
   """
-  @staticmethod
-  def GET():
+  def GET(self):
     """Returns a dictionary of jobs.
 
     @return: a dictionary with jobs id and uri.
 
     """
-    fields = ["id"]
-    cl = baserlib.GetClient()
-    # Convert the list of lists to the list of ids
-    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
-    return baserlib.BuildUriList(result, "/2/jobs/%s",
-                                 uri_fields=("id", "uri"))
+    client = baserlib.GetClient()
+
+    if self.useBulk():
+      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
+      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
+    else:
+      jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
+      return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
+                                   uri_fields=("id", "uri"))
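
The two branches produce differently shaped responses; roughly (values
invented, assuming the usual ``bulk=1`` query parameter checked by useBulk):

    # GET /2/jobs -- ID/URI pairs built by BuildUriList
    jobs_list = [{"id": "1234", "uri": "/2/jobs/1234"}]

    # GET /2/jobs?bulk=1 -- one dictionary per job, keyed by J_FIELDS_BULK
    jobs_bulk = [{"id": 1234, "ops": [], "status": "success",
                  "summary": ["INSTANCE_REBOOT(inst1.example.com)"],
                  "opstatus": ["success"], "received_ts": [1317235429, 0],
                  "start_ts": [1317235430, 0], "end_ts": [1317235432, 0]}]
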
 
 
 class R_2_jobs_id(baserlib.R_Generic):
@@ -242,15 +284,11 @@ class R_2_jobs_id(baserlib.R_Generic):
             - opresult: OpCodes results as a list of lists
 
     """
-    fields = ["id", "ops", "status", "summary",
-              "opstatus", "opresult", "oplog",
-              "received_ts", "start_ts", "end_ts",
-              ]
     job_id = self.items[0]
-    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
+    result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
     if result is None:
       raise http.HttpNotFound()
-    return baserlib.MapFields(fields, result)
+    return baserlib.MapFields(J_FIELDS, result)
 
   def DELETE(self):
     """Cancel not-yet-started job.
@@ -377,6 +415,8 @@ class R_2_nodes_name_role(baserlib.R_Generic):
     node_name = self.items[0]
     role = self.request_body
 
+    auto_promote = bool(self._checkIntVariable("auto-promote"))
+
     if role == _NR_REGULAR:
       candidate = False
       offline = False
@@ -401,6 +441,7 @@ class R_2_nodes_name_role(baserlib.R_Generic):
                                  master_candidate=candidate,
                                  offline=offline,
                                  drained=drained,
+                                 auto_promote=auto_promote,
                                  force=bool(self.useForce()))
 
     return baserlib.SubmitJob([op])
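
For illustration, a request demoting a node to a regular role while letting
the cluster auto-promote a replacement master candidate could look like this
(host, port and node name are invented, authentication omitted; the body is
the JSON-encoded role string):

    import json
    import httplib

    conn = httplib.HTTPSConnection("master.example.com", 5080)

    # Role in the body, "auto-promote" (and optionally "force") as query args.
    conn.request("PUT", "/2/nodes/node1.example.com/role?auto-promote=1",
                 json.dumps("regular"))
    job_id = json.loads(conn.getresponse().read())
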
@@ -411,38 +452,15 @@ class R_2_nodes_name_evacuate(baserlib.R_Generic):
 
   """
   def POST(self):
-    """Evacuate all secondary instances off a node.
+    """Evacuate all instances off a node.
 
     """
-    node_name = self.items[0]
-    remote_node = self._checkStringVariable("remote_node", default=None)
-    iallocator = self._checkStringVariable("iallocator", default=None)
-    early_r = bool(self._checkIntVariable("early_release", default=0))
-    dry_run = bool(self.dryRun())
-
-    cl = baserlib.GetClient()
-
-    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
-                                    iallocator=iallocator,
-                                    remote_node=remote_node)
-
-    job_id = baserlib.SubmitJob([op], cl)
-    # we use custom feedback function, instead of print we log the status
-    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
-
-    jobs = []
-    for iname, node in result[0]:
-      if dry_run:
-        jid = None
-      else:
-        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
-                                            remote_node=node, disks=[],
-                                            mode=constants.REPLACE_DISK_CHG,
-                                            early_release=early_r)
-        jid = baserlib.SubmitJob([op])
-      jobs.append((jid, iname, node))
+    op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
+      "node_name": self.items[0],
+      "dry_run": self.dryRun(),
+      })
 
-    return jobs
+    return baserlib.SubmitJob([op])
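
FillOpcode, used here and in several resources below, merges the
client-supplied body with forced server-side values before instantiating the
opcode. In spirit it behaves like this simplified sketch (the real helper also
verifies that the body is a dictionary and that every key is a valid parameter
of the opcode):

    def _FillOpcodeSketch(opcls, body, static):
      # Illustrative only: client-supplied parameters first, forced
      # server-side values (e.g. the name taken from the URI) win.
      params = dict(body or {})
      params.update(static or {})
      return opcls(**params)
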
 
 
 class R_2_nodes_name_migrate(baserlib.R_Generic):
@@ -455,18 +473,48 @@ class R_2_nodes_name_migrate(baserlib.R_Generic):
     """
     node_name = self.items[0]
 
-    if "live" in self.queryargs and "mode" in self.queryargs:
-      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
-                                " be passed")
-    elif "live" in self.queryargs:
-      if self._checkIntVariable("live", default=1):
-        mode = constants.HT_MIGRATION_LIVE
+    if self.queryargs:
+      # Support old-style requests
+      if "live" in self.queryargs and "mode" in self.queryargs:
+        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
+                                  " be passed")
+
+      if "live" in self.queryargs:
+        if self._checkIntVariable("live", default=1):
+          mode = constants.HT_MIGRATION_LIVE
+        else:
+          mode = constants.HT_MIGRATION_NONLIVE
       else:
-        mode = constants.HT_MIGRATION_NONLIVE
+        mode = self._checkStringVariable("mode", default=None)
+
+      data = {
+        "mode": mode,
+        }
     else:
-      mode = self._checkStringVariable("mode", default=None)
+      data = self.request_body
 
-    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)
+    op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
+      "node_name": node_name,
+      })
+
+    return baserlib.SubmitJob([op])
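
Both request styles end up as the same OpNodeMigrate opcode; a client-side
sketch for comparison (host, node name and mode value are illustrative,
authentication omitted):

    import json
    import httplib

    conn = httplib.HTTPSConnection("master.example.com", 5080)

    # Old style: parameters in the query string, empty body.
    conn.request("POST", "/2/nodes/node1.example.com/migrate?live=0", "")
    conn.getresponse().read()

    # New style: OpNodeMigrate parameters as a JSON body.
    conn.request("POST", "/2/nodes/node1.example.com/migrate",
                 json.dumps({"mode": "non-live"}))
    conn.getresponse().read()
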
+
+
+class R_2_nodes_name_modify(baserlib.R_Generic):
+  """/2/nodes/[node_name]/modify resource.
+
+  """
+  def POST(self):
+    """Changes parameters of a node.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    op = baserlib.FillOpcode(opcodes.OpNodeSetParams, self.request_body, {
+      "node_name": self.items[0],
+      })
 
     return baserlib.SubmitJob([op])
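
The body is handed straight to OpNodeSetParams with the node name forced from
the URI; a request might look like this (parameter values invented,
authentication omitted):

    import json
    import httplib

    conn = httplib.HTTPSConnection("master.example.com", 5080)
    body = json.dumps({"master_candidate": True, "auto_promote": True})
    conn.request("POST", "/2/nodes/node1.example.com/modify", body)
    job_id = json.loads(conn.getresponse().read())
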
 
@@ -638,7 +686,6 @@ def _ParseModifyGroupRequest(name, data):
     })
 
 
-
 class R_2_groups_name_modify(baserlib.R_Generic):
   """/2/groups/[group_name]/modify resource.
 
@@ -836,9 +883,9 @@ class R_2_instances_name_reboot(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    reboot_type = self.queryargs.get('type',
+    reboot_type = self.queryargs.get("type",
                                      [constants.INSTANCE_REBOOT_HARD])[0]
-    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
+    ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
     op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                   reboot_type=reboot_type,
                                   ignore_secondaries=ignore_secondaries,
@@ -861,14 +908,30 @@ class R_2_instances_name_startup(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    force_startup = bool(self._checkIntVariable('force'))
+    force_startup = bool(self._checkIntVariable("force"))
+    no_remember = bool(self._checkIntVariable("no_remember"))
     op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                    force=force_startup,
-                                   dry_run=bool(self.dryRun()))
+                                   dry_run=bool(self.dryRun()),
+                                   no_remember=no_remember)
 
     return baserlib.SubmitJob([op])
 
 
+def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
+  """Parses a request for an instance shutdown.
+
+  @rtype: L{opcodes.OpInstanceShutdown}
+  @return: Instance shutdown opcode
+
+  """
+  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
+    "instance_name": name,
+    "dry_run": dry_run,
+    "no_remember": no_remember,
+    })
+
+
 class R_2_instances_name_shutdown(baserlib.R_Generic):
   """/2/instances/[instance_name]/shutdown resource.
 
@@ -878,10 +941,12 @@ class R_2_instances_name_shutdown(baserlib.R_Generic):
   def PUT(self):
     """Shutdown an instance.
 
+    @return: a job id
+
     """
-    instance_name = self.items[0]
-    op = opcodes.OpInstanceShutdown(instance_name=instance_name,
-                                    dry_run=bool(self.dryRun()))
+    no_remember = bool(self._checkIntVariable("no_remember"))
+    op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
+                                       bool(self.dryRun()), no_remember)
 
     return baserlib.SubmitJob([op])
 
@@ -956,16 +1021,19 @@ def _ParseInstanceReplaceDisksRequest(name, data):
 
   # Parse disks
   try:
-    raw_disks = data["disks"]
+    raw_disks = data.pop("disks")
   except KeyError:
     pass
   else:
-    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
-      # Backwards compatibility for strings of the format "1, 2, 3"
-      try:
-        data["disks"] = [int(part) for part in raw_disks.split(",")]
-      except (TypeError, ValueError), err:
-        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
+    if raw_disks:
+      if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
+        data["disks"] = raw_disks
+      else:
+        # Backwards compatibility for strings of the format "1, 2, 3"
+        try:
+          data["disks"] = [int(part) for part in raw_disks.split(",")]
+        except (TypeError, ValueError), err:
+          raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
 
   return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
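
The fallback keeps old clients working: both a proper list of indices and the
legacy comma-separated string are accepted. A standalone sketch of just that
conversion (validation and HTTP error handling trimmed):

    def _ParseDisksSketch(raw_disks):
      # Illustrative only: pass real lists through, split legacy strings.
      if isinstance(raw_disks, list):
        return raw_disks
      return [int(part) for part in raw_disks.split(",")]

    assert _ParseDisksSketch([0, 2]) == [0, 2]
    assert _ParseDisksSketch("0, 2") == [0, 2]
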
 
@@ -978,7 +1046,20 @@ class R_2_instances_name_replace_disks(baserlib.R_Generic):
     """Replaces disks on an instance.
 
     """
-    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
+    if self.request_body:
+      body = self.request_body
+    elif self.queryargs:
+      # Legacy interface, do not modify/extend
+      body = {
+        "remote_node": self._checkStringVariable("remote_node", default=None),
+        "mode": self._checkStringVariable("mode", default=None),
+        "disks": self._checkStringVariable("disks", default=None),
+        "iallocator": self._checkStringVariable("iallocator", default=None),
+        }
+    else:
+      body = {}
+
+    op = _ParseInstanceReplaceDisksRequest(self.items[0], body)
 
     return baserlib.SubmitJob([op])
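
For the legacy interface each query argument becomes an entry in the same body
dictionary that a modern JSON request would send directly; conceptually
(argument names follow the code above, values are invented):

    # Legacy query string: ?mode=replace_auto&disks=0,2&iallocator=hail
    legacy_body = {
      "remote_node": None,
      "mode": "replace_auto",
      "disks": "0,2",          # still a string; split by the parser above
      "iallocator": "hail",
      }

    # Equivalent modern JSON body:
    modern_body = {"mode": "replace_auto", "disks": [0, 2],
                   "iallocator": "hail"}
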
 
@@ -994,7 +1075,7 @@ class R_2_instances_name_activate_disks(baserlib.R_Generic):
 
     """
     instance_name = self.items[0]
-    ignore_size = bool(self._checkIntVariable('ignore_size'))
+    ignore_size = bool(self._checkIntVariable("ignore_size"))
 
     op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                          ignore_size=ignore_size)
@@ -1101,6 +1182,25 @@ class R_2_instances_name_migrate(baserlib.R_Generic):
     return baserlib.SubmitJob([op])
 
 
+class R_2_instances_name_failover(baserlib.R_Generic):
+  """/2/instances/[instance_name]/failover resource.
+
+  """
+  def PUT(self):
+    """Does a failover of an instance.
+
+    @return: a job id
+
+    """
+    baserlib.CheckType(self.request_body, dict, "Body contents")
+
+    op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
+      "instance_name": self.items[0],
+      })
+
+    return baserlib.SubmitJob([op])
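
A failover request carries OpInstanceFailover parameters in its body, with the
instance name forced from the URI; a client-side sketch (names and the
parameter shown are illustrative, authentication omitted):

    import json
    import httplib

    conn = httplib.HTTPSConnection("master.example.com", 5080)
    body = json.dumps({"ignore_consistency": False})
    conn.request("PUT", "/2/instances/inst1.example.com/failover", body)
    job_id = json.loads(conn.getresponse().read())
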
+
+
 def _ParseRenameInstanceRequest(name, data):
   """Parses a request for renaming an instance.
 
@@ -1304,7 +1404,7 @@ class _R_Tags(baserlib.R_Generic):
     Example: ["tag1", "tag2", "tag3"]
 
     """
-    # pylint: disable-msg=W0212
+    # pylint: disable=W0212
     return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
 
   def PUT(self):
@@ -1314,12 +1414,12 @@ class _R_Tags(baserlib.R_Generic):
     you'll have back a job id.
 
     """
-    # pylint: disable-msg=W0212
-    if 'tag' not in self.queryargs:
+    # pylint: disable=W0212
+    if "tag" not in self.queryargs:
       raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                 " the 'tag' parameter")
     return baserlib._Tags_PUT(self.TAG_LEVEL,
-                              self.queryargs['tag'], name=self.name,
+                              self.queryargs["tag"], name=self.name,
                               dry_run=bool(self.dryRun()))
 
   def DELETE(self):
@@ -1330,13 +1430,13 @@ class _R_Tags(baserlib.R_Generic):
     /tags?tag=[tag]&tag=[tag]
 
     """
-    # pylint: disable-msg=W0212
-    if 'tag' not in self.queryargs:
+    # pylint: disable=W0212
+    if "tag" not in self.queryargs:
       # no, we are not going to delete all tags
       raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                 " tag(s) using the 'tag' parameter")
     return baserlib._Tags_DELETE(self.TAG_LEVEL,
-                                 self.queryargs['tag'],
+                                 self.queryargs["tag"],
                                  name=self.name,
                                  dry_run=bool(self.dryRun()))
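
Tags are passed as repeated ``tag`` query parameters rather than in a request
body; for illustration (host and tag names invented, authentication omitted):

    import httplib

    conn = httplib.HTTPSConnection("master.example.com", 5080)

    # Add two tags to an instance ...
    conn.request("PUT",
                 "/2/instances/inst1.example.com/tags?tag=web&tag=prod", "")
    conn.getresponse().read()

    # ... and remove one of them again.
    conn.request("DELETE", "/2/instances/inst1.example.com/tags?tag=prod")
    conn.getresponse().read()
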
 
@@ -1359,6 +1459,15 @@ class R_2_nodes_name_tags(_R_Tags):
   TAG_LEVEL = constants.TAG_NODE
 
 
+class R_2_groups_name_tags(_R_Tags):
+  """ /2/groups/[group_name]/tags resource.
+
+  Manages per-nodegroup tags.
+
+  """
+  TAG_LEVEL = constants.TAG_NODEGROUP
+
+
 class R_2_tags(_R_Tags):
   """ /2/tags resource.